repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
spacetelescope/pysynphot | pysynphot/locations.py | get_data_filename | def get_data_filename(filename):
"""Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory.
"""
global _data_map
if _data_map is None:
_data_map = {}
for root, dirs, files in os.walk(specdir):
for fname in files:
_data_map[fname] = os.path.join(root, fname)
if filename not in _data_map:
raise KeyError(filename + ' not found in ' + specdir)
return _data_map[filename] | python | def get_data_filename(filename):
"""Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory.
"""
global _data_map
if _data_map is None:
_data_map = {}
for root, dirs, files in os.walk(specdir):
for fname in files:
_data_map[fname] = os.path.join(root, fname)
if filename not in _data_map:
raise KeyError(filename + ' not found in ' + specdir)
return _data_map[filename] | [
"def",
"get_data_filename",
"(",
"filename",
")",
":",
"global",
"_data_map",
"if",
"_data_map",
"is",
"None",
":",
"_data_map",
"=",
"{",
"}",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"specdir",
")",
":",
"for",
"fname",... | Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory. | [
"Map",
"filename",
"to",
"its",
"actual",
"path",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/locations.py#L190-L214 | train | 31,800 |
spacetelescope/pysynphot | pysynphot/renorm.py | StdRenorm | def StdRenorm(spectrum, band, RNval, RNunitstring, force=False):
"""This is used by `~pysynphot.spectrum.SourceSpectrum` for
renormalization.
Parameters
----------
spectrum : `~pysynphot.spectrum.SourceSpectrum`
Spectrum to renormalize.
band, RNval, RNunitstring, force
See :meth:`~pysynphot.spectrum.SourceSpectrum.renorm`.
Returns
-------
newsp : `~pysynphot.spectrum.CompositeSourceSpectrum`
Renormalized spectrum.
"""
# Validate the overlap
if not force:
stat = band.check_overlap(spectrum)
if stat == 'full':
pass
elif stat == 'partial':
if band.check_sig(spectrum):
spectrum.warnings['PartialRenorm'] = True
print ('Warning: Spectrum is not defined everywhere in '
'renormalization bandpass. At least 99% of the band '
'throughput has data, therefore proceeding anyway. '
'Spectrum will be extrapolated at constant value.')
else:
raise OverlapError('Spectrum and renormalization band do not '
'fully overlap. You may use force=True to '
'force the renormalization to proceed.')
elif stat == 'none':
raise DisjointError('Spectrum and renormalization band are '
'disjoint.')
# Compute the flux of the spectrum through the bandpass and make sure
# the result makes sense.
sp = spectrum * band
totalflux = sp.integrate()
if totalflux <= 0.0:
raise ValueError('Integrated flux is <= 0')
if np.isnan(totalflux):
raise ValueError('Integrated flux is NaN')
if np.isinf(totalflux):
raise ValueError('Integrated flux is infinite')
# Get the standard unit spectrum in the renormalization units
RNunits = units.Units(RNunitstring)
if RNunits.isDensity:
up = RNunits.StdSpectrum * band
else:
up = RNunits.StdSpectrum
# Renormalize in magnitudes....
if RNunits.isMag:
ratio = totalflux / up.integrate()
dmag = RNval + 2.5 * math.log10(ratio)
newsp = spectrum.addmag(dmag)
#...or in linear flux units.
else:
const = RNval * (up.integrate() / totalflux)
newsp = spectrum * const
# Return the new spectrum
return newsp | python | def StdRenorm(spectrum, band, RNval, RNunitstring, force=False):
"""This is used by `~pysynphot.spectrum.SourceSpectrum` for
renormalization.
Parameters
----------
spectrum : `~pysynphot.spectrum.SourceSpectrum`
Spectrum to renormalize.
band, RNval, RNunitstring, force
See :meth:`~pysynphot.spectrum.SourceSpectrum.renorm`.
Returns
-------
newsp : `~pysynphot.spectrum.CompositeSourceSpectrum`
Renormalized spectrum.
"""
# Validate the overlap
if not force:
stat = band.check_overlap(spectrum)
if stat == 'full':
pass
elif stat == 'partial':
if band.check_sig(spectrum):
spectrum.warnings['PartialRenorm'] = True
print ('Warning: Spectrum is not defined everywhere in '
'renormalization bandpass. At least 99% of the band '
'throughput has data, therefore proceeding anyway. '
'Spectrum will be extrapolated at constant value.')
else:
raise OverlapError('Spectrum and renormalization band do not '
'fully overlap. You may use force=True to '
'force the renormalization to proceed.')
elif stat == 'none':
raise DisjointError('Spectrum and renormalization band are '
'disjoint.')
# Compute the flux of the spectrum through the bandpass and make sure
# the result makes sense.
sp = spectrum * band
totalflux = sp.integrate()
if totalflux <= 0.0:
raise ValueError('Integrated flux is <= 0')
if np.isnan(totalflux):
raise ValueError('Integrated flux is NaN')
if np.isinf(totalflux):
raise ValueError('Integrated flux is infinite')
# Get the standard unit spectrum in the renormalization units
RNunits = units.Units(RNunitstring)
if RNunits.isDensity:
up = RNunits.StdSpectrum * band
else:
up = RNunits.StdSpectrum
# Renormalize in magnitudes....
if RNunits.isMag:
ratio = totalflux / up.integrate()
dmag = RNval + 2.5 * math.log10(ratio)
newsp = spectrum.addmag(dmag)
#...or in linear flux units.
else:
const = RNval * (up.integrate() / totalflux)
newsp = spectrum * const
# Return the new spectrum
return newsp | [
"def",
"StdRenorm",
"(",
"spectrum",
",",
"band",
",",
"RNval",
",",
"RNunitstring",
",",
"force",
"=",
"False",
")",
":",
"# Validate the overlap",
"if",
"not",
"force",
":",
"stat",
"=",
"band",
".",
"check_overlap",
"(",
"spectrum",
")",
"if",
"stat",
... | This is used by `~pysynphot.spectrum.SourceSpectrum` for
renormalization.
Parameters
----------
spectrum : `~pysynphot.spectrum.SourceSpectrum`
Spectrum to renormalize.
band, RNval, RNunitstring, force
See :meth:`~pysynphot.spectrum.SourceSpectrum.renorm`.
Returns
-------
newsp : `~pysynphot.spectrum.CompositeSourceSpectrum`
Renormalized spectrum. | [
"This",
"is",
"used",
"by",
"~pysynphot",
".",
"spectrum",
".",
"SourceSpectrum",
"for",
"renormalization",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/renorm.py#L58-L127 | train | 31,801 |
spacetelescope/pysynphot | commissioning/convert/convertcases.py | setdirs | def setdirs(outfiles):
"""Create the directories if need be"""
for k in outfiles:
fname=outfiles[k]
dname= os.path.dirname(fname)
if not os.path.isdir(dname):
os.mkdir(dname) | python | def setdirs(outfiles):
"""Create the directories if need be"""
for k in outfiles:
fname=outfiles[k]
dname= os.path.dirname(fname)
if not os.path.isdir(dname):
os.mkdir(dname) | [
"def",
"setdirs",
"(",
"outfiles",
")",
":",
"for",
"k",
"in",
"outfiles",
":",
"fname",
"=",
"outfiles",
"[",
"k",
"]",
"dname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dname... | Create the directories if need be | [
"Create",
"the",
"directories",
"if",
"need",
"be"
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/commissioning/convert/convertcases.py#L76-L82 | train | 31,802 |
spacetelescope/pysynphot | pysynphot/planck.py | bb_photlam_arcsec | def bb_photlam_arcsec(wave, temperature):
"""Evaluate Planck's law in ``photlam`` per square arcsec.
.. note::
Uses :func:`llam_SI` for calculation, and then converts
SI units back to CGS.
Parameters
----------
wave : array_like
Wavelength values in Angstrom.
temperature : float
Blackbody temperature in Kelvin.
Returns
-------
result : array_like
Blackbody radiation in ``photlam`` per square arcsec.
"""
lam = wave * 1.0E-10 # Angstrom -> meter
return F * llam_SI(lam, temperature) / (HS * C / lam) | python | def bb_photlam_arcsec(wave, temperature):
"""Evaluate Planck's law in ``photlam`` per square arcsec.
.. note::
Uses :func:`llam_SI` for calculation, and then converts
SI units back to CGS.
Parameters
----------
wave : array_like
Wavelength values in Angstrom.
temperature : float
Blackbody temperature in Kelvin.
Returns
-------
result : array_like
Blackbody radiation in ``photlam`` per square arcsec.
"""
lam = wave * 1.0E-10 # Angstrom -> meter
return F * llam_SI(lam, temperature) / (HS * C / lam) | [
"def",
"bb_photlam_arcsec",
"(",
"wave",
",",
"temperature",
")",
":",
"lam",
"=",
"wave",
"*",
"1.0E-10",
"# Angstrom -> meter",
"return",
"F",
"*",
"llam_SI",
"(",
"lam",
",",
"temperature",
")",
"/",
"(",
"HS",
"*",
"C",
"/",
"lam",
")"
] | Evaluate Planck's law in ``photlam`` per square arcsec.
.. note::
Uses :func:`llam_SI` for calculation, and then converts
SI units back to CGS.
Parameters
----------
wave : array_like
Wavelength values in Angstrom.
temperature : float
Blackbody temperature in Kelvin.
Returns
-------
result : array_like
Blackbody radiation in ``photlam`` per square arcsec. | [
"Evaluate",
"Planck",
"s",
"law",
"in",
"photlam",
"per",
"square",
"arcsec",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/planck.py#L142-L166 | train | 31,803 |
spacetelescope/pysynphot | pysynphot/units.py | Units | def Units(uname):
"""Generate a unit object.
Parameters
----------
uname : str
Wavelength or flux unit name.
Returns
-------
unit : `BaseUnit` or `None`
Unit object. `None` means unitless.
Raises
------
ValueError
Unknown unit name.
"""
if isinstance(uname,BaseUnit):
return uname
else:
try:
if issubclass(uname,BaseUnit):
return uname()
except TypeError:
try:
return factory(uname)
except KeyError:
if uname == str(None):
return None
else:
raise ValueError("Unknown units %s"%uname) | python | def Units(uname):
"""Generate a unit object.
Parameters
----------
uname : str
Wavelength or flux unit name.
Returns
-------
unit : `BaseUnit` or `None`
Unit object. `None` means unitless.
Raises
------
ValueError
Unknown unit name.
"""
if isinstance(uname,BaseUnit):
return uname
else:
try:
if issubclass(uname,BaseUnit):
return uname()
except TypeError:
try:
return factory(uname)
except KeyError:
if uname == str(None):
return None
else:
raise ValueError("Unknown units %s"%uname) | [
"def",
"Units",
"(",
"uname",
")",
":",
"if",
"isinstance",
"(",
"uname",
",",
"BaseUnit",
")",
":",
"return",
"uname",
"else",
":",
"try",
":",
"if",
"issubclass",
"(",
"uname",
",",
"BaseUnit",
")",
":",
"return",
"uname",
"(",
")",
"except",
"Type... | Generate a unit object.
Parameters
----------
uname : str
Wavelength or flux unit name.
Returns
-------
unit : `BaseUnit` or `None`
Unit object. `None` means unitless.
Raises
------
ValueError
Unknown unit name. | [
"Generate",
"a",
"unit",
"object",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L27-L60 | train | 31,804 |
spacetelescope/pysynphot | pysynphot/units.py | ismatch | def ismatch(a,b):
"""Method to allow smart comparisons between classes, instances,
and string representations of units and give the right answer.
For internal use only."""
#Try the easy case
if a == b:
return True
else:
#Try isinstance in both orders
try:
if isinstance(a,b):
return True
except TypeError:
try:
if isinstance(b,a):
return True
except TypeError:
#Try isinstance(a, type(b)) in both orders
try:
if isinstance(a,type(b)):
return True
except TypeError:
try:
if isinstance(b,type(a)):
return True
except TypeError:
#Try the string representation
if str(a).lower() == str(b).lower():
return True
else:
return False | python | def ismatch(a,b):
"""Method to allow smart comparisons between classes, instances,
and string representations of units and give the right answer.
For internal use only."""
#Try the easy case
if a == b:
return True
else:
#Try isinstance in both orders
try:
if isinstance(a,b):
return True
except TypeError:
try:
if isinstance(b,a):
return True
except TypeError:
#Try isinstance(a, type(b)) in both orders
try:
if isinstance(a,type(b)):
return True
except TypeError:
try:
if isinstance(b,type(a)):
return True
except TypeError:
#Try the string representation
if str(a).lower() == str(b).lower():
return True
else:
return False | [
"def",
"ismatch",
"(",
"a",
",",
"b",
")",
":",
"#Try the easy case",
"if",
"a",
"==",
"b",
":",
"return",
"True",
"else",
":",
"#Try isinstance in both orders",
"try",
":",
"if",
"isinstance",
"(",
"a",
",",
"b",
")",
":",
"return",
"True",
"except",
... | Method to allow smart comparisons between classes, instances,
and string representations of units and give the right answer.
For internal use only. | [
"Method",
"to",
"allow",
"smart",
"comparisons",
"between",
"classes",
"instances",
"and",
"string",
"representations",
"of",
"units",
"and",
"give",
"the",
"right",
"answer",
".",
"For",
"internal",
"use",
"only",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L63-L93 | train | 31,805 |
spacetelescope/pysynphot | pysynphot/units.py | Photlam.ToABMag | def ToABMag(self, wave, flux, **kwargs):
"""Convert to ``abmag``.
.. math::
\\textnormal{AB}_{\\nu} = -2.5 \\; \\log(h \\lambda \\; \\textnormal{photlam}) - 48.6
where :math:`h` is as defined in :ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
arg = H * flux * wave
return -1.085736 * N.log(arg) + ABZERO | python | def ToABMag(self, wave, flux, **kwargs):
"""Convert to ``abmag``.
.. math::
\\textnormal{AB}_{\\nu} = -2.5 \\; \\log(h \\lambda \\; \\textnormal{photlam}) - 48.6
where :math:`h` is as defined in :ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
arg = H * flux * wave
return -1.085736 * N.log(arg) + ABZERO | [
"def",
"ToABMag",
"(",
"self",
",",
"wave",
",",
"flux",
",",
"*",
"*",
"kwargs",
")",
":",
"arg",
"=",
"H",
"*",
"flux",
"*",
"wave",
"return",
"-",
"1.085736",
"*",
"N",
".",
"log",
"(",
"arg",
")",
"+",
"ABZERO"
] | Convert to ``abmag``.
.. math::
\\textnormal{AB}_{\\nu} = -2.5 \\; \\log(h \\lambda \\; \\textnormal{photlam}) - 48.6
where :math:`h` is as defined in :ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values. | [
"Convert",
"to",
"abmag",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L800-L824 | train | 31,806 |
spacetelescope/pysynphot | pysynphot/units.py | Photlam.ToSTMag | def ToSTMag(self, wave, flux, **kwargs):
"""Convert to ``stmag``.
.. math::
\\textnormal{ST}_{\\lambda} = -2.5 \\; \\log(\\frac{hc}{\\lambda} \\; \\textnormal{photlam}) - 21.1
where :math:`h` and :math:`c` are as defined in
:ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
arg = H * C * flux / wave
return -1.085736 * N.log(arg) + STZERO | python | def ToSTMag(self, wave, flux, **kwargs):
"""Convert to ``stmag``.
.. math::
\\textnormal{ST}_{\\lambda} = -2.5 \\; \\log(\\frac{hc}{\\lambda} \\; \\textnormal{photlam}) - 21.1
where :math:`h` and :math:`c` are as defined in
:ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
arg = H * C * flux / wave
return -1.085736 * N.log(arg) + STZERO | [
"def",
"ToSTMag",
"(",
"self",
",",
"wave",
",",
"flux",
",",
"*",
"*",
"kwargs",
")",
":",
"arg",
"=",
"H",
"*",
"C",
"*",
"flux",
"/",
"wave",
"return",
"-",
"1.085736",
"*",
"N",
".",
"log",
"(",
"arg",
")",
"+",
"STZERO"
] | Convert to ``stmag``.
.. math::
\\textnormal{ST}_{\\lambda} = -2.5 \\; \\log(\\frac{hc}{\\lambda} \\; \\textnormal{photlam}) - 21.1
where :math:`h` and :math:`c` are as defined in
:ref:`pysynphot-constants`.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values. | [
"Convert",
"to",
"stmag",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L826-L851 | train | 31,807 |
spacetelescope/pysynphot | pysynphot/units.py | Photlam.ToOBMag | def ToOBMag(self, wave, flux, area=None):
"""Convert to ``obmag``.
.. math::
\\textnormal{obmag} = -2.5 \\; \\log(\\delta \\lambda \\; \\times \\; \\textnormal{area} \\; \\times \\; \\textnormal{photlam})
where :math:`\\delta \\lambda` represent bin widths derived from
:func:`~pysynphot.binning.calculate_bin_edges` and
:func:`~pysynphot.binning.calculate_bin_widths`, using the input
wavelength values as bin centers.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
area : number or `None`
Telescope collecting area. If not given, default value from
:ref:`pysynphot-refdata` is used.
Returns
-------
result : number or array_like
Converted values.
"""
area = area if area else refs.PRIMARY_AREA
bin_widths = \
binning.calculate_bin_widths(binning.calculate_bin_edges(wave))
arg = flux * bin_widths * area
return -1.085736 * N.log(arg) | python | def ToOBMag(self, wave, flux, area=None):
"""Convert to ``obmag``.
.. math::
\\textnormal{obmag} = -2.5 \\; \\log(\\delta \\lambda \\; \\times \\; \\textnormal{area} \\; \\times \\; \\textnormal{photlam})
where :math:`\\delta \\lambda` represent bin widths derived from
:func:`~pysynphot.binning.calculate_bin_edges` and
:func:`~pysynphot.binning.calculate_bin_widths`, using the input
wavelength values as bin centers.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
area : number or `None`
Telescope collecting area. If not given, default value from
:ref:`pysynphot-refdata` is used.
Returns
-------
result : number or array_like
Converted values.
"""
area = area if area else refs.PRIMARY_AREA
bin_widths = \
binning.calculate_bin_widths(binning.calculate_bin_edges(wave))
arg = flux * bin_widths * area
return -1.085736 * N.log(arg) | [
"def",
"ToOBMag",
"(",
"self",
",",
"wave",
",",
"flux",
",",
"area",
"=",
"None",
")",
":",
"area",
"=",
"area",
"if",
"area",
"else",
"refs",
".",
"PRIMARY_AREA",
"bin_widths",
"=",
"binning",
".",
"calculate_bin_widths",
"(",
"binning",
".",
"calculat... | Convert to ``obmag``.
.. math::
\\textnormal{obmag} = -2.5 \\; \\log(\\delta \\lambda \\; \\times \\; \\textnormal{area} \\; \\times \\; \\textnormal{photlam})
where :math:`\\delta \\lambda` represent bin widths derived from
:func:`~pysynphot.binning.calculate_bin_edges` and
:func:`~pysynphot.binning.calculate_bin_widths`, using the input
wavelength values as bin centers.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
area : number or `None`
Telescope collecting area. If not given, default value from
:ref:`pysynphot-refdata` is used.
Returns
-------
result : number or array_like
Converted values. | [
"Convert",
"to",
"obmag",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L853-L884 | train | 31,808 |
spacetelescope/pysynphot | pysynphot/units.py | Photlam.ToVegaMag | def ToVegaMag(self, wave, flux, **kwargs):
"""Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
from . import spectrum
resampled = spectrum.Vega.resample(wave)
normalized = flux / resampled._fluxtable
return -2.5 * N.log10(normalized) | python | def ToVegaMag(self, wave, flux, **kwargs):
"""Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
from . import spectrum
resampled = spectrum.Vega.resample(wave)
normalized = flux / resampled._fluxtable
return -2.5 * N.log10(normalized) | [
"def",
"ToVegaMag",
"(",
"self",
",",
"wave",
",",
"flux",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"import",
"spectrum",
"resampled",
"=",
"spectrum",
".",
"Vega",
".",
"resample",
"(",
"wave",
")",
"normalized",
"=",
"flux",
"/",
"resampled",
... | Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values. | [
"Convert",
"to",
"vegamag",
"."
] | a125ff956f4d94beb157bd51899747a13234bb97 | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L886-L914 | train | 31,809 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackage.acquire | def acquire(graftm_package_path):
'''Acquire a new graftm Package
Parameters
----------
graftm_output_path: str
path to base directory of graftm
'''
contents_hash = json.load(
open(
os.path.join(
graftm_package_path,
GraftMPackage._CONTENTS_FILE_NAME
),
)
)
v=contents_hash[GraftMPackage.VERSION_KEY]
logging.debug("Loading version %i GraftM package: %s" % (v, graftm_package_path))
if v == 2:
pkg = GraftMPackageVersion2()
elif v == 3:
pkg = GraftMPackageVersion3()
else:
raise InsufficientGraftMPackageException("Bad version: %s" % v)
pkg._contents_hash = contents_hash
pkg._base_directory = graftm_package_path
# check we are at current version otherwise choke
pkg.check_universal_keys(v)
pkg.check_required_keys(GraftMPackage._REQUIRED_KEYS[str(v)])
return pkg | python | def acquire(graftm_package_path):
'''Acquire a new graftm Package
Parameters
----------
graftm_output_path: str
path to base directory of graftm
'''
contents_hash = json.load(
open(
os.path.join(
graftm_package_path,
GraftMPackage._CONTENTS_FILE_NAME
),
)
)
v=contents_hash[GraftMPackage.VERSION_KEY]
logging.debug("Loading version %i GraftM package: %s" % (v, graftm_package_path))
if v == 2:
pkg = GraftMPackageVersion2()
elif v == 3:
pkg = GraftMPackageVersion3()
else:
raise InsufficientGraftMPackageException("Bad version: %s" % v)
pkg._contents_hash = contents_hash
pkg._base_directory = graftm_package_path
# check we are at current version otherwise choke
pkg.check_universal_keys(v)
pkg.check_required_keys(GraftMPackage._REQUIRED_KEYS[str(v)])
return pkg | [
"def",
"acquire",
"(",
"graftm_package_path",
")",
":",
"contents_hash",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"graftm_package_path",
",",
"GraftMPackage",
".",
"_CONTENTS_FILE_NAME",
")",
",",
")",
")",
"v",
"=",... | Acquire a new graftm Package
Parameters
----------
graftm_output_path: str
path to base directory of graftm | [
"Acquire",
"a",
"new",
"graftm",
"Package"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L59-L92 | train | 31,810 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackage.check_required_keys | def check_required_keys(self, required_keys):
'''raise InsufficientGraftMPackageException if this package does not
conform to the standard of the given package'''
h = self._contents_hash
for key in required_keys:
if key not in h:
raise InsufficientGraftMPackageException("Package missing key %s" % key) | python | def check_required_keys(self, required_keys):
'''raise InsufficientGraftMPackageException if this package does not
conform to the standard of the given package'''
h = self._contents_hash
for key in required_keys:
if key not in h:
raise InsufficientGraftMPackageException("Package missing key %s" % key) | [
"def",
"check_required_keys",
"(",
"self",
",",
"required_keys",
")",
":",
"h",
"=",
"self",
".",
"_contents_hash",
"for",
"key",
"in",
"required_keys",
":",
"if",
"key",
"not",
"in",
"h",
":",
"raise",
"InsufficientGraftMPackageException",
"(",
"\"Package missi... | raise InsufficientGraftMPackageException if this package does not
conform to the standard of the given package | [
"raise",
"InsufficientGraftMPackageException",
"if",
"this",
"package",
"does",
"not",
"conform",
"to",
"the",
"standard",
"of",
"the",
"given",
"package"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L103-L109 | train | 31,811 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackageVersion2.compile | def compile(output_package_path, refpkg_path, hmm_path, diamond_database_file, max_range,
trusted_cutoff=False, search_hmm_files=None):
'''Create a new GraftM package with the given inputs. Any files
specified as parameters are copied into the final package so can
be removed after calling this function.
Parameters
----------
output_package_path: str
path to the package being created (must not exist)
refpkg_path: str
path to pplacer reference package
hmm_path: str
path to the align HMM. Used as the search HMM if search_hmm_files
is None
diamond_database_file: str
path to diamond DB file, or None for nucleotide packages
max_rage: str
as per maximum_range()
trusted_cutoff: boolean
set TC in search HMM
search_hmm_files: list of str or None
use these HMMs for search instead of the hmm_path. All
basenames of these paths must be unique, and not the same as
hmm_path.
Returns
-------
Nothing
'''
if os.path.exists(output_package_path):
raise Exception("Not writing new GraftM package to already existing file/directory with name %s" % output_package_path)
os.mkdir(output_package_path)
hmm_file_in_gpkg = os.path.basename(hmm_path)
shutil.copyfile(hmm_path, os.path.join(output_package_path, hmm_file_in_gpkg))
if diamond_database_file:
diamond_database_file_in_gpkg = os.path.basename(diamond_database_file)
shutil.copyfile(diamond_database_file, os.path.join(output_package_path, diamond_database_file_in_gpkg))
refpkg_in_gpkg = os.path.basename(refpkg_path)
shutil.copytree(refpkg_path, os.path.join(output_package_path, refpkg_in_gpkg))
if search_hmm_files:
search_hmm_files_in_gpkg_names =\
[os.path.basename(path) for path in search_hmm_files]
if hmm_file_in_gpkg in search_hmm_files_in_gpkg_names:
raise Exception("Search and align HMMs must all have different basenames to create a new gpkg")
if len(set(search_hmm_files_in_gpkg_names)) != len(search_hmm_files_in_gpkg_names):
raise Exception("Search HMMs must have different basenames to create a new gpkg")
for i, search_hmm in enumerate(search_hmm_files):
shutil.copyfile(search_hmm, os.path.join(output_package_path, search_hmm_files_in_gpkg_names[i]))
else:
search_hmm_files_in_gpkg_names = [hmm_file_in_gpkg]
contents = {GraftMPackage.VERSION_KEY: GraftMPackageVersion2.version,
GraftMPackage.ALIGNMENT_HMM_KEY: hmm_file_in_gpkg,
GraftMPackage.SEARCH_HMM_KEY: search_hmm_files_in_gpkg_names,
GraftMPackage.REFERENCE_PACKAGE_KEY: refpkg_in_gpkg,
GraftMPackage.HMM_TRUSTED_CUTOFF_KEY: trusted_cutoff,
GraftMPackage.RANGE_KEY: max_range}
if diamond_database_file:
contents[GraftMPackage.DIAMOND_DATABASE_KEY] = diamond_database_file_in_gpkg
json.dump(contents, open(os.path.join(output_package_path, GraftMPackage._CONTENTS_FILE_NAME), 'w')) | python | def compile(output_package_path, refpkg_path, hmm_path, diamond_database_file, max_range,
trusted_cutoff=False, search_hmm_files=None):
'''Create a new GraftM package with the given inputs. Any files
specified as parameters are copied into the final package so can
be removed after calling this function.
Parameters
----------
output_package_path: str
path to the package being created (must not exist)
refpkg_path: str
path to pplacer reference package
hmm_path: str
path to the align HMM. Used as the search HMM if search_hmm_files
is None
diamond_database_file: str
path to diamond DB file, or None for nucleotide packages
max_rage: str
as per maximum_range()
trusted_cutoff: boolean
set TC in search HMM
search_hmm_files: list of str or None
use these HMMs for search instead of the hmm_path. All
basenames of these paths must be unique, and not the same as
hmm_path.
Returns
-------
Nothing
'''
if os.path.exists(output_package_path):
raise Exception("Not writing new GraftM package to already existing file/directory with name %s" % output_package_path)
os.mkdir(output_package_path)
hmm_file_in_gpkg = os.path.basename(hmm_path)
shutil.copyfile(hmm_path, os.path.join(output_package_path, hmm_file_in_gpkg))
if diamond_database_file:
diamond_database_file_in_gpkg = os.path.basename(diamond_database_file)
shutil.copyfile(diamond_database_file, os.path.join(output_package_path, diamond_database_file_in_gpkg))
refpkg_in_gpkg = os.path.basename(refpkg_path)
shutil.copytree(refpkg_path, os.path.join(output_package_path, refpkg_in_gpkg))
if search_hmm_files:
search_hmm_files_in_gpkg_names =\
[os.path.basename(path) for path in search_hmm_files]
if hmm_file_in_gpkg in search_hmm_files_in_gpkg_names:
raise Exception("Search and align HMMs must all have different basenames to create a new gpkg")
if len(set(search_hmm_files_in_gpkg_names)) != len(search_hmm_files_in_gpkg_names):
raise Exception("Search HMMs must have different basenames to create a new gpkg")
for i, search_hmm in enumerate(search_hmm_files):
shutil.copyfile(search_hmm, os.path.join(output_package_path, search_hmm_files_in_gpkg_names[i]))
else:
search_hmm_files_in_gpkg_names = [hmm_file_in_gpkg]
contents = {GraftMPackage.VERSION_KEY: GraftMPackageVersion2.version,
GraftMPackage.ALIGNMENT_HMM_KEY: hmm_file_in_gpkg,
GraftMPackage.SEARCH_HMM_KEY: search_hmm_files_in_gpkg_names,
GraftMPackage.REFERENCE_PACKAGE_KEY: refpkg_in_gpkg,
GraftMPackage.HMM_TRUSTED_CUTOFF_KEY: trusted_cutoff,
GraftMPackage.RANGE_KEY: max_range}
if diamond_database_file:
contents[GraftMPackage.DIAMOND_DATABASE_KEY] = diamond_database_file_in_gpkg
json.dump(contents, open(os.path.join(output_package_path, GraftMPackage._CONTENTS_FILE_NAME), 'w')) | [
"def",
"compile",
"(",
"output_package_path",
",",
"refpkg_path",
",",
"hmm_path",
",",
"diamond_database_file",
",",
"max_range",
",",
"trusted_cutoff",
"=",
"False",
",",
"search_hmm_files",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",... | Create a new GraftM package with the given inputs. Any files
specified as parameters are copied into the final package so can
be removed after calling this function.
Parameters
----------
output_package_path: str
path to the package being created (must not exist)
refpkg_path: str
path to pplacer reference package
hmm_path: str
path to the align HMM. Used as the search HMM if search_hmm_files
is None
diamond_database_file: str
path to diamond DB file, or None for nucleotide packages
max_rage: str
as per maximum_range()
trusted_cutoff: boolean
set TC in search HMM
search_hmm_files: list of str or None
use these HMMs for search instead of the hmm_path. All
basenames of these paths must be unique, and not the same as
hmm_path.
Returns
-------
Nothing | [
"Create",
"a",
"new",
"GraftM",
"package",
"with",
"the",
"given",
"inputs",
".",
"Any",
"files",
"specified",
"as",
"parameters",
"are",
"copied",
"into",
"the",
"final",
"package",
"so",
"can",
"be",
"removed",
"after",
"calling",
"this",
"function",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L166-L234 | train | 31,812 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackageVersion3.create_diamond_db | def create_diamond_db(self):
'''Create a diamond database from the unaligned sequences in this package.
Returns
-------
path to the created diamond db e.g. 'my_sequences.dmnd'
'''
base = self.unaligned_sequence_database_path()
cmd = "diamond makedb --in '%s' -d '%s'" % (self.unaligned_sequence_database_path(), base)
extern.run(cmd)
diamondb = '%s.dmnd' % base
# Mostly this moves a file to it's current location because Create
# follows this same logic, but there's a specially crafted
# test/data/mcrA.gpkg which is slightly different.
os.rename(diamondb, self.diamond_database_path())
return diamondb | python | def create_diamond_db(self):
'''Create a diamond database from the unaligned sequences in this package.
Returns
-------
path to the created diamond db e.g. 'my_sequences.dmnd'
'''
base = self.unaligned_sequence_database_path()
cmd = "diamond makedb --in '%s' -d '%s'" % (self.unaligned_sequence_database_path(), base)
extern.run(cmd)
diamondb = '%s.dmnd' % base
# Mostly this moves a file to it's current location because Create
# follows this same logic, but there's a specially crafted
# test/data/mcrA.gpkg which is slightly different.
os.rename(diamondb, self.diamond_database_path())
return diamondb | [
"def",
"create_diamond_db",
"(",
"self",
")",
":",
"base",
"=",
"self",
".",
"unaligned_sequence_database_path",
"(",
")",
"cmd",
"=",
"\"diamond makedb --in '%s' -d '%s'\"",
"%",
"(",
"self",
".",
"unaligned_sequence_database_path",
"(",
")",
",",
"base",
")",
"e... | Create a diamond database from the unaligned sequences in this package.
Returns
-------
path to the created diamond db e.g. 'my_sequences.dmnd' | [
"Create",
"a",
"diamond",
"database",
"from",
"the",
"unaligned",
"sequences",
"in",
"this",
"package",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L248-L263 | train | 31,813 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackageVersion3.graftm_package_is_protein | def graftm_package_is_protein(graftm_package):
'''Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. In general it is best to use
'is_protein_package' instead.
'''
found = None
with open(graftm_package.alignment_hmm_path()) as f:
r = f.read().split("\n")
for line in r:
if line=='ALPH DNA':
found = False
break
elif line=='ALPH amino':
found = True
break
if found is None:
raise Exception("Unable to determine whether the HMM was amino acid or dna")
return found | python | def graftm_package_is_protein(graftm_package):
'''Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. In general it is best to use
'is_protein_package' instead.
'''
found = None
with open(graftm_package.alignment_hmm_path()) as f:
r = f.read().split("\n")
for line in r:
if line=='ALPH DNA':
found = False
break
elif line=='ALPH amino':
found = True
break
if found is None:
raise Exception("Unable to determine whether the HMM was amino acid or dna")
return found | [
"def",
"graftm_package_is_protein",
"(",
"graftm_package",
")",
":",
"found",
"=",
"None",
"with",
"open",
"(",
"graftm_package",
".",
"alignment_hmm_path",
"(",
")",
")",
"as",
"f",
":",
"r",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\""... | Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. In general it is best to use
'is_protein_package' instead. | [
"Return",
"true",
"if",
"this",
"package",
"is",
"an",
"Amino",
"Acid",
"alignment",
"package",
"otherwise",
"False",
"i",
".",
"e",
".",
"it",
"is",
"a",
"nucleotide",
"package",
".",
"In",
"general",
"it",
"is",
"best",
"to",
"use",
"is_protein_package",... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L275-L293 | train | 31,814 |
geronimp/graftM | graftm/graftm_package.py | GraftMPackageVersion3.is_protein_package | def is_protein_package(self):
'''Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. Cache the result for speed.
'''
if not hasattr(self, '_is_protein_package'):
self._is_protein_package = GraftMPackageVersion3.graftm_package_is_protein(self)
return self._is_protein_package | python | def is_protein_package(self):
'''Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. Cache the result for speed.
'''
if not hasattr(self, '_is_protein_package'):
self._is_protein_package = GraftMPackageVersion3.graftm_package_is_protein(self)
return self._is_protein_package | [
"def",
"is_protein_package",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_is_protein_package'",
")",
":",
"self",
".",
"_is_protein_package",
"=",
"GraftMPackageVersion3",
".",
"graftm_package_is_protein",
"(",
"self",
")",
"return",
"self",... | Return true if this package is an Amino Acid alignment package, otherwise
False i.e. it is a nucleotide package. Cache the result for speed. | [
"Return",
"true",
"if",
"this",
"package",
"is",
"an",
"Amino",
"Acid",
"alignment",
"package",
"otherwise",
"False",
"i",
".",
"e",
".",
"it",
"is",
"a",
"nucleotide",
"package",
".",
"Cache",
"the",
"result",
"for",
"speed",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/graftm_package.py#L295-L302 | train | 31,815 |
geronimp/graftM | graftm/unpack_sequences.py | UnpackRawReads.basename | def basename(self):
'''Return the name of the file with the '.fasta' or 'fq.gz' etc
removed'''
return os.path.basename(self.read_file)[:-len(self._get_extension(self.read_file))] | python | def basename(self):
'''Return the name of the file with the '.fasta' or 'fq.gz' etc
removed'''
return os.path.basename(self.read_file)[:-len(self._get_extension(self.read_file))] | [
"def",
"basename",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"read_file",
")",
"[",
":",
"-",
"len",
"(",
"self",
".",
"_get_extension",
"(",
"self",
".",
"read_file",
")",
")",
"]"
] | Return the name of the file with the '.fasta' or 'fq.gz' etc
removed | [
"Return",
"the",
"name",
"of",
"the",
"file",
"with",
"the",
".",
"fasta",
"or",
"fq",
".",
"gz",
"etc",
"removed"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/unpack_sequences.py#L108-L111 | train | 31,816 |
geronimp/graftM | graftm/dendropy_tree_cleaner.py | DendropyTreeCleaner.match_alignment_and_tree_sequence_ids | def match_alignment_and_tree_sequence_ids(self, sequence_names, tree):
'''Check to make sure that the sequences specified in the alignment
and the tree are the same, otherwise raise an Exception detailing
the problem for the user to fix
Parameters
----------
sequence_names: list of str
names of sequences to ensure are in the tree
tree: dendropy.Tree
tree to find names in
'''
tip_names_count = {}
for t in tree.leaf_node_iter():
# replace spaces with underscores as this is how they are given to FastTree.
name = t.taxon.label.replace(' ','_')
if name in tip_names_count:
raise Exception("Duplicate tip name found in tree: '%s'" % name)
else:
tip_names_count[name] = 1
for name in sequence_names:
if name not in tip_names_count:
raise Exception("The alignment sequence '%s' was found in the alignment but not the tree" % name)
elif tip_names_count[name] > 1:
raise Exception("Found duplicate sequence name '%s'" % name)
else:
tip_names_count[name] += 1
for name, count in tip_names_count.iteritems():
if count < 2:
raise Exception("Sequence '%s' was found in the tree but not the alignment" % name) | python | def match_alignment_and_tree_sequence_ids(self, sequence_names, tree):
'''Check to make sure that the sequences specified in the alignment
and the tree are the same, otherwise raise an Exception detailing
the problem for the user to fix
Parameters
----------
sequence_names: list of str
names of sequences to ensure are in the tree
tree: dendropy.Tree
tree to find names in
'''
tip_names_count = {}
for t in tree.leaf_node_iter():
# replace spaces with underscores as this is how they are given to FastTree.
name = t.taxon.label.replace(' ','_')
if name in tip_names_count:
raise Exception("Duplicate tip name found in tree: '%s'" % name)
else:
tip_names_count[name] = 1
for name in sequence_names:
if name not in tip_names_count:
raise Exception("The alignment sequence '%s' was found in the alignment but not the tree" % name)
elif tip_names_count[name] > 1:
raise Exception("Found duplicate sequence name '%s'" % name)
else:
tip_names_count[name] += 1
for name, count in tip_names_count.iteritems():
if count < 2:
raise Exception("Sequence '%s' was found in the tree but not the alignment" % name) | [
"def",
"match_alignment_and_tree_sequence_ids",
"(",
"self",
",",
"sequence_names",
",",
"tree",
")",
":",
"tip_names_count",
"=",
"{",
"}",
"for",
"t",
"in",
"tree",
".",
"leaf_node_iter",
"(",
")",
":",
"# replace spaces with underscores as this is how they are given ... | Check to make sure that the sequences specified in the alignment
and the tree are the same, otherwise raise an Exception detailing
the problem for the user to fix
Parameters
----------
sequence_names: list of str
names of sequences to ensure are in the tree
tree: dendropy.Tree
tree to find names in | [
"Check",
"to",
"make",
"sure",
"that",
"the",
"sequences",
"specified",
"in",
"the",
"alignment",
"and",
"the",
"tree",
"are",
"the",
"same",
"otherwise",
"raise",
"an",
"Exception",
"detailing",
"the",
"problem",
"for",
"the",
"user",
"to",
"fix"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/dendropy_tree_cleaner.py#L22-L53 | train | 31,817 |
geronimp/graftM | graftm/dendropy_tree_cleaner.py | DendropyTreeCleaner.remove_sequences | def remove_sequences(self, tree, sequence_names):
'''Remove sequences with in the given sequence_names array from the tree in
place. Assumes the sequences are found in the tree, and that they are
all unique.
Parameters
----------
tree: dendropy.Tree
tree to remove from
sequence_names: list of str
list of tip names to remove
'''
tree.prune_taxa_with_labels(sequence_names)
tree.prune_taxa_with_labels([s.replace('_',' ') for s in sequence_names]) | python | def remove_sequences(self, tree, sequence_names):
'''Remove sequences with in the given sequence_names array from the tree in
place. Assumes the sequences are found in the tree, and that they are
all unique.
Parameters
----------
tree: dendropy.Tree
tree to remove from
sequence_names: list of str
list of tip names to remove
'''
tree.prune_taxa_with_labels(sequence_names)
tree.prune_taxa_with_labels([s.replace('_',' ') for s in sequence_names]) | [
"def",
"remove_sequences",
"(",
"self",
",",
"tree",
",",
"sequence_names",
")",
":",
"tree",
".",
"prune_taxa_with_labels",
"(",
"sequence_names",
")",
"tree",
".",
"prune_taxa_with_labels",
"(",
"[",
"s",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"for",... | Remove sequences with in the given sequence_names array from the tree in
place. Assumes the sequences are found in the tree, and that they are
all unique.
Parameters
----------
tree: dendropy.Tree
tree to remove from
sequence_names: list of str
list of tip names to remove | [
"Remove",
"sequences",
"with",
"in",
"the",
"given",
"sequence_names",
"array",
"from",
"the",
"tree",
"in",
"place",
".",
"Assumes",
"the",
"sequences",
"are",
"found",
"in",
"the",
"tree",
"and",
"that",
"they",
"are",
"all",
"unique",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/dendropy_tree_cleaner.py#L55-L69 | train | 31,818 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher._hmmalign | def _hmmalign(self, input_path, directions, pipeline,
forward_reads_output_path, reverse_reads_output_path):
'''
Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_fh: str
Where to write aligned forward reads
reverse_reads_output_fh: str
Where to write aligned reverse reads
Returns
-------
Nothing.
'''
if pipeline == PIPELINE_AA:
reverse_direction_reads_present=False
else:
reverse_direction_reads_present=False in directions.values()
with tempfile.NamedTemporaryFile(prefix='for_file', suffix='.fa') as for_file_fh:
for_file = for_file_fh.name
with tempfile.NamedTemporaryFile(prefix='rev_file', suffix='.fa') as rev_file_fh:
rev_file = rev_file_fh.name
# Align input reads to a specified hmm.
if reverse_direction_reads_present: # Any that are in the reverse direction would be True
reverse = []
forward = []
records = list(SeqIO.parse(open(input_path), 'fasta'))
# Split the reads into reverse and forward lists
for record in records:
read_id = record.id
if directions[read_id] == True:
forward.append(record)
elif directions[read_id] == False:
reverse.append(record)
else:
raise Exception(logging.error('Programming error: hmmalign'))
exit(1)
logging.debug("Found %i forward direction reads" % len(forward))
logging.debug("Found %i reverse direction reads" % len(reverse))
# Write reverse complement and forward reads to files
with open(for_file, 'w') as for_aln:
logging.debug("Writing forward direction reads to %s" % for_file)
for record in forward:
for_aln.write('>' + record.id + '\n')
for_aln.write(str(record.seq) + '\n')
# HMMalign and convert to fasta format
if any(forward):
self.hmmalign_sequences(self.aln_hmm, for_file, forward_reads_output_path)
else:
cmd = 'touch %s' % (forward_reads_output_path)
extern.run(cmd)
with open(rev_file, 'w') as rev_aln:
logging.debug("Writing reverse direction reads to %s" % rev_file)
for record in reverse:
if record.id and record.seq:
rev_aln.write('>' + record.id + '\n')
rev_aln.write(str(record.seq.reverse_complement()) + '\n')
self.hmmalign_sequences(self.aln_hmm, rev_file, reverse_reads_output_path)
conv_files = [forward_reads_output_path, reverse_reads_output_path]
return conv_files
else:
# If there are only forward reads, just hmmalign and be done with it.
self.hmmalign_sequences(self.aln_hmm, input_path, forward_reads_output_path)
conv_files = [forward_reads_output_path]
return conv_files | python | def _hmmalign(self, input_path, directions, pipeline,
forward_reads_output_path, reverse_reads_output_path):
'''
Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_fh: str
Where to write aligned forward reads
reverse_reads_output_fh: str
Where to write aligned reverse reads
Returns
-------
Nothing.
'''
if pipeline == PIPELINE_AA:
reverse_direction_reads_present=False
else:
reverse_direction_reads_present=False in directions.values()
with tempfile.NamedTemporaryFile(prefix='for_file', suffix='.fa') as for_file_fh:
for_file = for_file_fh.name
with tempfile.NamedTemporaryFile(prefix='rev_file', suffix='.fa') as rev_file_fh:
rev_file = rev_file_fh.name
# Align input reads to a specified hmm.
if reverse_direction_reads_present: # Any that are in the reverse direction would be True
reverse = []
forward = []
records = list(SeqIO.parse(open(input_path), 'fasta'))
# Split the reads into reverse and forward lists
for record in records:
read_id = record.id
if directions[read_id] == True:
forward.append(record)
elif directions[read_id] == False:
reverse.append(record)
else:
raise Exception(logging.error('Programming error: hmmalign'))
exit(1)
logging.debug("Found %i forward direction reads" % len(forward))
logging.debug("Found %i reverse direction reads" % len(reverse))
# Write reverse complement and forward reads to files
with open(for_file, 'w') as for_aln:
logging.debug("Writing forward direction reads to %s" % for_file)
for record in forward:
for_aln.write('>' + record.id + '\n')
for_aln.write(str(record.seq) + '\n')
# HMMalign and convert to fasta format
if any(forward):
self.hmmalign_sequences(self.aln_hmm, for_file, forward_reads_output_path)
else:
cmd = 'touch %s' % (forward_reads_output_path)
extern.run(cmd)
with open(rev_file, 'w') as rev_aln:
logging.debug("Writing reverse direction reads to %s" % rev_file)
for record in reverse:
if record.id and record.seq:
rev_aln.write('>' + record.id + '\n')
rev_aln.write(str(record.seq.reverse_complement()) + '\n')
self.hmmalign_sequences(self.aln_hmm, rev_file, reverse_reads_output_path)
conv_files = [forward_reads_output_path, reverse_reads_output_path]
return conv_files
else:
# If there are only forward reads, just hmmalign and be done with it.
self.hmmalign_sequences(self.aln_hmm, input_path, forward_reads_output_path)
conv_files = [forward_reads_output_path]
return conv_files | [
"def",
"_hmmalign",
"(",
"self",
",",
"input_path",
",",
"directions",
",",
"pipeline",
",",
"forward_reads_output_path",
",",
"reverse_reads_output_path",
")",
":",
"if",
"pipeline",
"==",
"PIPELINE_AA",
":",
"reverse_direction_reads_present",
"=",
"False",
"else",
... | Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_fh: str
Where to write aligned forward reads
reverse_reads_output_fh: str
Where to write aligned reverse reads
Returns
-------
Nothing. | [
"Align",
"reads",
"to",
"the",
"aln_hmm",
".",
"Receives",
"unaligned",
"sequences",
"and",
"aligns",
"them",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L47-L127 | train | 31,819 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.hmmalign_sequences | def hmmalign_sequences(self, hmm, sequences, output_file):
'''Run hmmalign and convert output to aligned fasta format
Parameters
----------
hmm: str
path to hmm file
sequences: str
path to file of sequences to be aligned
output_file: str
write sequences to this file
Returns
-------
nothing
'''
cmd = 'hmmalign --trim %s %s' % (hmm, sequences)
output = extern.run(cmd)
with open(output_file, 'w') as f:
SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), f, 'fasta') | python | def hmmalign_sequences(self, hmm, sequences, output_file):
'''Run hmmalign and convert output to aligned fasta format
Parameters
----------
hmm: str
path to hmm file
sequences: str
path to file of sequences to be aligned
output_file: str
write sequences to this file
Returns
-------
nothing
'''
cmd = 'hmmalign --trim %s %s' % (hmm, sequences)
output = extern.run(cmd)
with open(output_file, 'w') as f:
SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), f, 'fasta') | [
"def",
"hmmalign_sequences",
"(",
"self",
",",
"hmm",
",",
"sequences",
",",
"output_file",
")",
":",
"cmd",
"=",
"'hmmalign --trim %s %s'",
"%",
"(",
"hmm",
",",
"sequences",
")",
"output",
"=",
"extern",
".",
"run",
"(",
"cmd",
")",
"with",
"open",
"("... | Run hmmalign and convert output to aligned fasta format
Parameters
----------
hmm: str
path to hmm file
sequences: str
path to file of sequences to be aligned
output_file: str
write sequences to this file
Returns
-------
nothing | [
"Run",
"hmmalign",
"and",
"convert",
"output",
"to",
"aligned",
"fasta",
"format"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L129-L149 | train | 31,820 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.hmmsearch | def hmmsearch(self, output_path, input_path, unpack, seq_type, threads, cutoff, orfm):
'''
hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM.
'''
# Define the base hmmsearch command.
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: expected 1 or more HMMs")
# Choose an input to this base command based off the file format found.
if seq_type == 'nucleotide': # If the input is nucleotide sequence
input_cmd = orfm.command_line(input_path)
elif seq_type == 'aminoacid': # If the input is amino acid sequence
input_cmd = unpack.command_line()
else:
raise Exception('Programming Error: error guessing input sequence type')
# Run the HMMsearches
if cutoff == "--cut_tc":
searcher = HmmSearcher(threads, cutoff)
else:
searcher = HmmSearcher(threads, '--domE %s' % cutoff)
searcher.hmmsearch(input_cmd, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_hmmsearch_table(x) for x in output_table_list]
return hmmtables | python | def hmmsearch(self, output_path, input_path, unpack, seq_type, threads, cutoff, orfm):
'''
hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM.
'''
# Define the base hmmsearch command.
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: expected 1 or more HMMs")
# Choose an input to this base command based off the file format found.
if seq_type == 'nucleotide': # If the input is nucleotide sequence
input_cmd = orfm.command_line(input_path)
elif seq_type == 'aminoacid': # If the input is amino acid sequence
input_cmd = unpack.command_line()
else:
raise Exception('Programming Error: error guessing input sequence type')
# Run the HMMsearches
if cutoff == "--cut_tc":
searcher = HmmSearcher(threads, cutoff)
else:
searcher = HmmSearcher(threads, '--domE %s' % cutoff)
searcher.hmmsearch(input_cmd, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_hmmsearch_table(x) for x in output_table_list]
return hmmtables | [
"def",
"hmmsearch",
"(",
"self",
",",
"output_path",
",",
"input_path",
",",
"unpack",
",",
"seq_type",
",",
"threads",
",",
"cutoff",
",",
"orfm",
")",
":",
"# Define the base hmmsearch command.",
"logging",
".",
"debug",
"(",
"\"Using %i HMMs to search\"",
"%",
... | hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM. | [
"hmmsearch",
"-",
"Search",
"raw",
"reads",
"for",
"hits",
"using",
"search_hmm",
"list"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L155-L223 | train | 31,821 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.merge_forev_aln | def merge_forev_aln(self, forward_aln_list, reverse_aln_list, outputs):
'''
merge_forev_aln - Merges forward and reverse alignments for a given run
Parameters
----------
aln_list : array
List of the forward and reverse alignments for each of the runs
given to graftM. **MUST** be the following pattern:
[forward_run1, reverse_run1, forward_run2, reverse_run2 ...]
outputs : array
List of paths to output file to which the merged alignments from the
aln_list will go into. Must be exactly half the size of the aln_list
(i.e. one output file for every forward and reverse file provided)
Returns
-------
Nothing - output files are known.
'''
orfm_regex = OrfM.regular_expression()
def remove_orfm_end(records):
new_dict = {}
for key, record in records.iteritems():
orfmregex = orfm_regex.match(key)
if orfmregex:
new_dict[orfmregex.groups(0)[0]] = record
else:
new_dict[key] = record
return new_dict
for idx, (forward_path, reverse_path) in enumerate(zip(forward_aln_list, reverse_aln_list)):
output_path = outputs[idx]
logging.info('Merging pair %s, %s' % (os.path.basename(forward_path), os.path.basename(reverse_path)))
forward_reads = SeqIO.parse(forward_path, 'fasta')
reverse_reads = remove_orfm_end(SeqIO.to_dict(SeqIO.parse(reverse_path, 'fasta')))
with open(output_path, 'w') as out:
for forward_record in forward_reads:
regex_match = orfm_regex.match(forward_record.id)
if regex_match:
id = regex_match.groups(0)[0]
else:
id = forward_record.id
forward_sequence = str(forward_record.seq)
try:
reverse_sequence = str(reverse_reads[id].seq)
new_seq = ''
if len(forward_sequence) == len(reverse_sequence):
for f, r in zip(forward_sequence, reverse_sequence):
if f == r:
new_seq += f
elif f == '-' and r != '-':
new_seq += r
elif r == '-' and f != '-':
new_seq += f
elif f != '-' and r != '-':
if f != r:
new_seq += f
else:
new_seq += '-'
else:
logging.error('Alignments do not match')
raise Exception('Merging alignments failed: Alignments do not match')
out.write('>%s\n' % forward_record.id)
out.write('%s\n' % (new_seq))
del reverse_reads[id]
except:
out.write('>%s\n' % forward_record.id)
out.write('%s\n' % (forward_sequence))
for record_id, record in reverse_reads.iteritems():
out.write('>%s\n' % record.id)
out.write('%s\n' % (str(record.seq))) | python | def merge_forev_aln(self, forward_aln_list, reverse_aln_list, outputs):
'''
merge_forev_aln - Merges forward and reverse alignments for a given run
Parameters
----------
aln_list : array
List of the forward and reverse alignments for each of the runs
given to graftM. **MUST** be the following pattern:
[forward_run1, reverse_run1, forward_run2, reverse_run2 ...]
outputs : array
List of paths to output file to which the merged alignments from the
aln_list will go into. Must be exactly half the size of the aln_list
(i.e. one output file for every forward and reverse file provided)
Returns
-------
Nothing - output files are known.
'''
orfm_regex = OrfM.regular_expression()
def remove_orfm_end(records):
new_dict = {}
for key, record in records.iteritems():
orfmregex = orfm_regex.match(key)
if orfmregex:
new_dict[orfmregex.groups(0)[0]] = record
else:
new_dict[key] = record
return new_dict
for idx, (forward_path, reverse_path) in enumerate(zip(forward_aln_list, reverse_aln_list)):
output_path = outputs[idx]
logging.info('Merging pair %s, %s' % (os.path.basename(forward_path), os.path.basename(reverse_path)))
forward_reads = SeqIO.parse(forward_path, 'fasta')
reverse_reads = remove_orfm_end(SeqIO.to_dict(SeqIO.parse(reverse_path, 'fasta')))
with open(output_path, 'w') as out:
for forward_record in forward_reads:
regex_match = orfm_regex.match(forward_record.id)
if regex_match:
id = regex_match.groups(0)[0]
else:
id = forward_record.id
forward_sequence = str(forward_record.seq)
try:
reverse_sequence = str(reverse_reads[id].seq)
new_seq = ''
if len(forward_sequence) == len(reverse_sequence):
for f, r in zip(forward_sequence, reverse_sequence):
if f == r:
new_seq += f
elif f == '-' and r != '-':
new_seq += r
elif r == '-' and f != '-':
new_seq += f
elif f != '-' and r != '-':
if f != r:
new_seq += f
else:
new_seq += '-'
else:
logging.error('Alignments do not match')
raise Exception('Merging alignments failed: Alignments do not match')
out.write('>%s\n' % forward_record.id)
out.write('%s\n' % (new_seq))
del reverse_reads[id]
except:
out.write('>%s\n' % forward_record.id)
out.write('%s\n' % (forward_sequence))
for record_id, record in reverse_reads.iteritems():
out.write('>%s\n' % record.id)
out.write('%s\n' % (str(record.seq))) | [
"def",
"merge_forev_aln",
"(",
"self",
",",
"forward_aln_list",
",",
"reverse_aln_list",
",",
"outputs",
")",
":",
"orfm_regex",
"=",
"OrfM",
".",
"regular_expression",
"(",
")",
"def",
"remove_orfm_end",
"(",
"records",
")",
":",
"new_dict",
"=",
"{",
"}",
... | merge_forev_aln - Merges forward and reverse alignments for a given run
Parameters
----------
aln_list : array
List of the forward and reverse alignments for each of the runs
given to graftM. **MUST** be the following pattern:
[forward_run1, reverse_run1, forward_run2, reverse_run2 ...]
outputs : array
List of paths to output file to which the merged alignments from the
aln_list will go into. Must be exactly half the size of the aln_list
(i.e. one output file for every forward and reverse file provided)
Returns
-------
Nothing - output files are known. | [
"merge_forev_aln",
"-",
"Merges",
"forward",
"and",
"reverse",
"alignments",
"for",
"a",
"given",
"run"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L225-L300 | train | 31,822 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.nhmmer | def nhmmer(self, output_path, unpack, threads, evalue):
'''
nhmmer - Search input path using nhmmer
Parameters
----------
output_path : str
A string containing the path to the input sequences
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
threads : str
Number of threads to run. For compiling command line.
evalue : str
evalue to use. For compiling commmand line.
Returns
-------
output_table_list : array
Includes the name of the output domtblout table given by hmmer
'''
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: Expected 1 or more HMMs")
input_pipe = unpack.command_line()
searcher = NhmmerSearcher(threads, extra_args='--incE %s -E %s' % (evalue, evalue))
searcher.hmmsearch(input_pipe, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_nhmmer_table(x) for x in output_table_list]
return hmmtables, output_table_list | python | def nhmmer(self, output_path, unpack, threads, evalue):
'''
nhmmer - Search input path using nhmmer
Parameters
----------
output_path : str
A string containing the path to the input sequences
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
threads : str
Number of threads to run. For compiling command line.
evalue : str
evalue to use. For compiling commmand line.
Returns
-------
output_table_list : array
Includes the name of the output domtblout table given by hmmer
'''
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: Expected 1 or more HMMs")
input_pipe = unpack.command_line()
searcher = NhmmerSearcher(threads, extra_args='--incE %s -E %s' % (evalue, evalue))
searcher.hmmsearch(input_pipe, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_nhmmer_table(x) for x in output_table_list]
return hmmtables, output_table_list | [
"def",
"nhmmer",
"(",
"self",
",",
"output_path",
",",
"unpack",
",",
"threads",
",",
"evalue",
")",
":",
"logging",
".",
"debug",
"(",
"\"Using %i HMMs to search\"",
"%",
"(",
"len",
"(",
"self",
".",
"search_hmm",
")",
")",
")",
"output_table_list",
"=",... | nhmmer - Search input path using nhmmer
Parameters
----------
output_path : str
A string containing the path to the input sequences
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
threads : str
Number of threads to run. For compiling command line.
evalue : str
evalue to use. For compiling commmand line.
Returns
-------
output_table_list : array
Includes the name of the output domtblout table given by hmmer | [
"nhmmer",
"-",
"Search",
"input",
"path",
"using",
"nhmmer"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L302-L341 | train | 31,823 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher._check_euk_contamination | def _check_euk_contamination(self, hmm_hit_tables):
'''
check_euk_contamination - Check output HMM tables hits reads that hit
the 18S HMM with a higher bit score.
Parameters
----------
hmm_hit_tables : array
Array of paths to the output files produced by hmmsearch or
nhmmer.
run_stats : dict
A dictionary to updatewith the number of unique 18S reads and reads
detected by both 18S and non-18S HMMs
Returns
-------
euk_reads : set
Non-redundant set of all read names deemed to be eukaryotic
'''
euk_hit_table = HMMreader(hmm_hit_tables.pop(-1))
other_hit_tables = [HMMreader(x) for x in hmm_hit_tables]
reads_unique_to_eukaryotes = []
reads_with_better_euk_hit = []
for hit in euk_hit_table.names():
bits = []
for hit_table in other_hit_tables:
if hit in hit_table.names():
bits.append(hit_table.bit(hit))
else:
reads_unique_to_eukaryotes.append(hit)
if bits:
if any([x for x in bits if x > euk_hit_table.bit(hit)]):
continue
else:
reads_with_better_euk_hit.append(hit)
else:
continue
if len(reads_with_better_euk_hit) == 0:
logging.info("No contaminating eukaryotic reads detected")
else:
logging.info("Found %s read(s) that may be eukaryotic" % len(reads_with_better_euk_hit + reads_unique_to_eukaryotes))
euk_reads = set(reads_with_better_euk_hit + reads_unique_to_eukaryotes)
return euk_reads | python | def _check_euk_contamination(self, hmm_hit_tables):
'''
check_euk_contamination - Check output HMM tables hits reads that hit
the 18S HMM with a higher bit score.
Parameters
----------
hmm_hit_tables : array
Array of paths to the output files produced by hmmsearch or
nhmmer.
run_stats : dict
A dictionary to updatewith the number of unique 18S reads and reads
detected by both 18S and non-18S HMMs
Returns
-------
euk_reads : set
Non-redundant set of all read names deemed to be eukaryotic
'''
euk_hit_table = HMMreader(hmm_hit_tables.pop(-1))
other_hit_tables = [HMMreader(x) for x in hmm_hit_tables]
reads_unique_to_eukaryotes = []
reads_with_better_euk_hit = []
for hit in euk_hit_table.names():
bits = []
for hit_table in other_hit_tables:
if hit in hit_table.names():
bits.append(hit_table.bit(hit))
else:
reads_unique_to_eukaryotes.append(hit)
if bits:
if any([x for x in bits if x > euk_hit_table.bit(hit)]):
continue
else:
reads_with_better_euk_hit.append(hit)
else:
continue
if len(reads_with_better_euk_hit) == 0:
logging.info("No contaminating eukaryotic reads detected")
else:
logging.info("Found %s read(s) that may be eukaryotic" % len(reads_with_better_euk_hit + reads_unique_to_eukaryotes))
euk_reads = set(reads_with_better_euk_hit + reads_unique_to_eukaryotes)
return euk_reads | [
"def",
"_check_euk_contamination",
"(",
"self",
",",
"hmm_hit_tables",
")",
":",
"euk_hit_table",
"=",
"HMMreader",
"(",
"hmm_hit_tables",
".",
"pop",
"(",
"-",
"1",
")",
")",
"other_hit_tables",
"=",
"[",
"HMMreader",
"(",
"x",
")",
"for",
"x",
"in",
"hmm... | check_euk_contamination - Check output HMM tables hits reads that hit
the 18S HMM with a higher bit score.
Parameters
----------
hmm_hit_tables : array
Array of paths to the output files produced by hmmsearch or
nhmmer.
run_stats : dict
A dictionary to updatewith the number of unique 18S reads and reads
detected by both 18S and non-18S HMMs
Returns
-------
euk_reads : set
Non-redundant set of all read names deemed to be eukaryotic | [
"check_euk_contamination",
"-",
"Check",
"output",
"HMM",
"tables",
"hits",
"reads",
"that",
"hit",
"the",
"18S",
"HMM",
"with",
"a",
"higher",
"bit",
"score",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L343-L390 | train | 31,824 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher._extract_multiple_hits | def _extract_multiple_hits(self, hits, reads_path, output_path):
'''
splits out regions of a read that hit the HMM. For example when two of
same gene are identified within the same contig, The regions mapping to
the HMM will be split out and written out to a new file as a new record.
Parameters
----------
hits : dict
A dictionary where the keys are the read names, the entry for each
is a list of lists, each containing the range within the contig
(or read) that mapped to the HMM. e.g.:
{'read1': [[3, 126], [305, 413]],
'read2': [[1, 125]],
...
reads_path : str
path to reads file containing each read or contig in FASTA format to
be opened, and split.
output_path : str
path to file to which split reads will be written to in FASTA
format.
Returns
-------
Nothing, output path is known.
'''
complement_information = {}
try:
reads = SeqIO.to_dict(SeqIO.parse(reads_path, "fasta")) # open up reads as dictionary
except:
logging.error("Multiple sequences found with the same ID. The input sequences are either ill formated or are interleaved. \
If you provided GraftM with an interleaved sequence file, please split them into forward and reverse reads, and provide to the the appropriate \
flags (--forward, --reverse). Otherwise, it appears that you have provided sequences with redundant IDs. GraftM doesn't know how to \
deal with these, so please remove/rename sequences with duplicate keys.")
raise InterleavedFileError()
with open(output_path, 'w') as out:
for read_name, entry in hits.iteritems(): # For each contig
ranges = entry["entry"]
complements = entry["strand"]
index = 1
if len(ranges) > 1: # if there are multiple hits in that contig
for r, c in zip(ranges, complements): # for each of those hits
new_record = reads[read_name][r[0] - 1:r[1]] # subset the record by the span of that hit
new_record.id = new_record.id + '_split_%i' % index # give that subset record a new header
SeqIO.write(new_record, out, "fasta") # and write it to output
index += 1 # increment the split counter
complement_information[new_record.id]=c
else: # Otherwise, just write the read back to the file
complement_information[read_name] = entry["strand"][0]
SeqIO.write(reads[read_name], out, "fasta")
return complement_information | python | def _extract_multiple_hits(self, hits, reads_path, output_path):
'''
splits out regions of a read that hit the HMM. For example when two of
same gene are identified within the same contig, The regions mapping to
the HMM will be split out and written out to a new file as a new record.
Parameters
----------
hits : dict
A dictionary where the keys are the read names, the entry for each
is a list of lists, each containing the range within the contig
(or read) that mapped to the HMM. e.g.:
{'read1': [[3, 126], [305, 413]],
'read2': [[1, 125]],
...
reads_path : str
path to reads file containing each read or contig in FASTA format to
be opened, and split.
output_path : str
path to file to which split reads will be written to in FASTA
format.
Returns
-------
Nothing, output path is known.
'''
complement_information = {}
try:
reads = SeqIO.to_dict(SeqIO.parse(reads_path, "fasta")) # open up reads as dictionary
except:
logging.error("Multiple sequences found with the same ID. The input sequences are either ill formated or are interleaved. \
If you provided GraftM with an interleaved sequence file, please split them into forward and reverse reads, and provide to the the appropriate \
flags (--forward, --reverse). Otherwise, it appears that you have provided sequences with redundant IDs. GraftM doesn't know how to \
deal with these, so please remove/rename sequences with duplicate keys.")
raise InterleavedFileError()
with open(output_path, 'w') as out:
for read_name, entry in hits.iteritems(): # For each contig
ranges = entry["entry"]
complements = entry["strand"]
index = 1
if len(ranges) > 1: # if there are multiple hits in that contig
for r, c in zip(ranges, complements): # for each of those hits
new_record = reads[read_name][r[0] - 1:r[1]] # subset the record by the span of that hit
new_record.id = new_record.id + '_split_%i' % index # give that subset record a new header
SeqIO.write(new_record, out, "fasta") # and write it to output
index += 1 # increment the split counter
complement_information[new_record.id]=c
else: # Otherwise, just write the read back to the file
complement_information[read_name] = entry["strand"][0]
SeqIO.write(reads[read_name], out, "fasta")
return complement_information | [
"def",
"_extract_multiple_hits",
"(",
"self",
",",
"hits",
",",
"reads_path",
",",
"output_path",
")",
":",
"complement_information",
"=",
"{",
"}",
"try",
":",
"reads",
"=",
"SeqIO",
".",
"to_dict",
"(",
"SeqIO",
".",
"parse",
"(",
"reads_path",
",",
"\"f... | splits out regions of a read that hit the HMM. For example when two of
same gene are identified within the same contig, The regions mapping to
the HMM will be split out and written out to a new file as a new record.
Parameters
----------
hits : dict
A dictionary where the keys are the read names, the entry for each
is a list of lists, each containing the range within the contig
(or read) that mapped to the HMM. e.g.:
{'read1': [[3, 126], [305, 413]],
'read2': [[1, 125]],
...
reads_path : str
path to reads file containing each read or contig in FASTA format to
be opened, and split.
output_path : str
path to file to which split reads will be written to in FASTA
format.
Returns
-------
Nothing, output path is known. | [
"splits",
"out",
"regions",
"of",
"a",
"read",
"that",
"hit",
"the",
"HMM",
".",
"For",
"example",
"when",
"two",
"of",
"same",
"gene",
"are",
"identified",
"within",
"the",
"same",
"contig",
"The",
"regions",
"mapping",
"to",
"the",
"HMM",
"will",
"be",... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L393-L449 | train | 31,825 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.alignment_correcter | def alignment_correcter(self, alignment_file_list, output_file_name,
filter_minimum=None):
'''
Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file
'''
corrected_sequences = {}
for alignment_file in alignment_file_list:
insert_list = [] # Define list containing inserted positions to be removed (lower case characters)
sequence_list = list(SeqIO.parse(open(alignment_file, 'r'), 'fasta'))
for sequence in sequence_list: # For each sequence in the alignment
for idx, nt in enumerate(list(sequence.seq)): # For each nucleotide in the sequence
if nt.islower(): # Check for lower case character
insert_list.append(idx) # Add to the insert list if it is
insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True))) # Reverse the list and remove duplicate positions
for sequence in sequence_list: # For each sequence in the alignment
new_seq = list(sequence.seq) # Define a list of sequences to be iterable list for writing
for position in insert_list: # For each position in the removal list
del new_seq[position] # Delete that inserted position in every sequence
corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-')
pre_filter_count=len(corrected_sequences)
if filter_minimum:
# Use '>' not '>=' here because the sequence is on a single line,
# but also includes a newline character at the end of the sequence
corrected_sequences={key:item for key, item in corrected_sequences.iteritems() if len(item.replace('-', '')) > filter_minimum}
post_filter_count=len(corrected_sequences)
logging.info("Filtered %i short sequences from the alignment" % \
(pre_filter_count-post_filter_count)
)
logging.info("%i sequences remaining" % post_filter_count)
if len(corrected_sequences) >= 1:
with open(output_file_name, 'w') as output_file: # Create an open file to write the new sequences to
for fasta_id, fasta_seq in corrected_sequences.iteritems():
output_file.write(fasta_id)
output_file.write(fasta_seq)
return True
else:
return False | python | def alignment_correcter(self, alignment_file_list, output_file_name,
filter_minimum=None):
'''
Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file
'''
corrected_sequences = {}
for alignment_file in alignment_file_list:
insert_list = [] # Define list containing inserted positions to be removed (lower case characters)
sequence_list = list(SeqIO.parse(open(alignment_file, 'r'), 'fasta'))
for sequence in sequence_list: # For each sequence in the alignment
for idx, nt in enumerate(list(sequence.seq)): # For each nucleotide in the sequence
if nt.islower(): # Check for lower case character
insert_list.append(idx) # Add to the insert list if it is
insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True))) # Reverse the list and remove duplicate positions
for sequence in sequence_list: # For each sequence in the alignment
new_seq = list(sequence.seq) # Define a list of sequences to be iterable list for writing
for position in insert_list: # For each position in the removal list
del new_seq[position] # Delete that inserted position in every sequence
corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-')
pre_filter_count=len(corrected_sequences)
if filter_minimum:
# Use '>' not '>=' here because the sequence is on a single line,
# but also includes a newline character at the end of the sequence
corrected_sequences={key:item for key, item in corrected_sequences.iteritems() if len(item.replace('-', '')) > filter_minimum}
post_filter_count=len(corrected_sequences)
logging.info("Filtered %i short sequences from the alignment" % \
(pre_filter_count-post_filter_count)
)
logging.info("%i sequences remaining" % post_filter_count)
if len(corrected_sequences) >= 1:
with open(output_file_name, 'w') as output_file: # Create an open file to write the new sequences to
for fasta_id, fasta_seq in corrected_sequences.iteritems():
output_file.write(fasta_id)
output_file.write(fasta_seq)
return True
else:
return False | [
"def",
"alignment_correcter",
"(",
"self",
",",
"alignment_file_list",
",",
"output_file_name",
",",
"filter_minimum",
"=",
"None",
")",
":",
"corrected_sequences",
"=",
"{",
"}",
"for",
"alignment_file",
"in",
"alignment_file_list",
":",
"insert_list",
"=",
"[",
... | Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file | [
"Remove",
"lower",
"case",
"insertions",
"in",
"alignment",
"outputs",
"from",
"HMM",
"align",
".",
"Give",
"a",
"list",
"of",
"alignments",
"and",
"an",
"output",
"file",
"name",
"and",
"each",
"alignment",
"will",
"be",
"corrected",
"and",
"written",
"to",... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L509-L567 | train | 31,826 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher._extract_orfs | def _extract_orfs(self, input_path, orfm, hit_readnames, output_path, search_method, sequence_frame_info_list=None):
'''
Call ORFs on a file with nucleotide sequences and extract the proteins
whose name is in `hit_readnames`.
Parameters
----------
input_path : str
Path to input nucleotide sequences in FASTA format.
orfm: obj
graftm.OrfM object with parameters already set
hit_readnames : str
path to a file containin the readnames of hits to the HMM, one per
line.
output_path : str
Path to output orfs into, in FASTA format.
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
sequence_frame_info : list
A dataframe (list of lists) containing readname, alignment direction
and alignment start point information
'''
if search_method == "hmmsearch":
# Build and run command to extract ORF sequences:
orfm_cmd = orfm.command_line()
cmd = 'fxtract -H -X -f /dev/stdin <(%s %s) > %s' % (orfm_cmd, input_path, output_path)
process = subprocess.Popen(["bash", "-c", cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.communicate('\n'.join(hit_readnames))
elif search_method == "diamond":
sequence_frame_info_dict = {x[0]:[x[1], x[2], x[3]] for x in sequence_frame_info_list}
records = SeqIO.parse(input_path, "fasta")
with open(output_path, 'w') as open_output_path:
for record in records:
entry=sequence_frame_info_dict[record.id]
indfrom=(min(entry[2], entry[1])-1)
indto=max(entry[2], entry[1])
if entry[0] == False:
record.seq = max([x for x in record.seq[indfrom:indto].reverse_complement().translate().split("*")], key=len)
else:
record.seq = max([x for x in record.seq[indfrom:indto].translate().split("*")], key=len)
SeqIO.write(record, open_output_path, "fasta")
open_output_path.flush() | python | def _extract_orfs(self, input_path, orfm, hit_readnames, output_path, search_method, sequence_frame_info_list=None):
'''
Call ORFs on a file with nucleotide sequences and extract the proteins
whose name is in `hit_readnames`.
Parameters
----------
input_path : str
Path to input nucleotide sequences in FASTA format.
orfm: obj
graftm.OrfM object with parameters already set
hit_readnames : str
path to a file containin the readnames of hits to the HMM, one per
line.
output_path : str
Path to output orfs into, in FASTA format.
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
sequence_frame_info : list
A dataframe (list of lists) containing readname, alignment direction
and alignment start point information
'''
if search_method == "hmmsearch":
# Build and run command to extract ORF sequences:
orfm_cmd = orfm.command_line()
cmd = 'fxtract -H -X -f /dev/stdin <(%s %s) > %s' % (orfm_cmd, input_path, output_path)
process = subprocess.Popen(["bash", "-c", cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.communicate('\n'.join(hit_readnames))
elif search_method == "diamond":
sequence_frame_info_dict = {x[0]:[x[1], x[2], x[3]] for x in sequence_frame_info_list}
records = SeqIO.parse(input_path, "fasta")
with open(output_path, 'w') as open_output_path:
for record in records:
entry=sequence_frame_info_dict[record.id]
indfrom=(min(entry[2], entry[1])-1)
indto=max(entry[2], entry[1])
if entry[0] == False:
record.seq = max([x for x in record.seq[indfrom:indto].reverse_complement().translate().split("*")], key=len)
else:
record.seq = max([x for x in record.seq[indfrom:indto].translate().split("*")], key=len)
SeqIO.write(record, open_output_path, "fasta")
open_output_path.flush() | [
"def",
"_extract_orfs",
"(",
"self",
",",
"input_path",
",",
"orfm",
",",
"hit_readnames",
",",
"output_path",
",",
"search_method",
",",
"sequence_frame_info_list",
"=",
"None",
")",
":",
"if",
"search_method",
"==",
"\"hmmsearch\"",
":",
"# Build and run command t... | Call ORFs on a file with nucleotide sequences and extract the proteins
whose name is in `hit_readnames`.
Parameters
----------
input_path : str
Path to input nucleotide sequences in FASTA format.
orfm: obj
graftm.OrfM object with parameters already set
hit_readnames : str
path to a file containin the readnames of hits to the HMM, one per
line.
output_path : str
Path to output orfs into, in FASTA format.
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
sequence_frame_info : list
A dataframe (list of lists) containing readname, alignment direction
and alignment start point information | [
"Call",
"ORFs",
"on",
"a",
"file",
"with",
"nucleotide",
"sequences",
"and",
"extract",
"the",
"proteins",
"whose",
"name",
"is",
"in",
"hit_readnames",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L570-L615 | train | 31,827 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.aa_db_search | def aa_db_search(self, files, base, unpack, search_method,
maximum_range, threads, evalue, min_orf_length,
restrict_read_length, diamond_database):
'''
Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
orf length to retrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
if search_method == 'hmmsearch':
output_search_file = files.hmmsearch_output_path(base)
elif search_method == 'diamond':
output_search_file = files.diamond_search_output_basename(base)
hit_reads_fasta = files.fa_output_path(base)
hit_reads_orfs_fasta = files.orf_fasta_output_path(base)
return self.search_and_extract_orfs_matching_protein_database(\
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta) | python | def aa_db_search(self, files, base, unpack, search_method,
maximum_range, threads, evalue, min_orf_length,
restrict_read_length, diamond_database):
'''
Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
            orf length to restrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
if search_method == 'hmmsearch':
output_search_file = files.hmmsearch_output_path(base)
elif search_method == 'diamond':
output_search_file = files.diamond_search_output_basename(base)
hit_reads_fasta = files.fa_output_path(base)
hit_reads_orfs_fasta = files.orf_fasta_output_path(base)
return self.search_and_extract_orfs_matching_protein_database(\
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta) | [
"def",
"aa_db_search",
"(",
"self",
",",
"files",
",",
"base",
",",
"unpack",
",",
"search_method",
",",
"maximum_range",
",",
"threads",
",",
"evalue",
",",
"min_orf_length",
",",
"restrict_read_length",
",",
"diamond_database",
")",
":",
"# Define outputs",
"i... | Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
            orf length to restrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit | [
"Amino",
"acid",
"database",
"search",
"pipeline",
"-",
"pipeline",
"where",
"reads",
"are",
"searched",
"as",
"amino",
"acids",
"and",
"hits",
"are",
"identified",
"using",
"hmmsearch",
"or",
"diamond",
"searches"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L760-L821 | train | 31,828 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.nt_db_search | def nt_db_search(self, files, base, unpack, euk_check,
search_method, maximum_range, threads, evalue):
'''
Nucleotide database search pipeline - pipeline where reads are searched
as nucleotides, and hits are identified using nhmmer searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
euk_check : bool
True indicates the sample will be checked for eukaryotic reads,
False indicates not.
search_method : str
The method for searching e.g. 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : str
Number of threads for hmmer to use
evalue : str
Evalue cutoff for hmmer to use
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
hmmsearch_output_table = files.hmmsearch_output_path(base)
hit_reads_fasta = files.fa_output_path(base)
return \
self.search_and_extract_nucleotides_matching_nucleotide_database(\
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta) | python | def nt_db_search(self, files, base, unpack, euk_check,
search_method, maximum_range, threads, evalue):
'''
Nucleotide database search pipeline - pipeline where reads are searched
as nucleotides, and hits are identified using nhmmer searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
euk_check : bool
True indicates the sample will be checked for eukaryotic reads,
False indicates not.
search_method : str
The method for searching e.g. 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : str
Number of threads for hmmer to use
evalue : str
Evalue cutoff for hmmer to use
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
hmmsearch_output_table = files.hmmsearch_output_path(base)
hit_reads_fasta = files.fa_output_path(base)
return \
self.search_and_extract_nucleotides_matching_nucleotide_database(\
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta) | [
"def",
"nt_db_search",
"(",
"self",
",",
"files",
",",
"base",
",",
"unpack",
",",
"euk_check",
",",
"search_method",
",",
"maximum_range",
",",
"threads",
",",
"evalue",
")",
":",
"# Define outputs",
"hmmsearch_output_table",
"=",
"files",
".",
"hmmsearch_outpu... | Nucleotide database search pipeline - pipeline where reads are searched
as nucleotides, and hits are identified using nhmmer searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
euk_check : bool
True indicates the sample will be checked for eukaryotic reads,
False indicates not.
search_method : str
The method for searching e.g. 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : str
Number of threads for hmmer to use
evalue : str
Evalue cutoff for hmmer to use
Returns
-------
String path to amino acid fasta file of reads that hit | [
"Nucleotide",
"database",
"search",
"pipeline",
"-",
"pipeline",
"where",
"reads",
"are",
"searched",
"as",
"nucleotides",
"and",
"hits",
"are",
"identified",
"using",
"nhmmer",
"searches"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L964-L1013 | train | 31,829 |
geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.align | def align(self, input_path, output_path, directions, pipeline,
filter_minimum):
'''align - Takes input path to fasta of unaligned reads, aligns them to
a HMM, and returns the aligned reads in the output path
Parameters
----------
input_path : str
output_path : str
reverse_direction : dict
A dictionary of read names, with the entries being the complement
strand of the read (True = forward, False = reverse)
pipeline : str
Either "P" or "D" corresponding to the protein and nucleotide (DNA)
pipelines, respectively.
Returns
-------
N/A - output alignment path known.
'''
# HMMalign the forward reads, and reverse complement reads.
with tempfile.NamedTemporaryFile(prefix='for_conv_file', suffix='.fa') as fwd_fh:
fwd_conv_file = fwd_fh.name
with tempfile.NamedTemporaryFile(prefix='rev_conv_file', suffix='.fa') as rev_fh:
rev_conv_file = rev_fh.name
alignments = self._hmmalign(
input_path,
directions,
pipeline,
fwd_conv_file,
rev_conv_file)
alignment_result = self.alignment_correcter(alignments,
output_path,
filter_minimum)
return alignment_result | python | def align(self, input_path, output_path, directions, pipeline,
filter_minimum):
'''align - Takes input path to fasta of unaligned reads, aligns them to
a HMM, and returns the aligned reads in the output path
Parameters
----------
input_path : str
output_path : str
reverse_direction : dict
A dictionary of read names, with the entries being the complement
strand of the read (True = forward, False = reverse)
pipeline : str
Either "P" or "D" corresponding to the protein and nucleotide (DNA)
pipelines, respectively.
Returns
-------
N/A - output alignment path known.
'''
# HMMalign the forward reads, and reverse complement reads.
with tempfile.NamedTemporaryFile(prefix='for_conv_file', suffix='.fa') as fwd_fh:
fwd_conv_file = fwd_fh.name
with tempfile.NamedTemporaryFile(prefix='rev_conv_file', suffix='.fa') as rev_fh:
rev_conv_file = rev_fh.name
alignments = self._hmmalign(
input_path,
directions,
pipeline,
fwd_conv_file,
rev_conv_file)
alignment_result = self.alignment_correcter(alignments,
output_path,
filter_minimum)
return alignment_result | [
"def",
"align",
"(",
"self",
",",
"input_path",
",",
"output_path",
",",
"directions",
",",
"pipeline",
",",
"filter_minimum",
")",
":",
"# HMMalign the forward reads, and reverse complement reads.",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"... | align - Takes input path to fasta of unaligned reads, aligns them to
a HMM, and returns the aligned reads in the output path
Parameters
----------
input_path : str
output_path : str
reverse_direction : dict
A dictionary of read names, with the entries being the complement
strand of the read (True = forward, False = reverse)
pipeline : str
Either "P" or "D" corresponding to the protein and nucleotide (DNA)
pipelines, respectively.
Returns
-------
N/A - output alignment path known. | [
"align",
"-",
"Takes",
"input",
"path",
"to",
"fasta",
"of",
"unaligned",
"reads",
"aligns",
"them",
"to",
"a",
"HMM",
"and",
"returns",
"the",
"aligned",
"reads",
"in",
"the",
"output",
"path"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L1111-L1147 | train | 31,830 |
geronimp/graftM | graftm/rerooter.py | Rerooter.reroot_by_tree | def reroot_by_tree(self, old_tree, new_tree):
'''reroot the new tree so that it matches the old tree's root, if
possible. If more than one rerooting is possible, root at the longest
internal branch that is consistent with the root of the old_tree.
Assumes that the tree is binary. Both the old and new trees may be
modified by this method.
Parameters
----------
old_tree: dendropy.Tree
The old tree to try to match the root from
new_tree: dendropy.Tree
tree to be rerooted in the same place as the old_tree.
Must include at least one leaf from each side of the old_tree's
root (matching by node.name), but may not have all leaves from
the old tree, and can have extra leaves.
Returns
-------
The new_tree rooted by the root of the old tree
Exceptions
----------
TreeParaphyleticException
If either of the old_tree's branches are not monophyletic in the
new tree
'''
# Ensure that the tree is rooted to avoid gotchas
# e.g. https://github.com/jeetsukumaran/DendroPy/issues/51
old_tree.is_rooted = True
new_tree.is_rooted = True
if len(old_tree.seed_node.child_nodes()) != 2:
raise Exception("Unexpectedly found a non-binary tree. Perhaps need to use Rerooter.reroot() ?")
# make a list of the left and right leaf names that are in the new tree
new_tip_names = set([tip.taxon.label for tip in new_tree.leaf_node_iter()])
old_left_tip_names = [tip.taxon.label for tip in old_tree.seed_node.child_nodes()[0].leaf_nodes() if tip.taxon.label in new_tip_names]
old_right_tip_names = [tip.taxon.label for tip in old_tree.seed_node.child_nodes()[1].leaf_nodes() if tip.taxon.label in new_tip_names]
# find the LCA of the lefts and the rights, giving up at the root.
left_lca = new_tree.mrca(taxon_labels=old_left_tip_names)
right_lca = new_tree.mrca(taxon_labels=old_right_tip_names)
# if both didn't LCA before hitting the root, tree paraphyletic
# take the first one where the LCA was hit, reroot here.
# find the LCA of the other in the other half of the tree.
# if failed before getting to the root, then tree paraphyletic
# reroot on one side of the internal branch between the two LCAs
if left_lca == new_tree.seed_node:
if right_lca == new_tree.seed_node:
raise TreeParaphyleticException("Tree paraphyletic case #1")
else:
new_tree.reroot_at_edge(right_lca.edge)
new_lca = new_tree.mrca(taxon_labels=old_left_tip_names)
else:
new_tree.reroot_at_edge(left_lca.edge, length1=left_lca.edge.length)
new_lca = new_tree.mrca(taxon_labels=old_right_tip_names)
if new_lca.edge.rootedge:
raise TreeParaphyleticException("Tree paraphyletic case #2")
rerooting_edge = self._find_longest_internal_edge(new_lca)
if rerooting_edge and rerooting_edge.head_node and rerooting_edge.tail_node:
new_tree.reroot_at_edge(rerooting_edge, length1=rerooting_edge.length)
return new_tree | python | def reroot_by_tree(self, old_tree, new_tree):
'''reroot the new tree so that it matches the old tree's root, if
possible. If more than one rerooting is possible, root at the longest
internal branch that is consistent with the root of the old_tree.
Assumes that the tree is binary. Both the old and new trees may be
modified by this method.
Parameters
----------
old_tree: dendropy.Tree
The old tree to try to match the root from
new_tree: dendropy.Tree
tree to be rerooted in the same place as the old_tree.
Must include at least one leaf from each side of the old_tree's
root (matching by node.name), but may not have all leaves from
the old tree, and can have extra leaves.
Returns
-------
The new_tree rooted by the root of the old tree
Exceptions
----------
TreeParaphyleticException
If either of the old_tree's branches are not monophyletic in the
new tree
'''
# Ensure that the tree is rooted to avoid gotchas
# e.g. https://github.com/jeetsukumaran/DendroPy/issues/51
old_tree.is_rooted = True
new_tree.is_rooted = True
if len(old_tree.seed_node.child_nodes()) != 2:
raise Exception("Unexpectedly found a non-binary tree. Perhaps need to use Rerooter.reroot() ?")
# make a list of the left and right leaf names that are in the new tree
new_tip_names = set([tip.taxon.label for tip in new_tree.leaf_node_iter()])
old_left_tip_names = [tip.taxon.label for tip in old_tree.seed_node.child_nodes()[0].leaf_nodes() if tip.taxon.label in new_tip_names]
old_right_tip_names = [tip.taxon.label for tip in old_tree.seed_node.child_nodes()[1].leaf_nodes() if tip.taxon.label in new_tip_names]
# find the LCA of the lefts and the rights, giving up at the root.
left_lca = new_tree.mrca(taxon_labels=old_left_tip_names)
right_lca = new_tree.mrca(taxon_labels=old_right_tip_names)
# if both didn't LCA before hitting the root, tree paraphyletic
# take the first one where the LCA was hit, reroot here.
# find the LCA of the other in the other half of the tree.
# if failed before getting to the root, then tree paraphyletic
# reroot on one side of the internal branch between the two LCAs
if left_lca == new_tree.seed_node:
if right_lca == new_tree.seed_node:
raise TreeParaphyleticException("Tree paraphyletic case #1")
else:
new_tree.reroot_at_edge(right_lca.edge)
new_lca = new_tree.mrca(taxon_labels=old_left_tip_names)
else:
new_tree.reroot_at_edge(left_lca.edge, length1=left_lca.edge.length)
new_lca = new_tree.mrca(taxon_labels=old_right_tip_names)
if new_lca.edge.rootedge:
raise TreeParaphyleticException("Tree paraphyletic case #2")
rerooting_edge = self._find_longest_internal_edge(new_lca)
if rerooting_edge and rerooting_edge.head_node and rerooting_edge.tail_node:
new_tree.reroot_at_edge(rerooting_edge, length1=rerooting_edge.length)
return new_tree | [
"def",
"reroot_by_tree",
"(",
"self",
",",
"old_tree",
",",
"new_tree",
")",
":",
"# Ensure that the tree is rooted to avoid gotchas",
"# e.g. https://github.com/jeetsukumaran/DendroPy/issues/51",
"old_tree",
".",
"is_rooted",
"=",
"True",
"new_tree",
".",
"is_rooted",
"=",
... | reroot the new tree so that it matches the old tree's root, if
possible. If more than one rerooting is possible, root at the longest
internal branch that is consistent with the root of the old_tree.
Assumes that the tree is binary. Both the old and new trees may be
modified by this method.
Parameters
----------
old_tree: dendropy.Tree
The old tree to try to match the root from
new_tree: dendropy.Tree
tree to be rerooted in the same place as the old_tree.
Must include at least one leaf from each side of the old_tree's
root (matching by node.name), but may not have all leaves from
the old tree, and can have extra leaves.
Returns
-------
The new_tree rooted by the root of the old tree
Exceptions
----------
TreeParaphyleticException
If either of the old_tree's branches are not monophyletic in the
new tree | [
"reroot",
"the",
"new",
"tree",
"so",
"that",
"it",
"matches",
"the",
"old",
"tree",
"s",
"root",
"if",
"possible",
".",
"If",
"more",
"than",
"one",
"rerooting",
"is",
"possible",
"root",
"at",
"the",
"longest",
"internal",
"branch",
"that",
"is",
"cons... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/rerooter.py#L89-L154 | train | 31,831 |
geronimp/graftM | graftm/run.py | Run._assign_taxonomy_with_diamond | def _assign_taxonomy_with_diamond(self, base_list, db_search_results,
graftm_package, graftm_files):
'''Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected.
'''
runner = Diamond(graftm_package.diamond_database_path(),
self.args.threads,
self.args.evalue)
taxonomy_definition = Getaxnseq().read_taxtastic_taxonomy_and_seqinfo\
(open(graftm_package.taxtastic_taxonomy_path()),
open(graftm_package.taxtastic_seqinfo_path()))
results = {}
# For each of the search results,
for i, search_result in enumerate(db_search_results):
sequence_id_to_hit = {}
# Run diamond
logging.debug("Running diamond on %s" % search_result.hit_fasta())
diamond_result = runner.run(search_result.hit_fasta(),
UnpackRawReads.PROTEIN_SEQUENCE_TYPE,
daa_file_basename=graftm_files.diamond_assignment_output_basename(base_list[i]))
for res in diamond_result.each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.HIT_ID_FIELD]):
if res[0] in sequence_id_to_hit:
# do not accept duplicates
if sequence_id_to_hit[res[0]] != res[1]:
raise Exception("Diamond unexpectedly gave two hits for a single query sequence for %s" % res[0])
else:
sequence_id_to_hit[res[0]] = res[1]
# Extract taxonomy of the best hit, and add in the no hits
sequence_id_to_taxonomy = {}
for seqio in SequenceIO().read_fasta_file(search_result.hit_fasta()):
name = seqio.name
if name in sequence_id_to_hit:
# Add Root; to be in line with pplacer assignment method
sequence_id_to_taxonomy[name] = ['Root']+taxonomy_definition[sequence_id_to_hit[name]]
else:
# picked up in the initial search (by hmmsearch, say), but diamond misses it
sequence_id_to_taxonomy[name] = ['Root']
results[base_list[i]] = sequence_id_to_taxonomy
return results | python | def _assign_taxonomy_with_diamond(self, base_list, db_search_results,
graftm_package, graftm_files):
'''Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected.
'''
runner = Diamond(graftm_package.diamond_database_path(),
self.args.threads,
self.args.evalue)
taxonomy_definition = Getaxnseq().read_taxtastic_taxonomy_and_seqinfo\
(open(graftm_package.taxtastic_taxonomy_path()),
open(graftm_package.taxtastic_seqinfo_path()))
results = {}
# For each of the search results,
for i, search_result in enumerate(db_search_results):
sequence_id_to_hit = {}
# Run diamond
logging.debug("Running diamond on %s" % search_result.hit_fasta())
diamond_result = runner.run(search_result.hit_fasta(),
UnpackRawReads.PROTEIN_SEQUENCE_TYPE,
daa_file_basename=graftm_files.diamond_assignment_output_basename(base_list[i]))
for res in diamond_result.each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.HIT_ID_FIELD]):
if res[0] in sequence_id_to_hit:
# do not accept duplicates
if sequence_id_to_hit[res[0]] != res[1]:
raise Exception("Diamond unexpectedly gave two hits for a single query sequence for %s" % res[0])
else:
sequence_id_to_hit[res[0]] = res[1]
# Extract taxonomy of the best hit, and add in the no hits
sequence_id_to_taxonomy = {}
for seqio in SequenceIO().read_fasta_file(search_result.hit_fasta()):
name = seqio.name
if name in sequence_id_to_hit:
# Add Root; to be in line with pplacer assignment method
sequence_id_to_taxonomy[name] = ['Root']+taxonomy_definition[sequence_id_to_hit[name]]
else:
# picked up in the initial search (by hmmsearch, say), but diamond misses it
sequence_id_to_taxonomy[name] = ['Root']
results[base_list[i]] = sequence_id_to_taxonomy
return results | [
"def",
"_assign_taxonomy_with_diamond",
"(",
"self",
",",
"base_list",
",",
"db_search_results",
",",
"graftm_package",
",",
"graftm_files",
")",
":",
"runner",
"=",
"Diamond",
"(",
"graftm_package",
".",
"diamond_database_path",
"(",
")",
",",
"self",
".",
"args"... | Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected. | [
"Run",
"diamond",
"to",
"assign",
"taxonomy"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/run.py#L514-L573 | train | 31,832 |
geronimp/graftM | graftm/update.py | Update._concatenate_file | def _concatenate_file(self, file_list, output):
'''
Call unix "cat" to concatenate a list of files
Parameters
----------
file_list: list
List of strings, each leading to a file. These files are the ones to
            be concatenated together. E.g.:
["/path/to/file1", "/path/to/file2"]
output: str
Path to file to which to the files in file_list will be concatenated
into.
'''
to_cat = ' '.join(file_list)
logging.debug("Concatenating files: %s" % (to_cat))
cmd = "cat %s > %s" % (to_cat, output)
extern.run(cmd) | python | def _concatenate_file(self, file_list, output):
'''
Call unix "cat" to concatenate a list of files
Parameters
----------
file_list: list
List of strings, each leading to a file. These files are the ones to
            be concatenated together. E.g.:
["/path/to/file1", "/path/to/file2"]
output: str
Path to file to which to the files in file_list will be concatenated
into.
'''
to_cat = ' '.join(file_list)
logging.debug("Concatenating files: %s" % (to_cat))
cmd = "cat %s > %s" % (to_cat, output)
extern.run(cmd) | [
"def",
"_concatenate_file",
"(",
"self",
",",
"file_list",
",",
"output",
")",
":",
"to_cat",
"=",
"' '",
".",
"join",
"(",
"file_list",
")",
"logging",
".",
"debug",
"(",
"\"Concatenating files: %s\"",
"%",
"(",
"to_cat",
")",
")",
"cmd",
"=",
"\"cat %s >... | Call unix "cat" to concatenate a list of files
Parameters
----------
file_list: list
List of strings, each leading to a file. These files are the ones to
            be concatenated together. E.g.:
["/path/to/file1", "/path/to/file2"]
output: str
        Path to the file into which the files in file_list will be
        concatenated. | [
"Call",
"unix",
"cat",
"to",
"concatenate",
"a",
"list",
"of",
"files"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/update.py#L24-L42 | train | 31,833 |
geronimp/graftM | graftm/greengenes_taxonomy.py | GreenGenesTaxonomy.write | def write(self, output_io):
'''Write a taxonomy to an open stream out in GG format. Code calling this
function must open and close the io object.'''
for name, tax in self.taxonomy.items():
output_io.write("%s\t%s\n" % (name, '; '.join(tax))) | python | def write(self, output_io):
'''Write a taxonomy to an open stream out in GG format. Code calling this
function must open and close the io object.'''
for name, tax in self.taxonomy.items():
output_io.write("%s\t%s\n" % (name, '; '.join(tax))) | [
"def",
"write",
"(",
"self",
",",
"output_io",
")",
":",
"for",
"name",
",",
"tax",
"in",
"self",
".",
"taxonomy",
".",
"items",
"(",
")",
":",
"output_io",
".",
"write",
"(",
"\"%s\\t%s\\n\"",
"%",
"(",
"name",
",",
"'; '",
".",
"join",
"(",
"tax"... | Write a taxonomy to an open stream out in GG format. Code calling this
function must open and close the io object. | [
"Write",
"a",
"taxonomy",
"to",
"an",
"open",
"stream",
"out",
"in",
"GG",
"format",
".",
"Code",
"calling",
"this",
"function",
"must",
"open",
"and",
"close",
"the",
"io",
"object",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/greengenes_taxonomy.py#L55-L59 | train | 31,834 |
geronimp/graftM | graftm/decorator.py | Decorator._reroot | def _reroot(self):
'''Run the re-rooting algorithm in the Rerooter class.'''
rerooter = Rerooter()
self.tree = rerooter.reroot_by_tree(self.reference_tree,
self.tree) | python | def _reroot(self):
'''Run the re-rooting algorithm in the Rerooter class.'''
rerooter = Rerooter()
self.tree = rerooter.reroot_by_tree(self.reference_tree,
self.tree) | [
"def",
"_reroot",
"(",
"self",
")",
":",
"rerooter",
"=",
"Rerooter",
"(",
")",
"self",
".",
"tree",
"=",
"rerooter",
".",
"reroot_by_tree",
"(",
"self",
".",
"reference_tree",
",",
"self",
".",
"tree",
")"
] | Run the re-rooting algorithm in the Rerooter class. | [
"Run",
"the",
"re",
"-",
"rooting",
"algorithm",
"in",
"the",
"Rerooter",
"class",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/decorator.py#L46-L50 | train | 31,835 |
geronimp/graftM | graftm/housekeeping.py | HouseKeeping.file_basename | def file_basename(self, file):
'''
Strips the path and last extension from the file variable. If the
extension is found to be valid, the basename will be returned. Otherwise
an error will be raise and graftM will exit
'''
valid_extensions = set(".tree",
".tre")
split_file = os.path.basename(file).split('.')
base, suffix = '.'.join(split_file[:-1]), split_file[-1]
if suffix in valid_extensions:
return base
else:
logging.error("Invalid file extension found on file: %s" % file)
logging.error("For trees, please provide a file with one of the \
following extensions: %s" % ' '.join(valid_extensions.keys()))
raise InvalidFileExtensionError | python | def file_basename(self, file):
'''
Strips the path and last extension from the file variable. If the
extension is found to be valid, the basename will be returned. Otherwise
an error will be raise and graftM will exit
'''
valid_extensions = set(".tree",
".tre")
split_file = os.path.basename(file).split('.')
base, suffix = '.'.join(split_file[:-1]), split_file[-1]
if suffix in valid_extensions:
return base
else:
logging.error("Invalid file extension found on file: %s" % file)
logging.error("For trees, please provide a file with one of the \
following extensions: %s" % ' '.join(valid_extensions.keys()))
raise InvalidFileExtensionError | [
"def",
"file_basename",
"(",
"self",
",",
"file",
")",
":",
"valid_extensions",
"=",
"set",
"(",
"\".tree\"",
",",
"\".tre\"",
")",
"split_file",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
")",
".",
"split",
"(",
"'.'",
")",
"base",
",",
"... | Strips the path and last extension from the file variable. If the
extension is found to be valid, the basename will be returned. Otherwise
an error will be raise and graftM will exit | [
"Strips",
"the",
"path",
"and",
"last",
"extension",
"from",
"the",
"file",
"variable",
".",
"If",
"the",
"extension",
"is",
"found",
"to",
"be",
"valid",
"the",
"basename",
"will",
"be",
"returned",
".",
"Otherwise",
"an",
"error",
"will",
"be",
"raise",
... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/housekeeping.py#L24-L41 | train | 31,836 |
geronimp/graftM | graftm/housekeeping.py | HouseKeeping.set_euk_hmm | def set_euk_hmm(self, args):
'Set the hmm used by graftM to cross check for euks.'
if hasattr(args, 'euk_hmm_file'):
pass
elif not hasattr(args, 'euk_hmm_file'):
# set to path based on the location of bin/graftM, which has
# a more stable relative path to the HMM when installed through
# pip.
setattr(args, 'euk_hmm_file', os.path.join(os.path.dirname(inspect.stack()[-1][1]),'..','share', '18S.hmm'))
else:
raise Exception('Programming Error: setting the euk HMM') | python | def set_euk_hmm(self, args):
'Set the hmm used by graftM to cross check for euks.'
if hasattr(args, 'euk_hmm_file'):
pass
elif not hasattr(args, 'euk_hmm_file'):
# set to path based on the location of bin/graftM, which has
# a more stable relative path to the HMM when installed through
# pip.
setattr(args, 'euk_hmm_file', os.path.join(os.path.dirname(inspect.stack()[-1][1]),'..','share', '18S.hmm'))
else:
raise Exception('Programming Error: setting the euk HMM') | [
"def",
"set_euk_hmm",
"(",
"self",
",",
"args",
")",
":",
"if",
"hasattr",
"(",
"args",
",",
"'euk_hmm_file'",
")",
":",
"pass",
"elif",
"not",
"hasattr",
"(",
"args",
",",
"'euk_hmm_file'",
")",
":",
"# set to path based on the location of bin/graftM, which has",... | Set the hmm used by graftM to cross check for euks. | [
"Set",
"the",
"hmm",
"used",
"by",
"graftM",
"to",
"cross",
"check",
"for",
"euks",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/housekeeping.py#L44-L54 | train | 31,837 |
geronimp/graftM | graftm/housekeeping.py | HouseKeeping.get_maximum_range | def get_maximum_range(self, hmm):
'''
If no maximum range has been specified, and if using a hmm search, a
maximum range can be determined by using the length of the HMM
Parameters
----------
hmm : str
path to hmm profile
Returns
-------
Length to search to when linking hits on a single contig
'''
length=int([x for x in open(hmm) if x.startswith("LENG")][0].split()[1])
max_length=round(length*1.5, 0)
return max_length | python | def get_maximum_range(self, hmm):
'''
If no maximum range has been specified, and if using a hmm search, a
maximum range can be determined by using the length of the HMM
Parameters
----------
hmm : str
path to hmm profile
Returns
-------
Length to search to when linking hits on a single contig
'''
length=int([x for x in open(hmm) if x.startswith("LENG")][0].split()[1])
max_length=round(length*1.5, 0)
return max_length | [
"def",
"get_maximum_range",
"(",
"self",
",",
"hmm",
")",
":",
"length",
"=",
"int",
"(",
"[",
"x",
"for",
"x",
"in",
"open",
"(",
"hmm",
")",
"if",
"x",
".",
"startswith",
"(",
"\"LENG\"",
")",
"]",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
... | If no maximum range has been specified, and if using a hmm search, a
maximum range can be determined by using the length of the HMM
Parameters
----------
hmm : str
path to hmm profile
Returns
-------
Length to search to when linking hits on a single contig | [
"If",
"no",
"maximum",
"range",
"has",
"been",
"specified",
"and",
"if",
"using",
"a",
"hmm",
"search",
"a",
"maximum",
"range",
"can",
"be",
"determined",
"by",
"using",
"the",
"length",
"of",
"the",
"HMM"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/housekeeping.py#L128-L145 | train | 31,838 |
geronimp/graftM | graftm/pplacer.py | Pplacer.place | def place(self, reverse_pipe, seqs_list, resolve_placements, files, args,
slash_endings, tax_descr, clusterer):
'''
placement - This is the placement pipeline in GraftM, in aligned reads
are placed into phylogenetic trees, and the results interpreted.
If reverse reads are used, this is where the comparisons are made
between placements, for the summary tables to be build in the
next stage.
Parameters
----------
reverse_pipe : bool
True: reverse reads are placed separately
False: no reverse reads to place.
seqs_list : list
list of paths to alignment fastas to be placed into the tree
resolve_placements : bool
True:resolve placements to their most trusted taxonomy
False: classify reads to their most trusted taxonomy, until the
confidence cutoff is reached.
files : list
graftM output file name object
args : obj
argparse object
Returns
-------
trusted_placements : dict
dictionary of reads and their trusted placements
'''
trusted_placements = {}
files_to_delete = []
# Merge the alignments so they can all be placed at once.
alias_hash = self.alignment_merger(seqs_list, files.comb_aln_fa())
files_to_delete += seqs_list
files_to_delete.append(files.comb_aln_fa())
if os.path.getsize(files.comb_aln_fa()) == 0:
logging.debug("Combined alignment file has 0 size, not running pplacer")
to_return = {}
for idx, file in enumerate(seqs_list):
base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '')
to_return[base_file] = {}
return to_return
# Run pplacer on merged file
jplace = self.pplacer(files.jplace_output_path(), args.output_directory, files.comb_aln_fa(), args.threads)
files_to_delete.append(jplace)
logging.info("Placements finished")
#Read the json of refpkg
logging.info("Reading classifications")
classifications=Classify(tax_descr).assignPlacement(
jplace,
args.placements_cutoff,
resolve_placements
)
logging.info("Reads classified")
# If the reverse pipe has been specified, run the comparisons between the two pipelines. If not then just return.
for idx, file in enumerate(seqs_list):
if reverse_pipe:
base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '')
forward_gup=classifications.pop(sorted(classifications.keys())[0])
reverse_gup=classifications.pop(sorted(classifications.keys())[0])
seqs_list.pop(idx+1)
placements_hash = Compare().compare_placements(
forward_gup,
reverse_gup,
args.placements_cutoff,
slash_endings,
base_file
)
trusted_placements[base_file]=placements_hash['trusted_placements']
else: # Set the trusted placements as
base_file=os.path.basename(file).replace('_hits.aln.fa', '')
trusted_placements[base_file]={}
if str(idx) in classifications:
for read, entry in classifications[str(idx)].items():
trusted_placements[base_file][read] = entry['placement']
# Split the original jplace file
# and write split jplaces to separate file directories
with open(jplace) as f: jplace_json = json.load(f)
cluster_dict = self.convert_cluster_dict_keys_to_aliases(clusterer.seq_library,
alias_hash)
hash_with_placements = self.jplace_split(jplace_json,
cluster_dict)
for file_alias, placement_entries_list in hash_with_placements.items():
alias_hash[file_alias]['place'] = placement_entries_list
for k in alias_hash.keys():
if 'place' not in alias_hash[k]:
alias_hash[k]['place'] = []
self.write_jplace(jplace_json,
alias_hash)
self.hk.delete(files_to_delete)# Remove combined split, not really useful
return trusted_placements | python | def place(self, reverse_pipe, seqs_list, resolve_placements, files, args,
slash_endings, tax_descr, clusterer):
'''
placement - This is the placement pipeline in GraftM, in aligned reads
are placed into phylogenetic trees, and the results interpreted.
If reverse reads are used, this is where the comparisons are made
between placements, for the summary tables to be build in the
next stage.
Parameters
----------
reverse_pipe : bool
True: reverse reads are placed separately
False: no reverse reads to place.
seqs_list : list
list of paths to alignment fastas to be placed into the tree
resolve_placements : bool
True:resolve placements to their most trusted taxonomy
False: classify reads to their most trusted taxonomy, until the
confidence cutoff is reached.
files : list
graftM output file name object
args : obj
argparse object
Returns
-------
trusted_placements : dict
dictionary of reads and their trusted placements
'''
trusted_placements = {}
files_to_delete = []
# Merge the alignments so they can all be placed at once.
alias_hash = self.alignment_merger(seqs_list, files.comb_aln_fa())
files_to_delete += seqs_list
files_to_delete.append(files.comb_aln_fa())
if os.path.getsize(files.comb_aln_fa()) == 0:
logging.debug("Combined alignment file has 0 size, not running pplacer")
to_return = {}
for idx, file in enumerate(seqs_list):
base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '')
to_return[base_file] = {}
return to_return
# Run pplacer on merged file
jplace = self.pplacer(files.jplace_output_path(), args.output_directory, files.comb_aln_fa(), args.threads)
files_to_delete.append(jplace)
logging.info("Placements finished")
#Read the json of refpkg
logging.info("Reading classifications")
classifications=Classify(tax_descr).assignPlacement(
jplace,
args.placements_cutoff,
resolve_placements
)
logging.info("Reads classified")
# If the reverse pipe has been specified, run the comparisons between the two pipelines. If not then just return.
for idx, file in enumerate(seqs_list):
if reverse_pipe:
base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '')
forward_gup=classifications.pop(sorted(classifications.keys())[0])
reverse_gup=classifications.pop(sorted(classifications.keys())[0])
seqs_list.pop(idx+1)
placements_hash = Compare().compare_placements(
forward_gup,
reverse_gup,
args.placements_cutoff,
slash_endings,
base_file
)
trusted_placements[base_file]=placements_hash['trusted_placements']
else: # Set the trusted placements as
base_file=os.path.basename(file).replace('_hits.aln.fa', '')
trusted_placements[base_file]={}
if str(idx) in classifications:
for read, entry in classifications[str(idx)].items():
trusted_placements[base_file][read] = entry['placement']
# Split the original jplace file
# and write split jplaces to separate file directories
with open(jplace) as f: jplace_json = json.load(f)
cluster_dict = self.convert_cluster_dict_keys_to_aliases(clusterer.seq_library,
alias_hash)
hash_with_placements = self.jplace_split(jplace_json,
cluster_dict)
for file_alias, placement_entries_list in hash_with_placements.items():
alias_hash[file_alias]['place'] = placement_entries_list
for k in alias_hash.keys():
if 'place' not in alias_hash[k]:
alias_hash[k]['place'] = []
self.write_jplace(jplace_json,
alias_hash)
self.hk.delete(files_to_delete)# Remove combined split, not really useful
return trusted_placements | [
"def",
"place",
"(",
"self",
",",
"reverse_pipe",
",",
"seqs_list",
",",
"resolve_placements",
",",
"files",
",",
"args",
",",
"slash_endings",
",",
"tax_descr",
",",
"clusterer",
")",
":",
"trusted_placements",
"=",
"{",
"}",
"files_to_delete",
"=",
"[",
"]... | placement - This is the placement pipeline in GraftM, in aligned reads
are placed into phylogenetic trees, and the results interpreted.
If reverse reads are used, this is where the comparisons are made
between placements, for the summary tables to be build in the
next stage.
Parameters
----------
reverse_pipe : bool
True: reverse reads are placed separately
False: no reverse reads to place.
seqs_list : list
list of paths to alignment fastas to be placed into the tree
resolve_placements : bool
True:resolve placements to their most trusted taxonomy
False: classify reads to their most trusted taxonomy, until the
confidence cutoff is reached.
files : list
graftM output file name object
args : obj
argparse object
Returns
-------
trusted_placements : dict
dictionary of reads and their trusted placements | [
"placement",
"-",
"This",
"is",
"the",
"placement",
"pipeline",
"in",
"GraftM",
"in",
"aligned",
"reads",
"are",
"placed",
"into",
"phylogenetic",
"trees",
"and",
"the",
"results",
"interpreted",
".",
"If",
"reverse",
"reads",
"are",
"used",
"this",
"is",
"w... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/pplacer.py#L145-L244 | train | 31,839 |
geronimp/graftM | graftm/sequence_extractor.py | SequenceExtractor.extract | def extract(self, reads_to_extract, database_fasta_file, output_file):
'''Extract the reads_to_extract from the database_fasta_file and put them in
output_file.
Parameters
----------
reads_to_extract: Iterable of str
IDs of reads to be extracted
database_fasta_file: str
path the fasta file that containing the reads
output_file: str
path to the file where they are put
Returns
-------
Nothing'''
cmd = "fxtract -XH -f /dev/stdin '%s' > %s" % (
database_fasta_file, output_file)
extern.run(cmd, stdin='\n'.join(reads_to_extract)) | python | def extract(self, reads_to_extract, database_fasta_file, output_file):
'''Extract the reads_to_extract from the database_fasta_file and put them in
output_file.
Parameters
----------
reads_to_extract: Iterable of str
IDs of reads to be extracted
database_fasta_file: str
path the fasta file that containing the reads
output_file: str
path to the file where they are put
Returns
-------
Nothing'''
cmd = "fxtract -XH -f /dev/stdin '%s' > %s" % (
database_fasta_file, output_file)
extern.run(cmd, stdin='\n'.join(reads_to_extract)) | [
"def",
"extract",
"(",
"self",
",",
"reads_to_extract",
",",
"database_fasta_file",
",",
"output_file",
")",
":",
"cmd",
"=",
"\"fxtract -XH -f /dev/stdin '%s' > %s\"",
"%",
"(",
"database_fasta_file",
",",
"output_file",
")",
"extern",
".",
"run",
"(",
"cmd",
","... | Extract the reads_to_extract from the database_fasta_file and put them in
output_file.
Parameters
----------
reads_to_extract: Iterable of str
IDs of reads to be extracted
database_fasta_file: str
path the fasta file that containing the reads
output_file: str
path to the file where they are put
Returns
-------
Nothing | [
"Extract",
"the",
"reads_to_extract",
"from",
"the",
"database_fasta_file",
"and",
"put",
"them",
"in",
"output_file",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_extractor.py#L6-L25 | train | 31,840 |
geronimp/graftM | graftm/sequence_extractor.py | SequenceExtractor.extract_forward_and_reverse_complement | def extract_forward_and_reverse_complement(
self, forward_reads_to_extract, reverse_reads_to_extract, database_fasta_file,
output_file):
'''As per extract except also reverse complement the sequences.'''
self.extract(forward_reads_to_extract, database_fasta_file, output_file)
cmd_rev = "fxtract -XH -f /dev/stdin '%s'" % database_fasta_file
output = extern.run(cmd_rev, stdin='\n'.join(reverse_reads_to_extract))
with open(output_file, 'a') as f:
for record in SeqIO.parse(StringIO(output), 'fasta'):
record.seq = record.reverse_complement().seq
SeqIO.write(record, f, 'fasta') | python | def extract_forward_and_reverse_complement(
self, forward_reads_to_extract, reverse_reads_to_extract, database_fasta_file,
output_file):
'''As per extract except also reverse complement the sequences.'''
self.extract(forward_reads_to_extract, database_fasta_file, output_file)
cmd_rev = "fxtract -XH -f /dev/stdin '%s'" % database_fasta_file
output = extern.run(cmd_rev, stdin='\n'.join(reverse_reads_to_extract))
with open(output_file, 'a') as f:
for record in SeqIO.parse(StringIO(output), 'fasta'):
record.seq = record.reverse_complement().seq
SeqIO.write(record, f, 'fasta') | [
"def",
"extract_forward_and_reverse_complement",
"(",
"self",
",",
"forward_reads_to_extract",
",",
"reverse_reads_to_extract",
",",
"database_fasta_file",
",",
"output_file",
")",
":",
"self",
".",
"extract",
"(",
"forward_reads_to_extract",
",",
"database_fasta_file",
","... | As per extract except also reverse complement the sequences. | [
"As",
"per",
"extract",
"except",
"also",
"reverse",
"complement",
"the",
"sequences",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_extractor.py#L27-L39 | train | 31,841 |
geronimp/graftM | graftm/summarise.py | Stats_And_Summary.write_tabular_otu_table | def write_tabular_otu_table(self, sample_names, read_taxonomies, combined_output_otu_table_io):
'''A function that takes a hash of trusted placements, and compiles them
into an OTU-esque table.'''
delim = u'\t'
combined_output_otu_table_io.write(delim.join(['#ID',
delim.join(sample_names),
'ConsensusLineage']))
combined_output_otu_table_io.write(u"\n")
for otu_id, tax, counts in self._iterate_otu_table_rows(read_taxonomies):
combined_output_otu_table_io.write(delim.join(\
(str(otu_id),
delim.join([str(c) for c in counts]),
'; '.join(tax)))+"\n") | python | def write_tabular_otu_table(self, sample_names, read_taxonomies, combined_output_otu_table_io):
'''A function that takes a hash of trusted placements, and compiles them
into an OTU-esque table.'''
delim = u'\t'
combined_output_otu_table_io.write(delim.join(['#ID',
delim.join(sample_names),
'ConsensusLineage']))
combined_output_otu_table_io.write(u"\n")
for otu_id, tax, counts in self._iterate_otu_table_rows(read_taxonomies):
combined_output_otu_table_io.write(delim.join(\
(str(otu_id),
delim.join([str(c) for c in counts]),
'; '.join(tax)))+"\n") | [
"def",
"write_tabular_otu_table",
"(",
"self",
",",
"sample_names",
",",
"read_taxonomies",
",",
"combined_output_otu_table_io",
")",
":",
"delim",
"=",
"u'\\t'",
"combined_output_otu_table_io",
".",
"write",
"(",
"delim",
".",
"join",
"(",
"[",
"'#ID'",
",",
"del... | A function that takes a hash of trusted placements, and compiles them
into an OTU-esque table. | [
"A",
"function",
"that",
"takes",
"a",
"hash",
"of",
"trusted",
"placements",
"and",
"compiles",
"them",
"into",
"an",
"OTU",
"-",
"esque",
"table",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/summarise.py#L147-L159 | train | 31,842 |
geronimp/graftM | graftm/summarise.py | Stats_And_Summary.write_krona_plot | def write_krona_plot(self, sample_names, read_taxonomies, output_krona_filename):
'''Creates krona plot at the given location. Assumes the krona executable
ktImportText is available on the shell PATH'''
tempfiles = []
for n in sample_names:
tempfiles.append(tempfile.NamedTemporaryFile(prefix='GraftMkronaInput', suffix=n))
delim=u'\t'
for _, tax, counts in self._iterate_otu_table_rows(read_taxonomies):
for i, c in enumerate(counts):
if c != 0:
tempfiles[i].write(delim.join((str(c),
delim.join(tax)
))+"\n")
for t in tempfiles:
t.flush()
cmd = ["ktImportText",'-o',output_krona_filename]
for i, tmp in enumerate(tempfiles):
cmd.append(','.join([tmp.name,sample_names[i]]))
# run the actual krona
cmd = ' '.join(cmd)
extern.run(cmd)
# close tempfiles
for t in tempfiles:
t.close() | python | def write_krona_plot(self, sample_names, read_taxonomies, output_krona_filename):
'''Creates krona plot at the given location. Assumes the krona executable
ktImportText is available on the shell PATH'''
tempfiles = []
for n in sample_names:
tempfiles.append(tempfile.NamedTemporaryFile(prefix='GraftMkronaInput', suffix=n))
delim=u'\t'
for _, tax, counts in self._iterate_otu_table_rows(read_taxonomies):
for i, c in enumerate(counts):
if c != 0:
tempfiles[i].write(delim.join((str(c),
delim.join(tax)
))+"\n")
for t in tempfiles:
t.flush()
cmd = ["ktImportText",'-o',output_krona_filename]
for i, tmp in enumerate(tempfiles):
cmd.append(','.join([tmp.name,sample_names[i]]))
# run the actual krona
cmd = ' '.join(cmd)
extern.run(cmd)
# close tempfiles
for t in tempfiles:
t.close() | [
"def",
"write_krona_plot",
"(",
"self",
",",
"sample_names",
",",
"read_taxonomies",
",",
"output_krona_filename",
")",
":",
"tempfiles",
"=",
"[",
"]",
"for",
"n",
"in",
"sample_names",
":",
"tempfiles",
".",
"append",
"(",
"tempfile",
".",
"NamedTemporaryFile"... | Creates krona plot at the given location. Assumes the krona executable
ktImportText is available on the shell PATH | [
"Creates",
"krona",
"plot",
"at",
"the",
"given",
"location",
".",
"Assumes",
"the",
"krona",
"executable",
"ktImportText",
"is",
"available",
"on",
"the",
"shell",
"PATH"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/summarise.py#L161-L189 | train | 31,843 |
geronimp/graftM | graftm/clusterer.py | Clusterer.uncluster_annotations | def uncluster_annotations(self, input_annotations, reverse_pipe):
'''
Update the annotations hash provided by pplacer to include all
representatives within each cluster
Parameters
----------
input_annotations : hash
Classifications for each representative sequence of the clusters.
each key being the sequence name, and the entry being the taxonomy
string as a list.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_annotations : hash
An updated version of the above, which includes all reads from
each cluster
'''
output_annotations = {}
for placed_alignment_file_path, clusters in self.seq_library.iteritems():
if reverse_pipe and placed_alignment_file_path.endswith("_reverse_clustered.fa"): continue
placed_alignment_file = os.path.basename(placed_alignment_file_path)
cluster_classifications = input_annotations[placed_alignment_file]
if reverse_pipe:
placed_alignment_base = placed_alignment_file.replace('_forward_clustered.fa', '')
else:
placed_alignment_base = placed_alignment_file.replace('_clustered.fa', '')
output_annotations[placed_alignment_base] = {}
for rep_read_name, rep_read_taxonomy in cluster_classifications.iteritems():
if reverse_pipe:
orfm_regex = OrfM.regular_expression()
clusters={(orfm_regex.match(key).groups(0)[0] if orfm_regex.match(key) else key): item for key, item in clusters.iteritems()}
for read in clusters[rep_read_name]:
output_annotations[placed_alignment_base][read.name] = rep_read_taxonomy
return output_annotations | python | def uncluster_annotations(self, input_annotations, reverse_pipe):
'''
Update the annotations hash provided by pplacer to include all
representatives within each cluster
Parameters
----------
input_annotations : hash
Classifications for each representative sequence of the clusters.
each key being the sequence name, and the entry being the taxonomy
string as a list.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_annotations : hash
An updated version of the above, which includes all reads from
each cluster
'''
output_annotations = {}
for placed_alignment_file_path, clusters in self.seq_library.iteritems():
if reverse_pipe and placed_alignment_file_path.endswith("_reverse_clustered.fa"): continue
placed_alignment_file = os.path.basename(placed_alignment_file_path)
cluster_classifications = input_annotations[placed_alignment_file]
if reverse_pipe:
placed_alignment_base = placed_alignment_file.replace('_forward_clustered.fa', '')
else:
placed_alignment_base = placed_alignment_file.replace('_clustered.fa', '')
output_annotations[placed_alignment_base] = {}
for rep_read_name, rep_read_taxonomy in cluster_classifications.iteritems():
if reverse_pipe:
orfm_regex = OrfM.regular_expression()
clusters={(orfm_regex.match(key).groups(0)[0] if orfm_regex.match(key) else key): item for key, item in clusters.iteritems()}
for read in clusters[rep_read_name]:
output_annotations[placed_alignment_base][read.name] = rep_read_taxonomy
return output_annotations | [
"def",
"uncluster_annotations",
"(",
"self",
",",
"input_annotations",
",",
"reverse_pipe",
")",
":",
"output_annotations",
"=",
"{",
"}",
"for",
"placed_alignment_file_path",
",",
"clusters",
"in",
"self",
".",
"seq_library",
".",
"iteritems",
"(",
")",
":",
"i... | Update the annotations hash provided by pplacer to include all
representatives within each cluster
Parameters
----------
input_annotations : hash
Classifications for each representative sequence of the clusters.
each key being the sequence name, and the entry being the taxonomy
string as a list.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_annotations : hash
An updated version of the above, which includes all reads from
each cluster | [
"Update",
"the",
"annotations",
"hash",
"provided",
"by",
"pplacer",
"to",
"include",
"all",
"representatives",
"within",
"each",
"cluster"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/clusterer.py#L16-L56 | train | 31,844 |
geronimp/graftM | graftm/clusterer.py | Clusterer.cluster | def cluster(self, input_fasta_list, reverse_pipe):
'''
cluster - Clusters reads at 100% identity level and writes them to
file. Resets the input_fasta variable as the FASTA file containing the
clusters.
Parameters
----------
input_fasta_list : list
list of strings, each a path to input fasta files to be clustered.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_fasta_list : list
list of strings, each a path to the output fasta file to which
clusters were written to.
'''
output_fasta_list = []
for input_fasta in input_fasta_list:
output_path = input_fasta.replace('_hits.aln.fa', '_clustered.fa')
cluster_dict = {}
logging.debug('Clustering reads')
if os.path.exists(input_fasta):
reads=self.seqio.read_fasta_file(input_fasta) # Read in FASTA records
logging.debug('Found %i reads' % len(reads)) # Report number found
clusters=self.clust.deduplicate(reads) # Cluster redundant sequences
logging.debug('Clustered to %s groups' % len(clusters)) # Report number of clusters
logging.debug('Writing representative sequences of each cluster to: %s' % output_path) # Report the name of the file
else:
logging.debug("Found no reads to be clustered")
clusters = []
self.seqio.write_fasta_file(
[x[0] for x in clusters],
output_path
) # Choose the first sequence to write to file as representative (all the same anyway)
for cluster in clusters:
cluster_dict[cluster[0].name]=cluster # assign the cluster to the dictionary
self.seq_library[output_path]= cluster_dict
output_fasta_list.append(output_path)
return output_fasta_list | python | def cluster(self, input_fasta_list, reverse_pipe):
'''
cluster - Clusters reads at 100% identity level and writes them to
file. Resets the input_fasta variable as the FASTA file containing the
clusters.
Parameters
----------
input_fasta_list : list
list of strings, each a path to input fasta files to be clustered.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_fasta_list : list
list of strings, each a path to the output fasta file to which
clusters were written to.
'''
output_fasta_list = []
for input_fasta in input_fasta_list:
output_path = input_fasta.replace('_hits.aln.fa', '_clustered.fa')
cluster_dict = {}
logging.debug('Clustering reads')
if os.path.exists(input_fasta):
reads=self.seqio.read_fasta_file(input_fasta) # Read in FASTA records
logging.debug('Found %i reads' % len(reads)) # Report number found
clusters=self.clust.deduplicate(reads) # Cluster redundant sequences
logging.debug('Clustered to %s groups' % len(clusters)) # Report number of clusters
logging.debug('Writing representative sequences of each cluster to: %s' % output_path) # Report the name of the file
else:
logging.debug("Found no reads to be clustered")
clusters = []
self.seqio.write_fasta_file(
[x[0] for x in clusters],
output_path
) # Choose the first sequence to write to file as representative (all the same anyway)
for cluster in clusters:
cluster_dict[cluster[0].name]=cluster # assign the cluster to the dictionary
self.seq_library[output_path]= cluster_dict
output_fasta_list.append(output_path)
return output_fasta_list | [
"def",
"cluster",
"(",
"self",
",",
"input_fasta_list",
",",
"reverse_pipe",
")",
":",
"output_fasta_list",
"=",
"[",
"]",
"for",
"input_fasta",
"in",
"input_fasta_list",
":",
"output_path",
"=",
"input_fasta",
".",
"replace",
"(",
"'_hits.aln.fa'",
",",
"'_clus... | cluster - Clusters reads at 100% identity level and writes them to
file. Resets the input_fasta variable as the FASTA file containing the
clusters.
Parameters
----------
input_fasta_list : list
list of strings, each a path to input fasta files to be clustered.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_fasta_list : list
list of strings, each a path to the output fasta file to which
clusters were written to. | [
"cluster",
"-",
"Clusters",
"reads",
"at",
"100%",
"identity",
"level",
"and",
"writes",
"them",
"to",
"file",
".",
"Resets",
"the",
"input_fasta",
"variable",
"as",
"the",
"FASTA",
"file",
"containing",
"the",
"clusters",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/clusterer.py#L58-L102 | train | 31,845 |
geronimp/graftM | graftm/archive.py | Archive.create | def create(self, input_package_path, output_package_path, **kwargs):
"""Create an archived GraftM package
Parameters
----------
input_package_path: str
path to gpkg to be archived
output_pacakge_path: str
output package path
kwargs:
force: bool
overwrite an existing directory
"""
force = kwargs.pop('force',ArchiveDefaultOptions.force)
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
logging.info("Archiving GraftM package '%s' as '%s'" % (input_package_path, output_package_path))
gpkg = GraftMPackage.acquire(input_package_path)
if gpkg.version != 3:
raise Exception("Archiving GraftM packages only works with format 3 packages")
self._setup_output(output_package_path, force)
logging.debug("Compressing contents for archive")
with tarfile.open(output_package_path, 'w:gz') as tar:
for path in itertools.chain([gpkg.contents_file_path(),
gpkg.alignment_hmm_path(),
gpkg.reference_package_path(),
gpkg.unaligned_sequence_database_path()],
[hmm for hmm in gpkg.search_hmm_paths() if hmm != gpkg.alignment_hmm_path()]):
logging.debug("Compressing '%s'" % path)
# Put the gpkg folder itself in the archive so as not to tar bomb.
tar.add(path,
os.path.join(os.path.basename(os.path.abspath(gpkg._base_directory)),
os.path.basename(path)))
logging.info("Archive successfully created") | python | def create(self, input_package_path, output_package_path, **kwargs):
"""Create an archived GraftM package
Parameters
----------
input_package_path: str
path to gpkg to be archived
output_pacakge_path: str
output package path
kwargs:
force: bool
overwrite an existing directory
"""
force = kwargs.pop('force',ArchiveDefaultOptions.force)
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
logging.info("Archiving GraftM package '%s' as '%s'" % (input_package_path, output_package_path))
gpkg = GraftMPackage.acquire(input_package_path)
if gpkg.version != 3:
raise Exception("Archiving GraftM packages only works with format 3 packages")
self._setup_output(output_package_path, force)
logging.debug("Compressing contents for archive")
with tarfile.open(output_package_path, 'w:gz') as tar:
for path in itertools.chain([gpkg.contents_file_path(),
gpkg.alignment_hmm_path(),
gpkg.reference_package_path(),
gpkg.unaligned_sequence_database_path()],
[hmm for hmm in gpkg.search_hmm_paths() if hmm != gpkg.alignment_hmm_path()]):
logging.debug("Compressing '%s'" % path)
# Put the gpkg folder itself in the archive so as not to tar bomb.
tar.add(path,
os.path.join(os.path.basename(os.path.abspath(gpkg._base_directory)),
os.path.basename(path)))
logging.info("Archive successfully created") | [
"def",
"create",
"(",
"self",
",",
"input_package_path",
",",
"output_package_path",
",",
"*",
"*",
"kwargs",
")",
":",
"force",
"=",
"kwargs",
".",
"pop",
"(",
"'force'",
",",
"ArchiveDefaultOptions",
".",
"force",
")",
"if",
"len",
"(",
"kwargs",
")",
... | Create an archived GraftM package
Parameters
----------
input_package_path: str
path to gpkg to be archived
output_pacakge_path: str
output package path
kwargs:
force: bool
overwrite an existing directory | [
"Create",
"an",
"archived",
"GraftM",
"package"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/archive.py#L14-L51 | train | 31,846 |
geronimp/graftM | graftm/archive.py | Archive.extract | def extract(self, archive_path, output_package_path, **kwargs):
'''Extract an archived GraftM package.
Parameters
----------
archive_path: str
path to archive
output_package_path: str
path to where to put the extracted file
kwargs:
force: bool
overwrite an existing directory
'''
force = kwargs.pop('force', ArchiveDefaultOptions.force)
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
logging.info("Un-archiving GraftM package '%s' from '%s'" % (output_package_path, archive_path))
archive = os.path.abspath(archive_path)
output = os.path.abspath(output_package_path)
self._setup_output(output, force)
with tempdir.in_tempdir():
with tarfile.open(archive) as tar:
tar.extractall()
for tarinfo in tar:
folder = os.path.dirname(tarinfo.name)
break
# recreate diamond file
gpkg = GraftMPackage.acquire(folder)
if gpkg.version != 3:
raise Exception("Encountered an archived gpkg of unexpected version: %d" % gpkg.version)
if gpkg.diamond_database_path():
logging.debug("Creating diamond DB")
gpkg.create_diamond_db()
shutil.move(folder, output)
logging.info("Archive successfully extracted") | python | def extract(self, archive_path, output_package_path, **kwargs):
'''Extract an archived GraftM package.
Parameters
----------
archive_path: str
path to archive
output_package_path: str
path to where to put the extracted file
kwargs:
force: bool
overwrite an existing directory
'''
force = kwargs.pop('force', ArchiveDefaultOptions.force)
if len(kwargs) > 0:
raise Exception("Unexpected arguments detected: %s" % kwargs)
logging.info("Un-archiving GraftM package '%s' from '%s'" % (output_package_path, archive_path))
archive = os.path.abspath(archive_path)
output = os.path.abspath(output_package_path)
self._setup_output(output, force)
with tempdir.in_tempdir():
with tarfile.open(archive) as tar:
tar.extractall()
for tarinfo in tar:
folder = os.path.dirname(tarinfo.name)
break
# recreate diamond file
gpkg = GraftMPackage.acquire(folder)
if gpkg.version != 3:
raise Exception("Encountered an archived gpkg of unexpected version: %d" % gpkg.version)
if gpkg.diamond_database_path():
logging.debug("Creating diamond DB")
gpkg.create_diamond_db()
shutil.move(folder, output)
logging.info("Archive successfully extracted") | [
"def",
"extract",
"(",
"self",
",",
"archive_path",
",",
"output_package_path",
",",
"*",
"*",
"kwargs",
")",
":",
"force",
"=",
"kwargs",
".",
"pop",
"(",
"'force'",
",",
"ArchiveDefaultOptions",
".",
"force",
")",
"if",
"len",
"(",
"kwargs",
")",
">",
... | Extract an archived GraftM package.
Parameters
----------
archive_path: str
path to archive
output_package_path: str
path to where to put the extracted file
kwargs:
force: bool
overwrite an existing directory | [
"Extract",
"an",
"archived",
"GraftM",
"package",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/archive.py#L53-L93 | train | 31,847 |
geronimp/graftM | graftm/archive.py | Archive._setup_output | def _setup_output(self, path, force):
'''Clear the way for an output to be placed at path'''
# Allow for special case of output being a pipe
if os.path.isdir(path) or os.path.isfile(path):
if force:
logging.warn("Deleting previous file/directory '%s'" % path)
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
else:
raise Exception("Cowardly refusing to overwrite already existing path at %s" % path) | python | def _setup_output(self, path, force):
'''Clear the way for an output to be placed at path'''
# Allow for special case of output being a pipe
if os.path.isdir(path) or os.path.isfile(path):
if force:
logging.warn("Deleting previous file/directory '%s'" % path)
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
else:
raise Exception("Cowardly refusing to overwrite already existing path at %s" % path) | [
"def",
"_setup_output",
"(",
"self",
",",
"path",
",",
"force",
")",
":",
"# Allow for special case of output being a pipe",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"or",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"if",
"fo... | Clear the way for an output to be placed at path | [
"Clear",
"the",
"way",
"for",
"an",
"output",
"to",
"be",
"placed",
"at",
"path"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/archive.py#L95-L106 | train | 31,848 |
geronimp/graftM | graftm/sequence_search_results.py | HMMSearchResult.import_from_nhmmer_table | def import_from_nhmmer_table(hmmout_path):
'''Generate new results object from the output of nhmmer search'''
# nhmmer format is
# qseqid queryname hmmfrom hmmto alifrom alito envfrom envto sqlen strand evalue bitscore bias description
# 0 2 4 5 6 7 8 9 10 11 12 13 14 15
res=HMMSearchResult()
res.fields = [
SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.HMM_NAME_FIELD,
SequenceSearchResult.ALIGNMENT_LENGTH_FIELD,
SequenceSearchResult.QUERY_FROM_FIELD,
SequenceSearchResult.QUERY_TO_FIELD,
SequenceSearchResult.HIT_FROM_FIELD,
SequenceSearchResult.HIT_TO_FIELD,
SequenceSearchResult.ALIGNMENT_BIT_SCORE,
SequenceSearchResult.ALIGNMENT_DIRECTION,
]
for row in [x.rstrip().split() for x in open(hmmout_path) if not x.startswith('#')]:
alifrom = int(row[6])
alito = int(row[7])
aln_length = (alito-alifrom if alito-alifrom>0 else alifrom-alito)
res.results.append([row[0],
row[2],
aln_length,
int(row[4]),
int(row[5]),
alifrom,
alito,
row[13],
alito > alifrom
])
return res | python | def import_from_nhmmer_table(hmmout_path):
'''Generate new results object from the output of nhmmer search'''
# nhmmer format is
# qseqid queryname hmmfrom hmmto alifrom alito envfrom envto sqlen strand evalue bitscore bias description
# 0 2 4 5 6 7 8 9 10 11 12 13 14 15
res=HMMSearchResult()
res.fields = [
SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.HMM_NAME_FIELD,
SequenceSearchResult.ALIGNMENT_LENGTH_FIELD,
SequenceSearchResult.QUERY_FROM_FIELD,
SequenceSearchResult.QUERY_TO_FIELD,
SequenceSearchResult.HIT_FROM_FIELD,
SequenceSearchResult.HIT_TO_FIELD,
SequenceSearchResult.ALIGNMENT_BIT_SCORE,
SequenceSearchResult.ALIGNMENT_DIRECTION,
]
for row in [x.rstrip().split() for x in open(hmmout_path) if not x.startswith('#')]:
alifrom = int(row[6])
alito = int(row[7])
aln_length = (alito-alifrom if alito-alifrom>0 else alifrom-alito)
res.results.append([row[0],
row[2],
aln_length,
int(row[4]),
int(row[5]),
alifrom,
alito,
row[13],
alito > alifrom
])
return res | [
"def",
"import_from_nhmmer_table",
"(",
"hmmout_path",
")",
":",
"# nhmmer format is",
"# qseqid queryname hmmfrom hmmto alifrom alito envfrom envto sqlen strand evalue bitscore bias description",
"# 0 2 4 5 6 7 8 9 10 11 12 13 14 15 ... | Generate new results object from the output of nhmmer search | [
"Generate",
"new",
"results",
"object",
"from",
"the",
"output",
"of",
"nhmmer",
"search"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_search_results.py#L115-L147 | train | 31,849 |
geronimp/graftM | graftm/hmmsearcher.py | HmmSearcher.hmmsearch | def hmmsearch(self, input_pipe, hmms, output_files):
r"""Run HMMsearch with all the HMMs, generating output files
Parameters
----------
input_pipe: String
A string which is a partial command line. When this command is run
is outputs to STDOUT fasta formatted protein sequences, which
hmmsearch runs on.
hmms: list of paths
A list of (string) paths to HMM files which are used to search with.
output_files: list of paths
A list of (string) paths to output CSV files to be generated by the
HMM searching
Returns
-------
N/A
May raise an exception if hmmsearching went amiss"""
# Check input and output paths are the same length
if len(hmms) != len(output_files):
raise Exception("Programming error: number of supplied HMMs differs from the number of supplied output files")
# Create queue data structure
queue = []
for i, hmm in enumerate(hmms):
queue.append( [hmm, output_files[i]] )
# While there are more things left in the queue
while len(queue) > 0:
pairs_to_run = self._munch_off_batch(queue)
# Run hmmsearches with each of the pairs
cmd = self._hmm_command(input_pipe, pairs_to_run)
logging.debug("Running command: %s" % cmd)
try:
extern.run(cmd)
except extern.ExternCalledProcessError, e:
if e.stderr == '\nError: Sequence file - is empty or misformatted\n\n':
raise NoInputSequencesException(cmd)
else:
raise e | python | def hmmsearch(self, input_pipe, hmms, output_files):
r"""Run HMMsearch with all the HMMs, generating output files
Parameters
----------
input_pipe: String
A string which is a partial command line. When this command is run
is outputs to STDOUT fasta formatted protein sequences, which
hmmsearch runs on.
hmms: list of paths
A list of (string) paths to HMM files which are used to search with.
output_files: list of paths
A list of (string) paths to output CSV files to be generated by the
HMM searching
Returns
-------
N/A
May raise an exception if hmmsearching went amiss"""
# Check input and output paths are the same length
if len(hmms) != len(output_files):
raise Exception("Programming error: number of supplied HMMs differs from the number of supplied output files")
# Create queue data structure
queue = []
for i, hmm in enumerate(hmms):
queue.append( [hmm, output_files[i]] )
# While there are more things left in the queue
while len(queue) > 0:
pairs_to_run = self._munch_off_batch(queue)
# Run hmmsearches with each of the pairs
cmd = self._hmm_command(input_pipe, pairs_to_run)
logging.debug("Running command: %s" % cmd)
try:
extern.run(cmd)
except extern.ExternCalledProcessError, e:
if e.stderr == '\nError: Sequence file - is empty or misformatted\n\n':
raise NoInputSequencesException(cmd)
else:
raise e | [
"def",
"hmmsearch",
"(",
"self",
",",
"input_pipe",
",",
"hmms",
",",
"output_files",
")",
":",
"# Check input and output paths are the same length",
"if",
"len",
"(",
"hmms",
")",
"!=",
"len",
"(",
"output_files",
")",
":",
"raise",
"Exception",
"(",
"\"Program... | r"""Run HMMsearch with all the HMMs, generating output files
Parameters
----------
input_pipe: String
A string which is a partial command line. When this command is run
is outputs to STDOUT fasta formatted protein sequences, which
hmmsearch runs on.
hmms: list of paths
A list of (string) paths to HMM files which are used to search with.
output_files: list of paths
A list of (string) paths to output CSV files to be generated by the
HMM searching
Returns
-------
N/A
May raise an exception if hmmsearching went amiss | [
"r",
"Run",
"HMMsearch",
"with",
"all",
"the",
"HMMs",
"generating",
"output",
"files"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/hmmsearcher.py#L25-L69 | train | 31,850 |
geronimp/graftM | graftm/hmmsearcher.py | HmmSearcher._munch_off_batch | def _munch_off_batch(self, queue):
r"""Take a batch of sequences off the queue, and return pairs_to_run.
The queue given as a parameter is affected
"""
# if the number of CPUs used == 1, just pop one off (which == below)
# elif the the number of things in the queue is 1, use all the CPUs and just that hmm
if len(queue) == 1 or self._num_cpus == 1:
pairs_to_run = [[queue.pop(0), self._num_cpus]]
else:
# else share out CPUs among hmmers
pairs_to_run = []
while len(queue) > 0 and len(pairs_to_run) < self._num_cpus:
pairs_to_run.append([queue.pop(0), 1])
# Share out any remaining CPUs
num_cpus_left = self._num_cpus - len(pairs_to_run)
while num_cpus_left > 0:
for i, _ in enumerate(pairs_to_run):
pairs_to_run[i][1] += 1
num_cpus_left -= 1
if num_cpus_left == 0: break
return pairs_to_run | python | def _munch_off_batch(self, queue):
r"""Take a batch of sequences off the queue, and return pairs_to_run.
The queue given as a parameter is affected
"""
# if the number of CPUs used == 1, just pop one off (which == below)
# elif the the number of things in the queue is 1, use all the CPUs and just that hmm
if len(queue) == 1 or self._num_cpus == 1:
pairs_to_run = [[queue.pop(0), self._num_cpus]]
else:
# else share out CPUs among hmmers
pairs_to_run = []
while len(queue) > 0 and len(pairs_to_run) < self._num_cpus:
pairs_to_run.append([queue.pop(0), 1])
# Share out any remaining CPUs
num_cpus_left = self._num_cpus - len(pairs_to_run)
while num_cpus_left > 0:
for i, _ in enumerate(pairs_to_run):
pairs_to_run[i][1] += 1
num_cpus_left -= 1
if num_cpus_left == 0: break
return pairs_to_run | [
"def",
"_munch_off_batch",
"(",
"self",
",",
"queue",
")",
":",
"# if the number of CPUs used == 1, just pop one off (which == below)",
"# elif the the number of things in the queue is 1, use all the CPUs and just that hmm",
"if",
"len",
"(",
"queue",
")",
"==",
"1",
"or",
"self"... | r"""Take a batch of sequences off the queue, and return pairs_to_run.
The queue given as a parameter is affected | [
"r",
"Take",
"a",
"batch",
"of",
"sequences",
"off",
"the",
"queue",
"and",
"return",
"pairs_to_run",
".",
"The",
"queue",
"given",
"as",
"a",
"parameter",
"is",
"affected"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/hmmsearcher.py#L71-L93 | train | 31,851 |
geronimp/graftM | graftm/hmmsearcher.py | HmmSearcher._hmm_command | def _hmm_command(self, input_pipe, pairs_to_run):
r"""INTERNAL method for getting cmdline for running a batch of HMMs.
Parameters
----------
input_pipe: as hmmsearch
pairs_to_run: list
list with 2 members: (1) list of hmm and output file, (2) number of
CPUs to use when searching
Returns
-------
A string command to be run with bash
"""
element = pairs_to_run.pop()
hmmsearch_cmd = self._individual_hmm_command(element[0][0],
element[0][1],
element[1])
while len(pairs_to_run) > 0:
element = pairs_to_run.pop()
hmmsearch_cmd = "tee >(%s) | %s" % (self._individual_hmm_command(element[0][0],
element[0][1],
element[1]),
hmmsearch_cmd)
# Run the actual command
hmmsearch_cmd = "%s | %s" % (input_pipe, hmmsearch_cmd)
return hmmsearch_cmd | python | def _hmm_command(self, input_pipe, pairs_to_run):
r"""INTERNAL method for getting cmdline for running a batch of HMMs.
Parameters
----------
input_pipe: as hmmsearch
pairs_to_run: list
list with 2 members: (1) list of hmm and output file, (2) number of
CPUs to use when searching
Returns
-------
A string command to be run with bash
"""
element = pairs_to_run.pop()
hmmsearch_cmd = self._individual_hmm_command(element[0][0],
element[0][1],
element[1])
while len(pairs_to_run) > 0:
element = pairs_to_run.pop()
hmmsearch_cmd = "tee >(%s) | %s" % (self._individual_hmm_command(element[0][0],
element[0][1],
element[1]),
hmmsearch_cmd)
# Run the actual command
hmmsearch_cmd = "%s | %s" % (input_pipe, hmmsearch_cmd)
return hmmsearch_cmd | [
"def",
"_hmm_command",
"(",
"self",
",",
"input_pipe",
",",
"pairs_to_run",
")",
":",
"element",
"=",
"pairs_to_run",
".",
"pop",
"(",
")",
"hmmsearch_cmd",
"=",
"self",
".",
"_individual_hmm_command",
"(",
"element",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"... | r"""INTERNAL method for getting cmdline for running a batch of HMMs.
Parameters
----------
input_pipe: as hmmsearch
pairs_to_run: list
list with 2 members: (1) list of hmm and output file, (2) number of
CPUs to use when searching
Returns
-------
A string command to be run with bash | [
"r",
"INTERNAL",
"method",
"for",
"getting",
"cmdline",
"for",
"running",
"a",
"batch",
"of",
"HMMs",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/hmmsearcher.py#L97-L124 | train | 31,852 |
geronimp/graftM | graftm/create.py | Create._parse_contents | def _parse_contents(self, contents_file_path):
'''
Parse the contents .json file and return the dictionary
Parameters
----------
contents_file_path: str
Path to file containing .json file to parse.
Returns
-------
contents_dict: dict
parsed .json file
'''
logging.debug("Parsing %s" % (contents_file_path))
contents_dict = json.load(open(contents_file_path))
return contents_dict | python | def _parse_contents(self, contents_file_path):
'''
Parse the contents .json file and return the dictionary
Parameters
----------
contents_file_path: str
Path to file containing .json file to parse.
Returns
-------
contents_dict: dict
parsed .json file
'''
logging.debug("Parsing %s" % (contents_file_path))
contents_dict = json.load(open(contents_file_path))
return contents_dict | [
"def",
"_parse_contents",
"(",
"self",
",",
"contents_file_path",
")",
":",
"logging",
".",
"debug",
"(",
"\"Parsing %s\"",
"%",
"(",
"contents_file_path",
")",
")",
"contents_dict",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"contents_file_path",
")",
")",
... | Parse the contents .json file and return the dictionary
Parameters
----------
contents_file_path: str
Path to file containing .json file to parse.
Returns
-------
contents_dict: dict
parsed .json file | [
"Parse",
"the",
"contents",
".",
"json",
"file",
"and",
"return",
"the",
"dictionary"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L54-L70 | train | 31,853 |
geronimp/graftM | graftm/create.py | Create._check_reads_hit | def _check_reads_hit(self, alignment_io, min_aligned_fraction):
'''Given an alignment return a list of sequence names that are less
than the min_aligned_fraction'''
to_return = []
alignment_length = None
for s in SeqIO.parse(alignment_io, "fasta"):
if not alignment_length:
alignment_length = len(s.seq)
min_length = int(min_aligned_fraction * alignment_length)
logging.debug("Determined min number of aligned bases to be %s" % min_length)
elif len(s.seq) != alignment_length:
raise Exception("Alignment file appears to not be of uniform length")
num_unaligned = s.seq.count('-')
num_aligned = alignment_length-num_unaligned
logging.debug("Sequence %s has %d aligned positions" % (s.name, alignment_length-num_unaligned))
if num_aligned <= min_length:
to_return.append(s.name)
return to_return | python | def _check_reads_hit(self, alignment_io, min_aligned_fraction):
'''Given an alignment return a list of sequence names that are less
than the min_aligned_fraction'''
to_return = []
alignment_length = None
for s in SeqIO.parse(alignment_io, "fasta"):
if not alignment_length:
alignment_length = len(s.seq)
min_length = int(min_aligned_fraction * alignment_length)
logging.debug("Determined min number of aligned bases to be %s" % min_length)
elif len(s.seq) != alignment_length:
raise Exception("Alignment file appears to not be of uniform length")
num_unaligned = s.seq.count('-')
num_aligned = alignment_length-num_unaligned
logging.debug("Sequence %s has %d aligned positions" % (s.name, alignment_length-num_unaligned))
if num_aligned <= min_length:
to_return.append(s.name)
return to_return | [
"def",
"_check_reads_hit",
"(",
"self",
",",
"alignment_io",
",",
"min_aligned_fraction",
")",
":",
"to_return",
"=",
"[",
"]",
"alignment_length",
"=",
"None",
"for",
"s",
"in",
"SeqIO",
".",
"parse",
"(",
"alignment_io",
",",
"\"fasta\"",
")",
":",
"if",
... | Given an alignment return a list of sequence names that are less
than the min_aligned_fraction | [
"Given",
"an",
"alignment",
"return",
"a",
"list",
"of",
"sequence",
"names",
"that",
"are",
"less",
"than",
"the",
"min_aligned_fraction"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L72-L90 | train | 31,854 |
geronimp/graftM | graftm/create.py | Create._align_sequences | def _align_sequences(self, input_sequences_path, output_alignment_path,
threads):
'''Align sequences into alignment_file
Parameters
----------
input_sequences_path: str
path to input sequences in fasta format
output_alignment_path: str
path to output alignment path
threads: str
number of threads to use
Returns
-------
Nothing
'''
logging.debug("Aligning sequences using mafft")
cmd = "mafft --anysymbol --thread %s --auto /dev/stdin > %s" % (
threads,
output_alignment_path)
inputs = []
with open(input_sequences_path) as f:
for name,seq,_ in SequenceIO().each(f):
inputs.append('>%s' % name)
# Do not include * characters in the HMM, as this means tree
# insertion fails.
inputs.append(seq.replace('*',''))
extern.run(cmd, stdin="\n".join(inputs)) | python | def _align_sequences(self, input_sequences_path, output_alignment_path,
threads):
'''Align sequences into alignment_file
Parameters
----------
input_sequences_path: str
path to input sequences in fasta format
output_alignment_path: str
path to output alignment path
threads: str
number of threads to use
Returns
-------
Nothing
'''
logging.debug("Aligning sequences using mafft")
cmd = "mafft --anysymbol --thread %s --auto /dev/stdin > %s" % (
threads,
output_alignment_path)
inputs = []
with open(input_sequences_path) as f:
for name,seq,_ in SequenceIO().each(f):
inputs.append('>%s' % name)
# Do not include * characters in the HMM, as this means tree
# insertion fails.
inputs.append(seq.replace('*',''))
extern.run(cmd, stdin="\n".join(inputs)) | [
"def",
"_align_sequences",
"(",
"self",
",",
"input_sequences_path",
",",
"output_alignment_path",
",",
"threads",
")",
":",
"logging",
".",
"debug",
"(",
"\"Aligning sequences using mafft\"",
")",
"cmd",
"=",
"\"mafft --anysymbol --thread %s --auto /dev/stdin > %s\"",
"%",... | Align sequences into alignment_file
Parameters
----------
input_sequences_path: str
path to input sequences in fasta format
output_alignment_path: str
path to output alignment path
threads: str
number of threads to use
Returns
-------
Nothing | [
"Align",
"sequences",
"into",
"alignment_file"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L92-L119 | train | 31,855 |
geronimp/graftM | graftm/create.py | Create._get_hmm_from_alignment | def _get_hmm_from_alignment(self, alignment, hmm_filename, output_alignment_filename):
'''Return a HMM file and alignment of sequences to that HMM
Parameters
----------
alignment: str
path to aligned proteins
hmm_filename: str
write the hmm to this file path
output_alignment_filename: str
write the output alignment to this file path
Returns
-------
Return the pipeline type of the HMM.
'''
logging.info("Building HMM from alignment")
with tempfile.NamedTemporaryFile(suffix='.fasta',prefix='graftm') as tempaln:
cmd = "hmmbuild -O /dev/stdout -o /dev/stderr '%s' '%s'" % (
hmm_filename, alignment)
process = subprocess.Popen(["bash", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
logging.debug("Got STDERR from hmmbuild: %s" % error)
if process.returncode != 0:
logging.error(
"hmmbuild exitstatus was non-zero, likely indicating an error of "
"some description")
logging.error("Got STDERR from hmmbuild: %s" % error)
SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), tempaln, 'fasta')
tempaln.flush()
ptype, _ = self._pipe_type(hmm_filename)
SequenceSearcher(hmm_filename).alignment_correcter([tempaln.name],
output_alignment_filename)
return ptype | python | def _get_hmm_from_alignment(self, alignment, hmm_filename, output_alignment_filename):
'''Return a HMM file and alignment of sequences to that HMM
Parameters
----------
alignment: str
path to aligned proteins
hmm_filename: str
write the hmm to this file path
output_alignment_filename: str
write the output alignment to this file path
Returns
-------
Return the pipeline type of the HMM.
'''
logging.info("Building HMM from alignment")
with tempfile.NamedTemporaryFile(suffix='.fasta',prefix='graftm') as tempaln:
cmd = "hmmbuild -O /dev/stdout -o /dev/stderr '%s' '%s'" % (
hmm_filename, alignment)
process = subprocess.Popen(["bash", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
logging.debug("Got STDERR from hmmbuild: %s" % error)
if process.returncode != 0:
logging.error(
"hmmbuild exitstatus was non-zero, likely indicating an error of "
"some description")
logging.error("Got STDERR from hmmbuild: %s" % error)
SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), tempaln, 'fasta')
tempaln.flush()
ptype, _ = self._pipe_type(hmm_filename)
SequenceSearcher(hmm_filename).alignment_correcter([tempaln.name],
output_alignment_filename)
return ptype | [
"def",
"_get_hmm_from_alignment",
"(",
"self",
",",
"alignment",
",",
"hmm_filename",
",",
"output_alignment_filename",
")",
":",
"logging",
".",
"info",
"(",
"\"Building HMM from alignment\"",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
... | Return a HMM file and alignment of sequences to that HMM
Parameters
----------
alignment: str
path to aligned proteins
hmm_filename: str
write the hmm to this file path
output_alignment_filename: str
write the output alignment to this file path
Returns
-------
Return the pipeline type of the HMM. | [
"Return",
"a",
"HMM",
"file",
"and",
"alignment",
"of",
"sequences",
"to",
"that",
"HMM"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L121-L159 | train | 31,856 |
geronimp/graftM | graftm/create.py | Create._align_sequences_to_hmm | def _align_sequences_to_hmm(self, hmm_file, sequences_file, output_alignment_file):
'''Align sequences to an HMM, and write an alignment of
these proteins after cleanup so that they can be used for tree-making
Parameters
----------
sequences_file: str
path to file of unaligned protein sequences
hmm_file: str
path to hmm file
output_alignment_file: str
write alignment to this file
Returns
-------
nothing
'''
ss = SequenceSearcher(hmm_file)
with tempfile.NamedTemporaryFile(prefix='graftm', suffix='.aln.fasta') as tempalign:
ss.hmmalign_sequences(hmm_file, sequences_file, tempalign.name)
ss.alignment_correcter([tempalign.name], output_alignment_file) | python | def _align_sequences_to_hmm(self, hmm_file, sequences_file, output_alignment_file):
'''Align sequences to an HMM, and write an alignment of
these proteins after cleanup so that they can be used for tree-making
Parameters
----------
sequences_file: str
path to file of unaligned protein sequences
hmm_file: str
path to hmm file
output_alignment_file: str
write alignment to this file
Returns
-------
nothing
'''
ss = SequenceSearcher(hmm_file)
with tempfile.NamedTemporaryFile(prefix='graftm', suffix='.aln.fasta') as tempalign:
ss.hmmalign_sequences(hmm_file, sequences_file, tempalign.name)
ss.alignment_correcter([tempalign.name], output_alignment_file) | [
"def",
"_align_sequences_to_hmm",
"(",
"self",
",",
"hmm_file",
",",
"sequences_file",
",",
"output_alignment_file",
")",
":",
"ss",
"=",
"SequenceSearcher",
"(",
"hmm_file",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"'graftm'",
",",
... | Align sequences to an HMM, and write an alignment of
these proteins after cleanup so that they can be used for tree-making
Parameters
----------
sequences_file: str
path to file of unaligned protein sequences
hmm_file: str
path to hmm file
output_alignment_file: str
write alignment to this file
Returns
-------
nothing | [
"Align",
"sequences",
"to",
"an",
"HMM",
"and",
"write",
"an",
"alignment",
"of",
"these",
"proteins",
"after",
"cleanup",
"so",
"that",
"they",
"can",
"be",
"used",
"for",
"tree",
"-",
"making"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L161-L182 | train | 31,857 |
geronimp/graftM | graftm/create.py | Create._define_range | def _define_range(self, sequences):
'''
define_range - define the maximum range within which two hits in a db
search can be linked. This is defined as 1.5X the average length of all
reads in the database.
Parameters
----------
sequences : str
A path to the sequences in FASTA format. This fasta file is assumed
to be in the correct format. i.e. headers start with '>'
Returns
-------
max_range : int
As described above, 1.5X the size of the average length of genes
within the database
'''
sequence_count = 0
total_sequence = 0
for record in SeqIO.parse(open(sequences), 'fasta'): # For every sequence in the output
total_sequence+=1 # increment the total sequence count
sequence_count+=len(record.seq) # add the length of that sequence to the total string count
# Get the average, multiply by 1.5, and return
max_range = (sequence_count/total_sequence)*1.5
return max_range | python | def _define_range(self, sequences):
'''
define_range - define the maximum range within which two hits in a db
search can be linked. This is defined as 1.5X the average length of all
reads in the database.
Parameters
----------
sequences : str
A path to the sequences in FASTA format. This fasta file is assumed
to be in the correct format. i.e. headers start with '>'
Returns
-------
max_range : int
As described above, 1.5X the size of the average length of genes
within the database
'''
sequence_count = 0
total_sequence = 0
for record in SeqIO.parse(open(sequences), 'fasta'): # For every sequence in the output
total_sequence+=1 # increment the total sequence count
sequence_count+=len(record.seq) # add the length of that sequence to the total string count
# Get the average, multiply by 1.5, and return
max_range = (sequence_count/total_sequence)*1.5
return max_range | [
"def",
"_define_range",
"(",
"self",
",",
"sequences",
")",
":",
"sequence_count",
"=",
"0",
"total_sequence",
"=",
"0",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"open",
"(",
"sequences",
")",
",",
"'fasta'",
")",
":",
"# For every sequence in the ... | define_range - define the maximum range within which two hits in a db
search can be linked. This is defined as 1.5X the average length of all
reads in the database.
Parameters
----------
sequences : str
A path to the sequences in FASTA format. This fasta file is assumed
to be in the correct format. i.e. headers start with '>'
Returns
-------
max_range : int
As described above, 1.5X the size of the average length of genes
within the database | [
"define_range",
"-",
"define",
"the",
"maximum",
"range",
"within",
"which",
"two",
"hits",
"in",
"a",
"db",
"search",
"can",
"be",
"linked",
".",
"This",
"is",
"defined",
"as",
"1",
".",
"5X",
"the",
"average",
"length",
"of",
"all",
"reads",
"in",
"t... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L306-L333 | train | 31,858 |
geronimp/graftM | graftm/create.py | Create._generate_tree_log_file | def _generate_tree_log_file(self, tree, alignment, output_tree_file_path,
output_log_file_path, residue_type, fasttree):
'''Generate the FastTree log file given a tree and the alignment that
made that tree
Returns
-------
Nothing. The log file as parameter is written as the log file.
'''
if residue_type==Create._NUCLEOTIDE_PACKAGE_TYPE:
cmd = "%s -quiet -gtr -nt -nome -mllen -intree '%s' -log %s -out %s %s" %\
(fasttree, tree, output_log_file_path,
output_tree_file_path, alignment)
elif residue_type==Create._PROTEIN_PACKAGE_TYPE:
cmd = "%s -quiet -nome -mllen -intree '%s' -log %s -out %s %s" %\
(fasttree, tree, output_log_file_path,
output_tree_file_path, alignment)
extern.run(cmd) | python | def _generate_tree_log_file(self, tree, alignment, output_tree_file_path,
output_log_file_path, residue_type, fasttree):
'''Generate the FastTree log file given a tree and the alignment that
made that tree
Returns
-------
Nothing. The log file as parameter is written as the log file.
'''
if residue_type==Create._NUCLEOTIDE_PACKAGE_TYPE:
cmd = "%s -quiet -gtr -nt -nome -mllen -intree '%s' -log %s -out %s %s" %\
(fasttree, tree, output_log_file_path,
output_tree_file_path, alignment)
elif residue_type==Create._PROTEIN_PACKAGE_TYPE:
cmd = "%s -quiet -nome -mllen -intree '%s' -log %s -out %s %s" %\
(fasttree, tree, output_log_file_path,
output_tree_file_path, alignment)
extern.run(cmd) | [
"def",
"_generate_tree_log_file",
"(",
"self",
",",
"tree",
",",
"alignment",
",",
"output_tree_file_path",
",",
"output_log_file_path",
",",
"residue_type",
",",
"fasttree",
")",
":",
"if",
"residue_type",
"==",
"Create",
".",
"_NUCLEOTIDE_PACKAGE_TYPE",
":",
"cmd"... | Generate the FastTree log file given a tree and the alignment that
made that tree
Returns
-------
Nothing. The log file as parameter is written as the log file. | [
"Generate",
"the",
"FastTree",
"log",
"file",
"given",
"a",
"tree",
"and",
"the",
"alignment",
"that",
"made",
"that",
"tree"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L335-L352 | train | 31,859 |
geronimp/graftM | graftm/create.py | Create._remove_sequences_from_alignment | def _remove_sequences_from_alignment(self, sequence_names, input_alignment_file, output_alignment_file):
'''Remove sequences from the alignment file that have names in
sequence_names
Parameters
----------
sequence_names: list of str
names of sequences to remove
input_alignment_file: str
path to alignment file to remove from
output_alignment_file: str
path to alignment file to write to
Returns
-------
int: number of sequences written to file'''
nameset = set(sequence_names)
num_written = 0
with open(output_alignment_file, 'w') as f:
for s in SeqIO.parse(open(input_alignment_file), "fasta"):
if s.name not in nameset:
SeqIO.write(s, f, "fasta")
num_written += 1
logging.debug("After removing sequences from alignment, %i remain" % num_written)
return num_written | python | def _remove_sequences_from_alignment(self, sequence_names, input_alignment_file, output_alignment_file):
'''Remove sequences from the alignment file that have names in
sequence_names
Parameters
----------
sequence_names: list of str
names of sequences to remove
input_alignment_file: str
path to alignment file to remove from
output_alignment_file: str
path to alignment file to write to
Returns
-------
int: number of sequences written to file'''
nameset = set(sequence_names)
num_written = 0
with open(output_alignment_file, 'w') as f:
for s in SeqIO.parse(open(input_alignment_file), "fasta"):
if s.name not in nameset:
SeqIO.write(s, f, "fasta")
num_written += 1
logging.debug("After removing sequences from alignment, %i remain" % num_written)
return num_written | [
"def",
"_remove_sequences_from_alignment",
"(",
"self",
",",
"sequence_names",
",",
"input_alignment_file",
",",
"output_alignment_file",
")",
":",
"nameset",
"=",
"set",
"(",
"sequence_names",
")",
"num_written",
"=",
"0",
"with",
"open",
"(",
"output_alignment_file"... | Remove sequences from the alignment file that have names in
sequence_names
Parameters
----------
sequence_names: list of str
names of sequences to remove
input_alignment_file: str
path to alignment file to remove from
output_alignment_file: str
path to alignment file to write to
Returns
-------
int: number of sequences written to file | [
"Remove",
"sequences",
"from",
"the",
"alignment",
"file",
"that",
"have",
"names",
"in",
"sequence_names"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L354-L378 | train | 31,860 |
geronimp/graftM | graftm/create.py | Create._create_dmnd_database | def _create_dmnd_database(self, unaligned_sequences_path, daa_output):
'''
Build a diamond database using diamond makedb
Parameters
----------
unaligned_sequences_path: str
path to a FASTA file containing unaligned sequences
daa_output: str
Name of output database.
'''
logging.debug("Building diamond database")
cmd = "diamond makedb --in '%s' -d '%s'" % (unaligned_sequences_path, daa_output)
extern.run(cmd) | python | def _create_dmnd_database(self, unaligned_sequences_path, daa_output):
'''
Build a diamond database using diamond makedb
Parameters
----------
unaligned_sequences_path: str
path to a FASTA file containing unaligned sequences
daa_output: str
Name of output database.
'''
logging.debug("Building diamond database")
cmd = "diamond makedb --in '%s' -d '%s'" % (unaligned_sequences_path, daa_output)
extern.run(cmd) | [
"def",
"_create_dmnd_database",
"(",
"self",
",",
"unaligned_sequences_path",
",",
"daa_output",
")",
":",
"logging",
".",
"debug",
"(",
"\"Building diamond database\"",
")",
"cmd",
"=",
"\"diamond makedb --in '%s' -d '%s'\"",
"%",
"(",
"unaligned_sequences_path",
",",
... | Build a diamond database using diamond makedb
Parameters
----------
unaligned_sequences_path: str
path to a FASTA file containing unaligned sequences
daa_output: str
Name of output database. | [
"Build",
"a",
"diamond",
"database",
"using",
"diamond",
"makedb"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L425-L439 | train | 31,861 |
geronimp/graftM | graftm/create.py | Create._check_for_duplicate_sequence_names | def _check_for_duplicate_sequence_names(self, fasta_file_path):
"""Test if the given fasta file contains sequences with duplicate
sequence names.
Parameters
----------
fasta_file_path: string
path to file that is to be checked
Returns
-------
The name of the first duplicate sequence found, else False.
"""
found_sequence_names = set()
for record in SeqIO.parse(fasta_file_path, 'fasta'):
name = record.name
if name in found_sequence_names:
return name
found_sequence_names.add(name)
return False | python | def _check_for_duplicate_sequence_names(self, fasta_file_path):
"""Test if the given fasta file contains sequences with duplicate
sequence names.
Parameters
----------
fasta_file_path: string
path to file that is to be checked
Returns
-------
The name of the first duplicate sequence found, else False.
"""
found_sequence_names = set()
for record in SeqIO.parse(fasta_file_path, 'fasta'):
name = record.name
if name in found_sequence_names:
return name
found_sequence_names.add(name)
return False | [
"def",
"_check_for_duplicate_sequence_names",
"(",
"self",
",",
"fasta_file_path",
")",
":",
"found_sequence_names",
"=",
"set",
"(",
")",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"fasta_file_path",
",",
"'fasta'",
")",
":",
"name",
"=",
"record",
".... | Test if the given fasta file contains sequences with duplicate
sequence names.
Parameters
----------
fasta_file_path: string
path to file that is to be checked
Returns
-------
The name of the first duplicate sequence found, else False. | [
"Test",
"if",
"the",
"given",
"fasta",
"file",
"contains",
"sequences",
"with",
"duplicate",
"sequence",
"names",
"."
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/create.py#L495-L515 | train | 31,862 |
geronimp/graftM | graftm/getaxnseq.py | Getaxnseq.read_taxtastic_taxonomy_and_seqinfo | def read_taxtastic_taxonomy_and_seqinfo(self, taxonomy_io, seqinfo_io):
'''Read the taxonomy and seqinfo files into a dictionary of
sequence_name => taxonomy, where the taxonomy is an array of lineages
given to that sequence.
Possibly this method is unable to handle the full definition of these
files? It doesn't return what each of the ranks are, for starters.
Doesn't deal with duplicate taxon names either.
'''
# read in taxonomy file
lineages = [] #array of hashes where each taxon points to its parent taxon's name
taxon_to_lineage_index = {}
expected_number_of_fields = None
for line in taxonomy_io:
splits = line.strip().split(',')
if expected_number_of_fields is None:
expected_number_of_fields = len(splits)
lineages = [{}]* (expected_number_of_fields-4)
continue #this is the header line
elif len(splits) != expected_number_of_fields:
raise Exception("Encountered error parsing taxonomy file, expected %i fields but found %i on line: %s" %
(expected_number_of_fields, len(splits), line))
# e.g. 'tax_id,parent_id,rank,tax_name,root,kingdom,phylum,class,order,family,genus,species
tax_id = splits[0]
parent_id = splits[1]
try:
lineage_index = splits.index('')-5
except ValueError:
lineage_index = len(splits)-5
taxon_to_lineage_index[tax_id] = lineage_index
lineages[lineage_index][tax_id] = parent_id
taxonomy_dictionary = {}
for i, line in enumerate(seqinfo_io):
if i==0: continue #skip header line
splits = line.strip().split(',')
if len(splits) != 2:
raise Exception("Bad formatting of seqinfo file on this line: %s" % line)
seq_name = splits[0]
taxon = splits[1]
lineage_index = taxon_to_lineage_index[taxon]
if lineage_index==0:
# Don't include Root in the taxonomy
taxonomy_dictionary[seq_name] = []
else:
full_taxonomy_rev = []
while lineage_index > 0:
full_taxonomy_rev.append(taxon)
taxon = lineages[lineage_index][taxon]
lineage_index = lineage_index-1
taxonomy_dictionary[seq_name] = list(reversed(full_taxonomy_rev))
return taxonomy_dictionary | python | def read_taxtastic_taxonomy_and_seqinfo(self, taxonomy_io, seqinfo_io):
'''Read the taxonomy and seqinfo files into a dictionary of
sequence_name => taxonomy, where the taxonomy is an array of lineages
given to that sequence.
Possibly this method is unable to handle the full definition of these
files? It doesn't return what each of the ranks are, for starters.
Doesn't deal with duplicate taxon names either.
'''
# read in taxonomy file
lineages = [] #array of hashes where each taxon points to its parent taxon's name
taxon_to_lineage_index = {}
expected_number_of_fields = None
for line in taxonomy_io:
splits = line.strip().split(',')
if expected_number_of_fields is None:
expected_number_of_fields = len(splits)
lineages = [{}]* (expected_number_of_fields-4)
continue #this is the header line
elif len(splits) != expected_number_of_fields:
raise Exception("Encountered error parsing taxonomy file, expected %i fields but found %i on line: %s" %
(expected_number_of_fields, len(splits), line))
# e.g. 'tax_id,parent_id,rank,tax_name,root,kingdom,phylum,class,order,family,genus,species
tax_id = splits[0]
parent_id = splits[1]
try:
lineage_index = splits.index('')-5
except ValueError:
lineage_index = len(splits)-5
taxon_to_lineage_index[tax_id] = lineage_index
lineages[lineage_index][tax_id] = parent_id
taxonomy_dictionary = {}
for i, line in enumerate(seqinfo_io):
if i==0: continue #skip header line
splits = line.strip().split(',')
if len(splits) != 2:
raise Exception("Bad formatting of seqinfo file on this line: %s" % line)
seq_name = splits[0]
taxon = splits[1]
lineage_index = taxon_to_lineage_index[taxon]
if lineage_index==0:
# Don't include Root in the taxonomy
taxonomy_dictionary[seq_name] = []
else:
full_taxonomy_rev = []
while lineage_index > 0:
full_taxonomy_rev.append(taxon)
taxon = lineages[lineage_index][taxon]
lineage_index = lineage_index-1
taxonomy_dictionary[seq_name] = list(reversed(full_taxonomy_rev))
return taxonomy_dictionary | [
"def",
"read_taxtastic_taxonomy_and_seqinfo",
"(",
"self",
",",
"taxonomy_io",
",",
"seqinfo_io",
")",
":",
"# read in taxonomy file",
"lineages",
"=",
"[",
"]",
"#array of hashes where each taxon points to its parent taxon's name",
"taxon_to_lineage_index",
"=",
"{",
"}",
"e... | Read the taxonomy and seqinfo files into a dictionary of
sequence_name => taxonomy, where the taxonomy is an array of lineages
given to that sequence.
Possibly this method is unable to handle the full definition of these
files? It doesn't return what each of the ranks are, for starters.
Doesn't deal with duplicate taxon names either. | [
"Read",
"the",
"taxonomy",
"and",
"seqinfo",
"files",
"into",
"a",
"dictionary",
"of",
"sequence_name",
"=",
">",
"taxonomy",
"where",
"the",
"taxonomy",
"is",
"an",
"array",
"of",
"lineages",
"given",
"to",
"that",
"sequence",
".",
"Possibly",
"this",
"meth... | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/getaxnseq.py#L48-L104 | train | 31,863 |
geronimp/graftM | graftm/sequence_io.py | SequenceIO.each_sequence | def each_sequence(self, fp):
'''Like each except iterate over Sequence objects'''
for name, seq, _ in self.each(fp):
yield Sequence(name, seq) | python | def each_sequence(self, fp):
'''Like each except iterate over Sequence objects'''
for name, seq, _ in self.each(fp):
yield Sequence(name, seq) | [
"def",
"each_sequence",
"(",
"self",
",",
"fp",
")",
":",
"for",
"name",
",",
"seq",
",",
"_",
"in",
"self",
".",
"each",
"(",
"fp",
")",
":",
"yield",
"Sequence",
"(",
"name",
",",
"seq",
")"
] | Like each except iterate over Sequence objects | [
"Like",
"each",
"except",
"iterate",
"over",
"Sequence",
"objects"
] | c82576517290167f605fd0bc4facd009cee29f48 | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_io.py#L42-L45 | train | 31,864 |
bitly/asyncmongo | asyncmongo/helpers.py | _fields_list_to_dict | def _fields_list_to_dict(fields):
"""Takes a list of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
for key in fields:
assert isinstance(key, (str,unicode))
return dict([[key, 1] for key in fields]) | python | def _fields_list_to_dict(fields):
"""Takes a list of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
for key in fields:
assert isinstance(key, (str,unicode))
return dict([[key, 1] for key in fields]) | [
"def",
"_fields_list_to_dict",
"(",
"fields",
")",
":",
"for",
"key",
"in",
"fields",
":",
"assert",
"isinstance",
"(",
"key",
",",
"(",
"str",
",",
"unicode",
")",
")",
"return",
"dict",
"(",
"[",
"[",
"key",
",",
"1",
"]",
"for",
"key",
"in",
"fi... | Takes a list of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1} | [
"Takes",
"a",
"list",
"of",
"field",
"names",
"and",
"returns",
"a",
"matching",
"dictionary",
"."
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/helpers.py#L54-L65 | train | 31,865 |
bitly/asyncmongo | asyncmongo/connection.py | Connection._socket_connect | def _socket_connect(self):
"""create a socket, connect, register a stream with the async backend"""
self.usage_count = 0
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect((self._host, self._port))
self.__stream = self.__backend.register_stream(s, **self.__kwargs)
self.__stream.set_close_callback(self._socket_close)
self.__alive = True
except socket.error, error:
raise InterfaceError(error) | python | def _socket_connect(self):
"""create a socket, connect, register a stream with the async backend"""
self.usage_count = 0
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect((self._host, self._port))
self.__stream = self.__backend.register_stream(s, **self.__kwargs)
self.__stream.set_close_callback(self._socket_close)
self.__alive = True
except socket.error, error:
raise InterfaceError(error) | [
"def",
"_socket_connect",
"(",
"self",
")",
":",
"self",
".",
"usage_count",
"=",
"0",
"try",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
",",
"0",
")",
"s",
".",
"connect",
"(",
"(",
"s... | create a socket, connect, register a stream with the async backend | [
"create",
"a",
"socket",
"connect",
"register",
"a",
"stream",
"with",
"the",
"async",
"backend"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/connection.py#L111-L121 | train | 31,866 |
bitly/asyncmongo | asyncmongo/connection.py | Connection._socket_close | def _socket_close(self):
"""cleanup after the socket is closed by the other end"""
callback = self.__callback
self.__callback = None
try:
if callback:
callback(None, InterfaceError('connection closed'))
finally:
# Flush the job queue, don't call the callbacks associated with the remaining jobs
# since they have already been called as error callback on connection closing
self.__job_queue = []
self.__alive = False
self.__pool.cache(self) | python | def _socket_close(self):
"""cleanup after the socket is closed by the other end"""
callback = self.__callback
self.__callback = None
try:
if callback:
callback(None, InterfaceError('connection closed'))
finally:
# Flush the job queue, don't call the callbacks associated with the remaining jobs
# since they have already been called as error callback on connection closing
self.__job_queue = []
self.__alive = False
self.__pool.cache(self) | [
"def",
"_socket_close",
"(",
"self",
")",
":",
"callback",
"=",
"self",
".",
"__callback",
"self",
".",
"__callback",
"=",
"None",
"try",
":",
"if",
"callback",
":",
"callback",
"(",
"None",
",",
"InterfaceError",
"(",
"'connection closed'",
")",
")",
"fin... | cleanup after the socket is closed by the other end | [
"cleanup",
"after",
"the",
"socket",
"is",
"closed",
"by",
"the",
"other",
"end"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/connection.py#L123-L135 | train | 31,867 |
bitly/asyncmongo | asyncmongo/connection.py | Connection._close | def _close(self):
"""close the socket and cleanup"""
callback = self.__callback
self.__callback = None
try:
if callback:
callback(None, InterfaceError('connection closed'))
finally:
# Flush the job queue, don't call the callbacks associated with the remaining jobs
# since they have already been called as error callback on connection closing
self.__job_queue = []
self.__alive = False
self.__stream.close() | python | def _close(self):
"""close the socket and cleanup"""
callback = self.__callback
self.__callback = None
try:
if callback:
callback(None, InterfaceError('connection closed'))
finally:
# Flush the job queue, don't call the callbacks associated with the remaining jobs
# since they have already been called as error callback on connection closing
self.__job_queue = []
self.__alive = False
self.__stream.close() | [
"def",
"_close",
"(",
"self",
")",
":",
"callback",
"=",
"self",
".",
"__callback",
"self",
".",
"__callback",
"=",
"None",
"try",
":",
"if",
"callback",
":",
"callback",
"(",
"None",
",",
"InterfaceError",
"(",
"'connection closed'",
")",
")",
"finally",
... | close the socket and cleanup | [
"close",
"the",
"socket",
"and",
"cleanup"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/connection.py#L137-L149 | train | 31,868 |
bitly/asyncmongo | asyncmongo/connection.py | Connection.send_message | def send_message(self, message, callback):
""" send a message over the wire; callback=None indicates a safe=False call where we write and forget about it"""
if self.__callback is not None:
raise ProgrammingError('connection already in use')
if callback:
err_callback = functools.partial(callback, None)
else:
err_callback = None
# Go and update err_callback for async jobs in queue if any
for job in self.__job_queue:
# this is a dirty hack and I hate it, but there is no way of setting the correct
# err_callback during the connection time
if isinstance(job, asyncjobs.AsyncJob):
job.update_err_callback(err_callback)
if not self.__alive:
if self.__autoreconnect:
self.__connect(err_callback)
else:
raise InterfaceError('connection invalid. autoreconnect=False')
# Put the current message on the bottom of the queue
self._put_job(asyncjobs.AsyncMessage(self, message, callback), 0)
self._next_job() | python | def send_message(self, message, callback):
""" send a message over the wire; callback=None indicates a safe=False call where we write and forget about it"""
if self.__callback is not None:
raise ProgrammingError('connection already in use')
if callback:
err_callback = functools.partial(callback, None)
else:
err_callback = None
# Go and update err_callback for async jobs in queue if any
for job in self.__job_queue:
# this is a dirty hack and I hate it, but there is no way of setting the correct
# err_callback during the connection time
if isinstance(job, asyncjobs.AsyncJob):
job.update_err_callback(err_callback)
if not self.__alive:
if self.__autoreconnect:
self.__connect(err_callback)
else:
raise InterfaceError('connection invalid. autoreconnect=False')
# Put the current message on the bottom of the queue
self._put_job(asyncjobs.AsyncMessage(self, message, callback), 0)
self._next_job() | [
"def",
"send_message",
"(",
"self",
",",
"message",
",",
"callback",
")",
":",
"if",
"self",
".",
"__callback",
"is",
"not",
"None",
":",
"raise",
"ProgrammingError",
"(",
"'connection already in use'",
")",
"if",
"callback",
":",
"err_callback",
"=",
"functoo... | send a message over the wire; callback=None indicates a safe=False call where we write and forget about it | [
"send",
"a",
"message",
"over",
"the",
"wire",
";",
"callback",
"=",
"None",
"indicates",
"a",
"safe",
"=",
"False",
"call",
"where",
"we",
"write",
"and",
"forget",
"about",
"it"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/connection.py#L158-L184 | train | 31,869 |
bitly/asyncmongo | asyncmongo/connection.py | Connection._next_job | def _next_job(self):
"""execute the next job from the top of the queue"""
if self.__job_queue:
# Produce message from the top of the queue
job = self.__job_queue.pop()
# logging.debug("queue = %s, popped %r", self.__job_queue, job)
job.process() | python | def _next_job(self):
"""execute the next job from the top of the queue"""
if self.__job_queue:
# Produce message from the top of the queue
job = self.__job_queue.pop()
# logging.debug("queue = %s, popped %r", self.__job_queue, job)
job.process() | [
"def",
"_next_job",
"(",
"self",
")",
":",
"if",
"self",
".",
"__job_queue",
":",
"# Produce message from the top of the queue",
"job",
"=",
"self",
".",
"__job_queue",
".",
"pop",
"(",
")",
"# logging.debug(\"queue = %s, popped %r\", self.__job_queue, job)",
"job",
"."... | execute the next job from the top of the queue | [
"execute",
"the",
"next",
"job",
"from",
"the",
"top",
"of",
"the",
"queue"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/connection.py#L191-L197 | train | 31,870 |
bitly/asyncmongo | asyncmongo/pool.py | ConnectionPools.get_connection_pool | def get_connection_pool(self, pool_id, *args, **kwargs):
"""get a connection pool, transparently creating it if it doesn't already exist
:Parameters:
- `pool_id`: unique id for a connection pool
"""
assert isinstance(pool_id, (str, unicode))
if not hasattr(self, '_pools'):
self._pools = {}
if pool_id not in self._pools:
self._pools[pool_id] = ConnectionPool(*args, **kwargs)
# logging.debug("%s: _connections = %d", pool_id, self._pools[pool_id]._connections)
return self._pools[pool_id] | python | def get_connection_pool(self, pool_id, *args, **kwargs):
"""get a connection pool, transparently creating it if it doesn't already exist
:Parameters:
- `pool_id`: unique id for a connection pool
"""
assert isinstance(pool_id, (str, unicode))
if not hasattr(self, '_pools'):
self._pools = {}
if pool_id not in self._pools:
self._pools[pool_id] = ConnectionPool(*args, **kwargs)
# logging.debug("%s: _connections = %d", pool_id, self._pools[pool_id]._connections)
return self._pools[pool_id] | [
"def",
"get_connection_pool",
"(",
"self",
",",
"pool_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"pool_id",
",",
"(",
"str",
",",
"unicode",
")",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_pools'",
"... | get a connection pool, transparently creating it if it doesn't already exist
:Parameters:
- `pool_id`: unique id for a connection pool | [
"get",
"a",
"connection",
"pool",
"transparently",
"creating",
"it",
"if",
"it",
"doesn",
"t",
"already",
"exist"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/pool.py#L26-L38 | train | 31,871 |
bitly/asyncmongo | asyncmongo/pool.py | ConnectionPools.close_idle_connections | def close_idle_connections(self, pool_id=None):
"""close idle connections to mongo"""
if not hasattr(self, '_pools'):
return
if pool_id:
if pool_id not in self._pools:
raise ProgrammingError("pool %r does not exist" % pool_id)
else:
pool = self._pools[pool_id]
pool.close()
else:
for pool_id, pool in self._pools.items():
pool.close() | python | def close_idle_connections(self, pool_id=None):
"""close idle connections to mongo"""
if not hasattr(self, '_pools'):
return
if pool_id:
if pool_id not in self._pools:
raise ProgrammingError("pool %r does not exist" % pool_id)
else:
pool = self._pools[pool_id]
pool.close()
else:
for pool_id, pool in self._pools.items():
pool.close() | [
"def",
"close_idle_connections",
"(",
"self",
",",
"pool_id",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_pools'",
")",
":",
"return",
"if",
"pool_id",
":",
"if",
"pool_id",
"not",
"in",
"self",
".",
"_pools",
":",
"raise",
"Pro... | close idle connections to mongo | [
"close",
"idle",
"connections",
"to",
"mongo"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/pool.py#L41-L54 | train | 31,872 |
bitly/asyncmongo | asyncmongo/pool.py | ConnectionPool.connection | def connection(self):
""" get a cached connection from the pool """
self._condition.acquire()
try:
if (self._maxconnections and self._connections >= self._maxconnections):
raise TooManyConnections("%d connections are already equal to the max: %d" % (self._connections, self._maxconnections))
# connection limit not reached, get a dedicated connection
try: # first try to get it from the idle cache
con = self._idle_cache.pop(0)
except IndexError: # else get a fresh connection
con = self.new_connection()
self._connections += 1
finally:
self._condition.release()
return con | python | def connection(self):
""" get a cached connection from the pool """
self._condition.acquire()
try:
if (self._maxconnections and self._connections >= self._maxconnections):
raise TooManyConnections("%d connections are already equal to the max: %d" % (self._connections, self._maxconnections))
# connection limit not reached, get a dedicated connection
try: # first try to get it from the idle cache
con = self._idle_cache.pop(0)
except IndexError: # else get a fresh connection
con = self.new_connection()
self._connections += 1
finally:
self._condition.release()
return con | [
"def",
"connection",
"(",
"self",
")",
":",
"self",
".",
"_condition",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"(",
"self",
".",
"_maxconnections",
"and",
"self",
".",
"_connections",
">=",
"self",
".",
"_maxconnections",
")",
":",
"raise",
"TooManyC... | get a cached connection from the pool | [
"get",
"a",
"cached",
"connection",
"from",
"the",
"pool"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/pool.py#L110-L125 | train | 31,873 |
bitly/asyncmongo | asyncmongo/cursor.py | Cursor.__query_options | def __query_options(self):
"""Get the query options string to use for this query."""
options = 0
if self.__tailable:
options |= _QUERY_OPTIONS["tailable_cursor"]
if self.__slave_okay or self.__pool._slave_okay:
options |= _QUERY_OPTIONS["slave_okay"]
if not self.__timeout:
options |= _QUERY_OPTIONS["no_timeout"]
return options | python | def __query_options(self):
"""Get the query options string to use for this query."""
options = 0
if self.__tailable:
options |= _QUERY_OPTIONS["tailable_cursor"]
if self.__slave_okay or self.__pool._slave_okay:
options |= _QUERY_OPTIONS["slave_okay"]
if not self.__timeout:
options |= _QUERY_OPTIONS["no_timeout"]
return options | [
"def",
"__query_options",
"(",
"self",
")",
":",
"options",
"=",
"0",
"if",
"self",
".",
"__tailable",
":",
"options",
"|=",
"_QUERY_OPTIONS",
"[",
"\"tailable_cursor\"",
"]",
"if",
"self",
".",
"__slave_okay",
"or",
"self",
".",
"__pool",
".",
"_slave_okay"... | Get the query options string to use for this query. | [
"Get",
"the",
"query",
"options",
"string",
"to",
"use",
"for",
"this",
"query",
"."
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/cursor.py#L421-L430 | train | 31,874 |
djungelorm/sphinx-tabs | sphinx_tabs/tabs.py | update_context | def update_context(app, pagename, templatename, context, doctree):
""" Remove sphinx-tabs CSS and JS asset files if not used in a page """
if doctree is None:
return
visitor = _FindTabsDirectiveVisitor(doctree)
doctree.walk(visitor)
if not visitor.found_tabs_directive:
paths = [posixpath.join('_static', 'sphinx_tabs/' + f) for f in FILES]
if 'css_files' in context:
context['css_files'] = context['css_files'][:]
for path in paths:
if path.endswith('.css'):
context['css_files'].remove(path)
if 'script_files' in context:
context['script_files'] = context['script_files'][:]
for path in paths:
if path.endswith('.js'):
context['script_files'].remove(path) | python | def update_context(app, pagename, templatename, context, doctree):
""" Remove sphinx-tabs CSS and JS asset files if not used in a page """
if doctree is None:
return
visitor = _FindTabsDirectiveVisitor(doctree)
doctree.walk(visitor)
if not visitor.found_tabs_directive:
paths = [posixpath.join('_static', 'sphinx_tabs/' + f) for f in FILES]
if 'css_files' in context:
context['css_files'] = context['css_files'][:]
for path in paths:
if path.endswith('.css'):
context['css_files'].remove(path)
if 'script_files' in context:
context['script_files'] = context['script_files'][:]
for path in paths:
if path.endswith('.js'):
context['script_files'].remove(path) | [
"def",
"update_context",
"(",
"app",
",",
"pagename",
",",
"templatename",
",",
"context",
",",
"doctree",
")",
":",
"if",
"doctree",
"is",
"None",
":",
"return",
"visitor",
"=",
"_FindTabsDirectiveVisitor",
"(",
"doctree",
")",
"doctree",
".",
"walk",
"(",
... | Remove sphinx-tabs CSS and JS asset files if not used in a page | [
"Remove",
"sphinx",
"-",
"tabs",
"CSS",
"and",
"JS",
"asset",
"files",
"if",
"not",
"used",
"in",
"a",
"page"
] | 2f17b5ca82a91613b42d58d01aafbd484525915c | https://github.com/djungelorm/sphinx-tabs/blob/2f17b5ca82a91613b42d58d01aafbd484525915c/sphinx_tabs/tabs.py#L269-L286 | train | 31,875 |
djungelorm/sphinx-tabs | sphinx_tabs/tabs.py | copy_assets | def copy_assets(app, exception):
""" Copy asset files to the output """
if 'getLogger' in dir(logging):
log = logging.getLogger(__name__).info # pylint: disable=no-member
else:
log = app.info
builders = get_compatible_builders(app)
if exception:
return
if app.builder.name not in builders:
if not app.config['sphinx_tabs_nowarn']:
app.warn(
'Not copying tabs assets! Not compatible with %s builder' %
app.builder.name)
return
log('Copying tabs assets')
installdir = os.path.join(app.builder.outdir, '_static', 'sphinx_tabs')
for path in FILES:
source = resource_filename('sphinx_tabs', path)
dest = os.path.join(installdir, path)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
copyfile(source, dest) | python | def copy_assets(app, exception):
""" Copy asset files to the output """
if 'getLogger' in dir(logging):
log = logging.getLogger(__name__).info # pylint: disable=no-member
else:
log = app.info
builders = get_compatible_builders(app)
if exception:
return
if app.builder.name not in builders:
if not app.config['sphinx_tabs_nowarn']:
app.warn(
'Not copying tabs assets! Not compatible with %s builder' %
app.builder.name)
return
log('Copying tabs assets')
installdir = os.path.join(app.builder.outdir, '_static', 'sphinx_tabs')
for path in FILES:
source = resource_filename('sphinx_tabs', path)
dest = os.path.join(installdir, path)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
copyfile(source, dest) | [
"def",
"copy_assets",
"(",
"app",
",",
"exception",
")",
":",
"if",
"'getLogger'",
"in",
"dir",
"(",
"logging",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"info",
"# pylint: disable=no-member",
"else",
":",
"log",
"=",
... | Copy asset files to the output | [
"Copy",
"asset",
"files",
"to",
"the",
"output"
] | 2f17b5ca82a91613b42d58d01aafbd484525915c | https://github.com/djungelorm/sphinx-tabs/blob/2f17b5ca82a91613b42d58d01aafbd484525915c/sphinx_tabs/tabs.py#L290-L318 | train | 31,876 |
djungelorm/sphinx-tabs | sphinx_tabs/tabs.py | setup | def setup(app):
""" Set up the plugin """
app.add_config_value('sphinx_tabs_nowarn', False, '')
app.add_config_value('sphinx_tabs_valid_builders', [], '')
app.add_directive('tabs', TabsDirective)
app.add_directive('tab', TabDirective)
app.add_directive('group-tab', GroupTabDirective)
app.add_directive('code-tab', CodeTabDirective)
for path in ['sphinx_tabs/' + f for f in FILES]:
if path.endswith('.css'):
if 'add_css_file' in dir(app):
app.add_css_file(path)
else:
app.add_stylesheet(path)
if path.endswith('.js'):
if 'add_script_file' in dir(app):
app.add_script_file(path)
else:
app.add_javascript(path)
app.connect('html-page-context', update_context)
app.connect('build-finished', copy_assets) | python | def setup(app):
""" Set up the plugin """
app.add_config_value('sphinx_tabs_nowarn', False, '')
app.add_config_value('sphinx_tabs_valid_builders', [], '')
app.add_directive('tabs', TabsDirective)
app.add_directive('tab', TabDirective)
app.add_directive('group-tab', GroupTabDirective)
app.add_directive('code-tab', CodeTabDirective)
for path in ['sphinx_tabs/' + f for f in FILES]:
if path.endswith('.css'):
if 'add_css_file' in dir(app):
app.add_css_file(path)
else:
app.add_stylesheet(path)
if path.endswith('.js'):
if 'add_script_file' in dir(app):
app.add_script_file(path)
else:
app.add_javascript(path)
app.connect('html-page-context', update_context)
app.connect('build-finished', copy_assets) | [
"def",
"setup",
"(",
"app",
")",
":",
"app",
".",
"add_config_value",
"(",
"'sphinx_tabs_nowarn'",
",",
"False",
",",
"''",
")",
"app",
".",
"add_config_value",
"(",
"'sphinx_tabs_valid_builders'",
",",
"[",
"]",
",",
"''",
")",
"app",
".",
"add_directive",
... | Set up the plugin | [
"Set",
"up",
"the",
"plugin"
] | 2f17b5ca82a91613b42d58d01aafbd484525915c | https://github.com/djungelorm/sphinx-tabs/blob/2f17b5ca82a91613b42d58d01aafbd484525915c/sphinx_tabs/tabs.py#L321-L341 | train | 31,877 |
djungelorm/sphinx-tabs | sphinx_tabs/tabs.py | TabsDirective.run | def run(self):
""" Parse a tabs directive """
self.assert_has_content()
env = self.state.document.settings.env
node = nodes.container()
node['classes'] = ['sphinx-tabs']
if 'next_tabs_id' not in env.temp_data:
env.temp_data['next_tabs_id'] = 0
if 'tabs_stack' not in env.temp_data:
env.temp_data['tabs_stack'] = []
tabs_id = env.temp_data['next_tabs_id']
tabs_key = 'tabs_%d' % tabs_id
env.temp_data['next_tabs_id'] += 1
env.temp_data['tabs_stack'].append(tabs_id)
env.temp_data[tabs_key] = {}
env.temp_data[tabs_key]['tab_ids'] = []
env.temp_data[tabs_key]['tab_titles'] = []
env.temp_data[tabs_key]['is_first_tab'] = True
self.state.nested_parse(self.content, self.content_offset, node)
if env.app.builder.name in get_compatible_builders(env.app):
tabs_node = nodes.container()
tabs_node.tagname = 'div'
classes = 'ui top attached tabular menu sphinx-menu'
tabs_node['classes'] = classes.split(' ')
tab_titles = env.temp_data[tabs_key]['tab_titles']
for idx, [data_tab, tab_name] in enumerate(tab_titles):
tab = nodes.container()
tab.tagname = 'a'
tab['classes'] = ['item'] if idx > 0 else ['active', 'item']
tab['classes'].append(data_tab)
tab += tab_name
tabs_node += tab
node.children.insert(0, tabs_node)
env.temp_data['tabs_stack'].pop()
return [node] | python | def run(self):
""" Parse a tabs directive """
self.assert_has_content()
env = self.state.document.settings.env
node = nodes.container()
node['classes'] = ['sphinx-tabs']
if 'next_tabs_id' not in env.temp_data:
env.temp_data['next_tabs_id'] = 0
if 'tabs_stack' not in env.temp_data:
env.temp_data['tabs_stack'] = []
tabs_id = env.temp_data['next_tabs_id']
tabs_key = 'tabs_%d' % tabs_id
env.temp_data['next_tabs_id'] += 1
env.temp_data['tabs_stack'].append(tabs_id)
env.temp_data[tabs_key] = {}
env.temp_data[tabs_key]['tab_ids'] = []
env.temp_data[tabs_key]['tab_titles'] = []
env.temp_data[tabs_key]['is_first_tab'] = True
self.state.nested_parse(self.content, self.content_offset, node)
if env.app.builder.name in get_compatible_builders(env.app):
tabs_node = nodes.container()
tabs_node.tagname = 'div'
classes = 'ui top attached tabular menu sphinx-menu'
tabs_node['classes'] = classes.split(' ')
tab_titles = env.temp_data[tabs_key]['tab_titles']
for idx, [data_tab, tab_name] in enumerate(tab_titles):
tab = nodes.container()
tab.tagname = 'a'
tab['classes'] = ['item'] if idx > 0 else ['active', 'item']
tab['classes'].append(data_tab)
tab += tab_name
tabs_node += tab
node.children.insert(0, tabs_node)
env.temp_data['tabs_stack'].pop()
return [node] | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"assert_has_content",
"(",
")",
"env",
"=",
"self",
".",
"state",
".",
"document",
".",
"settings",
".",
"env",
"node",
"=",
"nodes",
".",
"container",
"(",
")",
"node",
"[",
"'classes'",
"]",
"=",
... | Parse a tabs directive | [
"Parse",
"a",
"tabs",
"directive"
] | 2f17b5ca82a91613b42d58d01aafbd484525915c | https://github.com/djungelorm/sphinx-tabs/blob/2f17b5ca82a91613b42d58d01aafbd484525915c/sphinx_tabs/tabs.py#L45-L89 | train | 31,878 |
bitly/asyncmongo | asyncmongo/client.py | Client.connection | def connection(self, collectionname, dbname=None):
"""Get a cursor to a collection by name.
raises `DataError` on names with unallowable characters.
:Parameters:
- `collectionname`: the name of the collection
- `dbname`: (optional) overide the default db for a connection
"""
if not collectionname or ".." in collectionname:
raise DataError("collection names cannot be empty")
if "$" in collectionname and not (collectionname.startswith("oplog.$main") or
collectionname.startswith("$cmd")):
raise DataError("collection names must not "
"contain '$': %r" % collectionname)
if collectionname.startswith(".") or collectionname.endswith("."):
raise DataError("collecion names must not start "
"or end with '.': %r" % collectionname)
if "\x00" in collectionname:
raise DataError("collection names must not contain the "
"null character")
return Cursor(dbname or self._pool._dbname, collectionname, self._pool) | python | def connection(self, collectionname, dbname=None):
"""Get a cursor to a collection by name.
raises `DataError` on names with unallowable characters.
:Parameters:
- `collectionname`: the name of the collection
- `dbname`: (optional) overide the default db for a connection
"""
if not collectionname or ".." in collectionname:
raise DataError("collection names cannot be empty")
if "$" in collectionname and not (collectionname.startswith("oplog.$main") or
collectionname.startswith("$cmd")):
raise DataError("collection names must not "
"contain '$': %r" % collectionname)
if collectionname.startswith(".") or collectionname.endswith("."):
raise DataError("collecion names must not start "
"or end with '.': %r" % collectionname)
if "\x00" in collectionname:
raise DataError("collection names must not contain the "
"null character")
return Cursor(dbname or self._pool._dbname, collectionname, self._pool) | [
"def",
"connection",
"(",
"self",
",",
"collectionname",
",",
"dbname",
"=",
"None",
")",
":",
"if",
"not",
"collectionname",
"or",
"\"..\"",
"in",
"collectionname",
":",
"raise",
"DataError",
"(",
"\"collection names cannot be empty\"",
")",
"if",
"\"$\"",
"in"... | Get a cursor to a collection by name.
raises `DataError` on names with unallowable characters.
:Parameters:
- `collectionname`: the name of the collection
- `dbname`: (optional) overide the default db for a connection | [
"Get",
"a",
"cursor",
"to",
"a",
"collection",
"by",
"name",
"."
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/client.py#L69-L91 | train | 31,879 |
bitly/asyncmongo | asyncmongo/client.py | Client.collection_names | def collection_names(self, callback):
"""Get a list of all the collection names in selected database"""
callback = partial(self._collection_names_result, callback)
self["system.namespaces"].find(_must_use_master=True, callback=callback) | python | def collection_names(self, callback):
"""Get a list of all the collection names in selected database"""
callback = partial(self._collection_names_result, callback)
self["system.namespaces"].find(_must_use_master=True, callback=callback) | [
"def",
"collection_names",
"(",
"self",
",",
"callback",
")",
":",
"callback",
"=",
"partial",
"(",
"self",
".",
"_collection_names_result",
",",
"callback",
")",
"self",
"[",
"\"system.namespaces\"",
"]",
".",
"find",
"(",
"_must_use_master",
"=",
"True",
","... | Get a list of all the collection names in selected database | [
"Get",
"a",
"list",
"of",
"all",
"the",
"collection",
"names",
"in",
"selected",
"database"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/client.py#L93-L96 | train | 31,880 |
bitly/asyncmongo | asyncmongo/client.py | Client._collection_names_result | def _collection_names_result(self, callback, results, error=None):
"""callback to for collection names query, filters out collection names"""
names = [r['name'] for r in results if r['name'].count('.') == 1]
assert error == None, repr(error)
strip = len(self._pool._dbname) + 1
callback([name[strip:] for name in names]) | python | def _collection_names_result(self, callback, results, error=None):
"""callback to for collection names query, filters out collection names"""
names = [r['name'] for r in results if r['name'].count('.') == 1]
assert error == None, repr(error)
strip = len(self._pool._dbname) + 1
callback([name[strip:] for name in names]) | [
"def",
"_collection_names_result",
"(",
"self",
",",
"callback",
",",
"results",
",",
"error",
"=",
"None",
")",
":",
"names",
"=",
"[",
"r",
"[",
"'name'",
"]",
"for",
"r",
"in",
"results",
"if",
"r",
"[",
"'name'",
"]",
".",
"count",
"(",
"'.'",
... | callback to for collection names query, filters out collection names | [
"callback",
"to",
"for",
"collection",
"names",
"query",
"filters",
"out",
"collection",
"names"
] | 3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b | https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/client.py#L98-L103 | train | 31,881 |
DBuildService/dockerfile-parse | dockerfile_parse/parser.py | DockerfileParser.parent_images | def parent_images(self, parents):
"""
setter for images in 'FROM' instructions.
Images are updated per build stage with the given parents in the order they appear.
Raises RuntimeError if a different number of parents are given than there are stages
as that is likely to be a mistake.
:param parents: list of image strings
"""
parents = list(parents)
change_instrs = []
for instr in self.structure:
if instr['instruction'] != 'FROM':
continue
old_image, stage = image_from(instr['value'])
if not old_image:
continue # broken FROM, fixing would just confuse things
if not parents:
raise RuntimeError("not enough parents to match build stages")
image = parents.pop(0)
if image != old_image:
instr['value'] = '{0} AS {1}'.format(image, stage) if stage else image
instr['content'] = 'FROM {0}\n'.format(instr['value'])
change_instrs.append(instr)
if parents:
raise RuntimeError("trying to update too many parents for build stages")
lines = self.lines
for instr in reversed(change_instrs):
lines[instr['startline']:instr['endline']+1] = [instr['content']]
self.lines = lines | python | def parent_images(self, parents):
"""
setter for images in 'FROM' instructions.
Images are updated per build stage with the given parents in the order they appear.
Raises RuntimeError if a different number of parents are given than there are stages
as that is likely to be a mistake.
:param parents: list of image strings
"""
parents = list(parents)
change_instrs = []
for instr in self.structure:
if instr['instruction'] != 'FROM':
continue
old_image, stage = image_from(instr['value'])
if not old_image:
continue # broken FROM, fixing would just confuse things
if not parents:
raise RuntimeError("not enough parents to match build stages")
image = parents.pop(0)
if image != old_image:
instr['value'] = '{0} AS {1}'.format(image, stage) if stage else image
instr['content'] = 'FROM {0}\n'.format(instr['value'])
change_instrs.append(instr)
if parents:
raise RuntimeError("trying to update too many parents for build stages")
lines = self.lines
for instr in reversed(change_instrs):
lines[instr['startline']:instr['endline']+1] = [instr['content']]
self.lines = lines | [
"def",
"parent_images",
"(",
"self",
",",
"parents",
")",
":",
"parents",
"=",
"list",
"(",
"parents",
")",
"change_instrs",
"=",
"[",
"]",
"for",
"instr",
"in",
"self",
".",
"structure",
":",
"if",
"instr",
"[",
"'instruction'",
"]",
"!=",
"'FROM'",
"... | setter for images in 'FROM' instructions.
Images are updated per build stage with the given parents in the order they appear.
Raises RuntimeError if a different number of parents are given than there are stages
as that is likely to be a mistake.
:param parents: list of image strings | [
"setter",
"for",
"images",
"in",
"FROM",
"instructions",
".",
"Images",
"are",
"updated",
"per",
"build",
"stage",
"with",
"the",
"given",
"parents",
"in",
"the",
"order",
"they",
"appear",
".",
"Raises",
"RuntimeError",
"if",
"a",
"different",
"number",
"of... | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L285-L319 | train | 31,882 |
DBuildService/dockerfile-parse | dockerfile_parse/parser.py | DockerfileParser.baseimage | def baseimage(self, new_image):
"""
change image of final stage FROM instruction
"""
images = self.parent_images or [None]
images[-1] = new_image
self.parent_images = images | python | def baseimage(self, new_image):
"""
change image of final stage FROM instruction
"""
images = self.parent_images or [None]
images[-1] = new_image
self.parent_images = images | [
"def",
"baseimage",
"(",
"self",
",",
"new_image",
")",
":",
"images",
"=",
"self",
".",
"parent_images",
"or",
"[",
"None",
"]",
"images",
"[",
"-",
"1",
"]",
"=",
"new_image",
"self",
".",
"parent_images",
"=",
"images"
] | change image of final stage FROM instruction | [
"change",
"image",
"of",
"final",
"stage",
"FROM",
"instruction"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L333-L339 | train | 31,883 |
DBuildService/dockerfile-parse | dockerfile_parse/parser.py | DockerfileParser.cmd | def cmd(self, value):
"""
setter for final 'CMD' instruction in final build stage
"""
cmd = None
for insndesc in self.structure:
if insndesc['instruction'] == 'FROM': # new stage, reset
cmd = None
elif insndesc['instruction'] == 'CMD':
cmd = insndesc
new_cmd = 'CMD ' + value
if cmd:
self.add_lines_at(cmd, new_cmd, replace=True)
else:
self.add_lines(new_cmd) | python | def cmd(self, value):
"""
setter for final 'CMD' instruction in final build stage
"""
cmd = None
for insndesc in self.structure:
if insndesc['instruction'] == 'FROM': # new stage, reset
cmd = None
elif insndesc['instruction'] == 'CMD':
cmd = insndesc
new_cmd = 'CMD ' + value
if cmd:
self.add_lines_at(cmd, new_cmd, replace=True)
else:
self.add_lines(new_cmd) | [
"def",
"cmd",
"(",
"self",
",",
"value",
")",
":",
"cmd",
"=",
"None",
"for",
"insndesc",
"in",
"self",
".",
"structure",
":",
"if",
"insndesc",
"[",
"'instruction'",
"]",
"==",
"'FROM'",
":",
"# new stage, reset",
"cmd",
"=",
"None",
"elif",
"insndesc",... | setter for final 'CMD' instruction in final build stage | [
"setter",
"for",
"final",
"CMD",
"instruction",
"in",
"final",
"build",
"stage"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L357-L373 | train | 31,884 |
DBuildService/dockerfile-parse | dockerfile_parse/parser.py | DockerfileParser._instruction_getter | def _instruction_getter(self, name, env_replace):
"""
Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance
"""
if name != 'LABEL' and name != 'ENV':
raise ValueError("Unsupported instruction '%s'", name)
instructions = {}
envs = {}
for instruction_desc in self.structure:
this_instruction = instruction_desc['instruction']
if this_instruction == 'FROM':
instructions.clear()
envs = self.parent_env.copy()
elif this_instruction in (name, 'ENV'):
logger.debug("%s value: %r", name.lower(), instruction_desc['value'])
key_val_list = extract_labels_or_envs(env_replace=env_replace,
envs=envs,
instruction_value=instruction_desc['value'])
for key, value in key_val_list:
if this_instruction == name:
instructions[key] = value
logger.debug("new %s %r=%r", name.lower(), key, value)
if this_instruction == 'ENV':
envs[key] = value
logger.debug("instructions: %r", instructions)
return Labels(instructions, self) if name == 'LABEL' else Envs(instructions, self) | python | def _instruction_getter(self, name, env_replace):
"""
Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance
"""
if name != 'LABEL' and name != 'ENV':
raise ValueError("Unsupported instruction '%s'", name)
instructions = {}
envs = {}
for instruction_desc in self.structure:
this_instruction = instruction_desc['instruction']
if this_instruction == 'FROM':
instructions.clear()
envs = self.parent_env.copy()
elif this_instruction in (name, 'ENV'):
logger.debug("%s value: %r", name.lower(), instruction_desc['value'])
key_val_list = extract_labels_or_envs(env_replace=env_replace,
envs=envs,
instruction_value=instruction_desc['value'])
for key, value in key_val_list:
if this_instruction == name:
instructions[key] = value
logger.debug("new %s %r=%r", name.lower(), key, value)
if this_instruction == 'ENV':
envs[key] = value
logger.debug("instructions: %r", instructions)
return Labels(instructions, self) if name == 'LABEL' else Envs(instructions, self) | [
"def",
"_instruction_getter",
"(",
"self",
",",
"name",
",",
"env_replace",
")",
":",
"if",
"name",
"!=",
"'LABEL'",
"and",
"name",
"!=",
"'ENV'",
":",
"raise",
"ValueError",
"(",
"\"Unsupported instruction '%s'\"",
",",
"name",
")",
"instructions",
"=",
"{",
... | Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance | [
"Get",
"LABEL",
"or",
"ENV",
"instructions",
"with",
"environment",
"replacement"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L391-L422 | train | 31,885 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | b2u | def b2u(string):
""" bytes to unicode """
if (isinstance(string, bytes) or
(PY2 and isinstance(string, str))):
return string.decode('utf-8')
return string | python | def b2u(string):
""" bytes to unicode """
if (isinstance(string, bytes) or
(PY2 and isinstance(string, str))):
return string.decode('utf-8')
return string | [
"def",
"b2u",
"(",
"string",
")",
":",
"if",
"(",
"isinstance",
"(",
"string",
",",
"bytes",
")",
"or",
"(",
"PY2",
"and",
"isinstance",
"(",
"string",
",",
"str",
")",
")",
")",
":",
"return",
"string",
".",
"decode",
"(",
"'utf-8'",
")",
"return"... | bytes to unicode | [
"bytes",
"to",
"unicode"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L18-L23 | train | 31,886 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | u2b | def u2b(string):
""" unicode to bytes"""
if ((PY2 and isinstance(string, unicode)) or
((not PY2) and isinstance(string, str))):
return string.encode('utf-8')
return string | python | def u2b(string):
""" unicode to bytes"""
if ((PY2 and isinstance(string, unicode)) or
((not PY2) and isinstance(string, str))):
return string.encode('utf-8')
return string | [
"def",
"u2b",
"(",
"string",
")",
":",
"if",
"(",
"(",
"PY2",
"and",
"isinstance",
"(",
"string",
",",
"unicode",
")",
")",
"or",
"(",
"(",
"not",
"PY2",
")",
"and",
"isinstance",
"(",
"string",
",",
"str",
")",
")",
")",
":",
"return",
"string",... | unicode to bytes | [
"unicode",
"to",
"bytes"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L26-L31 | train | 31,887 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | WordSplitter._update_quoting_state | def _update_quoting_state(self, ch):
"""
Update self.quotes and self.escaped
:param ch: str, current character
:return: ch if it was not used to update quoting state, else ''
"""
# Set whether the next character is escaped
# Unquoted:
# a backslash escapes the next character
# Double-quoted:
# a backslash escapes the next character only if it is a double-quote
# Single-quoted:
# a backslash is not special
is_escaped = self.escaped
self.escaped = (not self.escaped and
ch == '\\' and
self.quotes != self.SQUOTE)
if self.escaped:
return ''
if is_escaped:
if self.quotes == self.DQUOTE:
if ch == '"':
return ch
return "{0}{1}".format('\\', ch)
return ch
if self.quotes is None:
if ch in (self.SQUOTE, self.DQUOTE):
self.quotes = ch
return ''
elif self.quotes == ch:
self.quotes = None
return ''
return ch | python | def _update_quoting_state(self, ch):
"""
Update self.quotes and self.escaped
:param ch: str, current character
:return: ch if it was not used to update quoting state, else ''
"""
# Set whether the next character is escaped
# Unquoted:
# a backslash escapes the next character
# Double-quoted:
# a backslash escapes the next character only if it is a double-quote
# Single-quoted:
# a backslash is not special
is_escaped = self.escaped
self.escaped = (not self.escaped and
ch == '\\' and
self.quotes != self.SQUOTE)
if self.escaped:
return ''
if is_escaped:
if self.quotes == self.DQUOTE:
if ch == '"':
return ch
return "{0}{1}".format('\\', ch)
return ch
if self.quotes is None:
if ch in (self.SQUOTE, self.DQUOTE):
self.quotes = ch
return ''
elif self.quotes == ch:
self.quotes = None
return ''
return ch | [
"def",
"_update_quoting_state",
"(",
"self",
",",
"ch",
")",
":",
"# Set whether the next character is escaped",
"# Unquoted:",
"# a backslash escapes the next character",
"# Double-quoted:",
"# a backslash escapes the next character only if it is a double-quote",
"# Single-quoted:",
... | Update self.quotes and self.escaped
:param ch: str, current character
:return: ch if it was not used to update quoting state, else '' | [
"Update",
"self",
".",
"quotes",
"and",
"self",
".",
"escaped"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L63-L102 | train | 31,888 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | WordSplitter.split | def split(self, maxsplit=None, dequote=True):
"""
Generator for the words of the string
:param maxsplit: perform at most maxsplit splits;
if None, do not limit the number of splits
:param dequote: remove quotes and escape characters once consumed
"""
class Word(object):
"""
A None-or-str object which can always be appended to.
Similar to a defaultdict but with only a single value.
"""
def __init__(self):
self.value = None
@property
def valid(self):
return self.value is not None
def append(self, s):
if self.value is None:
self.value = s
else:
self.value += s
num_splits = 0
word = Word()
while True:
ch = self.stream.read(1)
if not ch:
# EOF
if word.valid:
yield word.value
return
if (not self.escaped and
self.envs is not None and
ch == '$' and
self.quotes != self.SQUOTE):
while True:
# Substitute environment variable
braced = False
varname = ''
while True:
ch = self.stream.read(1)
if varname == '' and ch == '{':
braced = True
continue
if not ch:
# EOF
break
if braced and ch == '}':
break
if not ch.isalnum() and ch != '_':
break
varname += ch
try:
word.append(self.envs[varname])
except KeyError:
pass
# Check whether there is another envvar
if ch != '$':
break
if braced and ch == '}':
continue
# ch now holds the next character
# Figure out what our quoting/escaping state will be
# after this character
is_escaped = self.escaped
ch_unless_consumed = self._update_quoting_state(ch)
if dequote:
# If we just processed a quote or escape character,
# and were asked to dequote the string, consume it now
ch = ch_unless_consumed
# If word-splitting has been requested, check whether we are
# at a whitespace character
may_split = maxsplit != 0 and (maxsplit is None or
num_splits < maxsplit)
at_split = may_split and (self.quotes is None and
not is_escaped and
ch.isspace())
if at_split:
# It is time to yield a word
if word.valid:
num_splits += 1
yield word.value
word = Word()
else:
word.append(ch) | python | def split(self, maxsplit=None, dequote=True):
"""
Generator for the words of the string
:param maxsplit: perform at most maxsplit splits;
if None, do not limit the number of splits
:param dequote: remove quotes and escape characters once consumed
"""
class Word(object):
"""
A None-or-str object which can always be appended to.
Similar to a defaultdict but with only a single value.
"""
def __init__(self):
self.value = None
@property
def valid(self):
return self.value is not None
def append(self, s):
if self.value is None:
self.value = s
else:
self.value += s
num_splits = 0
word = Word()
while True:
ch = self.stream.read(1)
if not ch:
# EOF
if word.valid:
yield word.value
return
if (not self.escaped and
self.envs is not None and
ch == '$' and
self.quotes != self.SQUOTE):
while True:
# Substitute environment variable
braced = False
varname = ''
while True:
ch = self.stream.read(1)
if varname == '' and ch == '{':
braced = True
continue
if not ch:
# EOF
break
if braced and ch == '}':
break
if not ch.isalnum() and ch != '_':
break
varname += ch
try:
word.append(self.envs[varname])
except KeyError:
pass
# Check whether there is another envvar
if ch != '$':
break
if braced and ch == '}':
continue
# ch now holds the next character
# Figure out what our quoting/escaping state will be
# after this character
is_escaped = self.escaped
ch_unless_consumed = self._update_quoting_state(ch)
if dequote:
# If we just processed a quote or escape character,
# and were asked to dequote the string, consume it now
ch = ch_unless_consumed
# If word-splitting has been requested, check whether we are
# at a whitespace character
may_split = maxsplit != 0 and (maxsplit is None or
num_splits < maxsplit)
at_split = may_split and (self.quotes is None and
not is_escaped and
ch.isspace())
if at_split:
# It is time to yield a word
if word.valid:
num_splits += 1
yield word.value
word = Word()
else:
word.append(ch) | [
"def",
"split",
"(",
"self",
",",
"maxsplit",
"=",
"None",
",",
"dequote",
"=",
"True",
")",
":",
"class",
"Word",
"(",
"object",
")",
":",
"\"\"\"\n A None-or-str object which can always be appended to.\n Similar to a defaultdict but with only a single ... | Generator for the words of the string
:param maxsplit: perform at most maxsplit splits;
if None, do not limit the number of splits
:param dequote: remove quotes and escape characters once consumed | [
"Generator",
"for",
"the",
"words",
"of",
"the",
"string"
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L107-L211 | train | 31,889 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | Context.get_line_value | def get_line_value(self, context_type):
"""
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
"""
if context_type.upper() == "ENV":
return self.line_envs
elif context_type.upper() == "LABEL":
return self.line_labels | python | def get_line_value(self, context_type):
"""
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
"""
if context_type.upper() == "ENV":
return self.line_envs
elif context_type.upper() == "LABEL":
return self.line_labels | [
"def",
"get_line_value",
"(",
"self",
",",
"context_type",
")",
":",
"if",
"context_type",
".",
"upper",
"(",
")",
"==",
"\"ENV\"",
":",
"return",
"self",
".",
"line_envs",
"elif",
"context_type",
".",
"upper",
"(",
")",
"==",
"\"LABEL\"",
":",
"return",
... | Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line | [
"Get",
"the",
"values",
"defined",
"on",
"this",
"line",
"."
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L287-L297 | train | 31,890 |
DBuildService/dockerfile-parse | dockerfile_parse/util.py | Context.get_values | def get_values(self, context_type):
"""
Get the values valid on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type valid on this line
"""
if context_type.upper() == "ENV":
return self.envs
elif context_type.upper() == "LABEL":
return self.labels | python | def get_values(self, context_type):
"""
Get the values valid on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type valid on this line
"""
if context_type.upper() == "ENV":
return self.envs
elif context_type.upper() == "LABEL":
return self.labels | [
"def",
"get_values",
"(",
"self",
",",
"context_type",
")",
":",
"if",
"context_type",
".",
"upper",
"(",
")",
"==",
"\"ENV\"",
":",
"return",
"self",
".",
"envs",
"elif",
"context_type",
".",
"upper",
"(",
")",
"==",
"\"LABEL\"",
":",
"return",
"self",
... | Get the values valid on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type valid on this line | [
"Get",
"the",
"values",
"valid",
"on",
"this",
"line",
"."
] | 3d7b514d8b8eded1b33529cf0f6a0770a573aee0 | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L299-L309 | train | 31,891 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/usefulFunctions.py | uniqueStates | def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums | python | def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums | [
"def",
"uniqueStates",
"(",
"states",
",",
"rates",
")",
":",
"order",
"=",
"np",
".",
"lexsort",
"(",
"states",
".",
"T",
")",
"states",
"=",
"states",
"[",
"order",
"]",
"diff",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"states",
")",
",",
"'boo... | Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state | [
"Returns",
"unique",
"states",
"and",
"sums",
"up",
"the",
"corresponding",
"rates",
".",
"States",
"should",
"be",
"a",
"2d",
"numpy",
"array",
"with",
"on",
"each",
"row",
"a",
"state",
"and",
"rates",
"a",
"1d",
"numpy",
"array",
"with",
"length",
"eq... | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/usefulFunctions.py#L4-L16 | train | 31,892 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.checkInitialState | def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState | python | def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState | [
"def",
"checkInitialState",
"(",
"self",
",",
"initialState",
")",
":",
"assert",
"initialState",
"is",
"not",
"None",
",",
"\"Initial state has not been specified.\"",
"assert",
"isinstance",
"(",
"initialState",
",",
"(",
"int",
",",
"list",
",",
"tuple",
",",
... | Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple. | [
"Check",
"whether",
"the",
"initial",
"state",
"is",
"of",
"the",
"correct",
"type",
".",
"The",
"state",
"should",
"be",
"either",
"an",
"int",
"list",
"tuple",
"or",
"np",
".",
"array",
"and",
"all",
"its",
"elements",
"must",
"be",
"integer",
".",
"... | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L122-L155 | train | 31,893 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.indirectInitialMatrix | def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr() | python | def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr() | [
"def",
"indirectInitialMatrix",
"(",
"self",
",",
"initialState",
")",
":",
"mapping",
"=",
"{",
"}",
"rates",
"=",
"OrderedDict",
"(",
")",
"#Check whether the initial state is defined and of the correct type, and convert to a tuple or int. ",
"convertedState",
"=",
"self",
... | Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary. | [
"Given",
"some",
"initial",
"state",
"this",
"iteratively",
"determines",
"new",
"states",
".",
"We",
"repeatedly",
"call",
"the",
"transition",
"function",
"on",
"unvisited",
"states",
"in",
"the",
"frontier",
"set",
".",
"Each",
"newly",
"visited",
"state",
... | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L194-L243 | train | 31,894 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getStateCode | def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode) | python | def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode) | [
"def",
"getStateCode",
"(",
"self",
",",
"state",
")",
":",
"return",
"np",
".",
"dot",
"(",
"state",
"-",
"self",
".",
"minvalues",
",",
"self",
".",
"statecode",
")"
] | Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base. | [
"Calculates",
"the",
"state",
"code",
"for",
"a",
"specific",
"state",
"or",
"set",
"of",
"states",
".",
"We",
"transform",
"the",
"states",
"so",
"that",
"they",
"are",
"nonnegative",
"and",
"take",
"an",
"inner",
"product",
".",
"The",
"resulting",
"numb... | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L246-L252 | train | 31,895 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getStateIndex | def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int) | python | def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int) | [
"def",
"getStateIndex",
"(",
"self",
",",
"state",
")",
":",
"statecodes",
"=",
"self",
".",
"getStateCode",
"(",
"state",
")",
"return",
"np",
".",
"searchsorted",
"(",
"self",
".",
"codes",
",",
"statecodes",
")",
".",
"astype",
"(",
"int",
")"
] | Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once. | [
"Returns",
"the",
"index",
"of",
"a",
"state",
"by",
"calculating",
"the",
"state",
"code",
"and",
"searching",
"for",
"this",
"code",
"a",
"sorted",
"list",
".",
"Can",
"be",
"called",
"on",
"multiple",
"states",
"at",
"once",
"."
] | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L284-L290 | train | 31,896 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.transitionStates | def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates | python | def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates | [
"def",
"transitionStates",
"(",
"self",
",",
"state",
")",
":",
"newstates",
",",
"rates",
"=",
"self",
".",
"transition",
"(",
"state",
")",
"newindices",
"=",
"self",
".",
"getStateIndex",
"(",
"newstates",
")",
"return",
"newindices",
",",
"rates"
] | Return the indices of new states and their rates. | [
"Return",
"the",
"indices",
"of",
"new",
"states",
"and",
"their",
"rates",
"."
] | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L292-L298 | train | 31,897 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.convertToRateMatrix | def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag | python | def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag | [
"def",
"convertToRateMatrix",
"(",
"self",
",",
"Q",
")",
":",
"rowSums",
"=",
"Q",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"getA1",
"(",
")",
"idxRange",
"=",
"np",
".",
"arange",
"(",
"Q",
".",
"shape",
"[",
"0",
"]",
")",
"Qdiag",
"=",
... | Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal. | [
"Converts",
"the",
"initial",
"matrix",
"to",
"a",
"rate",
"matrix",
".",
"We",
"make",
"all",
"rows",
"in",
"Q",
"sum",
"to",
"zero",
"by",
"subtracting",
"the",
"row",
"sums",
"from",
"the",
"diagonal",
"."
] | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L341-L349 | train | 31,898 |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getTransitionMatrix | def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P | python | def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P | [
"def",
"getTransitionMatrix",
"(",
"self",
",",
"probabilities",
"=",
"True",
")",
":",
"if",
"self",
".",
"P",
"is",
"not",
"None",
":",
"if",
"isspmatrix",
"(",
"self",
".",
"P",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"self",
".",
"P",
")",
... | If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix. | [
"If",
"self",
".",
"P",
"has",
"been",
"given",
"already",
"we",
"will",
"reuse",
"it",
"and",
"convert",
"it",
"to",
"a",
"sparse",
"csr",
"matrix",
"if",
"needed",
".",
"Otherwise",
"we",
"will",
"generate",
"it",
"using",
"the",
"direct",
"or",
"ind... | 8325ffdb791c109eee600684ee0dc9126ce80700 | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L373-L399 | train | 31,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.