repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
librosa/librosa | librosa/beat.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L408-L414 | def __beat_local_score(onset_envelope, period):
'''Construct the local score for an onset envlope and given period'''
window = np.exp(-0.5 * (np.arange(-period, period+1)*32.0/period)**2)
return scipy.signal.convolve(__normalize_onsets(onset_envelope),
window,
'same') | [
"def",
"__beat_local_score",
"(",
"onset_envelope",
",",
"period",
")",
":",
"window",
"=",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"np",
".",
"arange",
"(",
"-",
"period",
",",
"period",
"+",
"1",
")",
"*",
"32.0",
"/",
"period",
")",
"**",
... | Construct the local score for an onset envlope and given period | [
"Construct",
"the",
"local",
"score",
"for",
"an",
"onset",
"envlope",
"and",
"given",
"period"
] | python | test |
CalebBell/thermo | thermo/utils.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/utils.py#L427-L489 | def phase_identification_parameter(V, dP_dT, dP_dV, d2P_dV2, d2P_dVdT):
r'''Calculate the Phase Identification Parameter developed in [1]_ for
the accurate and efficient determination of whether a fluid is a liquid or
a gas based on the results of an equation of state. For supercritical
conditions, this provides a good method for choosing which property
correlations to use.
.. math::
\Pi = V \left[\frac{\frac{\partial^2 P}{\partial V \partial T}}
{\frac{\partial P }{\partial T}}- \frac{\frac{\partial^2 P}{\partial
V^2}}{\frac{\partial P}{\partial V}} \right]
Parameters
----------
V : float
Molar volume at `T` and `P`, [m^3/mol]
dP_dT : float
Derivative of `P` with respect to `T`, [Pa/K]
dP_dV : float
Derivative of `P` with respect to `V`, [Pa*mol/m^3]
d2P_dV2 : float
Second derivative of `P` with respect to `V`, [Pa*mol^2/m^6]
d2P_dVdT : float
Second derivative of `P` with respect to both `V` and `T`, [Pa*mol/m^3/K]
Returns
-------
PIP : float
Phase Identification Parameter, [-]
Notes
-----
Heuristics were used by process simulators before the invent of this
parameter.
The criteria for liquid is Pi > 1; for vapor, Pi <= 1.
There is also a solid phase mechanism available. For solids, the Solid
Phase Identification Parameter is greater than 1, like liquids; however,
unlike liquids, d2P_dVdT is always >0; it is < 0 for liquids and gases.
Examples
--------
Calculated for hexane from the PR EOS at 299 K and 1 MPa (liquid):
>>> phase_identification_parameter(0.000130229900874, 582169.397484,
... -3.66431747236e+12, 4.48067893805e+17, -20518995218.2)
11.33428990564796
References
----------
.. [1] Venkatarathnam, G., and L. R. Oellrich. "Identification of the Phase
of a Fluid Using Partial Derivatives of Pressure, Volume, and
Temperature without Reference to Saturation Properties: Applications in
Phase Equilibria Calculations." Fluid Phase Equilibria 301, no. 2
(February 25, 2011): 225-33. doi:10.1016/j.fluid.2010.12.001.
.. [2] Jayanti, Pranava Chaitanya, and G. Venkatarathnam. "Identification
of the Phase of a Substance from the Derivatives of Pressure, Volume and
Temperature, without Prior Knowledge of Saturation Properties: Extension
to Solid Phase." Fluid Phase Equilibria 425 (October 15, 2016): 269-277.
doi:10.1016/j.fluid.2016.06.001.
'''
return V*(d2P_dVdT/dP_dT - d2P_dV2/dP_dV) | [
"def",
"phase_identification_parameter",
"(",
"V",
",",
"dP_dT",
",",
"dP_dV",
",",
"d2P_dV2",
",",
"d2P_dVdT",
")",
":",
"return",
"V",
"*",
"(",
"d2P_dVdT",
"/",
"dP_dT",
"-",
"d2P_dV2",
"/",
"dP_dV",
")"
] | r'''Calculate the Phase Identification Parameter developed in [1]_ for
the accurate and efficient determination of whether a fluid is a liquid or
a gas based on the results of an equation of state. For supercritical
conditions, this provides a good method for choosing which property
correlations to use.
.. math::
\Pi = V \left[\frac{\frac{\partial^2 P}{\partial V \partial T}}
{\frac{\partial P }{\partial T}}- \frac{\frac{\partial^2 P}{\partial
V^2}}{\frac{\partial P}{\partial V}} \right]
Parameters
----------
V : float
Molar volume at `T` and `P`, [m^3/mol]
dP_dT : float
Derivative of `P` with respect to `T`, [Pa/K]
dP_dV : float
Derivative of `P` with respect to `V`, [Pa*mol/m^3]
d2P_dV2 : float
Second derivative of `P` with respect to `V`, [Pa*mol^2/m^6]
d2P_dVdT : float
Second derivative of `P` with respect to both `V` and `T`, [Pa*mol/m^3/K]
Returns
-------
PIP : float
Phase Identification Parameter, [-]
Notes
-----
Heuristics were used by process simulators before the invent of this
parameter.
The criteria for liquid is Pi > 1; for vapor, Pi <= 1.
There is also a solid phase mechanism available. For solids, the Solid
Phase Identification Parameter is greater than 1, like liquids; however,
unlike liquids, d2P_dVdT is always >0; it is < 0 for liquids and gases.
Examples
--------
Calculated for hexane from the PR EOS at 299 K and 1 MPa (liquid):
>>> phase_identification_parameter(0.000130229900874, 582169.397484,
... -3.66431747236e+12, 4.48067893805e+17, -20518995218.2)
11.33428990564796
References
----------
.. [1] Venkatarathnam, G., and L. R. Oellrich. "Identification of the Phase
of a Fluid Using Partial Derivatives of Pressure, Volume, and
Temperature without Reference to Saturation Properties: Applications in
Phase Equilibria Calculations." Fluid Phase Equilibria 301, no. 2
(February 25, 2011): 225-33. doi:10.1016/j.fluid.2010.12.001.
.. [2] Jayanti, Pranava Chaitanya, and G. Venkatarathnam. "Identification
of the Phase of a Substance from the Derivatives of Pressure, Volume and
Temperature, without Prior Knowledge of Saturation Properties: Extension
to Solid Phase." Fluid Phase Equilibria 425 (October 15, 2016): 269-277.
doi:10.1016/j.fluid.2016.06.001. | [
"r",
"Calculate",
"the",
"Phase",
"Identification",
"Parameter",
"developed",
"in",
"[",
"1",
"]",
"_",
"for",
"the",
"accurate",
"and",
"efficient",
"determination",
"of",
"whether",
"a",
"fluid",
"is",
"a",
"liquid",
"or",
"a",
"gas",
"based",
"on",
"the... | python | valid |
astroswego/plotypus | src/plotypus/lightcurve.py | https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L411-L492 | def plot_lightcurve(name, lightcurve, period, data,
output='.', legend=False, sanitize_latex=False,
color=True, n_phases=100,
err_const=0.005,
**kwargs):
"""plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs)
Save a plot of the given *lightcurve* to directory *output*.
**Parameters**
name : str
Name of the star. Used in filename and plot title.
lightcurve : array-like, shape = [n_samples]
Fitted lightcurve.
period : number
Period to phase time by.
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Photometry array containing columns *time*, *magnitude*, and
(optional) *error*. *time* should be unphased.
output : str, optional
Directory to save plot to (default '.').
legend : boolean, optional
Whether or not to display legend on plot (default False).
color : boolean, optional
Whether or not to display color in plot (default True).
n_phases : integer, optional
Number of phase points in fit (default 100).
err_const : number, optional
Constant to use in absence of error (default 0.005).
**Returns**
None
"""
phases = numpy.linspace(0, 1, n_phases, endpoint=False)
ax = plt.gca()
ax.invert_yaxis()
plt.xlim(0,2)
# Plot points used
phase, mag, *err = get_signal(data).T
error = err[0] if err else mag*err_const
inliers = plt.errorbar(numpy.hstack((phase,1+phase)),
numpy.hstack((mag, mag)),
yerr=numpy.hstack((error, error)),
ls='None',
ms=.01, mew=.01, capsize=0)
# Plot outliers rejected
phase, mag, *err = get_noise(data).T
error = err[0] if err else mag*err_const
outliers = plt.errorbar(numpy.hstack((phase,1+phase)),
numpy.hstack((mag, mag)),
yerr=numpy.hstack((error, error)),
ls='None', marker='o' if color else 'x',
ms=.01 if color else 4,
mew=.01 if color else 1,
capsize=0 if color else 1)
# Plot the fitted light curve
signal, = plt.plot(numpy.hstack((phases,1+phases)),
numpy.hstack((lightcurve, lightcurve)),
linewidth=1)
if legend:
plt.legend([signal, inliers, outliers],
["Light Curve", "Inliers", "Outliers"],
loc='best')
plt.xlabel('Phase ({0:0.7} day period)'.format(period))
plt.ylabel('Magnitude')
plt.title(utils.sanitize_latex(name) if sanitize_latex else name)
plt.tight_layout(pad=0.1)
make_sure_path_exists(output)
plt.savefig(path.join(output, name))
plt.clf() | [
"def",
"plot_lightcurve",
"(",
"name",
",",
"lightcurve",
",",
"period",
",",
"data",
",",
"output",
"=",
"'.'",
",",
"legend",
"=",
"False",
",",
"sanitize_latex",
"=",
"False",
",",
"color",
"=",
"True",
",",
"n_phases",
"=",
"100",
",",
"err_const",
... | plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs)
Save a plot of the given *lightcurve* to directory *output*.
**Parameters**
name : str
Name of the star. Used in filename and plot title.
lightcurve : array-like, shape = [n_samples]
Fitted lightcurve.
period : number
Period to phase time by.
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Photometry array containing columns *time*, *magnitude*, and
(optional) *error*. *time* should be unphased.
output : str, optional
Directory to save plot to (default '.').
legend : boolean, optional
Whether or not to display legend on plot (default False).
color : boolean, optional
Whether or not to display color in plot (default True).
n_phases : integer, optional
Number of phase points in fit (default 100).
err_const : number, optional
Constant to use in absence of error (default 0.005).
**Returns**
None | [
"plot_lightcurve",
"(",
"name",
"lightcurve",
"period",
"data",
"output",
"=",
".",
"legend",
"=",
"False",
"color",
"=",
"True",
"n_phases",
"=",
"100",
"err_const",
"=",
"0",
".",
"005",
"**",
"kwargs",
")"
] | python | train |
hydraplatform/hydra-base | hydra_base/util/__init__.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/__init__.py#L220-L236 | def get_layout_as_string(layout):
"""
Take a dict or string and return a string.
The dict will be json dumped.
The string will json parsed to check for json validity. In order to deal
with strings which have been json encoded multiple times, keep json decoding
until a dict is retrieved or until a non-json structure is identified.
"""
if isinstance(layout, dict):
return json.dumps(layout)
if(isinstance(layout, six.string_types)):
try:
return get_layout_as_string(json.loads(layout))
except:
return layout | [
"def",
"get_layout_as_string",
"(",
"layout",
")",
":",
"if",
"isinstance",
"(",
"layout",
",",
"dict",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"layout",
")",
"if",
"(",
"isinstance",
"(",
"layout",
",",
"six",
".",
"string_types",
")",
")",
":"... | Take a dict or string and return a string.
The dict will be json dumped.
The string will json parsed to check for json validity. In order to deal
with strings which have been json encoded multiple times, keep json decoding
until a dict is retrieved or until a non-json structure is identified. | [
"Take",
"a",
"dict",
"or",
"string",
"and",
"return",
"a",
"string",
".",
"The",
"dict",
"will",
"be",
"json",
"dumped",
".",
"The",
"string",
"will",
"json",
"parsed",
"to",
"check",
"for",
"json",
"validity",
".",
"In",
"order",
"to",
"deal",
"with",... | python | train |
antonybholmes/libdna | libdna/decode.py | https://github.com/antonybholmes/libdna/blob/96badfd33c8896c799b1c633bb9fb75cec65a83a/libdna/decode.py#L340-L377 | def dna(self, loc, mask='lower', rev_comp=False, lowercase=False):
"""
Returns the DNA for a location.
Parameters
----------
mask : str, optional
Indicate whether masked bases should be represented as is
('upper'), lowercase ('lower'), or as N ('n')
lowercase : bool, optional
Indicates whether sequence should be displayed as upper or
lowercase. Default is False so sequence is uppercase. Note that
this only affects the reference DNA and does not affect the
mask.
Returns
-------
list
List of base chars.
"""
l = libdna.parse_loc(loc)
ret = self._read_dna(l, lowercase=lowercase)
self._read_n(l, ret)
self._read_mask(l, ret, mask=mask)
if rev_comp:
DNA2Bit._rev_comp(ret)
ret = ret.decode('utf-8')
if lowercase:
ret = ret.lower()
return ret | [
"def",
"dna",
"(",
"self",
",",
"loc",
",",
"mask",
"=",
"'lower'",
",",
"rev_comp",
"=",
"False",
",",
"lowercase",
"=",
"False",
")",
":",
"l",
"=",
"libdna",
".",
"parse_loc",
"(",
"loc",
")",
"ret",
"=",
"self",
".",
"_read_dna",
"(",
"l",
",... | Returns the DNA for a location.
Parameters
----------
mask : str, optional
Indicate whether masked bases should be represented as is
('upper'), lowercase ('lower'), or as N ('n')
lowercase : bool, optional
Indicates whether sequence should be displayed as upper or
lowercase. Default is False so sequence is uppercase. Note that
this only affects the reference DNA and does not affect the
mask.
Returns
-------
list
List of base chars. | [
"Returns",
"the",
"DNA",
"for",
"a",
"location",
".",
"Parameters",
"----------",
"mask",
":",
"str",
"optional",
"Indicate",
"whether",
"masked",
"bases",
"should",
"be",
"represented",
"as",
"is",
"(",
"upper",
")",
"lowercase",
"(",
"lower",
")",
"or",
... | python | train |
pkgw/pwkit | pwkit/synphot.py | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/synphot.py#L333-L344 | def mag_to_fnu(self, mag):
"""Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case.
"""
if self.native_flux_kind == 'flam':
return flam_ang_to_fnu_cgs(self.mag_to_flam(mag), self.pivot_wavelength())
raise PKError('dont\'t know how to get f_ν from mag for bandpass %s/%s',
self.telescope, self.band) | [
"def",
"mag_to_fnu",
"(",
"self",
",",
"mag",
")",
":",
"if",
"self",
".",
"native_flux_kind",
"==",
"'flam'",
":",
"return",
"flam_ang_to_fnu_cgs",
"(",
"self",
".",
"mag_to_flam",
"(",
"mag",
")",
",",
"self",
".",
"pivot_wavelength",
"(",
")",
")",
"r... | Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case. | [
"Convert",
"a",
"magnitude",
"in",
"this",
"band",
"to",
"a",
"f_ν",
"flux",
"density",
"."
] | python | train |
lvjiyong/configreset | configreset/__init__.py | https://github.com/lvjiyong/configreset/blob/cde0a426e993a6aa483d6934358e61750c944de9/configreset/__init__.py#L286-L306 | def _get_value(first, second):
"""
数据转化
:param first:
:param second:
:return:
>>> _get_value(1,'2')
2
>>> _get_value([1,2],[2,3])
[1, 2, 3]
"""
if isinstance(first, list) and isinstance(second, list):
return list(set(first).union(set(second)))
elif isinstance(first, dict) and isinstance(second, dict):
first.update(second)
return first
elif first is not None and second is not None and not isinstance(first, type(second)):
return type(first)(second)
else:
return second | [
"def",
"_get_value",
"(",
"first",
",",
"second",
")",
":",
"if",
"isinstance",
"(",
"first",
",",
"list",
")",
"and",
"isinstance",
"(",
"second",
",",
"list",
")",
":",
"return",
"list",
"(",
"set",
"(",
"first",
")",
".",
"union",
"(",
"set",
"(... | 数据转化
:param first:
:param second:
:return:
>>> _get_value(1,'2')
2
>>> _get_value([1,2],[2,3])
[1, 2, 3] | [
"数据转化",
":",
"param",
"first",
":",
":",
"param",
"second",
":",
":",
"return",
":",
">>>",
"_get_value",
"(",
"1",
"2",
")",
"2",
">>>",
"_get_value",
"(",
"[",
"1",
"2",
"]",
"[",
"2",
"3",
"]",
")",
"[",
"1",
"2",
"3",
"]"
] | python | train |
sdcooke/django_bundles | django_bundles/utils/__init__.py | https://github.com/sdcooke/django_bundles/blob/2810fc455ec7391283792c1f108f4e8340f5d12f/django_bundles/utils/__init__.py#L1-L21 | def get_class(class_string):
"""
Get a class from a dotted string
"""
split_string = class_string.encode('ascii').split('.')
import_path = '.'.join(split_string[:-1])
class_name = split_string[-1]
if class_name:
try:
if import_path:
mod = __import__(import_path, globals(), {}, [class_name])
cls = getattr(mod, class_name)
else:
cls = __import__(class_name, globals(), {})
if cls:
return cls
except (ImportError, AttributeError):
pass
return None | [
"def",
"get_class",
"(",
"class_string",
")",
":",
"split_string",
"=",
"class_string",
".",
"encode",
"(",
"'ascii'",
")",
".",
"split",
"(",
"'.'",
")",
"import_path",
"=",
"'.'",
".",
"join",
"(",
"split_string",
"[",
":",
"-",
"1",
"]",
")",
"class... | Get a class from a dotted string | [
"Get",
"a",
"class",
"from",
"a",
"dotted",
"string"
] | python | train |
google/pyringe | pyringe/payload/libpython.py | https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L223-L260 | def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval)) | [
"def",
"proxyval",
"(",
"self",
",",
"visited",
")",
":",
"class",
"FakeRepr",
"(",
"object",
")",
":",
"\"\"\"\n Class representing a non-descript PyObject* value in the inferior\n process for when we don't have a custom scraper, intended to have\n a san... | Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave | [
"Scrape",
"a",
"value",
"from",
"the",
"inferior",
"process",
"and",
"try",
"to",
"represent",
"it",
"within",
"the",
"gdb",
"process",
"whilst",
"(",
"hopefully",
")",
"avoiding",
"crashes",
"when",
"the",
"remote",
"data",
"is",
"corrupt",
"."
] | python | train |
mozilla/amo-validator | validator/errorbundler.py | https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/errorbundler.py#L263-L271 | def get_resource(self, name):
'Retrieves an object that has been stored by another test.'
if name in self.resources:
return self.resources[name]
elif name in self.pushable_resources:
return self.pushable_resources[name]
else:
return False | [
"def",
"get_resource",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"resources",
":",
"return",
"self",
".",
"resources",
"[",
"name",
"]",
"elif",
"name",
"in",
"self",
".",
"pushable_resources",
":",
"return",
"self",
".",
"pu... | Retrieves an object that has been stored by another test. | [
"Retrieves",
"an",
"object",
"that",
"has",
"been",
"stored",
"by",
"another",
"test",
"."
] | python | train |
pyscaffold/configupdater | src/configupdater/configupdater.py | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L478-L492 | def set_values(self, values, separator='\n', indent=4*' '):
"""Sets the value to a given list of options, e.g. multi-line values
Args:
values (list): list of values
separator (str): separator for values, default: line separator
indent (str): indentation depth in case of line separator
"""
self._updated = True
self._multiline_value_joined = True
self._values = values
if separator == '\n':
values.insert(0, '')
separator = separator + indent
self._value = separator.join(values) | [
"def",
"set_values",
"(",
"self",
",",
"values",
",",
"separator",
"=",
"'\\n'",
",",
"indent",
"=",
"4",
"*",
"' '",
")",
":",
"self",
".",
"_updated",
"=",
"True",
"self",
".",
"_multiline_value_joined",
"=",
"True",
"self",
".",
"_values",
"=",
"val... | Sets the value to a given list of options, e.g. multi-line values
Args:
values (list): list of values
separator (str): separator for values, default: line separator
indent (str): indentation depth in case of line separator | [
"Sets",
"the",
"value",
"to",
"a",
"given",
"list",
"of",
"options",
"e",
".",
"g",
".",
"multi",
"-",
"line",
"values"
] | python | train |
ChristopherRogers1991/python-irsend | py_irsend/irsend.py | https://github.com/ChristopherRogers1991/python-irsend/blob/aab8ee05d47cc0e3c8c84d220bc6777aa720b232/py_irsend/irsend.py#L26-L50 | def list_remotes(device=None, address=None):
"""
List the available remotes.
All parameters are passed to irsend. See the man page for irsend
for details about their usage.
Parameters
----------
device: str
address: str
Returns
-------
[str]
Notes
-----
No attempt is made to catch or handle errors. See the documentation
for subprocess.check_output to see the types of exceptions it may raise.
"""
output = _call(["list", "", ""], None, device, address)
remotes = [l.split()[-1] for l in output.splitlines() if l]
return remotes | [
"def",
"list_remotes",
"(",
"device",
"=",
"None",
",",
"address",
"=",
"None",
")",
":",
"output",
"=",
"_call",
"(",
"[",
"\"list\"",
",",
"\"\"",
",",
"\"\"",
"]",
",",
"None",
",",
"device",
",",
"address",
")",
"remotes",
"=",
"[",
"l",
".",
... | List the available remotes.
All parameters are passed to irsend. See the man page for irsend
for details about their usage.
Parameters
----------
device: str
address: str
Returns
-------
[str]
Notes
-----
No attempt is made to catch or handle errors. See the documentation
for subprocess.check_output to see the types of exceptions it may raise. | [
"List",
"the",
"available",
"remotes",
"."
] | python | train |
markuskiller/textblob-de | textblob_de/classifiers.py | https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L100-L105 | def contains_extractor(document):
"""A basic document feature extractor that returns a dict of words that the
document contains."""
tokens = _get_document_tokens(document)
features = dict((u'contains({0})'.format(w), True) for w in tokens)
return features | [
"def",
"contains_extractor",
"(",
"document",
")",
":",
"tokens",
"=",
"_get_document_tokens",
"(",
"document",
")",
"features",
"=",
"dict",
"(",
"(",
"u'contains({0})'",
".",
"format",
"(",
"w",
")",
",",
"True",
")",
"for",
"w",
"in",
"tokens",
")",
"... | A basic document feature extractor that returns a dict of words that the
document contains. | [
"A",
"basic",
"document",
"feature",
"extractor",
"that",
"returns",
"a",
"dict",
"of",
"words",
"that",
"the",
"document",
"contains",
"."
] | python | train |
pycontribs/pyrax | pyrax/object_storage.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1659-L1668 | def change_content_type(self, new_ctype, guess=False):
"""
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None.
Failure during the put will result in a swift exception.
"""
self.container.change_object_content_type(self, new_ctype=new_ctype,
guess=guess) | [
"def",
"change_content_type",
"(",
"self",
",",
"new_ctype",
",",
"guess",
"=",
"False",
")",
":",
"self",
".",
"container",
".",
"change_object_content_type",
"(",
"self",
",",
"new_ctype",
"=",
"new_ctype",
",",
"guess",
"=",
"guess",
")"
] | Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None.
Failure during the put will result in a swift exception. | [
"Copies",
"object",
"to",
"itself",
"but",
"applies",
"a",
"new",
"content",
"-",
"type",
".",
"The",
"guess",
"feature",
"requires",
"the",
"container",
"to",
"be",
"CDN",
"-",
"enabled",
".",
"If",
"not",
"then",
"the",
"content",
"-",
"type",
"must",
... | python | train |
facetoe/zenpy | zenpy/lib/generator.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/generator.py#L59-L63 | def update_attrs(self):
""" Add attributes such as count/end_time that can be present """
for key, value in self._response_json.items():
if key != 'results' and type(value) not in (list, dict):
setattr(self, key, value) | [
"def",
"update_attrs",
"(",
"self",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_response_json",
".",
"items",
"(",
")",
":",
"if",
"key",
"!=",
"'results'",
"and",
"type",
"(",
"value",
")",
"not",
"in",
"(",
"list",
",",
"dict",
")"... | Add attributes such as count/end_time that can be present | [
"Add",
"attributes",
"such",
"as",
"count",
"/",
"end_time",
"that",
"can",
"be",
"present"
] | python | train |
summa-tx/riemann | riemann/simple.py | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L62-L69 | def output(value, address):
'''
int, str -> TxOut
accepts base58 or bech32 addresses
'''
script = addr.to_output_script(address)
value = utils.i2le_padded(value, 8)
return tb._make_output(value, script) | [
"def",
"output",
"(",
"value",
",",
"address",
")",
":",
"script",
"=",
"addr",
".",
"to_output_script",
"(",
"address",
")",
"value",
"=",
"utils",
".",
"i2le_padded",
"(",
"value",
",",
"8",
")",
"return",
"tb",
".",
"_make_output",
"(",
"value",
","... | int, str -> TxOut
accepts base58 or bech32 addresses | [
"int",
"str",
"-",
">",
"TxOut",
"accepts",
"base58",
"or",
"bech32",
"addresses"
] | python | train |
GNS3/gns3-server | gns3server/controller/gns3vm/__init__.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/gns3vm/__init__.py#L222-L236 | def list(self, engine):
"""
List VMS for an engine
"""
engine = self._get_engine(engine)
vms = []
try:
for vm in (yield from engine.list()):
vms.append({"vmname": vm["vmname"]})
except GNS3VMError as e:
# We raise error only if user activated the GNS3 VM
# otherwise you have noise when VMware is not installed
if self.enable:
raise e
return vms | [
"def",
"list",
"(",
"self",
",",
"engine",
")",
":",
"engine",
"=",
"self",
".",
"_get_engine",
"(",
"engine",
")",
"vms",
"=",
"[",
"]",
"try",
":",
"for",
"vm",
"in",
"(",
"yield",
"from",
"engine",
".",
"list",
"(",
")",
")",
":",
"vms",
"."... | List VMS for an engine | [
"List",
"VMS",
"for",
"an",
"engine"
] | python | train |
angr/claripy | claripy/vsa/strided_interval.py | https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/strided_interval.py#L559-L592 | def _signed_bounds(self):
"""
Get lower bound and upper bound for `self` in signed arithmetic.
:return: a list of (lower_bound, upper_bound) tuples
"""
nsplit = self._nsplit()
if len(nsplit) == 1:
lb = nsplit[0].lower_bound
ub = nsplit[0].upper_bound
lb = self._unsigned_to_signed(lb, self.bits)
ub = self._unsigned_to_signed(ub, self.bits)
return [(lb, ub)]
elif len(nsplit) == 2:
# nsplit[0] is on the left hemisphere, and nsplit[1] is on the right hemisphere
# The left one
lb_1 = nsplit[0].lower_bound
ub_1 = nsplit[0].upper_bound
# The right one
lb_2 = nsplit[1].lower_bound
ub_2 = nsplit[1].upper_bound
# Then convert them to negative numbers
lb_2 = self._unsigned_to_signed(lb_2, self.bits)
ub_2 = self._unsigned_to_signed(ub_2, self.bits)
return [ (lb_1, ub_1), (lb_2, ub_2) ]
else:
raise Exception('WTF') | [
"def",
"_signed_bounds",
"(",
"self",
")",
":",
"nsplit",
"=",
"self",
".",
"_nsplit",
"(",
")",
"if",
"len",
"(",
"nsplit",
")",
"==",
"1",
":",
"lb",
"=",
"nsplit",
"[",
"0",
"]",
".",
"lower_bound",
"ub",
"=",
"nsplit",
"[",
"0",
"]",
".",
"... | Get lower bound and upper bound for `self` in signed arithmetic.
:return: a list of (lower_bound, upper_bound) tuples | [
"Get",
"lower",
"bound",
"and",
"upper",
"bound",
"for",
"self",
"in",
"signed",
"arithmetic",
"."
] | python | train |
marcomusy/vtkplotter | vtkplotter/actors.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1453-L1492 | def threshold(self, scalars, vmin=None, vmax=None, useCells=False):
"""
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
"""
if utils.isSequence(scalars):
self.addPointScalars(scalars, "threshold")
scalars = "threshold"
elif self.scalars(scalars) is None:
colors.printc("~times No scalars found with name", scalars, c=1)
exit()
thres = vtk.vtkThreshold()
thres.SetInputData(self.poly)
if useCells:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
else:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
thres.SetInputArrayToProcess(0, 0, 0, asso, scalars)
if vmin is None and vmax is not None:
thres.ThresholdByLower(vmax)
elif vmax is None and vmin is not None:
thres.ThresholdByUpper(vmin)
else:
thres.ThresholdBetween(vmin, vmax)
thres.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(thres.GetOutput())
gf.Update()
return self.updateMesh(gf.GetOutput()) | [
"def",
"threshold",
"(",
"self",
",",
"scalars",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"useCells",
"=",
"False",
")",
":",
"if",
"utils",
".",
"isSequence",
"(",
"scalars",
")",
":",
"self",
".",
"addPointScalars",
"(",
"scalars",
... | Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_ | [
"Extracts",
"cells",
"where",
"scalar",
"value",
"satisfies",
"threshold",
"criterion",
"."
] | python | train |
willkg/everett | everett/ext/inifile.py | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/ext/inifile.py#L142-L156 | def parse_ini_file(self, path):
"""Parse ini file at ``path`` and return dict."""
cfgobj = ConfigObj(path, list_values=False)
def extract_section(namespace, d):
cfg = {}
for key, val in d.items():
if isinstance(d[key], dict):
cfg.update(extract_section(namespace + [key], d[key]))
else:
cfg['_'.join(namespace + [key]).upper()] = val
return cfg
return extract_section([], cfgobj.dict()) | [
"def",
"parse_ini_file",
"(",
"self",
",",
"path",
")",
":",
"cfgobj",
"=",
"ConfigObj",
"(",
"path",
",",
"list_values",
"=",
"False",
")",
"def",
"extract_section",
"(",
"namespace",
",",
"d",
")",
":",
"cfg",
"=",
"{",
"}",
"for",
"key",
",",
"val... | Parse ini file at ``path`` and return dict. | [
"Parse",
"ini",
"file",
"at",
"path",
"and",
"return",
"dict",
"."
] | python | train |
thespacedoctor/sherlock | sherlock/transient_catalogue_crossmatch.py | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L264-L512 | def angular_crossmatch_against_catalogue(
self,
objectList,
searchPara={},
search_name="",
brightnessFilter=False,
physicalSearch=False,
classificationType=False
):
"""*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``angular_crossmatch_against_catalogue`` method')
self.log.info("STARTING %s SEARCH" %
(search_name,))
start_time = time.time()
# DEFAULTS
# print search_name, classificationType
magnitudeLimitFilter = None
upperMagnitudeLimit = False
lowerMagnitudeLimit = False
catalogueName = searchPara["database table"]
if not "mag column" in searchPara:
searchPara["mag column"] = None
if brightnessFilter:
if "mag column" in searchPara and searchPara["mag column"]:
magnitudeLimitFilter = self.colMaps[
catalogueName][searchPara["mag column"] + "ColName"]
theseSearchPara = searchPara[brightnessFilter]
else:
theseSearchPara = searchPara
# EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
if classificationType == "synonym":
radius = self.settings["synonym radius arcsec"]
matchedType = theseSearchPara["synonym"]
elif classificationType == "association":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["association"]
elif classificationType == "annotation":
radius = theseSearchPara["angular radius arcsec"]
matchedType = theseSearchPara["annotation"]
if brightnessFilter == "faint":
upperMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "bright":
lowerMagnitudeLimit = theseSearchPara["mag limit"]
elif brightnessFilter == "general":
if "faint" in searchPara:
lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
if "bright" in searchPara:
upperMagnitudeLimit = searchPara["bright"]["mag limit"]
# VARIABLES
matchedObjects = []
matchSubset = []
transRAs = []
transRAs[:] = [t['ra'] for t in objectList]
transDecs = []
transDecs[:] = [t['dec'] for t in objectList]
if len(transRAs) == 0:
return []
cs = catalogue_conesearch(
log=self.log,
ra=transRAs,
dec=transDecs,
radiusArcsec=radius,
colMaps=self.colMaps,
tableName=catalogueName,
dbConn=self.dbConn,
nearestOnly=False,
physicalSearch=physicalSearch,
upperMagnitudeLimit=upperMagnitudeLimit,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=magnitudeLimitFilter
)
# catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
indices, catalogueMatches = cs.search()
count = 1
annotatedcatalogueMatches = []
for i, xm in zip(indices, catalogueMatches):
# CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
if "cmSepArcsec" in xm:
xm["separationArcsec"] = xm["cmSepArcsec"]
# CALCULATE SEPARATION IN ARCSEC
calculator = separations(
log=self.log,
ra1=objectList[i]["ra"],
dec1=objectList[i]["dec"],
ra2=xm["ra"],
dec2=xm["dec"]
)
angularSeparation, north, east = calculator.get()
xm["northSeparationArcsec"] = north
xm["eastSeparationArcsec"] = east
del xm["cmSepArcsec"]
xm["association_type"] = matchedType
xm["catalogue_view_name"] = catalogueName
xm["transient_object_id"] = objectList[i]["id"]
xm["catalogue_table_name"] = self.colMaps[
catalogueName]["description"]
xm["catalogue_table_id"] = self.colMaps[
catalogueName]["table_id"]
xm["catalogue_view_id"] = self.colMaps[
catalogueName]["id"]
if classificationType == "synonym":
xm["classificationReliability"] = 1
elif classificationType == "association":
xm["classificationReliability"] = 2
elif classificationType == "annotation":
xm["classificationReliability"] = 3
xm = self._annotate_crossmatch_with_value_added_parameters(
crossmatchDict=xm,
catalogueName=catalogueName,
searchPara=theseSearchPara,
search_name=search_name
)
annotatedcatalogueMatches.append(xm)
catalogueMatches = annotatedcatalogueMatches
# IF BRIGHT STAR SEARCH
if brightnessFilter == "bright" and "star" in search_name:
catalogueMatches = self._bright_star_match(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
catalogueMatches = self._galaxy_association_cuts(
matchedObjects=catalogueMatches,
catalogueName=catalogueName,
lowerMagnitudeLimit=lowerMagnitudeLimit,
upperMagnitudeLimit=upperMagnitudeLimit,
magnitudeLimitFilter=searchPara["mag column"]
)
if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
nearestMatches = []
transList = []
for c in catalogueMatches:
if c["transient_object_id"] not in transList:
transList.append(c["transient_object_id"])
nearestMatches.append(c)
catalogueMatches = nearestMatches
self.log.debug(
'completed the ``angular_crossmatch_against_catalogue`` method')
self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
(search_name, time.time() - start_time,))
return catalogueMatches | [
"def",
"angular_crossmatch_against_catalogue",
"(",
"self",
",",
"objectList",
",",
"searchPara",
"=",
"{",
"}",
",",
"search_name",
"=",
"\"\"",
",",
"brightnessFilter",
"=",
"False",
",",
"physicalSearch",
"=",
"False",
",",
"classificationType",
"=",
"False",
... | *perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | [
"*",
"perform",
"an",
"angular",
"separation",
"crossmatch",
"against",
"a",
"given",
"catalogue",
"in",
"the",
"database",
"and",
"annotate",
"the",
"crossmatch",
"with",
"some",
"value",
"added",
"parameters",
"(",
"distances",
"physical",
"separations",
"sub",
... | python | train |
JNRowe/upoints | upoints/utils.py | https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/utils.py#L436-L449 | def isoformat(self):
"""Generate an ISO 8601 formatted time stamp.
Returns:
str: `ISO 8601`_ formatted time stamp
.. _ISO 8601: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
text = [self.strftime('%Y-%m-%dT%H:%M:%S'), ]
if self.tzinfo:
text.append(self.tzinfo.as_timezone())
else:
text.append('+00:00')
return ''.join(text) | [
"def",
"isoformat",
"(",
"self",
")",
":",
"text",
"=",
"[",
"self",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S'",
")",
",",
"]",
"if",
"self",
".",
"tzinfo",
":",
"text",
".",
"append",
"(",
"self",
".",
"tzinfo",
".",
"as_timezone",
"(",
")",
")",
... | Generate an ISO 8601 formatted time stamp.
Returns:
str: `ISO 8601`_ formatted time stamp
.. _ISO 8601: http://www.cl.cam.ac.uk/~mgk25/iso-time.html | [
"Generate",
"an",
"ISO",
"8601",
"formatted",
"time",
"stamp",
"."
] | python | train |
fastai/fastai | fastai/vision/data.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L191-L196 | def download_images(urls:Collection[str], dest:PathOrStr, max_pics:int=1000, max_workers:int=8, timeout=4):
"Download images listed in text file `urls` to path `dest`, at most `max_pics`"
urls = open(urls).read().strip().split("\n")[:max_pics]
dest = Path(dest)
dest.mkdir(exist_ok=True)
parallel(partial(_download_image_inner, dest, timeout=timeout), urls, max_workers=max_workers) | [
"def",
"download_images",
"(",
"urls",
":",
"Collection",
"[",
"str",
"]",
",",
"dest",
":",
"PathOrStr",
",",
"max_pics",
":",
"int",
"=",
"1000",
",",
"max_workers",
":",
"int",
"=",
"8",
",",
"timeout",
"=",
"4",
")",
":",
"urls",
"=",
"open",
"... | Download images listed in text file `urls` to path `dest`, at most `max_pics` | [
"Download",
"images",
"listed",
"in",
"text",
"file",
"urls",
"to",
"path",
"dest",
"at",
"most",
"max_pics"
] | python | train |
jeffrimko/Auxly | lib/auxly/filesys.py | https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/filesys.py#L239-L263 | def delete(path, regex=None, recurse=False, test=False):
"""Deletes the file or directory at `path`. If `path` is a directory and
`regex` is provided, matching files will be deleted; `recurse` controls
whether subdirectories are recursed. A list of deleted items is returned.
If `test` is true, nothing will be deleted and a list of items that would
have been deleted is returned.
"""
deleted = []
if op.isfile(path):
if not test: os.remove(path)
else: return [path]
return [] if op.exists(path) else [path]
elif op.isdir(path):
if regex:
for r,ds,fs in os.walk(path):
for i in fs:
if _is_match(regex, i):
deleted += delete(op.join(r,i), test=test)
if not recurse:
break
else:
if not test: shutil.rmtree(path)
else: return [path]
return [] if op.exists(path) else [path]
return deleted | [
"def",
"delete",
"(",
"path",
",",
"regex",
"=",
"None",
",",
"recurse",
"=",
"False",
",",
"test",
"=",
"False",
")",
":",
"deleted",
"=",
"[",
"]",
"if",
"op",
".",
"isfile",
"(",
"path",
")",
":",
"if",
"not",
"test",
":",
"os",
".",
"remove... | Deletes the file or directory at `path`. If `path` is a directory and
`regex` is provided, matching files will be deleted; `recurse` controls
whether subdirectories are recursed. A list of deleted items is returned.
If `test` is true, nothing will be deleted and a list of items that would
have been deleted is returned. | [
"Deletes",
"the",
"file",
"or",
"directory",
"at",
"path",
".",
"If",
"path",
"is",
"a",
"directory",
"and",
"regex",
"is",
"provided",
"matching",
"files",
"will",
"be",
"deleted",
";",
"recurse",
"controls",
"whether",
"subdirectories",
"are",
"recursed",
... | python | train |
fulfilio/python-magento | magento/catalog.py | https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/catalog.py#L80-L93 | def update(self, category_id, data, store_view=None):
"""
Update Category
:param category_id: ID of category
:param data: Category Data
:param store_view: Store view ID or code
:return: Boolean
"""
return bool(
self.call(
'catalog_category.update', [category_id, data, store_view]
)
) | [
"def",
"update",
"(",
"self",
",",
"category_id",
",",
"data",
",",
"store_view",
"=",
"None",
")",
":",
"return",
"bool",
"(",
"self",
".",
"call",
"(",
"'catalog_category.update'",
",",
"[",
"category_id",
",",
"data",
",",
"store_view",
"]",
")",
")"
... | Update Category
:param category_id: ID of category
:param data: Category Data
:param store_view: Store view ID or code
:return: Boolean | [
"Update",
"Category"
] | python | train |
numenta/nupic | src/nupic/algorithms/knn_classifier.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/knn_classifier.py#L294-L306 | def prototypeSetCategory(self, idToCategorize, newCategory):
"""
Allows ids to be assigned a category and subsequently enables users to use:
- :meth:`~.KNNClassifier.KNNClassifier.removeCategory`
- :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern`
- :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern`
"""
if idToCategorize not in self._categoryRecencyList:
return
recordIndex = self._categoryRecencyList.index(idToCategorize)
self._categoryList[recordIndex] = newCategory | [
"def",
"prototypeSetCategory",
"(",
"self",
",",
"idToCategorize",
",",
"newCategory",
")",
":",
"if",
"idToCategorize",
"not",
"in",
"self",
".",
"_categoryRecencyList",
":",
"return",
"recordIndex",
"=",
"self",
".",
"_categoryRecencyList",
".",
"index",
"(",
... | Allows ids to be assigned a category and subsequently enables users to use:
- :meth:`~.KNNClassifier.KNNClassifier.removeCategory`
- :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern`
- :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern` | [
"Allows",
"ids",
"to",
"be",
"assigned",
"a",
"category",
"and",
"subsequently",
"enables",
"users",
"to",
"use",
":"
] | python | valid |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py#L184-L201 | def mac_access_list_extended_hide_mac_acl_ext_seq_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac = ET.SubElement(config, "mac", xmlns="urn:brocade.com:mgmt:brocade-mac-access-list")
access_list = ET.SubElement(mac, "access-list")
extended = ET.SubElement(access_list, "extended")
name_key = ET.SubElement(extended, "name")
name_key.text = kwargs.pop('name')
hide_mac_acl_ext = ET.SubElement(extended, "hide-mac-acl-ext")
seq = ET.SubElement(hide_mac_acl_ext, "seq")
seq_id_key = ET.SubElement(seq, "seq-id")
seq_id_key.text = kwargs.pop('seq_id')
action = ET.SubElement(seq, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"mac_access_list_extended_hide_mac_acl_ext_seq_action",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"mac",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"mac\"",
",",
"xmlns",
"=",... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
ToFuProject/tofu | tofu/geom/_core.py | https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L721-L758 | def get_InsideConvexPoly(self, RelOff=_def.TorRelOff, ZLim='Def',
Spline=True, Splprms=_def.TorSplprms,
NP=_def.TorInsideNP, Plot=False, Test=True):
""" Return a polygon that is a smaller and smoothed approximation of Ves.Poly, useful for excluding the divertor region in a Tokamak
For some uses, it can be practical to approximate the polygon defining the Ves object (which can be non-convex, like with a divertor), by a simpler, sligthly smaller and convex polygon.
This method provides a fast solution for computing such a proxy.
Parameters
----------
RelOff : float
Fraction by which an homothetic polygon should be reduced (1.-RelOff)*(Poly-BaryS)
ZLim : None / str / tuple
Flag indicating what limits shall be put to the height of the polygon (used for excluding divertor)
Spline : bool
Flag indiating whether the reduced and truncated polygon shall be smoothed by 2D b-spline curves
Splprms : list
List of 3 parameters to be used for the smoothing [weights,smoothness,b-spline order], fed to scipy.interpolate.splprep()
NP : int
Number of points to be used to define the smoothed polygon
Plot : bool
Flag indicating whether the result shall be plotted for visual inspection
Test : bool
Flag indicating whether the inputs should be tested for conformity
Returns
-------
Poly : np.ndarray
(2,N) polygon resulting from homothetic transform, truncating and optional smoothing
"""
return _comp._Ves_get_InsideConvexPoly(self.Poly_closed,
self.dgeom['P2Min'],
self.dgeom['P2Max'],
self.dgeom['BaryS'],
RelOff=RelOff, ZLim=ZLim,
Spline=Spline, Splprms=Splprms,
NP=NP, Plot=Plot, Test=Test) | [
"def",
"get_InsideConvexPoly",
"(",
"self",
",",
"RelOff",
"=",
"_def",
".",
"TorRelOff",
",",
"ZLim",
"=",
"'Def'",
",",
"Spline",
"=",
"True",
",",
"Splprms",
"=",
"_def",
".",
"TorSplprms",
",",
"NP",
"=",
"_def",
".",
"TorInsideNP",
",",
"Plot",
"=... | Return a polygon that is a smaller and smoothed approximation of Ves.Poly, useful for excluding the divertor region in a Tokamak
For some uses, it can be practical to approximate the polygon defining the Ves object (which can be non-convex, like with a divertor), by a simpler, sligthly smaller and convex polygon.
This method provides a fast solution for computing such a proxy.
Parameters
----------
RelOff : float
Fraction by which an homothetic polygon should be reduced (1.-RelOff)*(Poly-BaryS)
ZLim : None / str / tuple
Flag indicating what limits shall be put to the height of the polygon (used for excluding divertor)
Spline : bool
Flag indiating whether the reduced and truncated polygon shall be smoothed by 2D b-spline curves
Splprms : list
List of 3 parameters to be used for the smoothing [weights,smoothness,b-spline order], fed to scipy.interpolate.splprep()
NP : int
Number of points to be used to define the smoothed polygon
Plot : bool
Flag indicating whether the result shall be plotted for visual inspection
Test : bool
Flag indicating whether the inputs should be tested for conformity
Returns
-------
Poly : np.ndarray
(2,N) polygon resulting from homothetic transform, truncating and optional smoothing | [
"Return",
"a",
"polygon",
"that",
"is",
"a",
"smaller",
"and",
"smoothed",
"approximation",
"of",
"Ves",
".",
"Poly",
"useful",
"for",
"excluding",
"the",
"divertor",
"region",
"in",
"a",
"Tokamak"
] | python | train |
klavinslab/coral | coral/reaction/_central_dogma.py | https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_central_dogma.py#L42-L86 | def coding_sequence(rna):
'''Extract coding sequence from an RNA template.
:param seq: Sequence from which to extract a coding sequence.
:type seq: coral.RNA
:param material: Type of sequence ('dna' or 'rna')
:type material: str
:returns: The first coding sequence (start codon -> stop codon) matched
from 5' to 3'.
:rtype: coral.RNA
:raises: ValueError if rna argument has no start codon.
ValueError if rna argument has no stop codon in-frame with the
first start codon.
'''
if isinstance(rna, coral.DNA):
rna = transcribe(rna)
codons_left = len(rna) // 3
start_codon = coral.RNA('aug')
stop_codons = [coral.RNA('uag'), coral.RNA('uga'), coral.RNA('uaa')]
start = None
stop = None
valid = [None, None]
index = 0
while codons_left:
codon = rna[index:index + 3]
if valid[0] is None:
if codon in start_codon:
start = index
valid[0] = True
else:
if codon in stop_codons:
stop = index + 3
valid[1] = True
break
index += 3
codons_left -= 1
if valid[0] is None:
raise ValueError('Sequence has no start codon.')
elif stop is None:
raise ValueError('Sequence has no stop codon.')
coding_rna = rna[start:stop]
return coding_rna | [
"def",
"coding_sequence",
"(",
"rna",
")",
":",
"if",
"isinstance",
"(",
"rna",
",",
"coral",
".",
"DNA",
")",
":",
"rna",
"=",
"transcribe",
"(",
"rna",
")",
"codons_left",
"=",
"len",
"(",
"rna",
")",
"//",
"3",
"start_codon",
"=",
"coral",
".",
... | Extract coding sequence from an RNA template.
:param seq: Sequence from which to extract a coding sequence.
:type seq: coral.RNA
:param material: Type of sequence ('dna' or 'rna')
:type material: str
:returns: The first coding sequence (start codon -> stop codon) matched
from 5' to 3'.
:rtype: coral.RNA
:raises: ValueError if rna argument has no start codon.
ValueError if rna argument has no stop codon in-frame with the
first start codon. | [
"Extract",
"coding",
"sequence",
"from",
"an",
"RNA",
"template",
"."
] | python | train |
pipermerriam/flex | flex/cli.py | https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/cli.py#L12-L29 | def main(source):
"""
For a given command line supplied argument, negotiate the content, parse
the schema and then return any issues to stdout or if no schema issues,
return success exit code.
"""
if source is None:
click.echo(
"You need to supply a file or url to a schema to a swagger schema, for"
"the validator to work."
)
return 1
try:
load(source)
click.echo("Validation passed")
return 0
except ValidationError as e:
raise click.ClickException(str(e)) | [
"def",
"main",
"(",
"source",
")",
":",
"if",
"source",
"is",
"None",
":",
"click",
".",
"echo",
"(",
"\"You need to supply a file or url to a schema to a swagger schema, for\"",
"\"the validator to work.\"",
")",
"return",
"1",
"try",
":",
"load",
"(",
"source",
")... | For a given command line supplied argument, negotiate the content, parse
the schema and then return any issues to stdout or if no schema issues,
return success exit code. | [
"For",
"a",
"given",
"command",
"line",
"supplied",
"argument",
"negotiate",
"the",
"content",
"parse",
"the",
"schema",
"and",
"then",
"return",
"any",
"issues",
"to",
"stdout",
"or",
"if",
"no",
"schema",
"issues",
"return",
"success",
"exit",
"code",
"."
... | python | train |
materialsproject/pymatgen | pymatgen/optimization/linear_assignment_numpy.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/optimization/linear_assignment_numpy.py#L91-L98 | def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost | [
"def",
"min_cost",
"(",
"self",
")",
":",
"if",
"self",
".",
"_min_cost",
":",
"return",
"self",
".",
"_min_cost",
"self",
".",
"_min_cost",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"c",
"[",
"np",
".",
"arange",
"(",
"self",
".",
"nx",
")",
",",... | Returns the cost of the best assignment | [
"Returns",
"the",
"cost",
"of",
"the",
"best",
"assignment"
] | python | train |
mitsei/dlkit | dlkit/handcar/learning/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L2402-L2427 | def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
"""
if not self.supports_objective_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | [
"def",
"get_objective_query_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_objective_query",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"... | Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.* | [
"Gets",
"the",
"OsidSession",
"associated",
"with",
"the",
"objective",
"query",
"service",
"."
] | python | train |
rosenbrockc/fortpy | fortpy/scripts/analyze.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L954-L979 | def _plot_generic(self, filename=None):
"""Plots the current state of the shell, saving the value to the specified file
if specified.
"""
#Since the filename is being passed directly from the argument, check its validity.
if filename == "":
filename = None
if "x" not in self.curargs["labels"]:
#Set a default x-label since we know what variable is being plotted.
self.curargs["labels"]["x"] = "Value of '{}' (unknown units)".format(self.curargs["independent"])
args = self.curargs
a = self.tests[self.active]
self._make_fits()
#Before we can pass the markers in, we need to translate from keys to values so
#that matplotlib understands.
markdict = self._get_matplot_dict("markers", "marker", self._possible_markers)
linedict = self._get_matplot_dict("lines", "style", self._possible_linestyles)
#Set the remaining arguments to have the right keyword name.
args["savefile"] = filename
args["markers"] = markdict
args["lines"] = linedict
a.plot(**args) | [
"def",
"_plot_generic",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"#Since the filename is being passed directly from the argument, check its validity.",
"if",
"filename",
"==",
"\"\"",
":",
"filename",
"=",
"None",
"if",
"\"x\"",
"not",
"in",
"self",
".",
... | Plots the current state of the shell, saving the value to the specified file
if specified. | [
"Plots",
"the",
"current",
"state",
"of",
"the",
"shell",
"saving",
"the",
"value",
"to",
"the",
"specified",
"file",
"if",
"specified",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/cocoapy.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/cocoapy.py#L1085-L1090 | def cfset_to_set(cfset):
"""Convert CFSet to python set."""
count = cf.CFSetGetCount(cfset)
buffer = (c_void_p * count)()
cf.CFSetGetValues(cfset, byref(buffer))
return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)]) | [
"def",
"cfset_to_set",
"(",
"cfset",
")",
":",
"count",
"=",
"cf",
".",
"CFSetGetCount",
"(",
"cfset",
")",
"buffer",
"=",
"(",
"c_void_p",
"*",
"count",
")",
"(",
")",
"cf",
".",
"CFSetGetValues",
"(",
"cfset",
",",
"byref",
"(",
"buffer",
")",
")",... | Convert CFSet to python set. | [
"Convert",
"CFSet",
"to",
"python",
"set",
"."
] | python | train |
pvlib/pvlib-python | pvlib/forecast.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L308-L325 | def rename(self, data, variables=None):
"""
Renames the columns according the variable mapping.
Parameters
----------
data: DataFrame
variables: None or dict, default None
If None, uses self.variables
Returns
-------
data: DataFrame
Renamed data.
"""
if variables is None:
variables = self.variables
return data.rename(columns={y: x for x, y in variables.items()}) | [
"def",
"rename",
"(",
"self",
",",
"data",
",",
"variables",
"=",
"None",
")",
":",
"if",
"variables",
"is",
"None",
":",
"variables",
"=",
"self",
".",
"variables",
"return",
"data",
".",
"rename",
"(",
"columns",
"=",
"{",
"y",
":",
"x",
"for",
"... | Renames the columns according the variable mapping.
Parameters
----------
data: DataFrame
variables: None or dict, default None
If None, uses self.variables
Returns
-------
data: DataFrame
Renamed data. | [
"Renames",
"the",
"columns",
"according",
"the",
"variable",
"mapping",
"."
] | python | train |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/nodepool/protocol.py | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/nodepool/protocol.py#L76-L88 | def cred_def_id(self, issuer_did: str, schema_seq_no: int) -> str:
"""
Return credential definition identifier for input issuer DID and schema sequence number.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:return: credential definition identifier
"""
return '{}:3:CL:{}{}'.format( # 3 marks indy cred def id, CL is sig type
issuer_did,
schema_seq_no,
self.cd_id_tag(True)) | [
"def",
"cred_def_id",
"(",
"self",
",",
"issuer_did",
":",
"str",
",",
"schema_seq_no",
":",
"int",
")",
"->",
"str",
":",
"return",
"'{}:3:CL:{}{}'",
".",
"format",
"(",
"# 3 marks indy cred def id, CL is sig type",
"issuer_did",
",",
"schema_seq_no",
",",
"self"... | Return credential definition identifier for input issuer DID and schema sequence number.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:return: credential definition identifier | [
"Return",
"credential",
"definition",
"identifier",
"for",
"input",
"issuer",
"DID",
"and",
"schema",
"sequence",
"number",
"."
] | python | train |
MillionIntegrals/vel | vel/rl/modules/action_head.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/action_head.py#L68-L87 | def kl_divergence(self, params_q, params_p):
"""
Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
Formula is:
log(sigma_p) - log(sigma_q) + (sigma_q^2 + (mu_q - mu_p)^2))/(2 * sigma_p^2)
"""
means_q = params_q[:, :, 0]
log_std_q = params_q[:, :, 1]
means_p = params_p[:, :, 0]
log_std_p = params_p[:, :, 1]
std_q = torch.exp(log_std_q)
std_p = torch.exp(log_std_p)
kl_div = log_std_p - log_std_q + (std_q ** 2 + (means_q - means_p) ** 2) / (2.0 * std_p ** 2) - 0.5
return kl_div.sum(dim=-1) | [
"def",
"kl_divergence",
"(",
"self",
",",
"params_q",
",",
"params_p",
")",
":",
"means_q",
"=",
"params_q",
"[",
":",
",",
":",
",",
"0",
"]",
"log_std_q",
"=",
"params_q",
"[",
":",
",",
":",
",",
"1",
"]",
"means_p",
"=",
"params_p",
"[",
":",
... | Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
Formula is:
log(sigma_p) - log(sigma_q) + (sigma_q^2 + (mu_q - mu_p)^2))/(2 * sigma_p^2) | [
"Categorical",
"distribution",
"KL",
"divergence",
"calculation",
"KL",
"(",
"Q",
"||",
"P",
")",
"=",
"sum",
"Q_i",
"log",
"(",
"Q_i",
"/",
"P_i",
")"
] | python | train |
DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L65-L76 | def binPack(self, jobShapes):
"""Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times."""
# TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting
logger.debug('Running bin packing for node shapes %s and %s job(s).',
self.nodeShapes, len(jobShapes))
# Sort in descending order from largest to smallest. The FFD like-strategy will pack the
# jobs in order from longest to shortest.
jobShapes.sort()
jobShapes.reverse()
assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]
for jS in jobShapes:
self.addJobShape(jS) | [
"def",
"binPack",
"(",
"self",
",",
"jobShapes",
")",
":",
"# TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting",
"logger",
".",
"debug",
"(",
"'Running bin packing for node shapes %s and %s job(s).'",
",",
"self",
".",
"nodeShapes",
",",
"len",
"(",
"jo... | Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times. | [
"Pack",
"a",
"list",
"of",
"jobShapes",
"into",
"the",
"fewest",
"nodes",
"reasonable",
".",
"Can",
"be",
"run",
"multiple",
"times",
"."
] | python | train |
earwig/mwparserfromhell | mwparserfromhell/parser/tokenizer.py | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L1223-L1234 | def _handle_end(self):
"""Handle the end of the stream of wikitext."""
if self._context & contexts.FAIL:
if self._context & contexts.TAG_BODY:
if is_single(self._stack[1].text):
return self._handle_single_tag_end()
if self._context & contexts.TABLE_CELL_OPEN:
self._pop()
if self._context & contexts.DOUBLE:
self._pop()
self._fail_route()
return self._pop() | [
"def",
"_handle_end",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"&",
"contexts",
".",
"FAIL",
":",
"if",
"self",
".",
"_context",
"&",
"contexts",
".",
"TAG_BODY",
":",
"if",
"is_single",
"(",
"self",
".",
"_stack",
"[",
"1",
"]",
".",
... | Handle the end of the stream of wikitext. | [
"Handle",
"the",
"end",
"of",
"the",
"stream",
"of",
"wikitext",
"."
] | python | train |
aliyun/aliyun-odps-python-sdk | odps/df/backends/frame.py | https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/backends/frame.py#L364-L406 | def to_html(self, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
max_rows=None, max_cols=None, show_dimensions=False,
notebook=False):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.=
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
"""
formatter = fmt.ResultFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_html(classes=classes, notebook=notebook)
if buf is None:
return formatter.buf.getvalue() | [
"def",
"to_html",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"col_space",
"=",
"None",
",",
"header",
"=",
"True",
",",
"index",
"=",
"True",
",",
"na_rep",
"=",
"'NaN'",
",",
"formatters",
"=",
"None",
",",
"float_format... | Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.=
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all. | [
"Render",
"a",
"DataFrame",
"as",
"an",
"HTML",
"table",
"."
] | python | train |
GPflow/GPflow | gpflow/expectations.py | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/expectations.py#L858-L880 | def _expectation(p, kern1, feat1, kern2, feat2, nghp=None):
r"""
Compute the expectation:
expectation[n] = <(\Sum_i K1_i_{Z1, x_n}) (\Sum_j K2_j_{x_n, Z2})>_p(x_n)
- \Sum_i K1_i_{.,.}, \Sum_j K2_j_{.,.} :: Sum kernels
:return: NxM1xM2
"""
crossexps = []
if kern1 == kern2 and feat1 == feat2: # avoid duplicate computation by using transposes
for i, k1 in enumerate(kern1.kernels):
crossexps.append(expectation(p, (k1, feat1), (k1, feat1), nghp=nghp))
for k2 in kern1.kernels[:i]:
eKK = expectation(p, (k1, feat1), (k2, feat2), nghp=nghp)
eKK += tf.matrix_transpose(eKK)
crossexps.append(eKK)
else:
for k1, k2 in it.product(kern1.kernels, kern2.kernels):
crossexps.append(expectation(p, (k1, feat1), (k2, feat2), nghp=nghp))
return functools.reduce(tf.add, crossexps) | [
"def",
"_expectation",
"(",
"p",
",",
"kern1",
",",
"feat1",
",",
"kern2",
",",
"feat2",
",",
"nghp",
"=",
"None",
")",
":",
"crossexps",
"=",
"[",
"]",
"if",
"kern1",
"==",
"kern2",
"and",
"feat1",
"==",
"feat2",
":",
"# avoid duplicate computation by u... | r"""
Compute the expectation:
expectation[n] = <(\Sum_i K1_i_{Z1, x_n}) (\Sum_j K2_j_{x_n, Z2})>_p(x_n)
- \Sum_i K1_i_{.,.}, \Sum_j K2_j_{.,.} :: Sum kernels
:return: NxM1xM2 | [
"r",
"Compute",
"the",
"expectation",
":",
"expectation",
"[",
"n",
"]",
"=",
"<",
"(",
"\\",
"Sum_i",
"K1_i_",
"{",
"Z1",
"x_n",
"}",
")",
"(",
"\\",
"Sum_j",
"K2_j_",
"{",
"x_n",
"Z2",
"}",
")",
">",
"_p",
"(",
"x_n",
")",
"-",
"\\",
"Sum_i",... | python | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L364-L368 | def _get_classifier(self, prefix):
""" Construct a decoder for the next sentence prediction task """
with self.name_scope():
classifier = nn.Dense(2, prefix=prefix)
return classifier | [
"def",
"_get_classifier",
"(",
"self",
",",
"prefix",
")",
":",
"with",
"self",
".",
"name_scope",
"(",
")",
":",
"classifier",
"=",
"nn",
".",
"Dense",
"(",
"2",
",",
"prefix",
"=",
"prefix",
")",
"return",
"classifier"
] | Construct a decoder for the next sentence prediction task | [
"Construct",
"a",
"decoder",
"for",
"the",
"next",
"sentence",
"prediction",
"task"
] | python | train |
quantopian/pyfolio | pyfolio/txn.py | https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/txn.py#L83-L110 | def get_txn_vol(transactions):
"""
Extract daily transaction data from set of transaction objects.
Parameters
----------
transactions : pd.DataFrame
Time series containing one row per symbol (and potentially
duplicate datetime indices) and columns for amount and
price.
Returns
-------
pd.DataFrame
Daily transaction volume and number of shares.
- See full explanation in tears.create_full_tear_sheet.
"""
txn_norm = transactions.copy()
txn_norm.index = txn_norm.index.normalize()
amounts = txn_norm.amount.abs()
prices = txn_norm.price
values = amounts * prices
daily_amounts = amounts.groupby(amounts.index).sum()
daily_values = values.groupby(values.index).sum()
daily_amounts.name = "txn_shares"
daily_values.name = "txn_volume"
return pd.concat([daily_values, daily_amounts], axis=1) | [
"def",
"get_txn_vol",
"(",
"transactions",
")",
":",
"txn_norm",
"=",
"transactions",
".",
"copy",
"(",
")",
"txn_norm",
".",
"index",
"=",
"txn_norm",
".",
"index",
".",
"normalize",
"(",
")",
"amounts",
"=",
"txn_norm",
".",
"amount",
".",
"abs",
"(",
... | Extract daily transaction data from set of transaction objects.
Parameters
----------
transactions : pd.DataFrame
Time series containing one row per symbol (and potentially
duplicate datetime indices) and columns for amount and
price.
Returns
-------
pd.DataFrame
Daily transaction volume and number of shares.
- See full explanation in tears.create_full_tear_sheet. | [
"Extract",
"daily",
"transaction",
"data",
"from",
"set",
"of",
"transaction",
"objects",
"."
] | python | valid |
mdickinson/bigfloat | bigfloat/core.py | https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2104-L2114 | def j1(x, context=None):
"""
Return the value of the first kind Bessel function of order 1 at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_j1,
(BigFloat._implicit_convert(x),),
context,
) | [
"def",
"j1",
"(",
"x",
",",
"context",
"=",
"None",
")",
":",
"return",
"_apply_function_in_current_context",
"(",
"BigFloat",
",",
"mpfr",
".",
"mpfr_j1",
",",
"(",
"BigFloat",
".",
"_implicit_convert",
"(",
"x",
")",
",",
")",
",",
"context",
",",
")"
... | Return the value of the first kind Bessel function of order 1 at x. | [
"Return",
"the",
"value",
"of",
"the",
"first",
"kind",
"Bessel",
"function",
"of",
"order",
"1",
"at",
"x",
"."
] | python | train |
pricingassistant/mrq | mrq/queue_raw.py | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L239-L265 | def get_sorted_graph(
self,
start=0,
stop=100,
slices=100,
include_inf=False,
exact=False):
""" Returns a graph of the distribution of jobs in a sorted set """
if not self.is_sorted:
raise Exception("Not a sorted queue")
with context.connections.redis.pipeline(transaction=exact) as pipe:
interval = old_div(float(stop - start), slices)
for i in range(0, slices):
pipe.zcount(self.redis_key,
(start + i * interval),
"(%s" % (start + (i + 1) * interval))
if include_inf:
pipe.zcount(self.redis_key, stop, "+inf")
pipe.zcount(self.redis_key, "-inf", "(%s" % start)
data = pipe.execute()
if include_inf:
return data[-1:] + data[:-1]
return data | [
"def",
"get_sorted_graph",
"(",
"self",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"100",
",",
"slices",
"=",
"100",
",",
"include_inf",
"=",
"False",
",",
"exact",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"is_sorted",
":",
"raise",
"Exceptio... | Returns a graph of the distribution of jobs in a sorted set | [
"Returns",
"a",
"graph",
"of",
"the",
"distribution",
"of",
"jobs",
"in",
"a",
"sorted",
"set"
] | python | train |
briancappello/flask-unchained | flask_unchained/bundles/security/views/security_controller.py | https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/views/security_controller.py#L76-L88 | def logout(self):
"""
View function to log a user out. Supports html and json requests.
"""
if current_user.is_authenticated:
self.security_service.logout_user()
if request.is_json:
return '', HTTPStatus.NO_CONTENT
self.flash(_('flask_unchained.bundles.security:flash.logout'),
category='success')
return self.redirect('SECURITY_POST_LOGOUT_REDIRECT_ENDPOINT') | [
"def",
"logout",
"(",
"self",
")",
":",
"if",
"current_user",
".",
"is_authenticated",
":",
"self",
".",
"security_service",
".",
"logout_user",
"(",
")",
"if",
"request",
".",
"is_json",
":",
"return",
"''",
",",
"HTTPStatus",
".",
"NO_CONTENT",
"self",
"... | View function to log a user out. Supports html and json requests. | [
"View",
"function",
"to",
"log",
"a",
"user",
"out",
".",
"Supports",
"html",
"and",
"json",
"requests",
"."
] | python | train |
koalalorenzo/python-digitalocean | digitalocean/Manager.py | https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Manager.py#L266-L273 | def get_load_balancer(self, id):
"""
Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID
"""
return LoadBalancer.get_object(api_token=self.token, id=id) | [
"def",
"get_load_balancer",
"(",
"self",
",",
"id",
")",
":",
"return",
"LoadBalancer",
".",
"get_object",
"(",
"api_token",
"=",
"self",
".",
"token",
",",
"id",
"=",
"id",
")"
] | Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID | [
"Returns",
"a",
"Load",
"Balancer",
"object",
"by",
"its",
"ID",
"."
] | python | valid |
tensorflow/tensor2tensor | tensor2tensor/layers/discretization.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L57-L72 | def slice_hidden(x, hidden_size, num_blocks):
"""Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
"""
batch_size, latent_dim, _ = common_layers.shape_list(x)
block_dim = hidden_size // num_blocks
x_sliced = tf.reshape(x,
shape=[batch_size, latent_dim, num_blocks, block_dim])
return x_sliced | [
"def",
"slice_hidden",
"(",
"x",
",",
"hidden_size",
",",
"num_blocks",
")",
":",
"batch_size",
",",
"latent_dim",
",",
"_",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"block_dim",
"=",
"hidden_size",
"//",
"num_blocks",
"x_sliced",
"=",
"tf",
... | Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. | [
"Slice",
"encoder",
"hidden",
"state",
"under",
"num_blocks",
"."
] | python | train |
noxdafox/pebble | pebble/functions.py | https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/functions.py#L102-L113 | def prepare_threads(new_function):
"""Replaces threading._get_ident() function in order to notify
the waiting Condition."""
with _waitforthreads_lock:
if hasattr(threading, 'get_ident'):
old_function = threading.get_ident
threading.get_ident = new_function
else:
old_function = threading._get_ident
threading._get_ident = new_function
return old_function | [
"def",
"prepare_threads",
"(",
"new_function",
")",
":",
"with",
"_waitforthreads_lock",
":",
"if",
"hasattr",
"(",
"threading",
",",
"'get_ident'",
")",
":",
"old_function",
"=",
"threading",
".",
"get_ident",
"threading",
".",
"get_ident",
"=",
"new_function",
... | Replaces threading._get_ident() function in order to notify
the waiting Condition. | [
"Replaces",
"threading",
".",
"_get_ident",
"()",
"function",
"in",
"order",
"to",
"notify",
"the",
"waiting",
"Condition",
"."
] | python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/pip/compat/dictconfig.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/compat/dictconfig.py#L521-L527 | def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e)) | [
"def",
"add_handlers",
"(",
"self",
",",
"logger",
",",
"handlers",
")",
":",
"for",
"h",
"in",
"handlers",
":",
"try",
":",
"logger",
".",
"addHandler",
"(",
"self",
".",
"config",
"[",
"'handlers'",
"]",
"[",
"h",
"]",
")",
"except",
"StandardError",... | Add handlers to a logger from a list of names. | [
"Add",
"handlers",
"to",
"a",
"logger",
"from",
"a",
"list",
"of",
"names",
"."
] | python | test |
boriel/zxbasic | arch/zx48k/optimizer.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L2345-L2388 | def optimize(initial_memory):
""" This will remove useless instructions
"""
global BLOCKS
global PROC_COUNTER
LABELS.clear()
JUMP_LABELS.clear()
del MEMORY[:]
PROC_COUNTER = 0
cleanupmem(initial_memory)
if OPTIONS.optimization.value <= 2:
return '\n'.join(x for x in initial_memory if not RE_PRAGMA.match(x))
optimize_init()
bb = BasicBlock(initial_memory)
cleanup_local_labels(bb)
initialize_memory(bb)
BLOCKS = basic_blocks = get_basic_blocks(bb) # 1st partition the Basic Blocks
for x in basic_blocks:
x.clean_up_comes_from()
x.clean_up_goes_to()
for x in basic_blocks:
x.update_goes_and_comes()
LABELS['*START*'].basic_block.add_goes_to(basic_blocks[0])
LABELS['*START*'].basic_block.next = basic_blocks[0]
basic_blocks[0].prev = LABELS['*START*'].basic_block
LABELS[END_PROGRAM_LABEL].basic_block.add_goes_to(LABELS['*__END_PROGRAM*'].basic_block)
for x in basic_blocks:
x.optimize()
for x in basic_blocks:
if x.comes_from == [] and len([y for y in JUMP_LABELS if x is LABELS[y].basic_block]):
x.ignored = True
return '\n'.join([y for y in flatten_list([x.asm for x in basic_blocks if not x.ignored])
if not RE_PRAGMA.match(y)]) | [
"def",
"optimize",
"(",
"initial_memory",
")",
":",
"global",
"BLOCKS",
"global",
"PROC_COUNTER",
"LABELS",
".",
"clear",
"(",
")",
"JUMP_LABELS",
".",
"clear",
"(",
")",
"del",
"MEMORY",
"[",
":",
"]",
"PROC_COUNTER",
"=",
"0",
"cleanupmem",
"(",
"initial... | This will remove useless instructions | [
"This",
"will",
"remove",
"useless",
"instructions"
] | python | train |
openai/baselines | baselines/common/tf_util.py | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L58-L72 | def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph) | [
"def",
"make_session",
"(",
"config",
"=",
"None",
",",
"num_cpu",
"=",
"None",
",",
"make_default",
"=",
"False",
",",
"graph",
"=",
"None",
")",
":",
"if",
"num_cpu",
"is",
"None",
":",
"num_cpu",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'RCALL_... | Returns a session that will use <num_cpu> CPU's only | [
"Returns",
"a",
"session",
"that",
"will",
"use",
"<num_cpu",
">",
"CPU",
"s",
"only"
] | python | valid |
tanghaibao/jcvi | jcvi/assembly/goldenpath.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L156-L183 | def update_clr(self, aclr, bclr):
"""
Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB
"""
print(aclr, bclr, file=sys.stderr)
otype = self.otype
if otype == 1:
if aclr.orientation == '+':
aclr.end = self.qstop
else:
aclr.start = self.qstart
if bclr.orientation == '+':
bclr.start = self.sstop + 1
else:
bclr.end = self.sstart - 1
elif otype == 3:
aclr.start = aclr.end
elif otype == 4:
bclr.start = bclr.end
print(aclr, bclr, file=sys.stderr) | [
"def",
"update_clr",
"(",
"self",
",",
"aclr",
",",
"bclr",
")",
":",
"print",
"(",
"aclr",
",",
"bclr",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"otype",
"=",
"self",
".",
"otype",
"if",
"otype",
"==",
"1",
":",
"if",
"aclr",
".",
"orientati... | Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB | [
"Zip",
"the",
"two",
"sequences",
"together",
"using",
"left",
"-",
"greedy",
"rule"
] | python | train |
markovmodel/PyEMMA | pyemma/_base/progress/reporter/__init__.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/progress/reporter/__init__.py#L155-L160 | def _progress_set_description(self, stage, description):
""" set description of an already existing progress """
self.__check_stage_registered(stage)
self._prog_rep_descriptions[stage] = description
if self._prog_rep_progressbars[stage]:
self._prog_rep_progressbars[stage].set_description(description, refresh=False) | [
"def",
"_progress_set_description",
"(",
"self",
",",
"stage",
",",
"description",
")",
":",
"self",
".",
"__check_stage_registered",
"(",
"stage",
")",
"self",
".",
"_prog_rep_descriptions",
"[",
"stage",
"]",
"=",
"description",
"if",
"self",
".",
"_prog_rep_p... | set description of an already existing progress | [
"set",
"description",
"of",
"an",
"already",
"existing",
"progress"
] | python | train |
coin-or/GiMPy | src/gimpy/graph.py | https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L2903-L2926 | def get_simplex_solution_graph(self):
'''
API:
get_simplex_solution_graph(self):
Description:
Assumes a feasible flow solution stored in 'flow' attribute's of
arcs. Returns the graph with arcs that have flow between 0 and
capacity.
Pre:
(1) 'flow' attribute represents a feasible flow solution. See
Pre section of min_cost_flow() for details.
Return:
Graph instance that only has the arcs that have flow strictly
between 0 and capacity.
'''
simplex_g = Graph(type=DIRECTED_GRAPH)
for i in self.neighbors:
simplex_g.add_node(i)
for e in self.edge_attr:
flow_e = self.edge_attr[e]['flow']
capacity_e = self.edge_attr[e]['capacity']
if flow_e>0 and flow_e<capacity_e:
simplex_g.add_edge(e[0], e[1])
return simplex_g | [
"def",
"get_simplex_solution_graph",
"(",
"self",
")",
":",
"simplex_g",
"=",
"Graph",
"(",
"type",
"=",
"DIRECTED_GRAPH",
")",
"for",
"i",
"in",
"self",
".",
"neighbors",
":",
"simplex_g",
".",
"add_node",
"(",
"i",
")",
"for",
"e",
"in",
"self",
".",
... | API:
get_simplex_solution_graph(self):
Description:
Assumes a feasible flow solution stored in 'flow' attribute's of
arcs. Returns the graph with arcs that have flow between 0 and
capacity.
Pre:
(1) 'flow' attribute represents a feasible flow solution. See
Pre section of min_cost_flow() for details.
Return:
Graph instance that only has the arcs that have flow strictly
between 0 and capacity. | [
"API",
":",
"get_simplex_solution_graph",
"(",
"self",
")",
":",
"Description",
":",
"Assumes",
"a",
"feasible",
"flow",
"solution",
"stored",
"in",
"flow",
"attribute",
"s",
"of",
"arcs",
".",
"Returns",
"the",
"graph",
"with",
"arcs",
"that",
"have",
"flow... | python | train |
jason-weirather/py-seq-tools | seqtools/graph/__init__.py | https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/graph/__init__.py#L148-L174 | def move_edges(self,n1,n2):
"""Move edges from node 1 to node 2
Not self edges though
Overwrites edges
"""
#Traverse edges to find incoming with n1
incoming = []
for e in self._edges.values():
if e.node2.id == n1.id: incoming.append(e)
#Traverse edges to find outgoing from n1
outgoing = []
for e in self._edges.values():
if e.node1.id == n1.id: outgoing.append(e)
#Make new edges to the new target
for e in incoming:
if e.node1.id == n2.id: continue # skip self
newedge = Edge(e.node1,n2,payload_list=n2.payload_list+n1.payload_list)
self.add_edge(newedge)
for e in outgoing:
if e.node2.id == n2.id: continue # skip self
newedge = Edge(n2,e.node2,payload_list=n2.payload_list+n1.payload_list)
self.add_edge(newedge)
#now remove the edges that got transfered
for e in incoming: self.remove_edge(e)
for e in outgoing: self.remove_edge(e) | [
"def",
"move_edges",
"(",
"self",
",",
"n1",
",",
"n2",
")",
":",
"#Traverse edges to find incoming with n1",
"incoming",
"=",
"[",
"]",
"for",
"e",
"in",
"self",
".",
"_edges",
".",
"values",
"(",
")",
":",
"if",
"e",
".",
"node2",
".",
"id",
"==",
... | Move edges from node 1 to node 2
Not self edges though
Overwrites edges | [
"Move",
"edges",
"from",
"node",
"1",
"to",
"node",
"2",
"Not",
"self",
"edges",
"though"
] | python | train |
keenlabs/KeenClient-Python | keen/api.py | https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/api.py#L130-L166 | def _order_by_is_valid_or_none(self, params):
"""
Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed.
"""
if not "order_by" in params or not params["order_by"]:
return True
def _order_by_dict_is_not_well_formed(d):
if not isinstance(d, dict):
# Bad type.
return True
if "property_name" in d and d["property_name"]:
if "direction" in d and not direction.is_valid_direction(d["direction"]):
# Bad direction provided.
return True
for k in d:
if k != "property_name" and k != "direction":
# Unexpected key.
return True
# Everything looks good!
return False
# Missing required key.
return True
# order_by is converted to a list before this point if it wasn't one before.
order_by_list = json.loads(params["order_by"])
for order_by in order_by_list:
if _order_by_dict_is_not_well_formed(order_by):
return False
if not "group_by" in params or not params["group_by"]:
# We must have group_by to have order_by make sense.
return False
return True | [
"def",
"_order_by_is_valid_or_none",
"(",
"self",
",",
"params",
")",
":",
"if",
"not",
"\"order_by\"",
"in",
"params",
"or",
"not",
"params",
"[",
"\"order_by\"",
"]",
":",
"return",
"True",
"def",
"_order_by_dict_is_not_well_formed",
"(",
"d",
")",
":",
"if"... | Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed. | [
"Validates",
"that",
"a",
"given",
"order_by",
"has",
"proper",
"syntax",
"."
] | python | train |
uber/tchannel-python | tchannel/tornado/message_factory.py | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L115-L151 | def build_raw_response_message(self, response, args, is_completed=False):
"""build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage
"""
response.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if response.state == StreamState.init:
message = CallResponseMessage(
flags=response.flags,
code=response.code,
tracing=response.tracing,
headers=response.headers,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif response.state == StreamState.streaming:
message = CallResponseContinueMessage(
flags=response.flags,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = response.id
return message | [
"def",
"build_raw_response_message",
"(",
"self",
",",
"response",
",",
"args",
",",
"is_completed",
"=",
"False",
")",
":",
"response",
".",
"flags",
"=",
"FlagsType",
".",
"none",
"if",
"is_completed",
"else",
"FlagsType",
".",
"fragment",
"# TODO decide what ... | build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage | [
"build",
"protocol",
"level",
"message",
"based",
"on",
"response",
"and",
"args",
"."
] | python | train |
wonambi-python/wonambi | wonambi/widgets/info.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/info.py#L426-L433 | def toggle_buttons(self):
"""Turn buttons on and off."""
all_time_on = self.all_time.get_value()
all_chan_on = self.all_chan.get_value()
self.times['beg'].setEnabled(not all_time_on)
self.times['end'].setEnabled(not all_time_on)
self.idx_chan.setEnabled(not all_chan_on) | [
"def",
"toggle_buttons",
"(",
"self",
")",
":",
"all_time_on",
"=",
"self",
".",
"all_time",
".",
"get_value",
"(",
")",
"all_chan_on",
"=",
"self",
".",
"all_chan",
".",
"get_value",
"(",
")",
"self",
".",
"times",
"[",
"'beg'",
"]",
".",
"setEnabled",
... | Turn buttons on and off. | [
"Turn",
"buttons",
"on",
"and",
"off",
"."
] | python | train |
allenai/allennlp | allennlp/modules/residual_with_layer_dropout.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/residual_with_layer_dropout.py#L21-L59 | def forward(self,
layer_input: torch.Tensor,
layer_output: torch.Tensor,
layer_index: int = None,
total_layers: int = None) -> torch.Tensor:
# pylint: disable=arguments-differ
"""
Apply dropout to this layer, for this whole mini-batch.
dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_idx and
total_layers is specified, else it will use the undecayed_dropout_prob directly.
Parameters
----------
layer_input ``torch.FloatTensor`` required
The input tensor of this layer.
layer_output ``torch.FloatTensor`` required
The output tensor of this layer, with the same shape as the layer_input.
layer_index ``int``
The layer index, starting from 1. This is used to calcuate the dropout prob
together with the `total_layers` parameter.
total_layers ``int``
The total number of layers.
Returns
-------
output: ``torch.FloatTensor``
A tensor with the same shape as `layer_input` and `layer_output`.
"""
if layer_index is not None and total_layers is not None:
dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers
else:
dropout_prob = 1.0 * self.undecayed_dropout_prob
if self.training:
if torch.rand(1) < dropout_prob:
return layer_input
else:
return layer_output + layer_input
else:
return (1 - dropout_prob) * layer_output + layer_input | [
"def",
"forward",
"(",
"self",
",",
"layer_input",
":",
"torch",
".",
"Tensor",
",",
"layer_output",
":",
"torch",
".",
"Tensor",
",",
"layer_index",
":",
"int",
"=",
"None",
",",
"total_layers",
":",
"int",
"=",
"None",
")",
"->",
"torch",
".",
"Tenso... | Apply dropout to this layer, for this whole mini-batch.
dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_idx and
total_layers is specified, else it will use the undecayed_dropout_prob directly.
Parameters
----------
layer_input ``torch.FloatTensor`` required
The input tensor of this layer.
layer_output ``torch.FloatTensor`` required
The output tensor of this layer, with the same shape as the layer_input.
layer_index ``int``
The layer index, starting from 1. This is used to calcuate the dropout prob
together with the `total_layers` parameter.
total_layers ``int``
The total number of layers.
Returns
-------
output: ``torch.FloatTensor``
A tensor with the same shape as `layer_input` and `layer_output`. | [
"Apply",
"dropout",
"to",
"this",
"layer",
"for",
"this",
"whole",
"mini",
"-",
"batch",
".",
"dropout_prob",
"=",
"layer_index",
"/",
"total_layers",
"*",
"undecayed_dropout_prob",
"if",
"layer_idx",
"and",
"total_layers",
"is",
"specified",
"else",
"it",
"will... | python | train |
tanghaibao/goatools | goatools/parsers/ncbi_gene_file_reader.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/parsers/ncbi_gene_file_reader.py#L215-L226 | def convert_ints_floats(self, flds):
"""Convert strings to ints and floats, if so specified."""
for idx in self.idxs_float:
flds[idx] = float(flds[idx])
for idx in self.idxs_int:
dig = flds[idx]
#print 'idx={} ({}) {}'.format(idx, flds[idx], flds) # DVK
flds[idx] = int(flds[idx]) if dig.isdigit() else dig
for idx in self.idxs_strpat:
hdr = self.hdr2idx.items()[idx][0]
pat = self.strpat_hdrs[hdr]
flds[idx] = pat.format(flds[idx]) | [
"def",
"convert_ints_floats",
"(",
"self",
",",
"flds",
")",
":",
"for",
"idx",
"in",
"self",
".",
"idxs_float",
":",
"flds",
"[",
"idx",
"]",
"=",
"float",
"(",
"flds",
"[",
"idx",
"]",
")",
"for",
"idx",
"in",
"self",
".",
"idxs_int",
":",
"dig",... | Convert strings to ints and floats, if so specified. | [
"Convert",
"strings",
"to",
"ints",
"and",
"floats",
"if",
"so",
"specified",
"."
] | python | train |
CalebBell/fluids | fluids/friction.py | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/friction.py#L1712-L1760 | def Prandtl_von_Karman_Nikuradse(Re):
r'''Calculates Darcy friction factor for smooth pipes as a function of
Reynolds number from the Prandtl-von Karman Nikuradse equation as given
in [1]_ and [2]_:
.. math::
\frac{1}{\sqrt{f}} = -2\log_{10}\left(\frac{2.51}{Re\sqrt{f}}\right)
Parameters
----------
Re : float
Reynolds number, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This equation is often stated as follows; the correct constant is not 0.8,
but 2log10(2.51) or approximately 0.7993474:
.. math::
\frac{1}{\sqrt{f}}\approx 2\log_{10}(\text{Re}\sqrt{f})-0.8
This function is calculable for all Reynolds numbers between 1E151 and
1E-151. It is solved with the LambertW function from SciPy. The solution is:
.. math::
f_d = \frac{\frac{1}{4}\log_{10}^2}{\left(\text{lambertW}\left(\frac{
\log(10)Re}{2(2.51)}\right)\right)^2}
Examples
--------
>>> Prandtl_von_Karman_Nikuradse(1E7)
0.008102669430874914
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] McGovern, Jim. "Technical Note: Friction Factor Diagrams for Pipe
Flow." Paper, October 3, 2011. http://arrow.dit.ie/engschmecart/28.
'''
# Good 1E150 to 1E-150
c1 = 1.151292546497022842008995727342182103801 # log(10)/2
c2 = 1.325474527619599502640416597148504422899 # log(10)**2/4
return c2/float(lambertw((c1*Re)/2.51).real)**2 | [
"def",
"Prandtl_von_Karman_Nikuradse",
"(",
"Re",
")",
":",
"# Good 1E150 to 1E-150",
"c1",
"=",
"1.151292546497022842008995727342182103801",
"# log(10)/2",
"c2",
"=",
"1.325474527619599502640416597148504422899",
"# log(10)**2/4",
"return",
"c2",
"/",
"float",
"(",
"lambertw"... | r'''Calculates Darcy friction factor for smooth pipes as a function of
Reynolds number from the Prandtl-von Karman Nikuradse equation as given
in [1]_ and [2]_:
.. math::
\frac{1}{\sqrt{f}} = -2\log_{10}\left(\frac{2.51}{Re\sqrt{f}}\right)
Parameters
----------
Re : float
Reynolds number, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This equation is often stated as follows; the correct constant is not 0.8,
but 2log10(2.51) or approximately 0.7993474:
.. math::
\frac{1}{\sqrt{f}}\approx 2\log_{10}(\text{Re}\sqrt{f})-0.8
This function is calculable for all Reynolds numbers between 1E151 and
1E-151. It is solved with the LambertW function from SciPy. The solution is:
.. math::
f_d = \frac{\frac{1}{4}\log_{10}^2}{\left(\text{lambertW}\left(\frac{
\log(10)Re}{2(2.51)}\right)\right)^2}
Examples
--------
>>> Prandtl_von_Karman_Nikuradse(1E7)
0.008102669430874914
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] McGovern, Jim. "Technical Note: Friction Factor Diagrams for Pipe
Flow." Paper, October 3, 2011. http://arrow.dit.ie/engschmecart/28. | [
"r",
"Calculates",
"Darcy",
"friction",
"factor",
"for",
"smooth",
"pipes",
"as",
"a",
"function",
"of",
"Reynolds",
"number",
"from",
"the",
"Prandtl",
"-",
"von",
"Karman",
"Nikuradse",
"equation",
"as",
"given",
"in",
"[",
"1",
"]",
"_",
"and",
"[",
"... | python | train |
saltstack/salt | salt/modules/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L3103-L3132 | def define_vol_xml_path(path, **kwargs):
'''
Define a volume based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the volume
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_vol_xml_path <path to XML file on the node>
'''
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return define_vol_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False | [
"def",
"define_vol_xml_path",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"path",
",",
"'r'",
")",
"as",
"fp_",
":",
"return",
"define_vol_xml_str",
"(",
"salt",
".",
"uti... | Define a volume based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the volume
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_vol_xml_path <path to XML file on the node> | [
"Define",
"a",
"volume",
"based",
"on",
"the",
"XML",
"-",
"file",
"path",
"passed",
"to",
"the",
"function"
] | python | train |
google/grr | grr/server/grr_response_server/check_lib/checks.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/check_lib/checks.py#L612-L645 | def FindChecks(cls,
artifact=None,
os_name=None,
cpe=None,
labels=None,
restrict_checks=None):
"""Takes targeting info, identifies relevant checks.
FindChecks will return results when a host has the conditions necessary for
a check to occur. Conditions with partial results are not returned. For
example, FindChecks will not return checks that if a check targets
os_name=["Linux"], labels=["foo"] and a host only has the os_name=["Linux"]
attribute.
Args:
artifact: 0+ artifact names.
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
restrict_checks: A list of check ids to restrict check processing to.
Returns:
the check_ids that apply.
"""
check_ids = set()
conditions = list(cls.Conditions(artifact, os_name, cpe, labels))
for chk_id, chk in iteritems(cls.checks):
if restrict_checks and chk_id not in restrict_checks:
continue
for condition in conditions:
if chk.triggers.Match(*condition):
check_ids.add(chk_id)
break # No need to keep checking other conditions.
return check_ids | [
"def",
"FindChecks",
"(",
"cls",
",",
"artifact",
"=",
"None",
",",
"os_name",
"=",
"None",
",",
"cpe",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"restrict_checks",
"=",
"None",
")",
":",
"check_ids",
"=",
"set",
"(",
")",
"conditions",
"=",
"lis... | Takes targeting info, identifies relevant checks.
FindChecks will return results when a host has the conditions necessary for
a check to occur. Conditions with partial results are not returned. For
example, FindChecks will not return checks that if a check targets
os_name=["Linux"], labels=["foo"] and a host only has the os_name=["Linux"]
attribute.
Args:
artifact: 0+ artifact names.
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
restrict_checks: A list of check ids to restrict check processing to.
Returns:
the check_ids that apply. | [
"Takes",
"targeting",
"info",
"identifies",
"relevant",
"checks",
"."
] | python | train |
planetlabs/planet-client-python | planet/api/utils.py | https://github.com/planetlabs/planet-client-python/blob/1c62ce7d416819951dddee0c22068fef6d40b027/planet/api/utils.py#L210-L227 | def get_random_filename(content_type=None):
"""Get a pseudo-random, Planet-looking filename.
>>> from planet.api import utils
>>> print(utils.get_random_filename()) #doctest:+SKIP
planet-61FPnh7K
>>> print(utils.get_random_filename('image/tiff')) #doctest:+SKIP
planet-V8ELYxy5.tif
>>>
:returns: a filename (i.e. ``basename``)
:rtype: str
"""
extension = mimetypes.guess_extension(content_type or '') or ''
characters = string.ascii_letters + '0123456789'
letters = ''.join(random.sample(characters, 8))
name = 'planet-{}{}'.format(letters, extension)
return name | [
"def",
"get_random_filename",
"(",
"content_type",
"=",
"None",
")",
":",
"extension",
"=",
"mimetypes",
".",
"guess_extension",
"(",
"content_type",
"or",
"''",
")",
"or",
"''",
"characters",
"=",
"string",
".",
"ascii_letters",
"+",
"'0123456789'",
"letters",
... | Get a pseudo-random, Planet-looking filename.
>>> from planet.api import utils
>>> print(utils.get_random_filename()) #doctest:+SKIP
planet-61FPnh7K
>>> print(utils.get_random_filename('image/tiff')) #doctest:+SKIP
planet-V8ELYxy5.tif
>>>
:returns: a filename (i.e. ``basename``)
:rtype: str | [
"Get",
"a",
"pseudo",
"-",
"random",
"Planet",
"-",
"looking",
"filename",
"."
] | python | train |
websocket-client/websocket-client | websocket/_core.py | https://github.com/websocket-client/websocket-client/blob/3c25814664fef5b78716ed8841123ed1c0d17824/websocket/_core.py#L186-L239 | def connect(self, url, **options):
"""
Connect to url. url is websocket url scheme.
ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"suppress_origin" -> suppress outputting origin header.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"redirect_limit" -> number of redirects to follow.
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
# FIXME: "subprotocols" are getting lost, not passed down
# FIXME: "header", "cookie", "origin" and "host" too
self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
for attempt in range(options.pop('redirect_limit', 3)):
if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES:
url = self.handshake_response.headers['location']
self.sock.close()
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise | [
"def",
"connect",
"(",
"self",
",",
"url",
",",
"*",
"*",
"options",
")",
":",
"# FIXME: \"subprotocols\" are getting lost, not passed down",
"# FIXME: \"header\", \"cookie\", \"origin\" and \"host\" too",
"self",
".",
"sock_opt",
".",
"timeout",
"=",
"options",
".",
"get... | Connect to url. url is websocket url scheme.
ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"suppress_origin" -> suppress outputting origin header.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"redirect_limit" -> number of redirects to follow.
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket. | [
"Connect",
"to",
"url",
".",
"url",
"is",
"websocket",
"url",
"scheme",
".",
"ie",
".",
"ws",
":",
"//",
"host",
":",
"port",
"/",
"resource",
"You",
"can",
"customize",
"using",
"options",
".",
"If",
"you",
"set",
"header",
"list",
"object",
"you",
... | python | train |
tanghaibao/jcvi | jcvi/assembly/hic.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L412-L426 | def M(self):
"""
Contact frequency matrix. Each cell contains how many inter-contig
links between i-th and j-th contigs.
"""
N = self.N
tig_to_idx = self.tig_to_idx
M = np.zeros((N, N), dtype=int)
for (at, bt), links in self.contacts.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
M[ai, bi] = M[bi, ai] = links
return M | [
"def",
"M",
"(",
"self",
")",
":",
"N",
"=",
"self",
".",
"N",
"tig_to_idx",
"=",
"self",
".",
"tig_to_idx",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"N",
")",
",",
"dtype",
"=",
"int",
")",
"for",
"(",
"at",
",",
"bt",
")",
",",
... | Contact frequency matrix. Each cell contains how many inter-contig
links between i-th and j-th contigs. | [
"Contact",
"frequency",
"matrix",
".",
"Each",
"cell",
"contains",
"how",
"many",
"inter",
"-",
"contig",
"links",
"between",
"i",
"-",
"th",
"and",
"j",
"-",
"th",
"contigs",
"."
] | python | train |
inspirehep/inspire-dojson | inspire_dojson/hepnames/rules.py | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hepnames/rules.py#L119-L172 | def ids2marc(self, key, value):
"""Populate the ``035`` MARC field.
Also populates the ``8564`` and ``970`` MARC field through side effects.
"""
def _is_schema_inspire_bai(id_, schema):
return schema == 'INSPIRE BAI'
def _is_schema_inspire_id(id_, schema):
return schema == 'INSPIRE ID'
def _is_schema_spires(id_, schema):
return schema == 'SPIRES'
def _is_schema_linkedin(id, schema):
return schema == 'LINKEDIN'
def _is_schema_twitter(id, schema):
return schema == 'TWITTER'
id_ = value.get('value')
schema = value.get('schema')
if _is_schema_spires(id_, schema):
self.setdefault('970', []).append({'a': id_})
elif _is_schema_linkedin(id_, schema):
self.setdefault('8564', []).append(
{
'u': u'https://www.linkedin.com/in/{id}'.format(id=quote_url(id_)),
'y': 'LINKEDIN',
}
)
elif _is_schema_twitter(id_, schema):
self.setdefault('8564', []).append(
{
'u': u'https://twitter.com/{id}'.format(id=id_),
'y': 'TWITTER',
}
)
elif _is_schema_inspire_id(id_, schema):
return {
'a': id_,
'9': 'INSPIRE',
}
elif _is_schema_inspire_bai(id_, schema):
return {
'a': id_,
'9': 'BAI',
}
else:
return {
'a': id_,
'9': schema,
} | [
"def",
"ids2marc",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"def",
"_is_schema_inspire_bai",
"(",
"id_",
",",
"schema",
")",
":",
"return",
"schema",
"==",
"'INSPIRE BAI'",
"def",
"_is_schema_inspire_id",
"(",
"id_",
",",
"schema",
")",
":",
"retur... | Populate the ``035`` MARC field.
Also populates the ``8564`` and ``970`` MARC field through side effects. | [
"Populate",
"the",
"035",
"MARC",
"field",
"."
] | python | train |
woolfson-group/isambard | isambard/optimisation/optimizer.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L142-L203 | def run_opt(self, popsize, numgen, processors,
plot=False, log=False, **kwargs):
"""
Runs the optimizer.
:param popsize:
:param numgen:
:param processors:
:param plot:
:param log:
:param kwargs:
:return:
"""
self._params['popsize'] = popsize
self._params['numgen'] = numgen
self._params['processors'] = processors
self._params['plot'] = plot
self._params['log'] = log
# allows us to pass in additional arguments e.g. neighbours
self._params.update(**kwargs)
self.halloffame = tools.HallOfFame(1)
self.stats = tools.Statistics(lambda thing: thing.fitness.values)
self.stats.register("avg", numpy.mean)
self.stats.register("std", numpy.std)
self.stats.register("min", numpy.min)
self.stats.register("max", numpy.max)
self.logbook = tools.Logbook()
self.logbook.header = ["gen", "evals"] + self.stats.fields
self._params['model_count'] = 0
start_time = datetime.datetime.now()
self.initialize_pop()
for g in range(self._params['numgen']):
self.update_pop()
self.halloffame.update(self.population)
self.logbook.record(gen=g, evals=self._params['evals'],
**self.stats.compile(self.population))
print(self.logbook.stream)
end_time = datetime.datetime.now()
time_taken = end_time - start_time
self._params['time_taken'] = time_taken
print("Evaluated {0} models in total".format(
self._params['model_count']))
print("Best fitness is {0}".format(self.halloffame[0].fitness))
print("Best parameters are {0}".format(self.parse_individual(
self.halloffame[0])))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
print(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
print(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
if self._params['log']:
self.log_results()
if self._params['plot']:
print('----Minimisation plot:')
plt.figure(figsize=(5, 5))
plt.plot(range(len(self.logbook.select('min'))),
self.logbook.select('min'))
plt.xlabel('Iteration', fontsize=20)
plt.ylabel('Score', fontsize=20) | [
"def",
"run_opt",
"(",
"self",
",",
"popsize",
",",
"numgen",
",",
"processors",
",",
"plot",
"=",
"False",
",",
"log",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_params",
"[",
"'popsize'",
"]",
"=",
"popsize",
"self",
".",
"_p... | Runs the optimizer.
:param popsize:
:param numgen:
:param processors:
:param plot:
:param log:
:param kwargs:
:return: | [
"Runs",
"the",
"optimizer",
".",
":",
"param",
"popsize",
":",
":",
"param",
"numgen",
":",
":",
"param",
"processors",
":",
":",
"param",
"plot",
":",
":",
"param",
"log",
":",
":",
"param",
"kwargs",
":",
":",
"return",
":"
] | python | train |
stephanepechard/projy | projy/cmdline.py | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L41-L51 | def run_list():
""" Print the list of all available templates. """
term = TerminalView()
term.print_info("These are the available templates:")
import pkgutil, projy.templates
pkgpath = os.path.dirname(projy.templates.__file__)
templates = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
for name in templates:
# the father of all templates, not a real usable one
if (name != 'ProjyTemplate'):
term.print_info(term.text_in_color(template_name_from_class_name(name), TERM_PINK)) | [
"def",
"run_list",
"(",
")",
":",
"term",
"=",
"TerminalView",
"(",
")",
"term",
".",
"print_info",
"(",
"\"These are the available templates:\"",
")",
"import",
"pkgutil",
",",
"projy",
".",
"templates",
"pkgpath",
"=",
"os",
".",
"path",
".",
"dirname",
"(... | Print the list of all available templates. | [
"Print",
"the",
"list",
"of",
"all",
"available",
"templates",
"."
] | python | train |
elastic/elasticsearch-py | elasticsearch/client/xpack/ml.py | https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ml.py#L478-L502 | def get_model_snapshots(self, job_id, snapshot_id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to fetch
:arg body: Model snapshot selection criteria
:arg desc: True if the results should be sorted in descending order
:arg end: The filter 'end' query parameter
:arg from_: Skips a number of documents
:arg size: The default number of documents returned in queries as a
string.
:arg sort: Name of the field to sort on
:arg start: The filter 'start' query parameter
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request(
"GET",
_make_path(
"_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
),
params=params,
body=body,
) | [
"def",
"get_model_snapshots",
"(",
"self",
",",
"job_id",
",",
"snapshot_id",
"=",
"None",
",",
"body",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"job_id",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a requir... | `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to fetch
:arg body: Model snapshot selection criteria
:arg desc: True if the results should be sorted in descending order
:arg end: The filter 'end' query parameter
:arg from_: Skips a number of documents
:arg size: The default number of documents returned in queries as a
string.
:arg sort: Name of the field to sort on
:arg start: The filter 'start' query parameter | [
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"ml",
"-",
"get",
"-",
"snapshot",
".",
"html",
">",
"_"
] | python | train |
ciena/afkak | afkak/_protocol.py | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_protocol.py#L94-L101 | def connectionLost(self, reason=connectionDone):
"""
Mark the protocol as failed and fail all pending operations.
"""
self._failed = reason
pending, self._pending = self._pending, None
for d in pending.values():
d.errback(reason) | [
"def",
"connectionLost",
"(",
"self",
",",
"reason",
"=",
"connectionDone",
")",
":",
"self",
".",
"_failed",
"=",
"reason",
"pending",
",",
"self",
".",
"_pending",
"=",
"self",
".",
"_pending",
",",
"None",
"for",
"d",
"in",
"pending",
".",
"values",
... | Mark the protocol as failed and fail all pending operations. | [
"Mark",
"the",
"protocol",
"as",
"failed",
"and",
"fail",
"all",
"pending",
"operations",
"."
] | python | train |
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_output.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_output.py#L90-L103 | def cmd_output_remove(self, args):
'''remove an output'''
device = args[0]
for i in range(len(self.mpstate.mav_outputs)):
conn = self.mpstate.mav_outputs[i]
if str(i) == device or conn.address == device:
print("Removing output %s" % conn.address)
try:
mp_util.child_fd_list_add(conn.port.fileno())
except Exception:
pass
conn.close()
self.mpstate.mav_outputs.pop(i)
return | [
"def",
"cmd_output_remove",
"(",
"self",
",",
"args",
")",
":",
"device",
"=",
"args",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"mpstate",
".",
"mav_outputs",
")",
")",
":",
"conn",
"=",
"self",
".",
"mpstate",
".",
... | remove an output | [
"remove",
"an",
"output"
] | python | train |
tchellomello/python-arlo | pyarlo/__init__.py | https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L86-L91 | def cleanup_headers(self):
"""Reset the headers and params."""
headers = {'Content-Type': 'application/json'}
headers['Authorization'] = self.__token
self.__headers = headers
self.__params = {} | [
"def",
"cleanup_headers",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
"headers",
"[",
"'Authorization'",
"]",
"=",
"self",
".",
"__token",
"self",
".",
"__headers",
"=",
"headers",
"self",
".",
"__params",
"=... | Reset the headers and params. | [
"Reset",
"the",
"headers",
"and",
"params",
"."
] | python | train |
basecrm/basecrm-python | basecrm/services.py | https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L81-L103 | def create(self, deal_id, *args, **kwargs):
"""
Create an associated contact
Creates a deal's associated contact and its role
If the specified deal or contact does not exist, the request will return an error
:calls: ``post /deals/{deal_id}/associated_contacts``
:param int deal_id: Unique identifier of a Deal.
:param tuple *args: (optional) Single object representing AssociatedContact resource.
:param dict **kwargs: (optional) AssociatedContact attributes.
:return: Dictionary that support attriubte-style access and represents newely created AssociatedContact resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for AssociatedContact are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.iteritems() if k in self.OPTS_KEYS_TO_PERSIST)
_, _, associated_contact = self.http_client.post("/deals/{deal_id}/associated_contacts".format(deal_id=deal_id), body=attributes)
return associated_contact | [
"def",
"create",
"(",
"self",
",",
"deal_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"args",
"and",
"not",
"kwargs",
":",
"raise",
"Exception",
"(",
"'attributes for AssociatedContact are missing'",
")",
"attributes",
"=",
"args",
... | Create an associated contact
Creates a deal's associated contact and its role
If the specified deal or contact does not exist, the request will return an error
:calls: ``post /deals/{deal_id}/associated_contacts``
:param int deal_id: Unique identifier of a Deal.
:param tuple *args: (optional) Single object representing AssociatedContact resource.
:param dict **kwargs: (optional) AssociatedContact attributes.
:return: Dictionary that support attriubte-style access and represents newely created AssociatedContact resource.
:rtype: dict | [
"Create",
"an",
"associated",
"contact"
] | python | train |
edx/django-user-tasks | user_tasks/rules.py | https://github.com/edx/django-user-tasks/blob/6a9cf3821f4d8e202e6b48703e6a62e2a889adfb/user_tasks/rules.py#L64-L78 | def add_rules():
"""
Use the rules provided in this module to implement authorization checks for the ``django-user-tasks`` models.
These rules allow only superusers and the user who triggered a task to view its status or artifacts, cancel the
task, or delete the status information and all its related artifacts. Only superusers are allowed to directly
modify or delete an artifact (or to modify a task status record).
"""
rules.add_perm('user_tasks.view_usertaskstatus', STATUS_PERMISSION)
rules.add_perm('user_tasks.cancel_usertaskstatus', STATUS_PERMISSION)
rules.add_perm('user_tasks.change_usertaskstatus', rules.predicates.is_superuser)
rules.add_perm('user_tasks.delete_usertaskstatus', STATUS_PERMISSION)
rules.add_perm('user_tasks.view_usertaskartifact', ARTIFACT_PERMISSION)
rules.add_perm('user_tasks.change_usertaskartifact', rules.predicates.is_superuser)
rules.add_perm('user_tasks.delete_usertaskartifact', rules.predicates.is_superuser) | [
"def",
"add_rules",
"(",
")",
":",
"rules",
".",
"add_perm",
"(",
"'user_tasks.view_usertaskstatus'",
",",
"STATUS_PERMISSION",
")",
"rules",
".",
"add_perm",
"(",
"'user_tasks.cancel_usertaskstatus'",
",",
"STATUS_PERMISSION",
")",
"rules",
".",
"add_perm",
"(",
"'... | Use the rules provided in this module to implement authorization checks for the ``django-user-tasks`` models.
These rules allow only superusers and the user who triggered a task to view its status or artifacts, cancel the
task, or delete the status information and all its related artifacts. Only superusers are allowed to directly
modify or delete an artifact (or to modify a task status record). | [
"Use",
"the",
"rules",
"provided",
"in",
"this",
"module",
"to",
"implement",
"authorization",
"checks",
"for",
"the",
"django",
"-",
"user",
"-",
"tasks",
"models",
"."
] | python | train |
Yubico/python-pyhsm | pyhsm/val/validation_server.py | https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/val/validation_server.py#L122-L186 | def do_GET(self):
"""
Process validation GET requests.
All modes of validation (OTP, OATH and PWHASH) must be explicitly
enabled in `args' to be allowed.
"""
if self.path.startswith(args.serve_url):
res = None
log_res = None
mode = None
params = urlparse.parse_qs(self.path[len(args.serve_url):])
if "otp" in params:
if args.mode_short_otp:
# YubiKey internal db OTP in KSM mode
mode = 'YubiKey OTP (short)'
res = validate_yubikey_otp_short(self, params)
elif args.mode_otp:
# YubiKey internal db OTP validation 2.0
mode = 'YubiKey OTP'
res = validate_yubikey_otp(self, params)
#status = [x for x in res.split('\n') if x.startswith("status=")]
#if len(status) == 1:
# res = status[0][7:]
log_res = '&'.join(res.split('\n'))
else:
res = "ERR 'otp/otp2' disabled"
elif "hotp" in params:
if args.mode_hotp:
mode = 'OATH-HOTP'
res = validate_oath_hotp(self, params)
else:
res = "ERR 'hotp' disabled"
elif "totp" in params:
if args.mode_totp:
mode = 'OATH-TOTP'
res = validate_oath_totp(self, params)
else:
res = "ERR 'totp' disabled"
elif "pwhash" in params:
if args.mode_pwhash:
mode = 'Password hash'
res = validate_pwhash(self, params)
else:
res = "ERR 'pwhash' disabled"
if not log_res:
log_res = res
self.log_message("%s validation result: %s -> %s", mode, self.path, log_res)
if res != None:
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(res)
self.wfile.write("\n")
else:
self.log_error ("No validation result to '%s' (responding 403)" % (self.path))
self.send_response(403, 'Forbidden')
self.end_headers()
else:
self.log_error ("Bad URL '%s' - I'm serving '%s' (responding 403)" % (self.path, args.serve_url))
self.send_response(403, 'Forbidden')
self.end_headers() | [
"def",
"do_GET",
"(",
"self",
")",
":",
"if",
"self",
".",
"path",
".",
"startswith",
"(",
"args",
".",
"serve_url",
")",
":",
"res",
"=",
"None",
"log_res",
"=",
"None",
"mode",
"=",
"None",
"params",
"=",
"urlparse",
".",
"parse_qs",
"(",
"self",
... | Process validation GET requests.
All modes of validation (OTP, OATH and PWHASH) must be explicitly
enabled in `args' to be allowed. | [
"Process",
"validation",
"GET",
"requests",
"."
] | python | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/directory_watcher.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/directory_watcher.py#L96-L145 | def _LoadInternal(self):
"""Internal implementation of Load().
The only difference between this and Load() is that the latter will throw
DirectoryDeletedError on I/O errors if it thinks that the directory has been
permanently deleted.
Yields:
All values that have not been yielded yet.
"""
# If the loader exists, check it for a value.
if not self._loader:
self._InitializeLoader()
while True:
# Yield all the new events in the path we're currently loading from.
for event in self._loader.Load():
yield event
next_path = self._GetNextPath()
if not next_path:
logger.info('No path found after %s', self._path)
# Current path is empty and there are no new paths, so we're done.
return
# There's a new path, so check to make sure there weren't any events
# written between when we finished reading the current path and when we
# checked for the new one. The sequence of events might look something
# like this:
#
# 1. Event #1 written to path #1.
# 2. We check for events and yield event #1 from path #1
# 3. We check for events and see that there are no more events in path #1.
# 4. Event #2 is written to path #1.
# 5. Event #3 is written to path #2.
# 6. We check for a new path and see that path #2 exists.
#
# Without this loop, we would miss event #2. We're also guaranteed by the
# loader contract that no more events will be written to path #1 after
# events start being written to path #2, so we don't have to worry about
# that.
for event in self._loader.Load():
yield event
logger.info('Directory watcher advancing from %s to %s', self._path,
next_path)
# Advance to the next path and start over.
self._SetPath(next_path) | [
"def",
"_LoadInternal",
"(",
"self",
")",
":",
"# If the loader exists, check it for a value.",
"if",
"not",
"self",
".",
"_loader",
":",
"self",
".",
"_InitializeLoader",
"(",
")",
"while",
"True",
":",
"# Yield all the new events in the path we're currently loading from."... | Internal implementation of Load().
The only difference between this and Load() is that the latter will throw
DirectoryDeletedError on I/O errors if it thinks that the directory has been
permanently deleted.
Yields:
All values that have not been yielded yet. | [
"Internal",
"implementation",
"of",
"Load",
"()",
"."
] | python | train |
uw-it-aca/uw-restclients-canvas | uw_canvas/external_tools.py | https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/external_tools.py#L154-L162 | def get_sessionless_launch_url_from_course_sis_id(
self, tool_id, course_sis_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
return self.get_sessionless_launch_url_from_course(
tool_id, self._sis_id(course_sis_id, "course")) | [
"def",
"get_sessionless_launch_url_from_course_sis_id",
"(",
"self",
",",
"tool_id",
",",
"course_sis_id",
")",
":",
"return",
"self",
".",
"get_sessionless_launch_url_from_course",
"(",
"tool_id",
",",
"self",
".",
"_sis_id",
"(",
"course_sis_id",
",",
"\"course\"",
... | Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch | [
"Get",
"a",
"sessionless",
"launch",
"url",
"for",
"an",
"external",
"tool",
"."
] | python | test |
dslackw/slpkg | slpkg/main.py | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L381-L418 | def pkg_tracking(self):
"""Tracking package dependencies
"""
flag = []
options = [
"-t",
"--tracking"
]
additional_options = [
"--check-deps",
"--graph=",
"--case-ins"
]
for arg in self.args[2:]:
if arg.startswith(additional_options[1]):
flag.append(arg)
self.args.remove(arg)
if arg in additional_options:
flag.append(arg)
# clean additional options from args
for f in flag:
if f in self.args:
self.args.remove(f)
# print usage message if wrong additional option
for arg in self.args:
if arg.startswith("--"):
if arg not in additional_options:
usage("")
raise SystemExit()
if (len(self.args) >= 3 and len(self.args) <= 3 and
self.args[0] in options and
self.args[1] in self.meta.repositories):
TrackingDeps(self.args[2], self.args[1], flag).run()
elif (len(self.args) >= 2 and
self.args[1] not in self.meta.repositories):
usage(self.args[1])
else:
usage("") | [
"def",
"pkg_tracking",
"(",
"self",
")",
":",
"flag",
"=",
"[",
"]",
"options",
"=",
"[",
"\"-t\"",
",",
"\"--tracking\"",
"]",
"additional_options",
"=",
"[",
"\"--check-deps\"",
",",
"\"--graph=\"",
",",
"\"--case-ins\"",
"]",
"for",
"arg",
"in",
"self",
... | Tracking package dependencies | [
"Tracking",
"package",
"dependencies"
] | python | train |
cjdrake/pyeda | pyeda/parsing/boolexpr.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/boolexpr.py#L593-L602 | def _zom_arg(lexer):
"""Return zero or more arguments."""
tok = next(lexer)
# ',' EXPR ZOM_X
if isinstance(tok, COMMA):
return (_expr(lexer), ) + _zom_arg(lexer)
# null
else:
lexer.unpop_token(tok)
return tuple() | [
"def",
"_zom_arg",
"(",
"lexer",
")",
":",
"tok",
"=",
"next",
"(",
"lexer",
")",
"# ',' EXPR ZOM_X",
"if",
"isinstance",
"(",
"tok",
",",
"COMMA",
")",
":",
"return",
"(",
"_expr",
"(",
"lexer",
")",
",",
")",
"+",
"_zom_arg",
"(",
"lexer",
")",
"... | Return zero or more arguments. | [
"Return",
"zero",
"or",
"more",
"arguments",
"."
] | python | train |
dossier/dossier.models | dossier/models/web/routes.py | https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L726-L741 | def maybe_store_highlights(file_id, data, tfidf, kvlclient):
'''wrapper around :func:`create_highlights` that stores the response
payload in the `kvlayer` table called `highlights` as a stored
value if data['store'] is `False`. This allows error values as
well as successful responses from :func:`create_highlights` to
both get stored.
'''
payload = create_highlights(data, tfidf)
if data['store'] is True:
stored_payload = {}
stored_payload.update(payload)
stored_payload['state'] = STORED
payload_str = json.dumps(stored_payload)
kvlclient.put('highlights', (file_id, payload_str))
return payload | [
"def",
"maybe_store_highlights",
"(",
"file_id",
",",
"data",
",",
"tfidf",
",",
"kvlclient",
")",
":",
"payload",
"=",
"create_highlights",
"(",
"data",
",",
"tfidf",
")",
"if",
"data",
"[",
"'store'",
"]",
"is",
"True",
":",
"stored_payload",
"=",
"{",
... | wrapper around :func:`create_highlights` that stores the response
payload in the `kvlayer` table called `highlights` as a stored
value if data['store'] is `False`. This allows error values as
well as successful responses from :func:`create_highlights` to
both get stored. | [
"wrapper",
"around",
":",
"func",
":",
"create_highlights",
"that",
"stores",
"the",
"response",
"payload",
"in",
"the",
"kvlayer",
"table",
"called",
"highlights",
"as",
"a",
"stored",
"value",
"if",
"data",
"[",
"store",
"]",
"is",
"False",
".",
"This",
... | python | train |
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/modeling_gpt2.py | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L351-L362 | def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_() | [
"def",
"init_weights",
"(",
"self",
",",
"module",
")",
":",
"if",
"isinstance",
"(",
"module",
",",
"(",
"nn",
".",
"Linear",
",",
"nn",
".",
"Embedding",
")",
")",
":",
"# Slightly different from the TF version which uses truncated_normal for initialization",
"# c... | Initialize the weights. | [
"Initialize",
"the",
"weights",
"."
] | python | train |
mfcloud/python-zvm-sdk | zvmconnector/restclient.py | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmconnector/restclient.py#L1001-L1008 | def _save_file(self, data, path):
"""Save an file to the specified path.
:param data: binary data of the file
:param path: path to save the file to
"""
with open(path, 'wb') as tfile:
for chunk in data:
tfile.write(chunk) | [
"def",
"_save_file",
"(",
"self",
",",
"data",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"tfile",
":",
"for",
"chunk",
"in",
"data",
":",
"tfile",
".",
"write",
"(",
"chunk",
")"
] | Save an file to the specified path.
:param data: binary data of the file
:param path: path to save the file to | [
"Save",
"an",
"file",
"to",
"the",
"specified",
"path",
".",
":",
"param",
"data",
":",
"binary",
"data",
"of",
"the",
"file",
":",
"param",
"path",
":",
"path",
"to",
"save",
"the",
"file",
"to"
] | python | train |
dtcooper/python-fitparse | fitparse/records.py | https://github.com/dtcooper/python-fitparse/blob/40fa2918c3e91bd8f89908ad3bad81c1c1189dd2/fitparse/records.py#L376-L387 | def calculate(cls, byte_arr, crc=0):
"""Compute CRC for input bytes."""
for byte in byte_iter(byte_arr):
# Taken verbatim from FIT SDK docs
tmp = cls.CRC_TABLE[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ cls.CRC_TABLE[byte & 0xF]
tmp = cls.CRC_TABLE[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ cls.CRC_TABLE[(byte >> 4) & 0xF]
return crc | [
"def",
"calculate",
"(",
"cls",
",",
"byte_arr",
",",
"crc",
"=",
"0",
")",
":",
"for",
"byte",
"in",
"byte_iter",
"(",
"byte_arr",
")",
":",
"# Taken verbatim from FIT SDK docs",
"tmp",
"=",
"cls",
".",
"CRC_TABLE",
"[",
"crc",
"&",
"0xF",
"]",
"crc",
... | Compute CRC for input bytes. | [
"Compute",
"CRC",
"for",
"input",
"bytes",
"."
] | python | train |
jonathf/chaospy | chaospy/poly/collection/arithmetics.py | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/collection/arithmetics.py#L76-L130 | def mul(*args):
"""Polynomial multiplication."""
if len(args) > 2:
return add(args[0], add(args[1], args[1:]))
if len(args) == 1:
return args[0]
part1, part2 = args
if not isinstance(part2, Poly):
if isinstance(part2, (float, int)):
part2 = np.asarray(part2)
if not part2.shape:
core = part1.A.copy()
dtype = chaospy.poly.typing.dtyping(
part1.dtype, part2.dtype)
for key in part1.keys:
core[key] = np.asarray(core[key]*part2, dtype)
return Poly(core, part1.dim, part1.shape, dtype)
part2 = Poly(part2)
if part2.dim > part1.dim:
part1 = chaospy.dimension.setdim(part1, part2.dim)
elif part2.dim < part1.dim:
part2 = chaospy.dimension.setdim(part2, part1.dim)
if np.prod(part1.shape) >= np.prod(part2.shape):
shape = part1.shape
else:
shape = part2.shape
dtype = chaospy.poly.typing.dtyping(part1.dtype, part2.dtype)
if part1.dtype != part2.dtype:
if part1.dtype == dtype:
part2 = chaospy.poly.typing.asfloat(part2)
else:
part1 = chaospy.poly.typing.asfloat(part1)
core = {}
for idx1 in part2.A:
for idx2 in part1.A:
key = tuple(np.array(idx1) + np.array(idx2))
core[key] = np.asarray(
core.get(key, 0) + part2.A[idx1]*part1.A[idx2])
core = {key: value for key, value in core.items() if np.any(value)}
out = Poly(core, part1.dim, shape, dtype)
return out | [
"def",
"mul",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"2",
":",
"return",
"add",
"(",
"args",
"[",
"0",
"]",
",",
"add",
"(",
"args",
"[",
"1",
"]",
",",
"args",
"[",
"1",
":",
"]",
")",
")",
"if",
"len",
"(",
"a... | Polynomial multiplication. | [
"Polynomial",
"multiplication",
"."
] | python | train |
Jajcus/pyxmpp2 | pyxmpp2/mainloop/threads.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/mainloop/threads.py#L461-L502 | def stop(self, join = False, timeout = None):
"""Stop the threads.
:Parameters:
- `join`: join the threads (wait until they exit)
- `timeout`: maximum time (in seconds) to wait when `join` is
`True`). No limit when `timeout` is `None`.
"""
logger.debug("Closing the io handlers...")
for handler in self.io_handlers:
handler.close()
if self.event_thread and self.event_thread.is_alive():
logger.debug("Sending the QUIT signal")
self.event_queue.put(QUIT)
logger.debug(" sent")
threads = self.io_threads + self.timeout_threads
for thread in threads:
logger.debug("Stopping thread: {0!r}".format(thread))
thread.stop()
if not join:
return
if self.event_thread:
threads.append(self.event_thread)
if timeout is None:
for thread in threads:
thread.join()
else:
timeout1 = (timeout * 0.01) / len(threads)
threads_left = []
for thread in threads:
logger.debug("Quick-joining thread {0!r}...".format(thread))
thread.join(timeout1)
if thread.is_alive():
logger.debug(" thread still alive".format(thread))
threads_left.append(thread)
if threads_left:
timeout2 = (timeout * 0.99) / len(threads_left)
for thread in threads_left:
logger.debug("Joining thread {0!r}...".format(thread))
thread.join(timeout2)
self.io_threads = []
self.event_thread = None | [
"def",
"stop",
"(",
"self",
",",
"join",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"Closing the io handlers...\"",
")",
"for",
"handler",
"in",
"self",
".",
"io_handlers",
":",
"handler",
".",
"close",
"(",
")",
... | Stop the threads.
:Parameters:
- `join`: join the threads (wait until they exit)
- `timeout`: maximum time (in seconds) to wait when `join` is
`True`). No limit when `timeout` is `None`. | [
"Stop",
"the",
"threads",
"."
] | python | valid |
annoviko/pyclustering | pyclustering/nnet/cnn.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/cnn.py#L449-L476 | def show_network(self):
"""!
@brief Shows structure of the network: neurons and connections between them.
"""
dimension = len(self.__location[0])
if (dimension != 3) and (dimension != 2):
raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented')
(fig, axes) = self.__create_surface(dimension)
for i in range(0, self.__num_osc, 1):
if dimension == 2:
axes.plot(self.__location[i][0], self.__location[i][1], 'bo')
for j in range(i, self.__num_osc, 1): # draw connection between two points only one time
if self.__weights[i][j] > 0.0:
axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], 'b-', linewidth = 0.5)
elif dimension == 3:
axes.scatter(self.__location[i][0], self.__location[i][1], self.__location[i][2], c = 'b', marker = 'o')
for j in range(i, self.__num_osc, 1): # draw connection between two points only one time
if self.__weights[i][j] > 0.0:
axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], [self.__location[i][2], self.__location[j][2]], 'b-', linewidth = 0.5)
plt.grid()
plt.show() | [
"def",
"show_network",
"(",
"self",
")",
":",
"dimension",
"=",
"len",
"(",
"self",
".",
"__location",
"[",
"0",
"]",
")",
"if",
"(",
"dimension",
"!=",
"3",
")",
"and",
"(",
"dimension",
"!=",
"2",
")",
":",
"raise",
"NameError",
"(",
"'Network that... | !
@brief Shows structure of the network: neurons and connections between them. | [
"!"
] | python | valid |
inasafe/inasafe | safe/gui/tools/wizard/step_fc90_analysis.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc90_analysis.py#L302-L310 | def show_busy(self):
"""Lock buttons and enable the busy cursor."""
self.progress_bar.show()
self.parent.pbnNext.setEnabled(False)
self.parent.pbnBack.setEnabled(False)
self.parent.pbnCancel.setEnabled(False)
self.parent.repaint()
enable_busy_cursor()
QgsApplication.processEvents() | [
"def",
"show_busy",
"(",
"self",
")",
":",
"self",
".",
"progress_bar",
".",
"show",
"(",
")",
"self",
".",
"parent",
".",
"pbnNext",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"parent",
".",
"pbnBack",
".",
"setEnabled",
"(",
"False",
")",
"... | Lock buttons and enable the busy cursor. | [
"Lock",
"buttons",
"and",
"enable",
"the",
"busy",
"cursor",
"."
] | python | train |
python-security/pyt | pyt/formatters/screen.py | https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/formatters/screen.py#L17-L46 | def report(
vulnerabilities,
fileobj,
print_sanitised,
):
"""
Prints issues in color-coded text format.
Args:
vulnerabilities: list of vulnerabilities to report
fileobj: The output file object, which may be sys.stdout
"""
n_vulnerabilities = len(vulnerabilities)
unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)]
n_unsanitised = len(unsanitised_vulnerabilities)
n_sanitised = n_vulnerabilities - n_unsanitised
heading = "{} vulnerabilit{} found{}.\n".format(
'No' if n_unsanitised == 0 else n_unsanitised,
'y' if n_unsanitised == 1 else 'ies',
" (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
)
vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities
with fileobj:
for i, vulnerability in enumerate(vulnerabilities_to_print, start=1):
fileobj.write(vulnerability_to_str(i, vulnerability))
if n_unsanitised == 0:
fileobj.write(color(heading, GOOD))
else:
fileobj.write(color(heading, DANGER)) | [
"def",
"report",
"(",
"vulnerabilities",
",",
"fileobj",
",",
"print_sanitised",
",",
")",
":",
"n_vulnerabilities",
"=",
"len",
"(",
"vulnerabilities",
")",
"unsanitised_vulnerabilities",
"=",
"[",
"v",
"for",
"v",
"in",
"vulnerabilities",
"if",
"not",
"isinsta... | Prints issues in color-coded text format.
Args:
vulnerabilities: list of vulnerabilities to report
fileobj: The output file object, which may be sys.stdout | [
"Prints",
"issues",
"in",
"color",
"-",
"coded",
"text",
"format",
"."
] | python | train |
benoitkugler/abstractDataLibrary | pyDLib/Core/sql.py | https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/sql.py#L209-L223 | def cree_local_DB(scheme):
"""Create emmpt DB according to the given scheme : dict { table : [ (column_name, column_type), .. ]}
Usefull at installation of application (and for developement)
"""
conn = LocalConnexion()
req = ""
for table, fields in scheme.items():
req += f"DROP TABLE IF EXISTS {table};"
req_fields = ", ".join(f'{c_name} {c_type}' for c_name, c_type in fields)
req += f"""CREATE TABLE {table} ( {req_fields} ) ;"""
cur = conn.cursor()
cur.executescript(req)
conn.connexion.commit()
conn.connexion.close()
logging.info("Database created with succes.") | [
"def",
"cree_local_DB",
"(",
"scheme",
")",
":",
"conn",
"=",
"LocalConnexion",
"(",
")",
"req",
"=",
"\"\"",
"for",
"table",
",",
"fields",
"in",
"scheme",
".",
"items",
"(",
")",
":",
"req",
"+=",
"f\"DROP TABLE IF EXISTS {table};\"",
"req_fields",
"=",
... | Create emmpt DB according to the given scheme : dict { table : [ (column_name, column_type), .. ]}
Usefull at installation of application (and for developement) | [
"Create",
"emmpt",
"DB",
"according",
"to",
"the",
"given",
"scheme",
":",
"dict",
"{",
"table",
":",
"[",
"(",
"column_name",
"column_type",
")",
"..",
"]",
"}",
"Usefull",
"at",
"installation",
"of",
"application",
"(",
"and",
"for",
"developement",
")"
... | python | train |
attilaolah/diffbot.py | diffbot.py | https://github.com/attilaolah/diffbot.py/blob/b66d68a36a22c944297c0575413db23687029af4/diffbot.py#L195-L197 | def api(name, url, token, **kwargs):
"""Shortcut for caling methods on `Client(token, version)`."""
return Client(token).api(name, url, **kwargs) | [
"def",
"api",
"(",
"name",
",",
"url",
",",
"token",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Client",
"(",
"token",
")",
".",
"api",
"(",
"name",
",",
"url",
",",
"*",
"*",
"kwargs",
")"
] | Shortcut for caling methods on `Client(token, version)`. | [
"Shortcut",
"for",
"caling",
"methods",
"on",
"Client",
"(",
"token",
"version",
")",
"."
] | python | train |
juju/python-libjuju | juju/utils.py | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/utils.py#L32-L43 | def _read_ssh_key():
'''
Inner function for read_ssh_key, suitable for passing to our
Executor.
'''
default_data_dir = Path(Path.home(), ".local", "share", "juju")
juju_data = os.environ.get("JUJU_DATA", default_data_dir)
ssh_key_path = Path(juju_data, 'ssh', 'juju_id_rsa.pub')
with ssh_key_path.open('r') as ssh_key_file:
ssh_key = ssh_key_file.readlines()[0].strip()
return ssh_key | [
"def",
"_read_ssh_key",
"(",
")",
":",
"default_data_dir",
"=",
"Path",
"(",
"Path",
".",
"home",
"(",
")",
",",
"\".local\"",
",",
"\"share\"",
",",
"\"juju\"",
")",
"juju_data",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"JUJU_DATA\"",
",",
"default... | Inner function for read_ssh_key, suitable for passing to our
Executor. | [
"Inner",
"function",
"for",
"read_ssh_key",
"suitable",
"for",
"passing",
"to",
"our",
"Executor",
"."
] | python | train |
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1224-L1322 | def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
"""Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency :
"""
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
N_runs, N_samples = cluster_runs.shape
N_consensus_labels = np.unique(consensus_labels).size
indices_consensus_adjacency = np.empty(0, dtype = np.int32)
indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
for k in range(N_consensus_labels):
indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
shape = (N_consensus_labels, N_samples))
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
tables.Float32Atom(), (0, N_consensus_labels),
"Matrix of overlaps between each run and "
"the consensus labellings", filters = FILTERS,
expectedrows = N_consensus_labels * N_runs)
mutual_info_list = []
cluster_dims_list = [0]
for i in range(N_runs):
M = cluster_runs[i]
mutual_info_list.append(ceEvalMutual(M, consensus_labels))
finite_indices = np.where(np.isfinite(M))[0]
positive_indices = np.where(M >= 0)[0]
selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
cluster_ids = np.unique(M[selected_indices])
n_ids = cluster_ids.size
cluster_dims_list.append(n_ids)
unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
indices = np.empty(0, dtype = int)
indptr = [0]
c = 0
for elt in cluster_ids:
indices = np.append(indices, np.where(M == elt)[0])
indptr.append(indices.size)
for k in range(N_consensus_labels):
x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
unions[c, k] = np.union1d(indices, x).size
c += 1
data = np.ones(indices.size, dtype = int)
I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
intersections = I.dot(consensus_adjacency.transpose())
intersections = np.squeeze(np.asarray(intersections.todense()))
overlap_matrix.append(np.divide(intersections, unions))
fileh.close()
return cluster_dims_list, mutual_info_list, consensus_adjacency | [
"def",
"overlap_matrix",
"(",
"hdf5_file_name",
",",
"consensus_labels",
",",
"cluster_runs",
")",
":",
"if",
"reduce",
"(",
"operator",
".",
"mul",
",",
"cluster_runs",
".",
"shape",
",",
"1",
")",
"==",
"max",
"(",
"cluster_runs",
".",
"shape",
")",
":",... | Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency : | [
"Writes",
"on",
"disk",
"(",
"in",
"an",
"HDF5",
"file",
"whose",
"handle",
"is",
"provided",
"as",
"the",
"first",
"argument",
"to",
"this",
"function",
")",
"a",
"stack",
"of",
"matrices",
"each",
"describing",
"for",
"a",
"particular",
"run",
"the",
"... | python | train |
def get_ftp_filemeta(parsed_url, username='anonymous', password='nlpia@totalgood.com'):
    """Build a metadata dict for a file referenced by an FTP URL.

    FIXME: the remote file size is not yet queried from the FTP server,
    so ``remote_size`` is always -1. ``password`` is accepted for that
    future lookup but currently unused.

    Args:
        parsed_url: result of ``urllib.parse.urlparse`` on an ``ftp://`` URL.
        username (str): login name to report when the URL itself carries none.
        password (str): FTP password (reserved for the future size lookup).

    Returns:
        dict: ``url``, ``hostname``, ``path``, ``username``,
        ``remote_size`` (-1 placeholder) and ``filename`` (basename of
        the remote path).
    """
    # NOTE(review): the original carried unreachable FTP-download code after
    # this return; it referenced an undefined name (``filename``) and opened
    # a file without closing it, so it has been removed.
    return dict(
        url=parsed_url.geturl(),
        hostname=parsed_url.hostname,
        path=parsed_url.path,
        username=(parsed_url.username or username),
        remote_size=-1,
        filename=os.path.basename(parsed_url.path))
"def",
"get_ftp_filemeta",
"(",
"parsed_url",
",",
"username",
"=",
"'anonymous'",
",",
"password",
"=",
"'nlpia@totalgood.com'",
")",
":",
"return",
"dict",
"(",
"url",
"=",
"parsed_url",
".",
"geturl",
"(",
")",
",",
"hostname",
"=",
"parsed_url",
".",
"ho... | FIXME: Get file size, hostname, path metadata from FTP server using parsed_url (urlparse) | [
"FIXME",
":",
"Get",
"file",
"size",
"hostname",
"path",
"metadata",
"from",
"FTP",
"server",
"using",
"parsed_url",
"(",
"urlparse",
")"
] | python | train |
def index(self, sub, *args):
    '''
    Return the lowest index in the bytes where ``sub`` is found, optionally
    restricted to ``[start:end]`` given as extra positional arguments.

    Mirrors ``bytes.index``: ``sub`` may be an integer (a single byte value)
    or a bytes-like object. Raises ValueError if ``sub`` is not found and
    TypeError if it cannot be converted to bytes.
    '''
    if isinstance(sub, int):
        # Normalise the optional start/end arguments, as in
        # bytes.index(sub[, start[, end]]).
        if len(args) == 0:
            start, end = 0, len(self)
        elif len(args) == 1:
            # BUGFIX: the original left ``end`` unassigned on this path,
            # raising NameError for e.g. b.index(65, 2).
            start, end = args[0], len(self)
        elif len(args) == 2:
            start, end = args
        else:
            raise TypeError('takes at most 3 arguments')
        # slice.indices clamps negative / out-of-range bounds exactly the
        # way slicing does, keeping the offset arithmetic below valid.
        start, end, _ = slice(start, end).indices(len(self))
        # BUGFIX: return the absolute position (as bytes.index does), not
        # the position relative to the searched slice.
        return start + list(self)[start:end].index(sub)
    if not isinstance(sub, bytes):
        try:
            sub = self.__class__(sub)
        except (TypeError, ValueError):
            raise TypeError("can't convert sub to bytes")
    try:
        return super(newbytes, self).index(sub, *args)
    except ValueError:
        raise ValueError('substring not found')
"def",
"index",
"(",
"self",
",",
"sub",
",",
"*",
"args",
")",
":",
"if",
"isinstance",
"(",
"sub",
",",
"int",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"start",
",",
"end",
"=",
"0",
",",
"len",
"(",
"self",
")",
"elif",
"... | Returns index of sub in bytes.
Raises ValueError if byte is not in bytes and TypeError if can't
be converted bytes or its length is not 1. | [
"Returns",
"index",
"of",
"sub",
"in",
"bytes",
".",
"Raises",
"ValueError",
"if",
"byte",
"is",
"not",
"in",
"bytes",
"and",
"TypeError",
"if",
"can",
"t",
"be",
"converted",
"bytes",
"or",
"its",
"length",
"is",
"not",
"1",
"."
] | python | train |
def is_valid_index(self, code):
    """
    Check whether ``code`` is a known stock-index code.

    Args:
        code (str): index code to validate (case-insensitive), e.g. 'NIFTY'.

    Returns:
        bool: True when the upper-cased code appears in the list returned
        by ``get_index_list()``, False otherwise.
    """
    # A membership test already yields a bool; the original's
    # ``True if ... else False`` ternary was redundant.
    return code.upper() in self.get_index_list()
"def",
"is_valid_index",
"(",
"self",
",",
"code",
")",
":",
"index_list",
"=",
"self",
".",
"get_index_list",
"(",
")",
"return",
"True",
"if",
"code",
".",
"upper",
"(",
")",
"in",
"index_list",
"else",
"False"
] | returns: True | Flase , based on whether code is valid | [
"returns",
":",
"True",
"|",
"Flase",
"based",
"on",
"whether",
"code",
"is",
"valid"
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L12030-L12051 | def _set_cee_map(self, v, load=False):
    """
    Setter method for cee_map, mapped from YANG variable /cee_map (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cee_map is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cee_map() directly.
    """
    # NOTE(review): ``load`` is accepted but unused in this setter — presumably
    # kept for signature parity with other generated setters; confirm.
    # Values arriving already wrapped in a YANG type expose a ``_utype``
    # constructor; rebuild from it so validation below starts from a raw value.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce v into the generated YANG list type for /cee-map (keyed by
      # "name").  YANGDynClass raises TypeError or ValueError when v cannot
      # be validated against that type.
      t = YANGDynClass(v,base=YANGListType("name",cee_map.cee_map, yang_name="cee-map", rest_name="cee-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'CEE map command', u'callpoint': u'qos_cee_map', u'sort-priority': u'36'}}), is_container='list', yang_name="cee-map", rest_name="cee-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'CEE map command', u'callpoint': u'qos_cee_map', u'sort-priority': u'36'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cee', defining_module='brocade-qos-cee', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      # Re-raise as a ValueError carrying the structured error payload used
      # throughout these generated bindings.
      raise ValueError({
        'error-string': """cee_map must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("name",cee_map.cee_map, yang_name="cee-map", rest_name="cee-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'CEE map command', u'callpoint': u'qos_cee_map', u'sort-priority': u'36'}}), is_container='list', yang_name="cee-map", rest_name="cee-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'CEE map command', u'callpoint': u'qos_cee_map', u'sort-priority': u'36'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cee', defining_module='brocade-qos-cee', yang_type='list', is_config=True)""",
        })
    # Store the validated container on the name-mangled backing attribute.
    self.__cee_map = t
    # NOTE(review): ``_set`` looks like a change-notification hook present only
    # on some generated classes (hence the hasattr guard) — confirm semantics.
    if hasattr(self, '_set'):
      self._set()
"def",
"_set_cee_map",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",... | Setter method for cee_map, mapped from YANG variable /cee_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cee_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cee_map() directly. | [
"Setter",
"method",
"for",
"cee_map",
"mapped",
"from",
"YANG",
"variable",
"/",
"cee_map",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_cee_... | python | train |
def write_transcriptions(utterances: 'List[Utterance]',
                         tgt_dir: Path, ext: str, lazy: bool) -> None:
    """ Write the utterance transcriptions to files in the tgt_dir.

    Args:
        utterances: A list of Utterance objects to be written.
        tgt_dir: The directory in which to write the text of the utterances,
            one file per utterance.
        ext: The file extension for the utterances. Typically something like
            "phonemes", or "phonemes_and_tones".
        lazy: If True, skip any utterance whose transcription file already
            exists on disk instead of rewriting it.
    """
    # Make sure the target directory (and any missing parents) exists.
    tgt_dir.mkdir(parents=True, exist_ok=True)
    for utter in utterances:
        out_path = tgt_dir / "{}.{}".format(utter.prefix, ext)
        # Lazy mode: never clobber a transcription already on disk.
        if lazy and out_path.is_file():
            continue
        with out_path.open("w") as f:
            # print() appends the trailing newline.
            print(utter.text, file=f)
"def",
"write_transcriptions",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
",",
"tgt_dir",
":",
"Path",
",",
"ext",
":",
"str",
",",
"lazy",
":",
"bool",
")",
"->",
"None",
":",
"tgt_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"e... | Write the utterance transcriptions to files in the tgt_dir. Is lazy and
checks if the file already exists.
Args:
utterances: A list of Utterance objects to be written.
tgt_dir: The directory in which to write the text of the utterances,
one file per utterance.
ext: The file extension for the utterances. Typically something like
"phonemes", or "phonemes_and_tones". | [
"Write",
"the",
"utterance",
"transcriptions",
"to",
"files",
"in",
"the",
"tgt_dir",
".",
"Is",
"lazy",
"and",
"checks",
"if",
"the",
"file",
"already",
"exists",
"."
] | python | train |
def rename(self, old_name, new_name):
    """Rename key ``old_name`` to ``new_name``.

    Raises:
        KeyError: if ``old_name`` does not exist in the database.
    """
    try:
        self.api.rename(mkey(old_name), mkey(new_name))
    except ResponseError as exc:
        # BUGFIX: ``except ResponseError, exc`` is Python-2-only syntax;
        # the ``as`` form works on Python 2.6+ and Python 3 alike.
        # Translate Redis's "no such key" failure into the dict-like
        # KeyError this client exposes; re-raise anything else untouched.
        if "no such key" in exc.args:
            raise KeyError(old_name)
        raise
"def",
"rename",
"(",
"self",
",",
"old_name",
",",
"new_name",
")",
":",
"try",
":",
"self",
".",
"api",
".",
"rename",
"(",
"mkey",
"(",
"old_name",
")",
",",
"mkey",
"(",
"new_name",
")",
")",
"except",
"ResponseError",
",",
"exc",
":",
"if",
"\... | Rename key to a new name. | [
"Rename",
"key",
"to",
"a",
"new",
"name",
"."
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.