repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
|---|---|---|---|---|---|---|---|---|
EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/utility.py | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/utility.py#L62-L83 | def object_as_axis(arr, dtype, axis=-1):
"""
cast an array of void objects to a typed axis
Parameters
----------
arr : ndarray, [ndim], void
array of type np.void
dtype : numpy dtype object
the output dtype to cast the input array to
axis : int
position to insert the newly formed axis into
Returns
-------
ndarray, [ndim+1], dtype
output array cast to given dtype
"""
# view the void objects as typed elements
arr = arr.view(dtype).reshape(arr.shape + (-1,))
# put the axis in the specified location
return np.rollaxis(arr, -1, axis) | [
"def",
"object_as_axis",
"(",
"arr",
",",
"dtype",
",",
"axis",
"=",
"-",
"1",
")",
":",
"# view the void objects as typed elements",
"arr",
"=",
"arr",
".",
"view",
"(",
"dtype",
")",
".",
"reshape",
"(",
"arr",
".",
"shape",
"+",
"(",
"-",
"1",
",",
... | cast an array of void objects to a typed axis
Parameters
----------
arr : ndarray, [ndim], void
array of type np.void
dtype : numpy dtype object
the output dtype to cast the input array to
axis : int
position to insert the newly formed axis into
Returns
-------
ndarray, [ndim+1], dtype
output array cast to given dtype | [
"cast",
"an",
"array",
"of",
"void",
"objects",
"to",
"a",
"typed",
"axis"
] | python | train |
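A minimal runnable sketch of `object_as_axis` from the row above, assuming only NumPy; the void-packing setup is illustrative and not part of the original function:

```python
import numpy as np

def object_as_axis(arr, dtype, axis=-1):
    # view the void objects as typed elements, then move the new axis into place
    arr = arr.view(dtype).reshape(arr.shape + (-1,))
    return np.rollaxis(arr, -1, axis)

# pack each row of a (4, 3) int64 array into one void element, then unpack it
a = np.arange(12, dtype=np.int64).reshape(4, 3)
packed = np.ascontiguousarray(a).view(np.dtype((np.void, 3 * a.dtype.itemsize))).reshape(4)
restored = object_as_axis(packed, np.int64, axis=-1)
assert restored.shape == (4, 3) and (restored == a).all()
```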
dhermes/bezier | src/bezier/surface.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L554-L598 | def evaluate_cartesian(self, s, t, _verify=True):
r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array).
"""
if _verify:
self._verify_cartesian(s, t)
return _surface_helpers.evaluate_barycentric(
self._nodes, self._degree, 1.0 - s - t, s, t
) | [
"def",
"evaluate_cartesian",
"(",
"self",
",",
"s",
",",
"t",
",",
"_verify",
"=",
"True",
")",
":",
"if",
"_verify",
":",
"self",
".",
"_verify_cartesian",
"(",
"s",
",",
"t",
")",
"return",
"_surface_helpers",
".",
"evaluate_barycentric",
"(",
"self",
... | r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array). | [
"r",
"Compute",
"a",
"point",
"on",
"the",
"surface",
"."
] | python | train |
mozilla/socorrolib | socorrolib/lib/httpclient.py | https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/httpclient.py#L27-L46 | def _process_response(self):
"""Return a JSON result after an HTTP Request.
Process the response of an HTTP Request and make it a JSON error if
it failed. Otherwise return the response's content.
"""
response = self.conn.getresponse()
if response.status == 200 or response.status == 201:
data = response.read()
else:
data = {
"error": {
"code": response.status,
"reason": response.reason,
"data": response.read()
}
}
return data | [
"def",
"_process_response",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"conn",
".",
"getresponse",
"(",
")",
"if",
"response",
".",
"status",
"==",
"200",
"or",
"response",
".",
"status",
"==",
"201",
":",
"data",
"=",
"response",
".",
"read"... | Return a JSON result after an HTTP Request.
Process the response of an HTTP Request and make it a JSON error if
it failed. Otherwise return the response's content. | [
"Return",
"a",
"JSON",
"result",
"after",
"an",
"HTTP",
"Request",
"."
] | python | train |
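The status dispatch in `_process_response` above, sketched standalone (the original reads from an `http.client` connection held on `self.conn`; names here are illustrative):

```python
def process_response(status, reason, payload):
    # 200/201 pass the body through; anything else is wrapped as an error dict
    if status in (200, 201):
        return payload
    return {"error": {"code": status, "reason": reason, "data": payload}}

assert process_response(201, "Created", b"ok") == b"ok"
assert process_response(404, "Not Found", b"")["error"]["code"] == 404
```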
liip/taxi | taxi/timesheet/parser.py | https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/parser.py#L101-L120 | def duration_to_text(self, duration):
"""
Return the textual representation of the given `duration`. The duration can either be a tuple of
:class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the
given `duration` is a tuple) or a number.
"""
if isinstance(duration, tuple):
start = (duration[0].strftime(self.ENTRY_DURATION_FORMAT)
if duration[0] is not None
else '')
end = (duration[1].strftime(self.ENTRY_DURATION_FORMAT)
if duration[1] is not None
else '?')
duration = '%s-%s' % (start, end)
else:
duration = six.text_type(duration)
return duration | [
"def",
"duration_to_text",
"(",
"self",
",",
"duration",
")",
":",
"if",
"isinstance",
"(",
"duration",
",",
"tuple",
")",
":",
"start",
"=",
"(",
"duration",
"[",
"0",
"]",
".",
"strftime",
"(",
"self",
".",
"ENTRY_DURATION_FORMAT",
")",
"if",
"duration... | Return the textual representation of the given `duration`. The duration can either be a tuple of
:class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the
given `duration` is a tuple) or a number. | [
"Return",
"the",
"textual",
"representation",
"of",
"the",
"given",
"duration",
".",
"The",
"duration",
"can",
"either",
"be",
"a",
"tuple",
"of",
":",
"class",
":",
"datetime",
".",
"time",
"objects",
"or",
"a",
"simple",
"number",
".",
"The",
"returned",... | python | train |
CxAalto/gtfspy | gtfspy/stats.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L14-L35 | def get_spatial_bounds(gtfs, as_dict=False):
"""
Parameters
----------
gtfs
Returns
-------
min_lon: float
max_lon: float
min_lat: float
max_lat: float
"""
stats = get_stats(gtfs)
lon_min = stats['lon_min']
lon_max = stats['lon_max']
lat_min = stats['lat_min']
lat_max = stats['lat_max']
if as_dict:
return {'lon_min': lon_min, 'lon_max': lon_max, 'lat_min': lat_min, 'lat_max': lat_max}
else:
return lon_min, lon_max, lat_min, lat_max | [
"def",
"get_spatial_bounds",
"(",
"gtfs",
",",
"as_dict",
"=",
"False",
")",
":",
"stats",
"=",
"get_stats",
"(",
"gtfs",
")",
"lon_min",
"=",
"stats",
"[",
"'lon_min'",
"]",
"lon_max",
"=",
"stats",
"[",
"'lon_max'",
"]",
"lat_min",
"=",
"stats",
"[",
... | Parameters
----------
gtfs
Returns
-------
min_lon: float
max_lon: float
min_lat: float
max_lat: float | [
"Parameters",
"----------",
"gtfs"
] | python | valid |
Kronuz/pyScss | scss/selector.py | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/selector.py#L393-L409 | def lookup_key(self):
"""Build a key from the "important" parts of a selector: elements,
classes, ids.
"""
parts = set()
for node in self.simple_selectors:
for token in node.tokens:
if token[0] not in ':[':
parts.add(token)
if not parts:
# Should always have at least ONE key; selectors with no elements,
# no classes, and no ids can be indexed as None to avoid a scan of
# every selector in the entire document
parts.add(None)
return frozenset(parts) | [
"def",
"lookup_key",
"(",
"self",
")",
":",
"parts",
"=",
"set",
"(",
")",
"for",
"node",
"in",
"self",
".",
"simple_selectors",
":",
"for",
"token",
"in",
"node",
".",
"tokens",
":",
"if",
"token",
"[",
"0",
"]",
"not",
"in",
"':['",
":",
"parts",... | Build a key from the "important" parts of a selector: elements,
classes, ids. | [
"Build",
"a",
"key",
"from",
"the",
"important",
"parts",
"of",
"a",
"selector",
":",
"elements",
"classes",
"ids",
"."
] | python | train |
Guake/guake | guake/prefs.py | https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L944-L957 | def set_palette_name(self, palette_name):
"""If the given palette matches an existing one, shows it in the
combobox
"""
combo = self.get_widget('palette_name')
found = False
log.debug("wanting palette: %r", palette_name)
for i in combo.get_model():
if i[0] == palette_name:
combo.set_active_iter(i.iter)
found = True
break
if not found:
combo.set_active(self.custom_palette_index) | [
"def",
"set_palette_name",
"(",
"self",
",",
"palette_name",
")",
":",
"combo",
"=",
"self",
".",
"get_widget",
"(",
"'palette_name'",
")",
"found",
"=",
"False",
"log",
".",
"debug",
"(",
"\"wanting palette: %r\"",
",",
"palette_name",
")",
"for",
"i",
"in"... | If the given palette matches an existing one, shows it in the
combobox | [
"If",
"the",
"given",
"palette",
"matches",
"an",
"existing",
"one",
"shows",
"it",
"in",
"the",
"combobox"
] | python | train |
ray-project/ray | python/ray/rllib/agents/ppo/utils.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/agents/ppo/utils.py#L8-L21 | def flatten(weights, start=0, stop=2):
"""This methods reshapes all values in a dictionary.
The indices from start to stop will be flattened into a single index.
Args:
weights: A dictionary mapping keys to numpy arrays.
start: The starting index.
stop: The ending index.
"""
for key, val in weights.items():
new_shape = val.shape[0:start] + (-1, ) + val.shape[stop:]
weights[key] = val.reshape(new_shape)
return weights | [
"def",
"flatten",
"(",
"weights",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"2",
")",
":",
"for",
"key",
",",
"val",
"in",
"weights",
".",
"items",
"(",
")",
":",
"new_shape",
"=",
"val",
".",
"shape",
"[",
"0",
":",
"start",
"]",
"+",
"(",
"... | This methods reshapes all values in a dictionary.
The indices from start to stop will be flattened into a single index.
Args:
weights: A dictionary mapping keys to numpy arrays.
start: The starting index.
stop: The ending index. | [
"This",
"methods",
"reshapes",
"all",
"values",
"in",
"a",
"dictionary",
"."
] | python | train |
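What the reshape in `flatten` above does end to end, runnable with only NumPy:

```python
import numpy as np

def flatten(weights, start=0, stop=2):
    # merge axes start..stop-1 of every array into a single inferred (-1) axis
    for key, val in weights.items():
        new_shape = val.shape[0:start] + (-1,) + val.shape[stop:]
        weights[key] = val.reshape(new_shape)
    return weights

w = {'layer0': np.zeros((4, 5, 6))}
flatten(w)  # default start=0, stop=2 merges the first two axes
assert w['layer0'].shape == (20, 6)
```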
holgern/pyedflib | pyedflib/edfwriter.py | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L421-L432 | def setSamplefrequency(self, edfsignal, samplefrequency):
"""
Sets the samplefrequency of signal edfsignal.
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['sample_rate'] = samplefrequency
self.update_header() | [
"def",
"setSamplefrequency",
"(",
"self",
",",
"edfsignal",
",",
"samplefrequency",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
... | Sets the samplefrequency of signal edfsignal.
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"samplefrequency",
"of",
"signal",
"edfsignal",
"."
] | python | train |
ray-project/ray | python/ray/actor.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L78-L106 | def method(*args, **kwargs):
"""Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method.
"""
assert len(args) == 0
assert len(kwargs) == 1
assert "num_return_vals" in kwargs
num_return_vals = kwargs["num_return_vals"]
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
return annotate_method | [
"def",
"method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"len",
"(",
"args",
")",
"==",
"0",
"assert",
"len",
"(",
"kwargs",
")",
"==",
"1",
"assert",
"\"num_return_vals\"",
"in",
"kwargs",
"num_return_vals",
"=",
"kwargs",
"[",
... | Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method. | [
"Annotate",
"an",
"actor",
"method",
"."
] | python | train |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L509-L518 | def validate_headers(self):
"""
Check if CSV metadata files have the right format.
"""
super().validate()
self.validate_header(self.channeldir, self.channelinfo, CHANNEL_INFO_HEADER)
self.validate_header(self.channeldir, self.contentinfo, CONTENT_INFO_HEADER)
if self.has_exercises():
self.validate_header(self.channeldir, self.exercisesinfo, EXERCISE_INFO_HEADER)
self.validate_header(self.channeldir, self.questionsinfo, EXERCISE_QUESTIONS_INFO_HEADER) | [
"def",
"validate_headers",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"validate",
"(",
")",
"self",
".",
"validate_header",
"(",
"self",
".",
"channeldir",
",",
"self",
".",
"channelinfo",
",",
"CHANNEL_INFO_HEADER",
")",
"self",
".",
"validate_header",
... | Check if CSV metadata files have the right format. | [
"Check",
"if",
"CSV",
"metadata",
"files",
"have",
"the",
"right",
"format",
"."
] | python | train |
limix/limix-core | limix_core/mean/meanKronSum.py | https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/meanKronSum.py#L169-L181 | def Y(self, value):
""" set phenotype """
self._N = value.shape[0]
self._P = value.shape[1]
self._Y = value
# missing data
self._Iok = ~sp.isnan(value)
self._veIok = vec(self._Iok)[:, 0]
self._miss = (~self._Iok).any()
# notify and clear_cached
self.clear_cache('pheno')
self._notify()
self._notify('pheno') | [
"def",
"Y",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_N",
"=",
"value",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"_P",
"=",
"value",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"_Y",
"=",
"value",
"# missing data",
"self",
".",
"_Iok",... | set phenotype | [
"set",
"phenotype"
] | python | train |
gplepage/lsqfit | src/lsqfit/__init__.py | https://github.com/gplepage/lsqfit/blob/6a57fd687632c175fccb47d8e8e943cda5e9ce9d/src/lsqfit/__init__.py#L831-L842 | def check_roundoff(self, rtol=0.25, atol=1e-6):
""" Check for roundoff errors in fit.p.
Compares standard deviations from fit.p and fit.palt to see if they
agree to within relative tolerance ``rtol`` and absolute tolerance
``atol``. Generates a warning if they do not (in which
case an SVD cut might be advisable).
"""
psdev = _gvar.sdev(self.p.flat)
paltsdev = _gvar.sdev(self.palt.flat)
if not numpy.allclose(psdev, paltsdev, rtol=rtol, atol=atol):
warnings.warn("Possible roundoff errors in fit.p; try svd cut.") | [
"def",
"check_roundoff",
"(",
"self",
",",
"rtol",
"=",
"0.25",
",",
"atol",
"=",
"1e-6",
")",
":",
"psdev",
"=",
"_gvar",
".",
"sdev",
"(",
"self",
".",
"p",
".",
"flat",
")",
"paltsdev",
"=",
"_gvar",
".",
"sdev",
"(",
"self",
".",
"palt",
".",... | Check for roundoff errors in fit.p.
Compares standard deviations from fit.p and fit.palt to see if they
agree to within relative tolerance ``rtol`` and absolute tolerance
``atol``. Generates a warning if they do not (in which
case an SVD cut might be advisable). | [
"Check",
"for",
"roundoff",
"errors",
"in",
"fit",
".",
"p",
"."
] | python | train |
tylerbutler/propane | propane/paths.py | https://github.com/tylerbutler/propane/blob/6c404285ab8d78865b7175a5c8adf8fae12d6be5/propane/paths.py#L88-L100 | def has_files(the_path):
"""Given a path, returns whether the path has any files in it or any subfolders. Works recursively."""
the_path = Path(the_path)
try:
for _ in the_path.walkfiles():
return True
return False
except OSError as ex:
if ex.errno == errno.ENOENT:
# ignore
return False
else:
raise | [
"def",
"has_files",
"(",
"the_path",
")",
":",
"the_path",
"=",
"Path",
"(",
"the_path",
")",
"try",
":",
"for",
"_",
"in",
"the_path",
".",
"walkfiles",
"(",
")",
":",
"return",
"True",
"return",
"False",
"except",
"OSError",
"as",
"ex",
":",
"if",
... | Given a path, returns whether the path has any files in it or any subfolders. Works recursively. | [
"Given",
"a",
"path",
"returns",
"whether",
"the",
"path",
"has",
"any",
"files",
"in",
"it",
"or",
"any",
"subfolders",
".",
"Works",
"recursively",
"."
] | python | train |
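An equivalent check written against the standard library's `pathlib`, for comparison with the `path.py`-based helper above; this is a sketch, not the project's code:

```python
import errno
import pathlib

def has_files(the_path):
    # True if any file exists anywhere under the_path, recursively
    try:
        return any(p.is_file() for p in pathlib.Path(the_path).rglob('*'))
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            return False  # missing directory counts as having no files
        raise
```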
gregmuellegger/django-autofixture | autofixture/base.py | https://github.com/gregmuellegger/django-autofixture/blob/0b696fd3a06747459981e4269aff427676f84ae0/autofixture/base.py#L549-L563 | def create(self, count=1, commit=True, **kwargs):
'''
Create and return ``count`` model instances. If *commit* is ``False``
the instances will not be saved and many to many relations will not be
processed.
May raise ``CreateInstanceError`` if constraints are not satisfied.
The method internally calls :meth:`create_one` to generate instances.
'''
object_list = []
for i in range(count):
instance = self.create_one(commit=commit, **kwargs)
object_list.append(instance)
return object_list | [
"def",
"create",
"(",
"self",
",",
"count",
"=",
"1",
",",
"commit",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"object_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"instance",
"=",
"self",
".",
"create_one",
"(",... | Create and return ``count`` model instances. If *commit* is ``False``
the instances will not be saved and many to many relations will not be
processed.
May raise ``CreateInstanceError`` if constraints are not satisfied.
The method internally calls :meth:`create_one` to generate instances. | [
"Create",
"and",
"return",
"count",
"model",
"instances",
".",
"If",
"*",
"commit",
"*",
"is",
"False",
"the",
"instances",
"will",
"not",
"be",
"saved",
"and",
"many",
"to",
"many",
"relations",
"will",
"not",
"be",
"processed",
"."
] | python | train |
poldracklab/niworkflows | niworkflows/interfaces/confounds.py | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/confounds.py#L356-L368 | def _check_and_expand_derivative(expr, variables, data):
"""Check if the current operation specifies a temporal derivative. dd6x
specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the
6th only."""
if re.search(r'^dd[0-9]+', expr):
order = re.compile(r'^dd([0-9]+)').findall(expr)
order = range(0, int(*order) + 1)
(variables, data) = temporal_derivatives(order, variables, data)
elif re.search(r'^d[0-9]+[\-]?[0-9]*', expr):
order = re.compile(r'^d([0-9]+[\-]?[0-9]*)').findall(expr)
order = _order_as_range(*order)
(variables, data) = temporal_derivatives(order, variables, data)
return variables, data | [
"def",
"_check_and_expand_derivative",
"(",
"expr",
",",
"variables",
",",
"data",
")",
":",
"if",
"re",
".",
"search",
"(",
"r'^dd[0-9]+'",
",",
"expr",
")",
":",
"order",
"=",
"re",
".",
"compile",
"(",
"r'^dd([0-9]+)'",
")",
".",
"findall",
"(",
"expr... | Check if the current operation specifies a temporal derivative. dd6x
specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the
6th only. | [
"Check",
"if",
"the",
"current",
"operation",
"specifies",
"a",
"temporal",
"derivative",
".",
"dd6x",
"specifies",
"all",
"derivatives",
"up",
"to",
"the",
"6th",
"d5",
"-",
"6x",
"the",
"5th",
"and",
"6th",
"d6x",
"the",
"6th",
"only",
"."
] | python | train |
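The two regexes from `_check_and_expand_derivative` above in isolation, showing which derivative orders each spec selects; the range expansion mirrors what `_order_as_range` is assumed to do:

```python
import re

def parse_orders(expr):
    if re.search(r'^dd[0-9]+', expr):
        (order,) = re.findall(r'^dd([0-9]+)', expr)
        return list(range(0, int(order) + 1))
    if re.search(r'^d[0-9]+[\-]?[0-9]*', expr):
        (order,) = re.findall(r'^d([0-9]+[\-]?[0-9]*)', expr)
        if '-' in order:
            lo, hi = order.split('-')
            return list(range(int(lo), int(hi) + 1))
        return [int(order)]
    return []

assert parse_orders('dd2x') == [0, 1, 2]  # all derivatives up to the 2nd
assert parse_orders('d5-6x') == [5, 6]    # 5th and 6th only
assert parse_orders('d6x') == [6]         # 6th only
```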
ioos/compliance-checker | compliance_checker/cfutil.py | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cfutil.py#L383-L421 | def get_z_variables(nc):
'''
Returns a list of all variables matching definitions for Z
:param netcdf4.dataset nc: an open netcdf dataset object
'''
z_variables = []
# Vertical coordinates will be identifiable by units of pressure or the
# presence of the positive attribute with a value of up/down
# optionally, the vertical type may be indicated by providing the
# standard_name attribute or axis='Z'
total_coords = get_coordinate_variables(nc) + get_auxiliary_coordinate_variables(nc)
for coord_name in total_coords:
if coord_name in z_variables:
continue
coord_var = nc.variables[coord_name]
units = getattr(coord_var, 'units', None)
positive = getattr(coord_var, 'positive', None)
standard_name = getattr(coord_var, 'standard_name', None)
axis = getattr(coord_var, 'axis', None)
# If there are no units, we can't identify it as a vertical coordinate
# by checking pressure or positive
if units is not None:
if units_convertible(units, 'bar'):
z_variables.append(coord_name)
elif isinstance(positive, basestring):
if positive.lower() in ['up', 'down']:
z_variables.append(coord_name)
# if axis='Z' we're good
if coord_name not in z_variables and axis == 'Z':
z_variables.append(coord_name)
if coord_name not in z_variables and standard_name in ('depth', 'height', 'altitude'):
z_variables.append(coord_name)
if coord_name not in z_variables and standard_name in DIMENSIONLESS_VERTICAL_COORDINATES:
z_variables.append(coord_name)
return z_variables | [
"def",
"get_z_variables",
"(",
"nc",
")",
":",
"z_variables",
"=",
"[",
"]",
"# Vertical coordinates will be identifiable by units of pressure or the",
"# presence of the positive attribute with a value of up/down",
"# optionally, the vertical type may be indicated by providing the",
"# st... | Returns a list of all variables matching definitions for Z
:param netcdf4.dataset nc: an open netcdf dataset object | [
"Returns",
"a",
"list",
"of",
"all",
"variables",
"matching",
"definitions",
"for",
"Z"
] | python | train |
klen/adrest | adrest/api.py | https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/api.py#L190-L207 | def call(self, name, request=None, **params):
""" Call resource by ``Api`` name.
:param name: The resource's name (short form)
:param request: django.http.Request instance
:param **params: Params for a resource's call
:return object: Result of resource's execution
"""
if name not in self.resources:
raise exceptions.HttpError(
'Unknown method \'%s\'' % name,
status=status.HTTP_501_NOT_IMPLEMENTED)
request = request or HttpRequest()
resource = self.resources[name]
view = resource.as_view(api=self)
return view(request, **params) | [
"def",
"call",
"(",
"self",
",",
"name",
",",
"request",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"resources",
":",
"raise",
"exceptions",
".",
"HttpError",
"(",
"'Unknown method \\'%s\\''",
"%",
"name",
"... | Call resource by ``Api`` name.
:param name: The resource's name (short form)
:param request: django.http.Request instance
:param **params: Params for a resource's call
:return object: Result of resource's execution | [
"Call",
"resource",
"by",
"Api",
"name",
"."
] | python | train |
bpython/curtsies | curtsies/window.py | https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/window.py#L370-L461 | def render_to_terminal(self, array, cursor_pos=(0, 0)):
"""Renders array to terminal, returns the number of lines scrolled offscreen
Returns:
Number of times scrolled
Args:
array (FSArray): Grid of styled characters to be rendered.
If array received is of width too small, render it anyway
if array received is of width too large, render it anyway
if array received is of height too small, render it anyway
if array received is of height too large, render it, scroll down,
and render the rest of it, then return how much we scrolled down
"""
for_stdout = self.fmtstr_to_stdout_xform()
# caching of write and tc (avoiding the self. lookups etc) made
# no significant performance difference here
if not self.hide_cursor:
self.write(self.t.hide_cursor)
# TODO race condition here?
height, width = self.t.height, self.t.width
if (height != self._last_rendered_height or
width != self._last_rendered_width):
self.on_terminal_size_change(height, width)
current_lines_by_row = {}
rows_for_use = list(range(self.top_usable_row, height))
# rows which we have content for and don't require scrolling
# TODO rename shared
shared = min(len(array), len(rows_for_use))
for row, line in zip(rows_for_use[:shared], array[:shared]):
current_lines_by_row[row] = line
if line == self._last_lines_by_row.get(row, None):
continue
self.write(self.t.move(row, 0))
self.write(for_stdout(line))
if len(line) < width:
self.write(self.t.clear_eol)
# rows already on screen that we don't have content for
rest_of_lines = array[shared:]
rest_of_rows = rows_for_use[shared:]
for row in rest_of_rows: # if array too small
if self._last_lines_by_row and row not in self._last_lines_by_row:
continue
self.write(self.t.move(row, 0))
self.write(self.t.clear_eol)
# TODO probably not necessary - is first char cleared?
self.write(self.t.clear_bol)
current_lines_by_row[row] = None
# lines for which we need to scroll down to render
offscreen_scrolls = 0
for line in rest_of_lines: # if array too big
self.scroll_down()
if self.top_usable_row > 0:
self.top_usable_row -= 1
else:
offscreen_scrolls += 1
current_lines_by_row = dict(
(k - 1, v) for k, v in current_lines_by_row.items()
)
logger.debug('new top_usable_row: %d' % self.top_usable_row)
# since scrolling moves the cursor
self.write(self.t.move(height - 1, 0))
self.write(for_stdout(line))
current_lines_by_row[height - 1] = line
logger.debug(
'lines in last lines by row: %r' % self._last_lines_by_row.keys()
)
logger.debug(
'lines in current lines by row: %r' % current_lines_by_row.keys()
)
self._last_cursor_row = max(
0, cursor_pos[0] - offscreen_scrolls + self.top_usable_row
)
self._last_cursor_column = cursor_pos[1]
self.write(
self.t.move(self._last_cursor_row, self._last_cursor_column)
)
self._last_lines_by_row = current_lines_by_row
if not self.hide_cursor:
self.write(self.t.normal_cursor)
return offscreen_scrolls | [
"def",
"render_to_terminal",
"(",
"self",
",",
"array",
",",
"cursor_pos",
"=",
"(",
"0",
",",
"0",
")",
")",
":",
"for_stdout",
"=",
"self",
".",
"fmtstr_to_stdout_xform",
"(",
")",
"# caching of write and tc (avoiding the self. lookups etc) made",
"# no significant ... | Renders array to terminal, returns the number of lines scrolled offscreen
Returns:
Number of times scrolled
Args:
array (FSArray): Grid of styled characters to be rendered.
If array received is of width too small, render it anyway
if array received is of width too large, render it anyway
if array received is of height too small, render it anyway
if array received is of height too large, render it, scroll down,
and render the rest of it, then return how much we scrolled down | [
"Renders",
"array",
"to",
"terminal",
"returns",
"the",
"number",
"of",
"lines",
"scrolled",
"offscreen"
] | python | train |
zloidemon/aiohttp_jrpc | aiohttp_jrpc/__init__.py | https://github.com/zloidemon/aiohttp_jrpc/blob/f2ced214844041aa6f18b6bf6e5abeef7b47735e/aiohttp_jrpc/__init__.py#L62-L77 | def decode(request):
""" Get/decode/validate json from request """
try:
data = yield from request.json(loader=json.loads)
except Exception as err:
raise ParseError(err)
try:
validate(data, REQ_JSONRPC20)
except ValidationError as err:
raise InvalidRequest(err)
except SchemaError as err:
raise InternalError(err)
except Exception as err:
raise InternalError(err)
return data | [
"def",
"decode",
"(",
"request",
")",
":",
"try",
":",
"data",
"=",
"yield",
"from",
"request",
".",
"json",
"(",
"loader",
"=",
"json",
".",
"loads",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"ParseError",
"(",
"err",
")",
"try",
":",
... | Get/decode/validate json from request | [
"Get",
"/",
"decode",
"/",
"validate",
"json",
"from",
"request"
] | python | test |
googleapis/oauth2client | oauth2client/client.py | https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/client.py#L714-L722 | def _generate_refresh_request_body(self):
"""Generate the body that will be used in the refresh request."""
body = urllib.parse.urlencode({
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
})
return body | [
"def",
"_generate_refresh_request_body",
"(",
"self",
")",
":",
"body",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"{",
"'grant_type'",
":",
"'refresh_token'",
",",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
"."... | Generate the body that will be used in the refresh request. | [
"Generate",
"the",
"body",
"that",
"will",
"be",
"used",
"in",
"the",
"refresh",
"request",
"."
] | python | valid |
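The refresh body from `_generate_refresh_request_body` above, built standalone with placeholder credentials:

```python
import urllib.parse

body = urllib.parse.urlencode({
    'grant_type': 'refresh_token',
    'client_id': 'example-client-id',    # placeholder
    'client_secret': 'example-secret',   # placeholder
    'refresh_token': 'example-token',    # placeholder
})
# -> 'grant_type=refresh_token&client_id=example-client-id&...'
```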
uogbuji/versa | tools/py/util.py | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/util.py#L83-L93 | def replace_values(in_m, out_m, map_from=(), map_to=()):
'''
Make a copy of a model with one value replaced with another
'''
for link in in_m.match():
new_link = list(link)
if map_from:
if link[ORIGIN] in map_from: new_link[ORIGIN] = map_to[map_from.index(link[ORIGIN])]
new_link[ATTRIBUTES] = link[ATTRIBUTES].copy()
out_m.add(*new_link)
return | [
"def",
"replace_values",
"(",
"in_m",
",",
"out_m",
",",
"map_from",
"=",
"(",
")",
",",
"map_to",
"=",
"(",
")",
")",
":",
"for",
"link",
"in",
"in_m",
".",
"match",
"(",
")",
":",
"new_link",
"=",
"list",
"(",
"link",
")",
"if",
"map_from",
":"... | Make a copy of a model with one value replaced with another | [
"Make",
"a",
"copy",
"of",
"a",
"model",
"with",
"one",
"value",
"replaced",
"with",
"another"
] | python | train |
mdavidsaver/p4p | src/p4p/rpc.py | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/rpc.py#L71-L86 | def rpccall(pvname, request=None, rtype=None):
"""Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formated using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must have all keyword arguments,
where the keywords are type code strings or :class:`~p4p.Type`.
"""
def wrapper(fn):
fn._call_PV = pvname
fn._call_Request = request
fn._reply_Type = rtype
return fn
return wrapper | [
"def",
"rpccall",
"(",
"pvname",
",",
"request",
"=",
"None",
",",
"rtype",
"=",
"None",
")",
":",
"def",
"wrapper",
"(",
"fn",
")",
":",
"fn",
".",
"_call_PV",
"=",
"pvname",
"fn",
".",
"_call_Request",
"=",
"request",
"fn",
".",
"_reply_Type",
"=",... | Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formatted using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must have all keyword arguments,
where the keywords are type code strings or :class:`~p4p.Type`. | [
"Decorator",
"marks",
"a",
"client",
"proxy",
"method",
"."
] | python | train |
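What the `rpccall` decorator attaches, checked standalone; the proxy class and PV name here are made up for illustration:

```python
def rpccall(pvname, request=None, rtype=None):
    def wrapper(fn):
        fn._call_PV = pvname
        fn._call_Request = request
        fn._reply_Type = rtype
        return fn
    return wrapper

class MathProxy(object):  # hypothetical client proxy
    @rpccall('{format}:add')
    def add(self, lhs='d', rhs='d'):
        pass

assert MathProxy.add._call_PV == '{format}:add'
assert MathProxy.add._call_Request is None
```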
bitesofcode/projexui | projexui/widgets/xganttwidget/xganttwidgetitem.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidgetitem.py#L138-L170 | def adjustRange(self, recursive=True):
"""
Adjust the start and end ranges for this item based on the limits from
its children. This method will only apply to group items.
:param recursive | <bool>
"""
if ( self.adjustmentsBlocked('range') ):
return
if ( self.itemStyle() == self.ItemStyle.Group ):
dateStart = self.dateStart()
dateEnd = self.dateEnd()
first = True
for c in range(self.childCount()):
child = self.child(c)
if ( first ):
dateStart = child.dateStart()
dateEnd = child.dateEnd()
first = False
else:
dateStart = min(child.dateStart(), dateStart)
dateEnd = max(child.dateEnd(), dateEnd)
self._dateStart = dateStart
self._dateEnd = dateEnd
self.sync()
if ( self.parent() and recursive ):
self.parent().adjustRange(True) | [
"def",
"adjustRange",
"(",
"self",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"(",
"self",
".",
"adjustmentsBlocked",
"(",
"'range'",
")",
")",
":",
"return",
"if",
"(",
"self",
".",
"itemStyle",
"(",
")",
"==",
"self",
".",
"ItemStyle",
".",
"Gr... | Adjust the start and end ranges for this item based on the limits from
its children. This method will only apply to group items.
:param recursive | <bool> | [
"Adjust",
"the",
"start",
"and",
"end",
"ranges",
"for",
"this",
"item",
"based",
"on",
"the",
"limits",
"from",
"its",
"children",
".",
"This",
"method",
"will",
"only",
"apply",
"to",
"group",
"items",
".",
":",
"param",
"recursive",
"|",
"<bool",
">"
... | python | train |
istresearch/scrapy-cluster | crawler/crawling/distributed_scheduler.py | https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L151-L172 | def update_domain_queues(self):
'''
Check to update existing queues already in memory
new queues are created elsewhere
'''
for key in self.domain_config:
final_key = "{name}:{domain}:queue".format(
name=self.spider.name,
domain=key)
# we already have a throttled queue for this domain, update it to new settings
if final_key in self.queue_dict:
self.queue_dict[final_key][0].window = float(self.domain_config[key]['window'])
self.logger.debug("Updated queue {q} with new config"
.format(q=final_key))
# if scale is applied, scale back; otherwise use updated hits
if 'scale' in self.domain_config[key]:
# round to int
hits = int(self.domain_config[key]['hits'] * self.fit_scale(
self.domain_config[key]['scale']))
self.queue_dict[final_key][0].limit = float(hits)
else:
self.queue_dict[final_key][0].limit = float(self.domain_config[key]['hits']) | [
"def",
"update_domain_queues",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"domain_config",
":",
"final_key",
"=",
"\"{name}:{domain}:queue\"",
".",
"format",
"(",
"name",
"=",
"self",
".",
"spider",
".",
"name",
",",
"domain",
"=",
"key",
")",... | Check to update existing queues already in memory
new queues are created elsewhere | [
"Check",
"to",
"update",
"existing",
"queues",
"already",
"in",
"memory",
"new",
"queues",
"are",
"created",
"elsewhere"
] | python | train |
CZ-NIC/yangson | yangson/instance.py | https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/instance.py#L213-L235 | def put_member(self, name: InstanceName, value: Value,
raw: bool = False) -> "InstanceNode":
"""Return receiver's member with a new value.
If the member is permitted by the schema but doesn't exist, it
is created.
Args:
name: Instance name of the member.
value: New value of the member.
raw: Flag to be set if `value` is raw.
Raises:
NonexistentSchemaNode: If member `name` is not permitted by the
schema.
InstanceValueError: If the receiver's value is not an object.
"""
if not isinstance(self.value, ObjectValue):
raise InstanceValueError(self.json_pointer(), "member of non-object")
csn = self._member_schema_node(name)
newval = self.value.copy()
newval[name] = csn.from_raw(value, self.json_pointer()) if raw else value
return self._copy(newval)._member(name) | [
"def",
"put_member",
"(",
"self",
",",
"name",
":",
"InstanceName",
",",
"value",
":",
"Value",
",",
"raw",
":",
"bool",
"=",
"False",
")",
"->",
"\"InstanceNode\"",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"value",
",",
"ObjectValue",
")",
":... | Return receiver's member with a new value.
If the member is permitted by the schema but doesn't exist, it
is created.
Args:
name: Instance name of the member.
value: New value of the member.
raw: Flag to be set if `value` is raw.
Raises:
NonexistentSchemaNode: If member `name` is not permitted by the
schema.
InstanceValueError: If the receiver's value is not an object. | [
"Return",
"receiver",
"s",
"member",
"with",
"a",
"new",
"value",
"."
] | python | train |
stefanbraun-private/visitoolkit_eventsystem | visitoolkit_eventsystem/eventsystem.py | https://github.com/stefanbraun-private/visitoolkit_eventsystem/blob/d606a167ac3b8edc5bfbd8de7fb7063a9a1922f1/visitoolkit_eventsystem/eventsystem.py#L136-L143 | def unhandle(self, handler):
""" unregister handler (removing callback function) """
with self._hlock:
try:
self._handler_list.remove(handler)
except ValueError:
raise ValueError("Handler is not handling this event, so cannot unhandle it.")
return self | [
"def",
"unhandle",
"(",
"self",
",",
"handler",
")",
":",
"with",
"self",
".",
"_hlock",
":",
"try",
":",
"self",
".",
"_handler_list",
".",
"remove",
"(",
"handler",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Handler is not handling th... | unregister handler (removing callback function) | [
"unregister",
"handler",
"(",
"removing",
"callback",
"function",
")"
] | python | train |
CxAalto/gtfspy | gtfspy/routing/node_profile_multiobjective.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L234-L258 | def finalize(self, neighbor_label_bags=None, walk_durations=None, departure_arrival_stop_pairs=None):
"""
Parameters
----------
neighbor_label_bags: list
each list element is a list of labels corresponding to a neighboring node
(note: only labels with first connection being a departure should be included)
walk_durations: list
departure_arrival_stop_pairs: list of tuples
Returns
-------
None
"""
assert (not self._finalized)
if self._final_pareto_optimal_labels is None:
self._compute_real_connection_labels()
if neighbor_label_bags is not None:
assert (len(walk_durations) == len(neighbor_label_bags))
self._compute_final_pareto_optimal_labels(neighbor_label_bags,
walk_durations,
departure_arrival_stop_pairs)
else:
self._final_pareto_optimal_labels = self._real_connection_labels
self._finalized = True
self._closed = True | [
"def",
"finalize",
"(",
"self",
",",
"neighbor_label_bags",
"=",
"None",
",",
"walk_durations",
"=",
"None",
",",
"departure_arrival_stop_pairs",
"=",
"None",
")",
":",
"assert",
"(",
"not",
"self",
".",
"_finalized",
")",
"if",
"self",
".",
"_final_pareto_opt... | Parameters
----------
neighbor_label_bags: list
each list element is a list of labels corresponding to a neighboring node
(note: only labels with first connection being a departure should be included)
walk_durations: list
departure_arrival_stop_pairs: list of tuples
Returns
-------
None | [
"Parameters",
"----------",
"neighbor_label_bags",
":",
"list",
"each",
"list",
"element",
"is",
"a",
"list",
"of",
"labels",
"corresponding",
"to",
"a",
"neighboring",
"node",
"(",
"note",
":",
"only",
"labels",
"with",
"first",
"connection",
"being",
"a",
"d... | python | valid |
ottogroup/palladium | palladium/eval.py | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/eval.py#L82-L97 | def list_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
"""
docopt(list_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
list() | [
"def",
"list_cmd",
"(",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
":",
"# pragma: no cover",
"docopt",
"(",
"list_cmd",
".",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"initialize_config",
"(",
"__mode__",
"=",
"'fit'",
")",
"list",
"(",... | \
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen. | [
"\\",
"List",
"information",
"about",
"available",
"models",
"."
] | python | train |
androguard/androguard | androguard/decompiler/dad/dataflow.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/decompiler/dad/dataflow.py#L161-L186 | def clear_path(graph, reg, loc1, loc2):
"""
Check that the path from loc1 to loc2 is clear.
We have to check that there is no side effect between the two location
points. We also have to check that the variable `reg` is not redefined
along one of the possible paths from loc1 to loc2.
"""
logger.debug('clear_path: reg(%s), loc1(%s), loc2(%s)', reg, loc1, loc2)
node1 = graph.get_node_from_loc(loc1)
node2 = graph.get_node_from_loc(loc2)
# If both instructions are in the same node, we only have to check that the
# path is clear inside the node
if node1 is node2:
return clear_path_node(graph, reg, loc1 + 1, loc2)
# If instructions are in different nodes, we also have to check the nodes
# in the path between the two locations.
if not clear_path_node(graph, reg, loc1 + 1, node1.ins_range[1]):
return False
path = build_path(graph, node1, node2)
for node in path:
locs = node.ins_range
end_loc = loc2 if (locs[0] <= loc2 <= locs[1]) else locs[1]
if not clear_path_node(graph, reg, locs[0], end_loc):
return False
return True | [
"def",
"clear_path",
"(",
"graph",
",",
"reg",
",",
"loc1",
",",
"loc2",
")",
":",
"logger",
".",
"debug",
"(",
"'clear_path: reg(%s), loc1(%s), loc2(%s)'",
",",
"reg",
",",
"loc1",
",",
"loc2",
")",
"node1",
"=",
"graph",
".",
"get_node_from_loc",
"(",
"l... | Check that the path from loc1 to loc2 is clear.
We have to check that there is no side effect between the two location
points. We also have to check that the variable `reg` is not redefined
along one of the possible paths from loc1 to loc2. | [
"Check",
"that",
"the",
"path",
"from",
"loc1",
"to",
"loc2",
"is",
"clear",
".",
"We",
"have",
"to",
"check",
"that",
"there",
"is",
"no",
"side",
"effect",
"between",
"the",
"two",
"location",
"points",
".",
"We",
"also",
"have",
"to",
"check",
"that... | python | train |
peshay/tpm | tpm.py | https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L404-L409 | def unlock_password(self, ID, reason):
"""Unlock a password."""
# http://teampasswordmanager.com/docs/api-passwords/#unlock_password
log.info('Unlock password %s, Reason: %s' % (ID, reason))
self.unlock_reason = reason
self.put('passwords/%s/unlock.json' % ID) | [
"def",
"unlock_password",
"(",
"self",
",",
"ID",
",",
"reason",
")",
":",
"# http://teampasswordmanager.com/docs/api-passwords/#unlock_password",
"log",
".",
"info",
"(",
"'Unlock password %s, Reason: %s'",
"%",
"(",
"ID",
",",
"reason",
")",
")",
"self",
".",
"unl... | Unlock a password. | [
"Unlock",
"a",
"password",
"."
] | python | train |
praekeltfoundation/marathon-acme | marathon_acme/clients/_base.py | https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/clients/_base.py#L12-L27 | def get_single_header(headers, key):
"""
Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key
"""
raw_headers = headers.getRawHeaders(key)
if raw_headers is None:
return None
# Take the final header as the authoritative
header, _ = cgi.parse_header(raw_headers[-1])
return header | [
"def",
"get_single_header",
"(",
"headers",
",",
"key",
")",
":",
"raw_headers",
"=",
"headers",
".",
"getRawHeaders",
"(",
"key",
")",
"if",
"raw_headers",
"is",
"None",
":",
"return",
"None",
"# Take the final header as the authorative",
"header",
",",
"_",
"=... | Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key | [
"Get",
"a",
"single",
"value",
"for",
"the",
"given",
"key",
"out",
"of",
"the",
"given",
"set",
"of",
"headers",
"."
] | python | valid |
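The same last-value-wins logic exercised with a stand-in for Twisted's `Headers`; the parameter-stripping that `cgi.parse_header` performs is approximated with a plain split here (the `cgi` module is deprecated and removed in Python 3.13):

```python
class FakeHeaders(object):  # stand-in for twisted.web.http_headers.Headers
    def __init__(self, raw):
        self._raw = raw

    def getRawHeaders(self, key):
        return self._raw.get(key)

def get_single_header(headers, key):
    raw_headers = headers.getRawHeaders(key)
    if raw_headers is None:
        return None
    # keep the value of the final header, dropping parameters like charset
    return raw_headers[-1].split(';')[0].strip()

h = FakeHeaders({'content-type': ['text/plain', 'application/json; charset=utf-8']})
assert get_single_header(h, 'content-type') == 'application/json'
assert get_single_header(h, 'x-missing') is None
```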
saltstack/salt | salt/states/neutron_network.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/neutron_network.py#L130-L161 | def absent(name, auth=None, **kwargs):
'''
Ensure a network does not exist
name
Name of the network
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['neutronng.setup_clouds'](auth)
kwargs['name'] = name
network = __salt__['neutronng.network_get'](name=name)
if network:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = {'id': network.id}
ret['comment'] = 'Network will be deleted.'
return ret
__salt__['neutronng.network_delete'](name=network)
ret['changes']['id'] = network.id
ret['comment'] = 'Deleted network'
return ret | [
"def",
"absent",
"(",
"name",
",",
"auth",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"kwargs",
"=",
"... | Ensure a network does not exists
name
Name of the network | [
"Ensure",
"a",
"network",
"does",
"not",
"exists"
] | python | train |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L991-L1018 | def min_chans(self, min_chans):
"""
Remove detections with fewer channels used than min_chans
:type min_chans: int
:param min_chans: Minimum number of channels to allow a detection.
:return: Party
.. Note:: Works in place on Party.
.. rubric:: Example
>>> party = Party().read()
>>> print(len(party))
4
>>> party = party.min_chans(5)
>>> print(len(party))
1
"""
declustered = Party()
for family in self.families:
fam = Family(family.template)
for d in family.detections:
if d.no_chans > min_chans:
fam.detections.append(d)
declustered.families.append(fam)
self.families = declustered.families
return self | [
"def",
"min_chans",
"(",
"self",
",",
"min_chans",
")",
":",
"declustered",
"=",
"Party",
"(",
")",
"for",
"family",
"in",
"self",
".",
"families",
":",
"fam",
"=",
"Family",
"(",
"family",
".",
"template",
")",
"for",
"d",
"in",
"family",
".",
"dete... | Remove detections with fewer channels used than min_chans
:type min_chans: int
:param min_chans: Minimum number of channels to allow a detection.
:return: Party
.. Note:: Works in place on Party.
.. rubric:: Example
>>> party = Party().read()
>>> print(len(party))
4
>>> party = party.min_chans(5)
>>> print(len(party))
1 | [
"Remove",
"detections",
"with",
"fewer",
"channels",
"used",
"than",
"min_chans"
] | python | train |
six8/anticipate | src/anticipate/adapt.py | https://github.com/six8/anticipate/blob/5c0651f9829ba0140e7cf185505da6109ef1f55c/src/anticipate/adapt.py#L111-L127 | def register_adapter(from_classes, to_classes, func):
"""
Register a function that can handle adapting from `from_classes` to `to_classes`.
"""
assert from_classes, 'Must supply classes to adapt from'
assert to_classes, 'Must supply classes to adapt to'
assert func, 'Must supply adapter function'
if not isinstance(from_classes, (tuple, list)):
from_classes = [from_classes]
if not isinstance(to_classes, (tuple, list)):
to_classes = [to_classes]
for key in itertools.product(from_classes, to_classes):
if key in __adapters__:
raise AdapterExists('%r to %r already exists.' % key)
__adapters__[key] = func | [
"def",
"register_adapter",
"(",
"from_classes",
",",
"to_classes",
",",
"func",
")",
":",
"assert",
"from_classes",
",",
"'Must supply classes to adapt from'",
"assert",
"to_classes",
",",
"'Must supply classes to adapt to'",
"assert",
"func",
",",
"'Must supply adapter fun... | Register a function that can handle adapting from `from_classes` to `to_classes`. | [
"Register",
"a",
"function",
"that",
"can",
"handle",
"adapting",
"from",
"from_classes",
"to",
"to_classes",
"."
] | python | train |
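The cross-product registration from `register_adapter` above, runnable standalone:

```python
import itertools

__adapters__ = {}

class AdapterExists(Exception):
    pass

def register_adapter(from_classes, to_classes, func):
    if not isinstance(from_classes, (tuple, list)):
        from_classes = [from_classes]
    if not isinstance(to_classes, (tuple, list)):
        to_classes = [to_classes]
    # one registry entry per (from, to) pair; duplicates are rejected
    for key in itertools.product(from_classes, to_classes):
        if key in __adapters__:
            raise AdapterExists('%r to %r already exists.' % key)
        __adapters__[key] = func

register_adapter(int, (float, complex), lambda v: v * 1.0)
assert (int, float) in __adapters__ and (int, complex) in __adapters__
```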
dedupeio/dedupe | dedupe/api.py | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L190-L223 | def match(self, data, threshold=0.5, generator=False): # pragma: no cover
"""Identifies records that all refer to the same entity, returns
tuples
containing a set of record ids and a confidence score as a
float between 0 and 1. The record_ids within each set should
refer to the same entity and the confidence score is a measure
of our confidence that all the records in a cluster refer to
the same entity.
This method should only be used for small to moderately sized
datasets; for larger data, use matchBlocks
Arguments:
data -- Dictionary of records, where the keys are record_ids
and the values are dictionaries with the keys being
field names
threshold -- Number between 0 and 1 (default is .5). We will
consider records as potential duplicates if the
predicted probability of being a duplicate is
above the threshold.
Lowering the number will increase recall,
raising it will increase precision
"""
blocked_pairs = self._blockData(data)
clusters = self.matchBlocks(blocked_pairs, threshold)
if generator:
return clusters
else:
return list(clusters) | [
"def",
"match",
"(",
"self",
",",
"data",
",",
"threshold",
"=",
"0.5",
",",
"generator",
"=",
"False",
")",
":",
"# pragma: no cover",
"blocked_pairs",
"=",
"self",
".",
"_blockData",
"(",
"data",
")",
"clusters",
"=",
"self",
".",
"matchBlocks",
"(",
"... | Identifies records that all refer to the same entity, returns
tuples
containing a set of record ids and a confidence score as a
float between 0 and 1. The record_ids within each set should
refer to the same entity and the confidence score is a measure
of our confidence that all the records in a cluster refer to
the same entity.
This method should only be used for small to moderately sized
datasets; for larger data, use matchBlocks
Arguments:
data -- Dictionary of records, where the keys are record_ids
and the values are dictionaries with the keys being
field names
threshold -- Number between 0 and 1 (default is .5). We will
consider records as potential duplicates if the
predicted probability of being a duplicate is
above the threshold.
Lowering the number will increase recall,
raising it will increase precision | [
"Identifies",
"records",
"that",
"all",
"refer",
"to",
"the",
"same",
"entity",
"returns",
"tuples"
] | python | train |
openstax/cnx-archive | cnxarchive/database.py | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L201-L228 | def get_collections(module_ident, plpy):
"""Get all the collections that the module is part of."""
# Make sure to only return one match per collection and only if it is the
# latest collection (which may not be the same as what is in
# latest_modules)
plan = plpy.prepare('''
WITH RECURSIVE t(node, parent, path, document) AS (
SELECT tr.nodeid, tr.parent_id, ARRAY[tr.nodeid], tr.documentid
FROM trees tr
WHERE tr.documentid = $1 and tr.is_collated = 'False'
UNION ALL
SELECT c.nodeid, c.parent_id, path || ARRAY[c.nodeid], c.documentid
FROM trees c JOIN t ON (c.nodeid = t.parent)
WHERE not c.nodeid = ANY(t.path)
),
latest(module_ident) AS (
SELECT module_ident FROM (
SELECT m.module_ident, m.revised,
MAX(m.revised) OVER (PARTITION BY m.uuid) as latest
FROM modules m where m.portal_type = 'Collection'
) r
WHERE r.revised = r.latest
)
SELECT module_ident FROM t, latest
WHERE latest.module_ident = t.document
''', ('integer',))
for i in plpy.execute(plan, (module_ident,)):
yield i['module_ident'] | [
"def",
"get_collections",
"(",
"module_ident",
",",
"plpy",
")",
":",
"# Make sure to only return one match per collection and only if it is the",
"# latest collection (which may not be the same as what is in",
"# latest_modules)",
"plan",
"=",
"plpy",
".",
"prepare",
"(",
"'''\nWI... | Get all the collections that the module is part of. | [
"Get",
"all",
"the",
"collections",
"that",
"the",
"module",
"is",
"part",
"of",
"."
] | python | train |
scanny/python-pptx | pptx/dml/line.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/dml/line.py#L26-L37 | def color(self):
"""
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
"""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color | [
"def",
"color",
"(",
"self",
")",
":",
"if",
"self",
".",
"fill",
".",
"type",
"!=",
"MSO_FILL",
".",
"SOLID",
":",
"self",
".",
"fill",
".",
"solid",
"(",
")",
"return",
"self",
".",
"fill",
".",
"fore_color"
] | The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type. | [
"The",
"|ColorFormat|",
"instance",
"that",
"provides",
"access",
"to",
"the",
"color",
"settings",
"for",
"this",
"line",
".",
"Essentially",
"a",
"shortcut",
"for",
"line",
".",
"fill",
".",
"fore_color",
".",
"As",
"a",
"side",
"-",
"effect",
"accessing",... | python | train |
DistrictDataLabs/yellowbrick | yellowbrick/classifier/rocauc.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/classifier/rocauc.py#L374-L392 | def _score_macro_average(self, n_classes):
"""
Compute the macro average scores for the ROCAUC curves.
"""
# Gather all FPRs
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
# Compute the averages per class
for i in range(n_classes):
avg_tpr += interp(all_fpr, self.fpr[i], self.tpr[i])
# Finalize the average
avg_tpr /= n_classes
# Store the macro averages
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO]) | [
"def",
"_score_macro_average",
"(",
"self",
",",
"n_classes",
")",
":",
"# Gather all FPRs",
"all_fpr",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"[",
"self",
".",
"fpr",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"n_classes",
"... | Compute the macro average scores for the ROCAUC curves. | [
"Compute",
"the",
"macro",
"average",
"scores",
"for",
"the",
"ROCAUC",
"curves",
"."
] | python | train |
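The macro-averaging step from `_score_macro_average` above on two toy per-class curves; assumes NumPy and scikit-learn's `auc`, with `interp` taken to be NumPy's `interp`:

```python
import numpy as np
from sklearn.metrics import auc

fpr = {0: np.array([0.0, 0.2, 1.0]), 1: np.array([0.0, 0.5, 1.0])}
tpr = {0: np.array([0.0, 0.8, 1.0]), 1: np.array([0.0, 0.6, 1.0])}
n_classes = 2

all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    avg_tpr += np.interp(all_fpr, fpr[i], tpr[i])
avg_tpr /= n_classes  # average TPR over classes on the union grid of FPR points
macro_auc = auc(all_fpr, avg_tpr)
```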
agoragames/leaderboard-python | leaderboard/leaderboard.py | https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L298-L309 | def update_member_data_in(self, leaderboard_name, member, member_data):
'''
Update the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@param member_data [String] Optional member data.
'''
self.redis_connection.hset(
self._member_data_key(leaderboard_name),
member,
member_data) | [
"def",
"update_member_data_in",
"(",
"self",
",",
"leaderboard_name",
",",
"member",
",",
"member_data",
")",
":",
"self",
".",
"redis_connection",
".",
"hset",
"(",
"self",
".",
"_member_data_key",
"(",
"leaderboard_name",
")",
",",
"member",
",",
"member_data"... | Update the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@param member_data [String] Optional member data. | [
"Update",
"the",
"optional",
"member",
"data",
"for",
"a",
"given",
"member",
"in",
"the",
"named",
"leaderboard",
"."
] | python | train |
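The equivalent call against a raw redis client, as a sketch; the hash key layout mirrors the `_member_data_key` idea, but the exact key name is an assumption, and a local Redis server is assumed to be running.

import redis

r = redis.Redis()   # default host/port assumed
r.hset("leaderboard:highscores:member_data", "alice", '{"team": "red"}')
print(r.hget("leaderboard:highscores:member_data", "alice"))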
senaite/senaite.core | bika/lims/content/batch.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/batch.py#L262-L269 | def Title(self):
"""Return the Batch ID if title is not defined
"""
titlefield = self.Schema().getField('title')
if titlefield.widget.visible:
return safe_unicode(self.title).encode('utf-8')
else:
return safe_unicode(self.id).encode('utf-8') | [
"def",
"Title",
"(",
"self",
")",
":",
"titlefield",
"=",
"self",
".",
"Schema",
"(",
")",
".",
"getField",
"(",
"'title'",
")",
"if",
"titlefield",
".",
"widget",
".",
"visible",
":",
"return",
"safe_unicode",
"(",
"self",
".",
"title",
")",
".",
"e... | Return the Batch ID if title is not defined | [
"Return",
"the",
"Batch",
"ID",
"if",
"title",
"is",
"not",
"defined"
] | python | train |
tswicegood/Dolt | dolt/__init__.py | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L203-L220 | def with_headers(self, headers=None, **params):
"""
Add headers to the request.
:param headers: A dict, or a list of key, value pairs
:param params: A dict of key value pairs
"""
if isinstance(headers, (tuple, list)):
headers = dict(headers)
if params:
if isinstance(headers, dict):
headers.update(params)
elif headers is None:
headers = params
self._headers.update(headers)
return self | [
"def",
"with_headers",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"isinstance",
"(",
"headers",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"headers",
"=",
"dict",
"(",
"headers",
")",
"if",
"params",
":",... | Add headers to the request.
:param headers: A dict, or a list of key, value pairs
:param params: A dict of key value pairs | [
"Add",
"headers",
"to",
"the",
"request",
".",
":",
"param",
"headers",
":",
"A",
"dict",
"or",
"a",
"list",
"of",
"key",
"value",
"pairs",
":",
"param",
"params",
":",
"A",
"dict",
"of",
"key",
"value",
"pairs"
] | python | train |
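The merging rules are easiest to see in isolation; this toy reimplementation (not the library's API) shows that keyword params are layered over the positional headers.

def merge_headers(headers=None, **params):
    if isinstance(headers, (tuple, list)):
        headers = dict(headers)          # key/value pairs become a dict
    if params:
        if isinstance(headers, dict):
            headers.update(params)       # keyword params win on conflict
        elif headers is None:
            headers = params
    return headers

print(merge_headers([("Accept", "text/html")], Accept="application/json"))
# {'Accept': 'application/json'}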
yatiml/yatiml | yatiml/helpers.py | https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/helpers.py#L198-L220 | def get_attribute(self, attribute: str) -> 'Node':
"""Returns the node representing the given attribute's value.
Use only if is_mapping() returns true.
Args:
attribute: The name of the attribute to retrieve.
Raises:
SeasoningError: If the attribute is missing or found multiple times.
Returns:
A node representing the value.
"""
matches = [
value_node for key_node, value_node in self.yaml_node.value
if key_node.value == attribute
]
if len(matches) != 1:
raise SeasoningError(
'Attribute not found, or found multiple times: {}'.format(
matches))
return Node(matches[0]) | [
"def",
"get_attribute",
"(",
"self",
",",
"attribute",
":",
"str",
")",
"->",
"'Node'",
":",
"matches",
"=",
"[",
"value_node",
"for",
"key_node",
",",
"value_node",
"in",
"self",
".",
"yaml_node",
".",
"value",
"if",
"key_node",
".",
"value",
"==",
"att... | Returns the node representing the given attribute's value.
Use only if is_mapping() returns true.
Args:
attribute: The name of the attribute to retrieve.
Raises:
SeasoningError: If the attribute is missing or found multiple times.
Returns:
A node representing the value. | [
"Returns",
"the",
"node",
"representing",
"the",
"given",
"attribute",
"s",
"value",
"."
] | python | train |
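The key scan above also works on plain YAML composer nodes; a sketch using PyYAML's compose for brevity (yatiml itself wraps ruamel.yaml nodes, which expose the same .value structure of (key_node, value_node) pairs).

import yaml  # PyYAML

doc = yaml.compose("a: 1\nb: 2\n")                    # root MappingNode
matches = [v for k, v in doc.value if k.value == 'b']
print(matches[0].value)                               # '2'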
ethereum/eth-account | eth_account/account.py | https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/account.py#L95-L134 | def decrypt(keyfile_json, password):
'''
Decrypts a private key that was encrypted using an Ethereum client or
:meth:`~Account.encrypt`.
:param keyfile_json: The encrypted key
:type keyfile_json: dict or str
:param str password: The password that was used to encrypt the key
:returns: the raw private key
:rtype: ~hexbytes.main.HexBytes
.. code-block:: python
>>> encrypted = {
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {'cipher': 'aes-128-ctr',
'cipherparams': {'iv': '78f214584844e0b241b433d7c3bb8d5f'},
'ciphertext': 'd6dbb56e4f54ba6db2e8dc14df17cb7352fdce03681dd3f90ce4b6c1d5af2c4f',
'kdf': 'pbkdf2',
'kdfparams': {'c': 1000000,
'dklen': 32,
'prf': 'hmac-sha256',
'salt': '45cf943b4de2c05c2c440ef96af914a2'},
'mac': 'f5e1af09df5ded25c96fcf075ada313fb6f79735a914adc8cb02e8ddee7813c3'},
'id': 'b812f3f9-78cc-462a-9e89-74418aa27cb0',
'version': 3}
>>> import getpass
>>> Account.decrypt(encrypted, getpass.getpass())
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364')
'''
if isinstance(keyfile_json, str):
keyfile = json.loads(keyfile_json)
elif is_dict(keyfile_json):
keyfile = keyfile_json
else:
raise TypeError("The keyfile should be supplied as a JSON string, or a dictionary.")
password_bytes = text_if_str(to_bytes, password)
return HexBytes(decode_keyfile_json(keyfile, password_bytes)) | [
"def",
"decrypt",
"(",
"keyfile_json",
",",
"password",
")",
":",
"if",
"isinstance",
"(",
"keyfile_json",
",",
"str",
")",
":",
"keyfile",
"=",
"json",
".",
"loads",
"(",
"keyfile_json",
")",
"elif",
"is_dict",
"(",
"keyfile_json",
")",
":",
"keyfile",
... | Decrypts a private key that was encrypted using an Ethereum client or
:meth:`~Account.encrypt`.
:param keyfile_json: The encrypted key
:type keyfile_json: dict or str
:param str password: The password that was used to encrypt the key
:returns: the raw private key
:rtype: ~hexbytes.main.HexBytes
.. code-block:: python
>>> encrypted = {
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {'cipher': 'aes-128-ctr',
'cipherparams': {'iv': '78f214584844e0b241b433d7c3bb8d5f'},
'ciphertext': 'd6dbb56e4f54ba6db2e8dc14df17cb7352fdce03681dd3f90ce4b6c1d5af2c4f',
'kdf': 'pbkdf2',
'kdfparams': {'c': 1000000,
'dklen': 32,
'prf': 'hmac-sha256',
'salt': '45cf943b4de2c05c2c440ef96af914a2'},
'mac': 'f5e1af09df5ded25c96fcf075ada313fb6f79735a914adc8cb02e8ddee7813c3'},
'id': 'b812f3f9-78cc-462a-9e89-74418aa27cb0',
'version': 3}
>>> import getpass
>>> Account.decrypt(encrypted, getpass.getpass())
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364') | [
"Decrypts",
"a",
"private",
"key",
"that",
"was",
"encrypted",
"using",
"an",
"Ethereum",
"client",
"or",
":",
"meth",
":",
"~Account",
".",
"encrypt",
"."
] | python | train |
pcattori/deep-blue-talks | kasparobot/__init__.py | https://github.com/pcattori/deep-blue-talks/blob/7af7c740e8ec03dd30f1291ecf174078890eec89/kasparobot/__init__.py#L18-L27 | def controls(self, move):
'''Returns a set of attacked/defended squares'''
to_move = self.board.turn
analysis_board = chess.Board(self.board.fen())
analysis_board.push(move)
squares = 0
for square in chess.SQUARES:
if move.to_square in analysis_board.attackers(to_move, square):
squares |= chess.BB_SQUARES[square]
return SquareSet(squares) | [
"def",
"controls",
"(",
"self",
",",
"move",
")",
":",
"to_move",
"=",
"self",
".",
"board",
".",
"turn",
"analysis_board",
"=",
"chess",
".",
"Board",
"(",
"self",
".",
"board",
".",
"fen",
"(",
")",
")",
"analysis_board",
".",
"push",
"(",
"move",
... | Returns a set of attacked/defended squares | [
"Returns",
"a",
"set",
"of",
"attacked",
"/",
"defended",
"squares"
] | python | train |
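A hedged usage sketch with python-chess (the `chess` package this module imports): after 1.Nf3 from the start position, the knight's controlled squares can also be read with Board.attacks, a shortcut for the attackers loop above.

import chess

board = chess.Board()
move = chess.Move.from_uci("g1f3")
analysis = chess.Board(board.fen())       # analyze on a copy, as above
analysis.push(move)
print(analysis.attacks(move.to_square))   # SquareSet attacked from f3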
Gandi/gandi.cli | gandi/cli/core/client.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/client.py#L61-L83 | def request(self, method, apikey, *args, **kwargs):
""" Make a xml-rpc call to remote API. """
dry_run = kwargs.get('dry_run', False)
return_dry_run = kwargs.get('return_dry_run', False)
if return_dry_run:
args[-1]['--dry-run'] = True
try:
func = getattr(self.endpoint, method)
return func(apikey, *args)
except (socket.error, requests.exceptions.ConnectionError):
msg = 'Gandi API service is unreachable'
raise APICallFailed(msg)
except xmlrpclib.Fault as err:
msg = 'Gandi API has returned an error: %s' % err
if dry_run:
args[-1]['--dry-run'] = True
ret = func(apikey, *args)
raise DryRunException(msg, err.faultCode, ret)
raise APICallFailed(msg, err.faultCode)
except TypeError as err:
msg = 'An unknown error has occurred: %s' % err
raise APICallFailed(msg) | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"apikey",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dry_run",
"=",
"kwargs",
".",
"get",
"(",
"'dry_run'",
",",
"False",
")",
"return_dry_run",
"=",
"kwargs",
".",
"get",
"(",
"'return_dr... | Make a xml-rpc call to remote API. | [
"Make",
"a",
"xml",
"-",
"rpc",
"call",
"to",
"remote",
"API",
"."
] | python | train |
emc-openstack/storops | storops/unity/resource/remote_system.py | https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/remote_system.py#L59-L75 | def modify(self, management_address=None, username=None, password=None,
connection_type=None):
"""
Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(
managementAddress=management_address, username=username,
password=password, connectionType=connection_type)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | [
"def",
"modify",
"(",
"self",
",",
"management_address",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"connection_type",
"=",
"None",
")",
":",
"req_body",
"=",
"self",
".",
"_cli",
".",
"make_body",
"(",
"managementAddress... | Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method. | [
"Modifies",
"a",
"remote",
"system",
"for",
"remote",
"replication",
"."
] | python | train |
Erotemic/utool | utool/_internal/util_importer.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L34-L44 | def __execute_fromimport(module, modname, import_tuples, verbose=False):
""" Module From Imports """
if verbose:
print('[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES' % (len(import_tuples),))
from_imports = __get_from_imports(import_tuples)
for name, fromlist in from_imports:
full_modname = '.'.join((modname, name))
tmp = __import__(full_modname, globals(), locals(), fromlist=fromlist, level=0)
for attrname in fromlist:
setattr(module, attrname, getattr(tmp, attrname))
return from_imports | [
"def",
"__execute_fromimport",
"(",
"module",
",",
"modname",
",",
"import_tuples",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES'",
"%",
"(",
"len",
"(",
"import_tuples",
")",
",",
")... | Module From Imports | [
"Module",
"From",
"Imports"
] | python | train |
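What the fromlist machinery above does, demonstrated on a stdlib module: passing a non-empty fromlist makes __import__ return the submodule itself rather than the top-level package.

mod = __import__('os.path', globals(), locals(), fromlist=['join'], level=0)
print(mod is __import__('os').path)   # True: we got os.path, not os
print(mod.join('a', 'b'))             # 'a/b' on POSIX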
angr/angr | angr/callable.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/callable.py#L84-L125 | def call_c(self, c_args):
"""
Call this Callable with a string of C-style arguments.
:param str c_args: C-style arguments.
:return: The return value from the call.
:rtype: claripy.Ast
"""
c_args = c_args.strip()
if c_args[0] != "(":
c_args = "(" + c_args
if c_args[-1] != ")":
c_args += ")"
# Parse arguments
content = "int main() { func%s; }" % c_args
ast = pycparser.CParser().parse(content)
if not ast.ext or not isinstance(ast.ext[0], pycparser.c_ast.FuncDef):
raise AngrCallableError("Error in parsing the given C-style argument string.")
if not ast.ext[0].body.block_items or not isinstance(ast.ext[0].body.block_items[0], pycparser.c_ast.FuncCall):
raise AngrCallableError("Error in parsing the given C-style argument string: "
"Cannot find the expected function call.")
arg_exprs = ast.ext[0].body.block_items[0].args.exprs
args = [ ]
for expr in arg_exprs:
if isinstance(expr, pycparser.c_ast.Constant):
# string
if expr.type == "string":
args.append(expr.value[1:-1])
elif expr.type == "int":
args.append(int(expr.value))
else:
raise AngrCallableError("Unsupported expression type %s." % expr.type)
else:
raise AngrCallableError("Unsupported expression type %s." % type(expr))
return self.__call__(*args) | [
"def",
"call_c",
"(",
"self",
",",
"c_args",
")",
":",
"c_args",
"=",
"c_args",
".",
"strip",
"(",
")",
"if",
"c_args",
"[",
"0",
"]",
"!=",
"\"(\"",
":",
"c_args",
"=",
"\"(\"",
"+",
"c_args",
"if",
"c_args",
"[",
"-",
"1",
"]",
"!=",
"\")\"",
... | Call this Callable with a string of C-style arguments.
:param str c_args: C-style arguments.
:return: The return value from the call.
:rtype: claripy.Ast | [
"Call",
"this",
"Callable",
"with",
"a",
"string",
"of",
"C",
"-",
"style",
"arguments",
"."
] | python | train |
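The pycparser trick in isolation: wrap the argument string in a dummy main() so the C parser accepts it, then walk to the FuncCall's constants (toy input mirroring the parsing above).

import pycparser

content = 'int main() { func(42, "hello"); }'
ast = pycparser.CParser().parse(content)
call = ast.ext[0].body.block_items[0]     # the FuncCall node
for expr in call.args.exprs:
    print(expr.type, expr.value)          # int 42, then string "hello"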
StagPython/StagPy | stagpy/parfile.py | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/parfile.py#L637-L685 | def readpar(par_file, root):
"""Read StagYY par file.
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name.
"""
par_nml = deepcopy(PAR_DEFAULT)
if PAR_DFLT_FILE.is_file():
_enrich_with_par(par_nml, PAR_DFLT_FILE)
else:
PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
f90nml.write(par_nml, str(PAR_DFLT_FILE))
if not par_file.is_file():
raise NoParFileError(par_file)
par_main = f90nml.read(str(par_file))
if 'default_parameters_parfile' in par_main:
par_dflt = par_main['default_parameters_parfile'].get(
'par_name_defaultparameters', 'par_defaults')
par_dflt = root / par_dflt
if not par_dflt.is_file():
raise NoParFileError(par_dflt)
_enrich_with_par(par_nml, par_dflt)
_enrich_with_par(par_nml, par_file)
par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
return par_nml | [
"def",
"readpar",
"(",
"par_file",
",",
"root",
")",
":",
"par_nml",
"=",
"deepcopy",
"(",
"PAR_DEFAULT",
")",
"if",
"PAR_DFLT_FILE",
".",
"is_file",
"(",
")",
":",
"_enrich_with_par",
"(",
"par_nml",
",",
"PAR_DFLT_FILE",
")",
"else",
":",
"PAR_DFLT_FILE",
... | Read StagYY par file.
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name. | [
"Read",
"StagYY",
"par",
"file",
"."
] | python | train |
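The layered-defaults idea, reduced to plain dicts: start from a deep copy of the defaults and let each later source override per section (toy data, not StagYY's real namelists).

from copy import deepcopy

PAR_DEFAULT = {'ioin': {'output_file_stem': 'output'}}
par = deepcopy(PAR_DEFAULT)                      # never mutate the defaults
for override in ({'ioin': {'output_file_stem': 'run1'}},):
    for section, values in override.items():
        par.setdefault(section, {}).update(values)
print(par['ioin']['output_file_stem'])           # 'run1'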
nkmathew/yasi-sexp-indenter | yasi.py | https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L109-L147 | def parse_options(arguments=None):
""" Reads command-line arguments
>>> parse_options('--indent-comments')
"""
if arguments is None:
arguments = sys.argv[1:]
if isinstance(arguments, str):
arguments = arguments.split()
if isinstance(arguments, argparse.Namespace):
return arguments
parser = create_args_parser()
args = parser.parse_args(arguments)
# pprint(args.__dict__)
args.dialect = args.dialect.lower()
if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
parser.error("`{0}' is not a recognized dialect".format(args.dialect))
args.backup_dir = os.path.expanduser(args.backup_dir)
if not os.path.exists(args.backup_dir):
parser.error("Directory `{0}' does not exist".format(args.backup_dir))
if len(args.files) > 1 and args.output_file:
parser.error('Cannot use the -o flag when more than one file is specified')
if not args.files:
# Indentation from standard input
if args.modify and not args.output_file:
args.modify = False
args.backup = False
args.warning = False
if args.output_diff:
# If someone requests a diff we assume he/she doesn't want the file to be
# modified
args.modify = False
return args | [
"def",
"parse_options",
"(",
"arguments",
"=",
"None",
")",
":",
"if",
"arguments",
"is",
"None",
":",
"arguments",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"if",
"isinstance",
"(",
"arguments",
",",
"str",
")",
":",
"arguments",
"=",
"arguments",
... | Reads command-line arguments
>>> parse_options('--indent-comments') | [
"Reads",
"command",
"-",
"line",
"arguments"
] | python | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L79-L92 | def insert_child(self, child_pid, index=-1):
"""Insert a Version child PID."""
if child_pid.status != PIDStatus.REGISTERED:
raise PIDRelationConsistencyError(
"Version PIDs should have status 'REGISTERED'. Use "
"insert_draft_child to insert 'RESERVED' draft PID.")
with db.session.begin_nested():
# if there is a draft and "child" is inserted as the last version,
# it should be inserted before the draft.
draft = self.draft_child
if draft and index == -1:
index = self.index(draft)
super(PIDNodeVersioning, self).insert_child(child_pid, index=index)
self.update_redirect() | [
"def",
"insert_child",
"(",
"self",
",",
"child_pid",
",",
"index",
"=",
"-",
"1",
")",
":",
"if",
"child_pid",
".",
"status",
"!=",
"PIDStatus",
".",
"REGISTERED",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Version PIDs should have status 'REGISTERED'. Us... | Insert a Version child PID. | [
"Insert",
"a",
"Version",
"child",
"PID",
"."
] | python | train |
box/flaky | flaky/_flaky_plugin.py | https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L463-L476 | def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1) | [
"def",
"_increment_flaky_attribute",
"(",
"cls",
",",
"test_item",
",",
"flaky_attribute",
")",
":",
"cls",
".",
"_set_flaky_attribute",
"(",
"test_item",
",",
"flaky_attribute",
",",
"cls",
".",
"_get_flaky_attribute",
"(",
"test_item",
",",
"flaky_attribute",
")",... | Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode` | [
"Increments",
"the",
"value",
"of",
"an",
"attribute",
"on",
"a",
"flaky",
"test",
"."
] | python | train |
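The counter idiom in isolation (the attribute name and the 0 default are simplifications; the real _get_flaky_attribute reads a value the plugin set earlier):

def bump(obj, name):
    setattr(obj, name, getattr(obj, name, 0) + 1)

class Test(object):
    pass

t = Test()
bump(t, 'runs')
bump(t, 'runs')
print(t.runs)   # 2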
xingjiepan/cylinder_fitting | cylinder_fitting/fitting.py | https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L53-L63 | def C(w, Xs):
'''Calculate the cylinder center given the cylinder direction and
a list of data points.
'''
n = len(Xs)
P = projection_matrix(w)
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
return np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A)) | [
"def",
"C",
"(",
"w",
",",
"Xs",
")",
":",
"n",
"=",
"len",
"(",
"Xs",
")",
"P",
"=",
"projection_matrix",
"(",
"w",
")",
"Ys",
"=",
"[",
"np",
".",
"dot",
"(",
"P",
",",
"X",
")",
"for",
"X",
"in",
"Xs",
"]",
"A",
"=",
"calc_A",
"(",
"... | Calculate the cylinder center given the cylinder direction and
a list of data points. | [
"Calculate",
"the",
"cylinder",
"center",
"given",
"the",
"cylinder",
"direction",
"and",
"a",
"list",
"of",
"data",
"points",
"."
] | python | train |
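Hedged sketches of two helpers referenced above; their exact definitions live elsewhere in the module, and these follow the standard least-squares cylinder derivation (assumptions, not the repo's verbatim code):

import numpy as np

def projection_matrix(w):
    """P = I - w w^T projects onto the plane orthogonal to unit vector w."""
    w = np.asarray(w, dtype=float)
    return np.identity(3) - np.outer(w, w)

def skew_matrix(w):
    """Cross-product matrix: skew_matrix(w) @ v == np.cross(w, v)."""
    return np.array([[0.0, -w[2], w[1]],
                     [w[2], 0.0, -w[0]],
                     [-w[1], w[0], 0.0]])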
MillionIntegrals/vel | vel/rl/models/q_rainbow_model.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/q_rainbow_model.py#L97-L110 | def create(backbone: ModelFactory, vmin: float, vmax: float, atoms: int, initial_std_dev: float = 0.4,
factorized_noise: bool = True, input_block: typing.Optional[ModelFactory] = None):
""" Vel factory function """
if input_block is None:
input_block = IdentityFactory()
return QDistributionalModelFactory(
input_block=input_block, backbone=backbone,
vmin=vmin,
vmax=vmax,
atoms=atoms,
initial_std_dev=initial_std_dev,
factorized_noise=factorized_noise
) | [
"def",
"create",
"(",
"backbone",
":",
"ModelFactory",
",",
"vmin",
":",
"float",
",",
"vmax",
":",
"float",
",",
"atoms",
":",
"int",
",",
"initial_std_dev",
":",
"float",
"=",
"0.4",
",",
"factorized_noise",
":",
"bool",
"=",
"True",
",",
"input_block"... | Vel factory function | [
"Vel",
"factory",
"function"
] | python | train |
yahoo/TensorFlowOnSpark | tensorflowonspark/TFNode.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/tensorflowonspark/TFNode.py#L286-L308 | def terminate(self):
"""Terminate data feeding early.
Since TensorFlow applications can often terminate on conditions unrelated to the training data (e.g. steps, accuracy, etc),
this method signals the data feeding process to ignore any further incoming data. Note that Spark itself does not have a mechanism
to terminate an RDD operation early, so the extra partitions will still be sent to the executors (but will be ignored). Because
of this, you should size your input data accordingly to avoid excessive overhead.
"""
logging.info("terminate() invoked")
self.mgr.set('state', 'terminating')
# drop remaining items in the queue
queue = self.mgr.get_queue(self.qname_in)
count = 0
done = False
while not done:
try:
queue.get(block=True, timeout=5)
queue.task_done()
count += 1
except Empty:
logging.info("dropped {0} items from queue".format(count))
done = True | [
"def",
"terminate",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"\"terminate() invoked\"",
")",
"self",
".",
"mgr",
".",
"set",
"(",
"'state'",
",",
"'terminating'",
")",
"# drop remaining items in the queue",
"queue",
"=",
"self",
".",
"mgr",
".",
"... | Terminate data feeding early.
Since TensorFlow applications can often terminate on conditions unrelated to the training data (e.g. steps, accuracy, etc),
this method signals the data feeding process to ignore any further incoming data. Note that Spark itself does not have a mechanism
to terminate an RDD operation early, so the extra partitions will still be sent to the executors (but will be ignored). Because
of this, you should size your input data accordingly to avoid excessive overhead. | [
"Terminate",
"data",
"feeding",
"early",
"."
] | python | train |
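The drain loop in a self-contained form, using a plain queue instead of the multiprocessing manager's queue:

from queue import Queue, Empty

q = Queue()
for i in range(3):
    q.put(i)
count = 0
while True:
    try:
        q.get(block=False)
        q.task_done()
        count += 1
    except Empty:
        break
print("dropped {0} items".format(count))   # dropped 3 items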
ThreatResponse/margaritashotgun | margaritashotgun/remote_host.py | https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/remote_host.py#L126-L158 | def connect(self, username, password, key, address, port, jump_host):
"""
Connect ssh tunnel and shell executor to remote host
:type username: str
:param username: username for authentication
:type password: str
:param password: password for authentication, may be used to unlock rsa key
:type key: str
:param key: path to rsa key for authentication
:type address: str
:param address: address for remote host
:type port: int
:param port: ssh port for remote host
"""
if port is None:
self.remote_port = 22
else:
self.remote_port = int(port)
auth = Auth(username=username, password=password, key=key)
if jump_host is not None:
jump_auth = Auth(username=jump_host['username'],
password=jump_host['password'],
key=jump_host['key'])
if jump_host['port'] is None:
jump_host['port'] = 22
else:
jump_auth = None
self.shell.connect(auth, address, self.remote_port, jump_host, jump_auth)
transport = self.shell.transport()
self.tunnel.configure(transport, auth, address, self.remote_port)
self.remote_addr = address | [
"def",
"connect",
"(",
"self",
",",
"username",
",",
"password",
",",
"key",
",",
"address",
",",
"port",
",",
"jump_host",
")",
":",
"if",
"port",
"is",
"None",
":",
"self",
".",
"remote_port",
"=",
"22",
"else",
":",
"self",
".",
"remote_port",
"="... | Connect ssh tunnel and shell executor to remote host
:type username: str
:param username: username for authentication
:type password: str
:param password: password for authentication, may be used to unlock rsa key
:type key: str
:param key: path to rsa key for authentication
:type address: str
:param address: address for remote host
:type port: int
:param port: ssh port for remote host | [
"Connect",
"ssh",
"tunnel",
"and",
"shell",
"executor",
"to",
"remote",
"host"
] | python | train |
odlgroup/odl | odl/discr/partition.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L308-L363 | def boundary_cell_fractions(self):
"""Return a tuple of contained fractions of boundary cells.
Since the outermost grid points can have any distance to the
boundary of the partitioned set, the "natural" outermost cell
around these points can either be cropped or extended. This
property is a tuple of (float, float) tuples, one entry per
dimension, where the fractions of the left- and rightmost
cells inside the set are stored. If a grid point lies exactly
on the boundary, the value is 1/2 since the cell is cut in half.
Otherwise, any value larger than 1/2 is possible.
Returns
-------
on_bdry : tuple of 2-tuples of floats
Each 2-tuple contains the fraction of the leftmost
(first entry) and rightmost (second entry) cell in the
partitioned set in the corresponding dimension.
See Also
--------
cell_boundary_vecs
Examples
--------
We create a partition of the rectangle [0, 1.5] x [-2, 2] with
the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the
boundary would be:
[-0.5, 0.5] and [0.5, 1.5] in the first axis
[-1.5, -0.5] and [1, 3] in the second axis
Thus, in the first axis, the fractions contained in [0, 1.5]
are 0.5 and 1, and in the second axis, [-2, 2] contains the
fractions 1.5 and 0.5.
>>> rect = odl.IntervalProd([0, -2], [1.5, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.boundary_cell_fractions
((0.5, 1.0), (1.5, 0.5))
"""
frac_list = []
for ax, (cvec, bmin, bmax) in enumerate(zip(
self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)):
# Degenerate axes have a value of 1.0 (this is used as weight
# in integration formulas later)
if len(cvec) == 1:
frac_list.append((1.0, 1.0))
else:
left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0])
right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2])
frac_list.append((left_frac, right_frac))
return tuple(frac_list) | [
"def",
"boundary_cell_fractions",
"(",
"self",
")",
":",
"frac_list",
"=",
"[",
"]",
"for",
"ax",
",",
"(",
"cvec",
",",
"bmin",
",",
"bmax",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"grid",
".",
"coord_vectors",
",",
"self",
".",
"set",... | Return a tuple of contained fractions of boundary cells.
Since the outermost grid points can have any distance to the
boundary of the partitioned set, the "natural" outermost cell
around these points can either be cropped or extended. This
property is a tuple of (float, float) tuples, one entry per
dimension, where the fractions of the left- and rightmost
cells inside the set are stored. If a grid point lies exactly
on the boundary, the value is 1/2 since the cell is cut in half.
Otherwise, any value larger than 1/2 is possible.
Returns
-------
on_bdry : tuple of 2-tuples of floats
Each 2-tuple contains the fraction of the leftmost
(first entry) and rightmost (second entry) cell in the
partitioned set in the corresponding dimension.
See Also
--------
cell_boundary_vecs
Examples
--------
We create a partition of the rectangle [0, 1.5] x [-2, 2] with
the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the
boundary would be:
[-0.5, 0.5] and [0.5, 1.5] in the first axis
[-1.5, -0.5] and [1, 3] in the second axis
Thus, in the first axis, the fractions contained in [0, 1.5]
are 0.5 and 1, and in the second axis, [-2, 2] contains the
fractions 1.5 and 0.5.
>>> rect = odl.IntervalProd([0, -2], [1.5, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.boundary_cell_fractions
((0.5, 1.0), (1.5, 0.5)) | [
"Return",
"a",
"tuple",
"of",
"contained",
"fractions",
"of",
"boundary",
"cells",
"."
] | python | train |
deepmind/pysc2 | pysc2/bin/gen_units.py | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/bin/gen_units.py#L51-L68 | def generate_py_units(data):
"""Generate the list of units in units.py."""
units = collections.defaultdict(list)
for unit in sorted(data.units, key=lambda a: a.name):
if unit.unit_id in static_data.UNIT_TYPES:
units[unit.race].append(unit)
def print_race(name, race):
print("class %s(enum.IntEnum):" % name)
print(' """%s units."""' % name)
for unit in units[race]:
print(" %s = %s" % (unit.name, unit.unit_id))
print("\n")
print_race("Neutral", sc_common.NoRace)
print_race("Protoss", sc_common.Protoss)
print_race("Terran", sc_common.Terran)
print_race("Zerg", sc_common.Zerg) | [
"def",
"generate_py_units",
"(",
"data",
")",
":",
"units",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"unit",
"in",
"sorted",
"(",
"data",
".",
"units",
",",
"key",
"=",
"lambda",
"a",
":",
"a",
".",
"name",
")",
":",
"if",
"... | Generate the list of units in units.py. | [
"Generate",
"the",
"list",
"of",
"units",
"in",
"units",
".",
"py",
"."
] | python | train |
tobgu/pyrsistent | pyrsistent/_pset.py | https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_pset.py#L103-L110 | def discard(self, element):
"""
Return a new PSet with element removed. Returns itself if element is not present.
"""
if element in self._map:
return self.evolver().remove(element).persistent()
return self | [
"def",
"discard",
"(",
"self",
",",
"element",
")",
":",
"if",
"element",
"in",
"self",
".",
"_map",
":",
"return",
"self",
".",
"evolver",
"(",
")",
".",
"remove",
"(",
"element",
")",
".",
"persistent",
"(",
")",
"return",
"self"
] | Return a new PSet with element removed. Returns itself if element is not present. | [
"Return",
"a",
"new",
"PSet",
"with",
"element",
"removed",
".",
"Returns",
"itself",
"if",
"element",
"is",
"not",
"present",
"."
] | python | train |
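Usage sketch showing the persistent-set semantics: the original set is untouched, and discarding an absent element hands back the same object.

from pyrsistent import pset

s1 = pset([1, 2, 3])
s2 = s1.discard(2)
s3 = s2.discard(99)                        # element absent: same set returned
print(sorted(s1), sorted(s2), s3 is s2)    # [1, 2, 3] [1, 3] True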
rackerlabs/rackspace-python-neutronclient | neutronclient/v2_0/client.py | https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1094-L1097 | def update_lbaas_healthmonitor(self, lbaas_healthmonitor, body=None):
"""Updates a lbaas_healthmonitor."""
return self.put(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
body=body) | [
"def",
"update_lbaas_healthmonitor",
"(",
"self",
",",
"lbaas_healthmonitor",
",",
"body",
"=",
"None",
")",
":",
"return",
"self",
".",
"put",
"(",
"self",
".",
"lbaas_healthmonitor_path",
"%",
"(",
"lbaas_healthmonitor",
")",
",",
"body",
"=",
"body",
")"
] | Updates a lbaas_healthmonitor. | [
"Updates",
"a",
"lbaas_healthmonitor",
"."
] | python | train |
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_context.py | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_context.py#L158-L161 | def undefined_entries(self) -> Set[str]:
""" Return the set of tokens that are referenced but not defined. """
return as_set([[d for d in self.dependencies(k) if d not in self.grammarelts]
for k in self.grammarelts.keys()]) | [
"def",
"undefined_entries",
"(",
"self",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"return",
"as_set",
"(",
"[",
"[",
"d",
"for",
"d",
"in",
"self",
".",
"dependencies",
"(",
"k",
")",
"if",
"d",
"not",
"in",
"self",
".",
"grammarelts",
"]",
"for",
... | Return the set of tokens that are referenced but not defined. | [
"Return",
"the",
"set",
"of",
"tokens",
"that",
"are",
"referenced",
"but",
"not",
"defined",
"."
] | python | train |
RiotGames/cloud-inquisitor | plugins/public/cinq-collector-aws/cinq_collector_aws/region.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-collector-aws/cinq_collector_aws/region.py#L307-L374 | def update_snapshots(self):
"""Update list of EBS Snapshots for the account / region
Returns:
`None`
"""
self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region))
ec2 = self.session.resource('ec2', region_name=self.region)
try:
existing_snapshots = EBSSnapshot.get_all(self.account, self.region)
snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])}
for data in list(snapshots.values()):
if data.id in existing_snapshots:
snapshot = existing_snapshots[data.id]
if snapshot.update(data):
self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
else:
properties = {
'create_time': data.start_time,
'encrypted': data.encrypted,
'kms_key_id': data.kms_key_id,
'state': data.state,
'state_message': data.state_message,
'volume_id': data.volume_id,
'volume_size': data.volume_size,
}
tags = {t['Key']: t['Value'] for t in data.tags or {}}
snapshot = EBSSnapshot.create(
data.id,
account_id=self.account.account_id,
location=self.region,
properties=properties,
tags=tags
)
self.log.debug('Added new EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
db.session.commit()
vk = set(list(snapshots.keys()))
evk = set(list(existing_snapshots.keys()))
try:
for snapshotID in evk - vk:
db.session.delete(existing_snapshots[snapshotID].resource)
self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshotID
))
db.session.commit()
except:
self.log.exception('Failed removing deleted snapshots')
db.session.rollback()
finally:
del ec2 | [
"def",
"update_snapshots",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Updating EBSSnapshots for {}/{}'",
".",
"format",
"(",
"self",
".",
"account",
".",
"account_name",
",",
"self",
".",
"region",
")",
")",
"ec2",
"=",
"self",
".",
... | Update list of EBS Snapshots for the account / region
Returns:
`None` | [
"Update",
"list",
"of",
"EBS",
"Snapshots",
"for",
"the",
"account",
"/",
"region"
] | python | train |
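The reconcile-by-set-difference pattern above, reduced to its core (toy snapshot ids):

existing = {'snap-1': 'row1', 'snap-2': 'row2'}   # what the database has
remote   = {'snap-2': 'new2', 'snap-3': 'new3'}   # what AWS reports

to_add    = set(remote) - set(existing)    # create these
to_delete = set(existing) - set(remote)    # purge these
to_update = set(remote) & set(existing)    # diff these for changes
print(sorted(to_add), sorted(to_delete), sorted(to_update))
# ['snap-3'] ['snap-1'] ['snap-2']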
pymc-devs/pymc | pymc/Matplot.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L39-L65 | def get_index_list(shape, j):
"""
index_list = get_index_list(shape, j)
:Arguments:
shape: a tuple
j: an integer
Assumes index j is from a ravelled version of an array
with specified shape, returns the corresponding
non-ravelled index tuple as a list.
"""
r = range(len(shape))
index_list = (r)
for i in r:
if i < len(shape):
prodshape = prod(shape[i + 1:])
else:
prodshape = 0
index_list[i] = int(floor(j / prodshape))
if index_list[i] > shape[i]:
raise IndexError('Requested index too large')
j %= prodshape
return index_list | [
"def",
"get_index_list",
"(",
"shape",
",",
"j",
")",
":",
"r",
"=",
"range",
"(",
"len",
"(",
"shape",
")",
")",
"index_list",
"=",
"(",
"r",
")",
"for",
"i",
"in",
"r",
":",
"if",
"i",
"<",
"len",
"(",
"shape",
")",
":",
"prodshape",
"=",
"... | index_list = get_index_list(shape, j)
:Arguments:
shape: a tuple
j: an integer
Assumes index j is from a ravelled version of an array
with specified shape, returns the corresponding
non-ravelled index tuple as a list. | [
"index_list",
"=",
"get_index_list",
"(",
"shape",
"j",
")"
] | python | train |
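NumPy provides the same mapping; a quick cross-check (note the snippet above is Python 2 only: `index_list = (r)` relies on range() returning a list, and `prod`/`floor` come from the module's star imports):

import numpy as np
print(np.unravel_index(7, (2, 3, 4)))   # (0, 1, 3): 7 == 0*12 + 1*4 + 3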
coleifer/peewee | playhouse/sqlite_ext.py | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L391-L400 | def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering) | [
"def",
"search",
"(",
"cls",
",",
"term",
",",
"weights",
"=",
"None",
",",
"with_score",
"=",
"False",
",",
"score_alias",
"=",
"'score'",
",",
"explicit_ordering",
"=",
"False",
")",
":",
"return",
"cls",
".",
"_search",
"(",
"term",
",",
"weights",
... | Full-text search using selected `term`. | [
"Full",
"-",
"text",
"search",
"using",
"selected",
"term",
"."
] | python | train |
wishtack/pysynthetic | synthetic/decorators.py | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/decorators.py#L115-L149 | def synthesizeProperty(propertyName,
default = None,
contract = None,
readOnly = False,
privateMemberName = None):
"""
When applied to a class, this decorator adds a property to it and overrides the constructor in order to set\
the default value of the property.
:IMPORTANT: In order for this to work on Python 2, you must use new-style classes, that is to say the class must inherit from object.
By default, the private attribute containing the property's value will be named ``propertyName`` with '_' prepended to it.
Naming convention can be overridden with a custom one using :meth:`namingConvention <namingConvention>` decorator.
:param propertyName: Name of the property to synthesize.
:type propertyName: str
:param default: Property's default value.
:type default: *
:param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_
:type contract: *
:param readOnly: If set to ``True``, the property will not a have a setter.
:type readOnly: bool
:param privateMemberName: Custom name for the private attribute that contains the property's value.
:type privateMemberName: str|None
:raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name.
:raises: :class:`InvalidPropertyOverrideError` when there's already a member with that name and which is not a property.
"""
return SyntheticDecoratorFactory().syntheticMemberDecorator(memberName = propertyName,
defaultValue = default,
contract = contract,
readOnly = readOnly,
privateMemberName = privateMemberName,
memberDelegate = PropertyDelegate()) | [
"def",
"synthesizeProperty",
"(",
"propertyName",
",",
"default",
"=",
"None",
",",
"contract",
"=",
"None",
",",
"readOnly",
"=",
"False",
",",
"privateMemberName",
"=",
"None",
")",
":",
"return",
"SyntheticDecoratorFactory",
"(",
")",
".",
"syntheticMemberDec... | When applied to a class, this decorator adds a property to it and overrides the constructor in order to set\
the default value of the property.
:IMPORTANT: In order for this to work on Python 2, you must use new-style classes, that is to say the class must inherit from object.
By default, the private attribute containing the property's value will be named ``propertyName`` with '_' prepended to it.
Naming convention can be overridden with a custom one using :meth:`namingConvention <namingConvention>` decorator.
:param propertyName: Name of the property to synthesize.
:type propertyName: str
:param default: Property's default value.
:type default: *
:param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_
:type contract: *
:param readOnly: If set to ``True``, the property will not have a setter.
:type readOnly: bool
:param privateMemberName: Custom name for the private attribute that contains the property's value.
:type privateMemberName: str|None
:raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name.
:raises: :class:`InvalidPropertyOverrideError` when there's already a member with that name and which is not a property. | [
"When",
"applied",
"to",
"a",
"class",
"this",
"decorator",
"adds",
"a",
"property",
"to",
"it",
"and",
"overrides",
"the",
"constructor",
"in",
"order",
"to",
"set",
"\\",
"the",
"default",
"value",
"of",
"the",
"property",
"."
] | python | train |
HewlettPackard/python-hpOneView | hpOneView/resources/resource.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L313-L318 | def _merge_default_values(self):
"""Merge default values with resource data."""
values = self._get_default_values()
for key, value in values.items():
if not self.data.get(key):
self.data[key] = value | [
"def",
"_merge_default_values",
"(",
"self",
")",
":",
"values",
"=",
"self",
".",
"_get_default_values",
"(",
")",
"for",
"key",
",",
"value",
"in",
"values",
".",
"items",
"(",
")",
":",
"if",
"not",
"self",
".",
"data",
".",
"get",
"(",
"key",
")"... | Merge default values with resource data. | [
"Merge",
"default",
"values",
"with",
"resource",
"data",
"."
] | python | train |
bram85/topydo | topydo/commands/DepCommand.py | https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/commands/DepCommand.py#L133-L150 | def _handle_dot(self):
""" Handles the dot subsubcommand. """
self.printer = DotPrinter(self.todolist)
try:
arg = self.argument(1)
todo = self.todolist.todo(arg)
arg = self.argument(1)
todos = set([self.todolist.todo(arg)])
todos |= set(self.todolist.children(todo))
todos |= set(self.todolist.parents(todo))
todos = sorted(todos, key=lambda t: t.text())
self.out(self.printer.print_list(todos))
except InvalidTodoException:
self.error("Invalid todo number given.")
except InvalidCommandArgument:
self.error(self.usage()) | [
"def",
"_handle_dot",
"(",
"self",
")",
":",
"self",
".",
"printer",
"=",
"DotPrinter",
"(",
"self",
".",
"todolist",
")",
"try",
":",
"arg",
"=",
"self",
".",
"argument",
"(",
"1",
")",
"todo",
"=",
"self",
".",
"todolist",
".",
"todo",
"(",
"arg"... | Handles the dot subsubcommand. | [
"Handles",
"the",
"dot",
"subsubcommand",
"."
] | python | train |
mwgielen/jackal | jackal/core.py | https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L87-L97 | def count(self, *args, **kwargs):
"""
Returns the number of results after filtering with the given arguments.
"""
search = self.create_search(*args, **kwargs)
try:
return search.count()
except NotFoundError:
print_error("The index was not found, have you initialized the index?")
except (ConnectionError, TransportError):
print_error("Cannot connect to elasticsearch") | [
"def",
"count",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"search",
"=",
"self",
".",
"create_search",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"return",
"search",
".",
"count",
"(",
")",
"except",
"NotF... | Returns the number of results after filtering with the given arguments. | [
"Returns",
"the",
"number",
"of",
"results",
"after",
"filtering",
"with",
"the",
"given",
"arguments",
"."
] | python | valid |
TkTech/Jawa | jawa/constants.py | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/constants.py#L461-L472 | def create_string(self, value: str) -> String:
"""
Creates a new :class:`ConstantString`, adding it to the pool and
returning it.
:param value: The value of the new string as a UTF8 string.
"""
self.append((
8,
self.create_utf8(value).index
))
return self.get(self.raw_count - 1) | [
"def",
"create_string",
"(",
"self",
",",
"value",
":",
"str",
")",
"->",
"String",
":",
"self",
".",
"append",
"(",
"(",
"8",
",",
"self",
".",
"create_utf8",
"(",
"value",
")",
".",
"index",
")",
")",
"return",
"self",
".",
"get",
"(",
"self",
... | Creates a new :class:`ConstantString`, adding it to the pool and
returning it.
:param value: The value of the new string as a UTF8 string. | [
"Creates",
"a",
"new",
":",
"class",
":",
"ConstantString",
"adding",
"it",
"to",
"the",
"pool",
"and",
"returning",
"it",
"."
] | python | train |
StanfordVL/robosuite | robosuite/utils/mjcf_utils.py | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/utils/mjcf_utils.py#L14-L24 | def xml_path_completion(xml_path):
"""
Takes in a local xml path and returns a full path.
if @xml_path is absolute, do nothing
if @xml_path is not absolute, load xml that is shipped by the package
"""
if xml_path.startswith("/"):
full_path = xml_path
else:
full_path = os.path.join(robosuite.models.assets_root, xml_path)
return full_path | [
"def",
"xml_path_completion",
"(",
"xml_path",
")",
":",
"if",
"xml_path",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"full_path",
"=",
"xml_path",
"else",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"robosuite",
".",
"models",
".",
"asse... | Takes in a local xml path and returns a full path.
if @xml_path is absolute, do nothing
if @xml_path is not absolute, load xml that is shipped by the package | [
"Takes",
"in",
"a",
"local",
"xml",
"path",
"and",
"returns",
"a",
"full",
"path",
".",
"if"
] | python | train |
PGower/PyCanvas | pycanvas/apis/files.py | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1365-L1389 | def remove_usage_rights_groups(self, group_id, file_ids, folder_ids=None):
"""
Remove usage rights.
Removes copyright and license information associated with one or more files
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - file_ids
"""List of ids of files to remove associated usage rights from."""
params["file_ids"] = file_ids
# OPTIONAL - folder_ids
"""List of ids of folders. Usage rights will be removed from all files in these folders."""
if folder_ids is not None:
params["folder_ids"] = folder_ids
self.logger.debug("DELETE /api/v1/groups/{group_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/groups/{group_id}/usage_rights".format(**path), data=data, params=params, no_data=True) | [
"def",
"remove_usage_rights_groups",
"(",
"self",
",",
"group_id",
",",
"file_ids",
",",
"folder_ids",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - group_id\r",
"\"\"\"ID\"\"\"",
"path",
"... | Remove usage rights.
Removes copyright and license information associated with one or more files | [
"Remove",
"usage",
"rights",
".",
"Removes",
"copyright",
"and",
"license",
"information",
"associated",
"with",
"one",
"or",
"more",
"files"
] | python | train |
rhelmot/nclib | nclib/netcat.py | https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L652-L672 | def recv_until(self, s, max_size=None, timeout='default'):
"""
Receive data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout) | [
"def",
"recv_until",
"(",
"self",
",",
"s",
",",
"max_size",
"=",
"None",
",",
"timeout",
"=",
"'default'",
")",
":",
"self",
".",
"_print_recv_header",
"(",
"'======== Receiving until {0}{timeout_text} ========'",
",",
"timeout",
",",
"repr",
"(",
"s",
")",
"... | Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil | [
"Recieve",
"data",
"from",
"the",
"socket",
"until",
"the",
"given",
"substring",
"is",
"observed",
".",
"Data",
"in",
"the",
"same",
"datagram",
"as",
"the",
"substring",
"following",
"the",
"substring",
"will",
"not",
"be",
"returned",
"and",
"will",
"be",... | python | train |
GNS3/gns3-server | gns3server/controller/project.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L47-L56 | def open_required(func):
"""
Use this decorator to raise an error if the project is not opened
"""
def wrapper(self, *args, **kwargs):
if self._status == "closed":
raise aiohttp.web.HTTPForbidden(text="The project is not opened")
return func(self, *args, **kwargs)
return wrapper | [
"def",
"open_required",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_status",
"==",
"\"closed\"",
":",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPForbidden",
"(",
"text... | Use this decorator to raise an error if the project is not opened | [
"Use",
"this",
"decorator",
"to",
"raise",
"an",
"error",
"if",
"the",
"project",
"is",
"not",
"opened"
] | python | train |
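The guard-decorator pattern in a runnable toy (RuntimeError stands in for the aiohttp HTTPForbidden used above):

def open_required(func):
    def wrapper(self, *args, **kwargs):
        if self._status == "closed":
            raise RuntimeError("The project is not opened")
        return func(self, *args, **kwargs)
    return wrapper

class Project(object):
    def __init__(self):
        self._status = "closed"

    @open_required
    def add_node(self):
        return "ok"

try:
    Project().add_node()
except RuntimeError as exc:
    print(exc)   # The project is not opened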
iterative/dvc | dvc/main.py | https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/main.py#L15-L52 | def main(argv=None):
"""Run dvc CLI command.
Args:
argv: optional list of arguments to parse. sys.argv is used by default.
Returns:
int: command's return code.
"""
args = None
cmd = None
try:
args = parse_args(argv)
if args.quiet:
logger.setLevel(logging.CRITICAL)
elif args.verbose:
logger.setLevel(logging.DEBUG)
cmd = args.func(args)
ret = cmd.run_cmd()
except KeyboardInterrupt:
logger.exception("interrupted by the user")
ret = 252
except NotDvcRepoError:
logger.exception("")
ret = 253
except DvcParserError:
ret = 254
except Exception: # pylint: disable=broad-except
logger.exception("unexpected error")
ret = 255
Analytics().send_cmd(cmd, args, ret)
return ret | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"args",
"=",
"None",
"cmd",
"=",
"None",
"try",
":",
"args",
"=",
"parse_args",
"(",
"argv",
")",
"if",
"args",
".",
"quiet",
":",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"CRITICAL",
")",
... | Run dvc CLI command.
Args:
argv: optional list of arguments to parse. sys.argv is used by default.
Returns:
int: command's return code. | [
"Run",
"dvc",
"CLI",
"command",
"."
] | python | train |
maximkulkin/lollipop | lollipop/errors.py | https://github.com/maximkulkin/lollipop/blob/042e8a24508cc3b28630863253c38ffbfc52c882/lollipop/errors.py#L66-L118 | def merge_errors(errors1, errors2):
"""Deeply merges two error messages. Error messages can be
string, list of strings or dict of error messages (recursively).
Format is the same as accepted by :exc:`ValidationError`.
Returns new error messages.
"""
if errors1 is None:
return errors2
elif errors2 is None:
return errors1
if isinstance(errors1, list):
if not errors1:
return errors2
if isinstance(errors2, list):
return errors1 + errors2
elif isinstance(errors2, dict):
return dict(
errors2,
**{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
)
else:
return errors1 + [errors2]
elif isinstance(errors1, dict):
if isinstance(errors2, list):
return dict(
errors1,
**{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
)
elif isinstance(errors2, dict):
errors = dict(errors1)
for k, v in iteritems(errors2):
if k in errors:
errors[k] = merge_errors(errors[k], v)
else:
errors[k] = v
return errors
else:
return dict(
errors1,
**{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
)
else:
if isinstance(errors2, list):
return [errors1] + errors2 if errors2 else errors1
elif isinstance(errors2, dict):
return dict(
errors2,
**{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
)
else:
return [errors1, errors2] | [
"def",
"merge_errors",
"(",
"errors1",
",",
"errors2",
")",
":",
"if",
"errors1",
"is",
"None",
":",
"return",
"errors2",
"elif",
"errors2",
"is",
"None",
":",
"return",
"errors1",
"if",
"isinstance",
"(",
"errors1",
",",
"list",
")",
":",
"if",
"not",
... | Deeply merges two error messages. Error messages can be
string, list of strings or dict of error messages (recursively).
Format is the same as accepted by :exc:`ValidationError`.
Returns new error messages. | [
"Deeply",
"merges",
"two",
"error",
"messages",
".",
"Error",
"messages",
"can",
"be",
"string",
"list",
"of",
"strings",
"or",
"dict",
"of",
"error",
"messages",
"(",
"recursively",
")",
".",
"Format",
"is",
"the",
"same",
"as",
"accepted",
"by",
":",
"... | python | train |
Qiskit/qiskit-terra | qiskit/qobj/converters/pulse_instruction.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qobj/converters/pulse_instruction.py#L213-L228 | def convert_snapshot(self, shift, instruction):
"""Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'snapshot',
't0': shift+instruction.start_time,
'label': instruction.name,
'type': instruction.type
}
return self._qobj_model(**command_dict) | [
"def",
"convert_snapshot",
"(",
"self",
",",
"shift",
",",
"instruction",
")",
":",
"command_dict",
"=",
"{",
"'name'",
":",
"'snapshot'",
",",
"'t0'",
":",
"shift",
"+",
"instruction",
".",
"start_time",
",",
"'label'",
":",
"instruction",
".",
"name",
",... | Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters. | [
"Return",
"converted",
"Snapshot",
"."
] | python | test |
chaimleib/intervaltree | intervaltree/node.py | https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/node.py#L283-L290 | def search_overlap(self, point_list):
"""
Returns all intervals that overlap the point_list.
"""
result = set()
for j in point_list:
self.search_point(j, result)
return result | [
"def",
"search_overlap",
"(",
"self",
",",
"point_list",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"j",
"in",
"point_list",
":",
"self",
".",
"search_point",
"(",
"j",
",",
"result",
")",
"return",
"result"
] | Returns all intervals that overlap the point_list. | [
"Returns",
"all",
"intervals",
"that",
"overlap",
"the",
"point_list",
"."
] | python | train |
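The public-API equivalent of probing several points at once:

from intervaltree import IntervalTree

t = IntervalTree()
t[1:5] = 'a'
t[4:9] = 'b'
hits = set()
for p in (2, 6):
    hits |= t[p]                            # t[point]: intervals containing p
print(sorted(iv.data for iv in hits))       # ['a', 'b']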
Opentrons/opentrons | update-server/otupdate/buildroot/update.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/update.py#L86-L104 | def _begin_write(session: UpdateSession,
loop: asyncio.AbstractEventLoop,
rootfs_file_path: str):
""" Start the write process. """
session.set_progress(0)
session.set_stage(Stages.WRITING)
write_future = asyncio.ensure_future(loop.run_in_executor(
None, file_actions.write_update, rootfs_file_path,
session.set_progress))
def write_done(fut):
exc = fut.exception()
if exc:
session.set_error(getattr(exc, 'short', str(type(exc))),
str(exc))
else:
session.set_stage(Stages.DONE)
write_future.add_done_callback(write_done) | [
"def",
"_begin_write",
"(",
"session",
":",
"UpdateSession",
",",
"loop",
":",
"asyncio",
".",
"AbstractEventLoop",
",",
"rootfs_file_path",
":",
"str",
")",
":",
"session",
".",
"set_progress",
"(",
"0",
")",
"session",
".",
"set_stage",
"(",
"Stages",
".",... | Start the write process. | [
"Start",
"the",
"write",
"process",
"."
] | python | train |
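The ensure_future + done-callback shape in a self-contained toy (asyncio.run, Python 3.7+, replaces the server's long-lived loop):

import asyncio

async def work():
    return 42

def on_done(fut):
    exc = fut.exception()
    if exc:
        print("failed:", exc)
    else:
        print("done:", fut.result())

async def main():
    fut = asyncio.ensure_future(work())
    fut.add_done_callback(on_done)
    await fut

asyncio.run(main())   # prints "done: 42"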
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L1684-L1693 | def _keep_cursor_in_buffer(self):
""" Ensures that the cursor is inside the editing region. Returns
whether the cursor was moved.
"""
moved = not self._in_buffer()
if moved:
cursor = self._control.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self._control.setTextCursor(cursor)
return moved | [
"def",
"_keep_cursor_in_buffer",
"(",
"self",
")",
":",
"moved",
"=",
"not",
"self",
".",
"_in_buffer",
"(",
")",
"if",
"moved",
":",
"cursor",
"=",
"self",
".",
"_control",
".",
"textCursor",
"(",
")",
"cursor",
".",
"movePosition",
"(",
"QtGui",
".",
... | Ensures that the cursor is inside the editing region. Returns
whether the cursor was moved. | [
"Ensures",
"that",
"the",
"cursor",
"is",
"inside",
"the",
"editing",
"region",
".",
"Returns",
"whether",
"the",
"cursor",
"was",
"moved",
"."
] | python | test |
juicer/juicer | juicer/juicer/Juicer.py | https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/juicer/Juicer.py#L404-L423 | def pull(self, cartname=None, env=None):
"""
`cartname` - Name of cart
Pull remote cart from the pre-release (base) environment
"""
if not env:
env = self._defaults['start_in']
juicer.utils.Log.log_debug("Initializing pulling cart: %s ...", cartname)
cart_file = os.path.join(juicer.common.Cart.CART_LOCATION, cartname)
cart_file += '.json'
cart_check = juicer.utils.download_cart(cartname, env)
if cart_check is None:
print 'error: cart \'%s\' does not exist' % cartname
return None
else:
juicer.utils.write_json_document(cart_file, juicer.utils.download_cart(cartname, env))
return cart_check | [
"def",
"pull",
"(",
"self",
",",
"cartname",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"if",
"not",
"env",
":",
"env",
"=",
"self",
".",
"_defaults",
"[",
"'start_in'",
"]",
"juicer",
".",
"utils",
".",
"Log",
".",
"log_debug",
"(",
"\"Initia... | `cartname` - Name of cart
Pull remote cart from the pre-release (base) environment | [
"cartname",
"-",
"Name",
"of",
"cart"
] | python | train |
django-salesforce/django-salesforce | salesforce/dbapi/subselect.py | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L268-L286 | def split_subquery(sql):
"""Split on subqueries and replace them by '&'."""
sql, params = mark_quoted_strings(sql)
sql = simplify_expression(sql)
_ = params # NOQA
start = 0
out = []
subqueries = []
pattern = re.compile(r'\(SELECT\b', re.I)
match = pattern.search(sql, start)
while match:
out.append(sql[start:match.start() + 1] + '&')
start, pos = find_closing_parenthesis(sql, match.start())
start, pos = start + 1, pos - 1
subqueries.append(split_subquery(sql[start:pos]))
start = pos
match = pattern.search(sql, start)
out.append(sql[start:len(sql)])
return ''.join(out), subqueries | [
"def",
"split_subquery",
"(",
"sql",
")",
":",
"sql",
",",
"params",
"=",
"mark_quoted_strings",
"(",
"sql",
")",
"sql",
"=",
"simplify_expression",
"(",
"sql",
")",
"_",
"=",
"params",
"# NOQA",
"start",
"=",
"0",
"out",
"=",
"[",
"]",
"subqueries",
"... | Split on subqueries and replace them by '&'. | [
"Split",
"on",
"subqueries",
"and",
"replace",
"them",
"by",
"&",
"."
] | python | train |
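Based on a reading of the record above, each nested `(SELECT ...)` collapses to `(&)` at its own level, and the extracted text is split recursively into `(sql, subqueries)` pairs. An illustrative expectation (whitespace may differ slightly after `simplify_expression`):

```python
# Expected return shape of split_subquery for one nested subquery.
sql = ("SELECT Name FROM Account "
       "WHERE Id IN (SELECT AccountId FROM Contact WHERE Email != null)")

expected = (
    "SELECT Name FROM Account WHERE Id IN (&)",
    [("SELECT AccountId FROM Contact WHERE Email != null", [])],
)
print(expected[0])   # SELECT Name FROM Account WHERE Id IN (&)
```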
EpistasisLab/scikit-rebate | skrebate/relieff.py | https://github.com/EpistasisLab/scikit-rebate/blob/67dab51a7525fa5d076b059f1e6f8cff7481c1ef/skrebate/relieff.py#L87-L214 | def fit(self, X, y):
"""Scikit-learn required: Computes the feature importance scores from the training data.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
Copy of the ReliefF instance
"""
self._X = X # matrix of predictive variables ('independent variables')
self._y = y # vector of values for outcome variable ('dependent variable')
# Set up the properties for ReliefF -------------------------------------------------------------------------------------
self._datalen = len(self._X) # Number of training instances ('n')
""""Below: Handles special case where user requests that a proportion of training instances be neighbors for
ReliefF rather than a specified 'k' number of neighbors. Note that if k is specified, then k 'hits' and k
'misses' will be used to update feature scores. Thus total number of neighbors is 2k. If instead a proportion
is specified (say 0.1 out of 1000 instances) this represents the total number of neighbors (e.g. 100). In this
case, k would be set to 50 (i.e. 50 hits and 50 misses). """
if hasattr(self, 'n_neighbors') and type(self.n_neighbors) is float:
# Halve the number of neighbors because ReliefF uses n_neighbors matches and n_neighbors misses
self.n_neighbors = int(self.n_neighbors * self._datalen * 0.5)
# Number of unique outcome (label) values (used to determine outcome variable type)
self._label_list = list(set(self._y))
# Determine if label is discrete
discrete_label = (len(self._label_list) <= self.discrete_threshold)
# Identify label type (binary, multiclass, or continuous)
if discrete_label:
if len(self._label_list) == 2:
self._class_type = 'binary'
self.mcmap = 0
elif len(self._label_list) > 2:
self._class_type = 'multiclass'
self.mcmap = self._getMultiClassMap()
else:
raise ValueError('All labels are of the same class.')
else:
self._class_type = 'continuous'
self.mcmap = 0
# Training labels standard deviation -- only used if the training labels are continuous
self._labels_std = 0.
if len(self._label_list) > self.discrete_threshold:
self._labels_std = np.std(self._y, ddof=1)
self._num_attributes = len(self._X[0]) # Number of features in training data
# Number of missing data values in predictor variable matrix.
self._missing_data_count = np.isnan(self._X).sum()
"""Assign internal headers for the features (scikit-learn does not accept external headers from dataset):
The pre_normalize() function relies on the headers being ordered, e.g., X01, X02, etc.
If this is changed, then the sort in the pre_normalize() function needs to be adapted as well. """
xlen = len(self._X[0])
mxlen = len(str(xlen + 1))
self._headers = ['X{}'.format(str(i).zfill(mxlen)) for i in range(1, xlen + 1)]
start = time.time() # Runtime tracking
# Determine data types for all features/attributes in training data (i.e. discrete or continuous)
C = D = False
# Examines each feature and applies discrete_threshold to determine variable type.
self.attr = self._get_attribute_info()
for key in self.attr.keys():
if self.attr[key][0] == 'discrete':
D = True
if self.attr[key][0] == 'continuous':
C = True
# For downstream computational efficiency, determine if dataset is comprised of all discrete, all continuous, or a mix of discrete/continuous features.
if C and D:
self.data_type = 'mixed'
elif D and not C:
self.data_type = 'discrete'
elif C and not D:
self.data_type = 'continuous'
else:
raise ValueError('Invalid data type in data set.')
#--------------------------------------------------------------------------------------------------------------------
# Compute the distance array between all data points ----------------------------------------------------------------
# For downstream efficiency, separate features in dataset by type (i.e. discrete/continuous)
diffs, cidx, didx = self._dtype_array()
cdiffs = diffs[cidx] # max/min continuous value difference for continuous features.
xc = self._X[:, cidx] # Subset of continuous-valued feature data
xd = self._X[:, didx] # Subset of discrete-valued feature data
""" For efficiency, the distance array is computed more efficiently for data with no missing values.
This distance array will only be used to identify nearest neighbors. """
if self._missing_data_count > 0:
self._distance_array = self._distarray_missing(xc, xd, cdiffs)
else:
self._distance_array = self._distarray_no_missing(xc, xd)
if self.verbose:
elapsed = time.time() - start
print('Created distance array in {} seconds.'.format(elapsed))
print('Feature scoring under way ...')
start = time.time()
#--------------------------------------------------------------------------------------------------------------------
# Run remainder of algorithm (i.e. identification of 'neighbors' for each instance, and feature scoring).------------
# Stores feature importance scores for ReliefF or respective Relief-based algorithm.
self.feature_importances_ = self._run_algorithm()
# Delete the internal distance array because it is no longer needed
del self._distance_array
if self.verbose:
elapsed = time.time() - start
print('Completed scoring in {} seconds.'.format(elapsed))
# Compute indices of top features
self.top_features_ = np.argsort(self.feature_importances_)[::-1]
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"_X",
"=",
"X",
"# matrix of predictive variables ('independent variables')",
"self",
".",
"_y",
"=",
"y",
"# vector of values for outcome variable ('dependent variable')",
"# Set up the properties for ... | Scikit-learn required: Computes the feature importance scores from the training data.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
Copy of the ReliefF instance | [
"Scikit",
"-",
"learn",
"required",
":",
"Computes",
"the",
"feature",
"importance",
"scores",
"from",
"the",
"training",
"data",
"."
] | python | train |
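Since `fit` sets `feature_importances_` and `top_features_` as shown above, usage follows the familiar scikit-learn shape. A small sketch with toy data; the import path is the one the skrebate project documents:

```python
import numpy as np
from skrebate import ReliefF

X = np.random.rand(100, 10)          # toy features; replace with real data
y = (X[:, 0] > 0.5).astype(int)      # label driven by feature 0

fs = ReliefF(n_features_to_select=3, n_neighbors=10)
fs.fit(X, y)

print(fs.feature_importances_[:5])   # per-feature scores set by fit()
print(fs.top_features_[:3])          # indices sorted by importance
```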
gnosis/gnosis-py | gnosis/safe/safe_create2_tx.py | https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/safe/safe_create2_tx.py#L150-L185 | def _estimate_gas(self, initializer: bytes, salt_nonce: int,
payment_token: str, payment_receiver: str) -> int:
"""
Gas estimation done using web3 and calling the node
Payment cannot be estimated, as no ether is in the address. So we add some gas later.
:param initializer: Data initializer to send to GnosisSafe setup method
:param salt_nonce: Nonce that will be used to generate the salt to calculate
the address of the new proxy contract.
:return: Total gas estimation
"""
# Estimate the contract deployment. We cannot estimate the refunding, as the safe address has not any fund
gas: int = self.proxy_factory_contract.functions.createProxyWithNonce(self.master_copy_address,
initializer, salt_nonce).estimateGas()
# It's not very relevant if is 1 or 9999
payment: int = 1
# We estimate the refund as a new tx
if payment_token == NULL_ADDRESS:
# Same cost to send 1 ether than 1000
gas += self.w3.eth.estimateGas({'to': payment_receiver, 'value': payment})
else:
# Top should be around 52000 when storage is needed (funder is not a previous owner of the token);
# we use value 1 as we are simulating an internal call, and in such calls you don't pay for the data.
# If it were a new tx, sending 5000 tokens would be more expensive than sending 1 because of data costs.
gas += 55000
# try:
# gas += get_erc20_contract(self.w3,
# payment_token).functions.transfer(payment_receiver,
# payment).estimateGas({'from':
# payment_token})
# except ValueError as exc:
# raise InvalidERC20Token from exc
return gas | [
"def",
"_estimate_gas",
"(",
"self",
",",
"initializer",
":",
"bytes",
",",
"salt_nonce",
":",
"int",
",",
"payment_token",
":",
"str",
",",
"payment_receiver",
":",
"str",
")",
"->",
"int",
":",
"# Estimate the contract deployment. We cannot estimate the refunding, a... | Gas estimation done using web3 and calling the node
Payment cannot be estimated, as no ether is in the address. So we add some gas later.
:param initializer: Data initializer to send to GnosisSafe setup method
:param salt_nonce: Nonce that will be used to generate the salt to calculate
the address of the new proxy contract.
:return: Total gas estimation | [
"Gas",
"estimation",
"done",
"using",
"web3",
"and",
"calling",
"the",
"node",
"Payment",
"cannot",
"be",
"estimated",
"as",
"no",
"ether",
"is",
"in",
"the",
"address",
".",
"So",
"we",
"add",
"some",
"gas",
"later",
".",
":",
"param",
"initializer",
":... | python | test |
tradenity/python-sdk | tradenity/resources/geo_zone.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/geo_zone.py#L259-L281 | def list_all_geo_zones(cls, **kwargs):
"""List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_geo_zones_with_http_info(**kwargs)
else:
(data) = cls._list_all_geo_zones_with_http_info(**kwargs)
return data | [
"def",
"list_all_geo_zones",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_list_all_geo_zones_with_http_info",
"(",
"*... | List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread. | [
"List",
"GeoZones"
] | python | train |
davidmogar/cucco | cucco/cli.py | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L77-L98 | def cli(ctx, config, debug, language, verbose):
"""
Cucco allows you to apply normalizations to a given text or file.
These normalizations include, among others, removal of accent
marks, stop words and extra white spaces, replacement of
punctuation symbols, emails, emojis, etc.
For more info on how to use and configure Cucco, check the
project website at https://cucco.io.
"""
ctx.obj = {}
try:
ctx.obj['config'] = Config(normalizations=config,
language=language,
debug=debug,
verbose=verbose)
except ConfigError as e:
click.echo(e.message)
sys.exit(-1)
ctx.obj['cucco'] = Cucco(ctx.obj['config']) | [
"def",
"cli",
"(",
"ctx",
",",
"config",
",",
"debug",
",",
"language",
",",
"verbose",
")",
":",
"ctx",
".",
"obj",
"=",
"{",
"}",
"try",
":",
"ctx",
".",
"obj",
"[",
"'config'",
"]",
"=",
"Config",
"(",
"normalizations",
"=",
"config",
",",
"la... | Cucco allows to apply normalizations to a given text or file.
This normalizations include, among others, removal of accent
marks, stop words an extra white spaces, replacement of
punctuation symbols, emails, emojis, etc.
For more info on how to use and configure Cucco, check the
project website at https://cucco.io. | [
"Cucco",
"allows",
"to",
"apply",
"normalizations",
"to",
"a",
"given",
"text",
"or",
"file",
".",
"This",
"normalizations",
"include",
"among",
"others",
"removal",
"of",
"accent",
"marks",
"stop",
"words",
"an",
"extra",
"white",
"spaces",
"replacement",
"of... | python | train |
majerteam/sqla_inspect | sqla_inspect/py3o.py | https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L297-L316 | def _get_to_one_relationship_value(self, obj, column):
"""
Compute data produced for a many to one relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
if related:
if related_key is not None:
value = self._get_formatted_val(
related, related_key, column
)
else:
value = column['__prop__'].compile_obj(related)
else:
value = ""
return value | [
"def",
"_get_to_one_relationship_value",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"related_key",
"=",
"column",
".",
"get",
"(",
"'related_key'",
",",
"None",
")",
"related",
"=",
"getattr",
"(",
"obj",
",",
"column",
"[",
"'__col__'",
"]",
".",
... | Compute data produced for a many to one relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value | [
"Compute",
"datas",
"produced",
"for",
"a",
"many",
"to",
"one",
"relationship"
] | python | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L306-L324 | def get_value_in_base_currency(self) -> Decimal:
""" Calculates the value of security holdings in base currency """
# check if the currency is the base currency.
amt_orig = self.get_value()
# Security currency
sec_cur = self.get_currency()
#base_cur = self.book.default_currency
cur_svc = CurrenciesAggregate(self.book)
base_cur = cur_svc.get_default_currency()
if sec_cur == base_cur:
return amt_orig
# otherwise recalculate
single_svc = cur_svc.get_currency_aggregate(sec_cur)
rate = single_svc.get_latest_rate(base_cur)
result = amt_orig * rate.value
return result | [
"def",
"get_value_in_base_currency",
"(",
"self",
")",
"->",
"Decimal",
":",
"# check if the currency is the base currency.",
"amt_orig",
"=",
"self",
".",
"get_value",
"(",
")",
"# Security currency",
"sec_cur",
"=",
"self",
".",
"get_currency",
"(",
")",
"#base_cur ... | Calculates the value of security holdings in base currency | [
"Calculates",
"the",
"value",
"of",
"security",
"holdings",
"in",
"base",
"currency"
] | python | train |
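The conversion rule above reduces to "amount times latest rate" whenever the security's currency differs from the base. A minimal sketch of that rule, with the rate lookup stubbed out by a dict of assumed rates:

```python
from decimal import Decimal

RATES_TO_BASE = {"EUR": Decimal("1.08"), "USD": Decimal("1")}  # assumed rates

def value_in_base(amount: Decimal, currency: str, base: str = "USD") -> Decimal:
    if currency == base:
        return amount          # short-circuit, as in the record above
    return amount * RATES_TO_BASE[currency]

print(value_in_base(Decimal("100"), "EUR"))  # Decimal('108.00')
```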
thebjorn/pydeps | pydeps/dot.py | https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/dot.py#L57-L67 | def dot(src, **kw):
"""Execute the dot command to create an svg output.
"""
cmd = "dot -T%s" % kw.pop('T', 'svg')
for k, v in list(kw.items()):
if v is True:
cmd += " -%s" % k
else:
cmd += " -%s%s" % (k, v)
return pipe(cmd, to_bytes(src)) | [
"def",
"dot",
"(",
"src",
",",
"*",
"*",
"kw",
")",
":",
"cmd",
"=",
"\"dot -T%s\"",
"%",
"kw",
".",
"pop",
"(",
"'T'",
",",
"'svg'",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"kw",
".",
"items",
"(",
")",
")",
":",
"if",
"v",
"is",
"... | Execute the dot command to create an svg output. | [
"Execute",
"the",
"dot",
"command",
"to",
"create",
"an",
"svg",
"output",
"."
] | python | train |
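The command assembly above is worth seeing in isolation: a boolean kwarg becomes a bare flag, any other value is concatenated directly after the flag name, and `T` is popped for the output format. A re-implementation of just that string-building step:

```python
def build_dot_cmd(**kw):
    # Mirrors dot()'s flag assembly without executing anything.
    cmd = "dot -T%s" % kw.pop("T", "svg")
    for k, v in list(kw.items()):
        cmd += " -%s" % k if v is True else " -%s%s" % (k, v)
    return cmd

print(build_dot_cmd())                       # dot -Tsvg
print(build_dot_cmd(T="png", o="out.png"))   # dot -Tpng -oout.png
print(build_dot_cmd(x=True))                 # dot -Tsvg -x
```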
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/servicebusmanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicebusmanagementservice.py#L93-L103 | def list_namespaces(self):
'''
List the service bus namespaces defined on the account.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces/', None),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_namespace) | [
"def",
"list_namespaces",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_perform_get",
"(",
"self",
".",
"_get_path",
"(",
"'services/serviceBus/Namespaces/'",
",",
"None",
")",
",",
"None",
")",
"return",
"_MinidomXmlToObject",
".",
"convert_response_to_f... | List the service bus namespaces defined on the account. | [
"List",
"the",
"service",
"bus",
"namespaces",
"defined",
"on",
"the",
"account",
"."
] | python | test |
erdc/RAPIDpy | RAPIDpy/postprocess/goodness_of_fit.py | https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/postprocess/goodness_of_fit.py#L158-L175 | def KGE(s, o):
"""
Kling-Gupta Efficiency
input:
s: simulated
o: observed
output:
kge: Kling-Gupta Efficiency
cc: correlation
alpha: ratio of the standard deviation
beta: ratio of the mean
"""
# s,o = filter_nan(s, o)
cc = correlation(s, o)
alpha = np.std(s)/np.std(o)
beta = np.sum(s)/np.sum(o)
kge = 1 - np.sqrt((cc-1)**2 + (alpha-1)**2 + (beta-1)**2)
return kge, cc, alpha, beta | [
"def",
"KGE",
"(",
"s",
",",
"o",
")",
":",
"# s,o = filter_nan(s, o)",
"cc",
"=",
"correlation",
"(",
"s",
",",
"o",
")",
"alpha",
"=",
"np",
".",
"std",
"(",
"s",
")",
"/",
"np",
".",
"std",
"(",
"o",
")",
"beta",
"=",
"np",
".",
"sum",
"("... | Kling-Gupta Efficiency
input:
s: simulated
o: observed
output:
kge: Kling-Gupta Efficiency
cc: correlation
alpha: ratio of the standard deviation
beta: ratio of the mean | [
"Kling",
"-",
"Gupta",
"Efficiency",
"input",
":",
"s",
":",
"simulated",
"o",
":",
"observed",
"output",
":",
"kge",
":",
"Kling",
"-",
"Gupta",
"Efficiency",
"cc",
":",
"correlation",
"alpha",
":",
"ratio",
"of",
"the",
"standard",
"deviation",
"beta",
... | python | train |
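A short worked example of the KGE decomposition above; `np.corrcoef` stands in for the module's `correlation()` helper:

```python
import numpy as np

s = np.array([1.1, 1.9, 3.2, 3.9])   # simulated
o = np.array([1.0, 2.0, 3.0, 4.0])   # observed

cc = np.corrcoef(s, o)[0, 1]
alpha = np.std(s) / np.std(o)
beta = np.sum(s) / np.sum(o)
kge = 1 - np.sqrt((cc - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2)

print(round(kge, 3))   # close to 1.0 for a near-perfect simulation
```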
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1610-L1667 | def change_svc_modattr(self, service, value):
"""Change service modified attributes
Format of the line that triggers function call::
CHANGE_SVC_MODATTR;<host_name>;<service_description>;<value>
For boolean attributes, toggles the service attribute state (enable/disable)
For a non-boolean attribute, only indicates that the corresponding attribute is to be saved
in the retention.
Value can be:
MODATTR_NONE 0
MODATTR_NOTIFICATIONS_ENABLED 1
MODATTR_ACTIVE_CHECKS_ENABLED 2
MODATTR_PASSIVE_CHECKS_ENABLED 4
MODATTR_EVENT_HANDLER_ENABLED 8
MODATTR_FLAP_DETECTION_ENABLED 16
MODATTR_PERFORMANCE_DATA_ENABLED 64
MODATTR_EVENT_HANDLER_COMMAND 256
MODATTR_CHECK_COMMAND 512
MODATTR_NORMAL_CHECK_INTERVAL 1024
MODATTR_RETRY_CHECK_INTERVAL 2048
MODATTR_MAX_CHECK_ATTEMPTS 4096
MODATTR_FRESHNESS_CHECKS_ENABLED 8192
MODATTR_CHECK_TIMEPERIOD 16384
MODATTR_CUSTOM_VARIABLE 32768
MODATTR_NOTIFICATION_TIMEPERIOD 65536
:param service: service to edit
:type service: alignak.objects.service.Service
:param value: new value to set / unset
:type value: str
:return: None
"""
# todo: deprecate this
# We need to change each of the needed attributes.
previous_value = service.modified_attributes
changes = int(value)
# For all boolean and non boolean attributes
for modattr in ["MODATTR_NOTIFICATIONS_ENABLED", "MODATTR_ACTIVE_CHECKS_ENABLED",
"MODATTR_PASSIVE_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_ENABLED",
"MODATTR_FLAP_DETECTION_ENABLED", "MODATTR_PERFORMANCE_DATA_ENABLED",
"MODATTR_FRESHNESS_CHECKS_ENABLED",
"MODATTR_EVENT_HANDLER_COMMAND", "MODATTR_CHECK_COMMAND",
"MODATTR_NORMAL_CHECK_INTERVAL", "MODATTR_RETRY_CHECK_INTERVAL",
"MODATTR_MAX_CHECK_ATTEMPTS", "MODATTR_FRESHNESS_CHECKS_ENABLED",
"MODATTR_CHECK_TIMEPERIOD", "MODATTR_CUSTOM_VARIABLE",
"MODATTR_NOTIFICATION_TIMEPERIOD"]:
if changes & DICT_MODATTR[modattr].value:
# Toggle the concerned service attribute
setattr(service, DICT_MODATTR[modattr].attribute, not
getattr(service, DICT_MODATTR[modattr].attribute))
service.modified_attributes = previous_value ^ changes
# And we need to push the information to the scheduler.
self.send_an_element(service.get_update_status_brok()) | [
"def",
"change_svc_modattr",
"(",
"self",
",",
"service",
",",
"value",
")",
":",
"# todo: deprecate this",
"# We need to change each of the needed attributes.",
"previous_value",
"=",
"service",
".",
"modified_attributes",
"changes",
"=",
"int",
"(",
"value",
")",
"# F... | Change service modified attributes
Format of the line that triggers function call::
CHANGE_SVC_MODATTR;<host_name>;<service_description>;<value>
For boolean attributes, toggles the service attribute state (enable/disable)
For non boolean attribute, only indicates that the corresponding attribute is to be saved
in the retention.
Value can be:
MODATTR_NONE 0
MODATTR_NOTIFICATIONS_ENABLED 1
MODATTR_ACTIVE_CHECKS_ENABLED 2
MODATTR_PASSIVE_CHECKS_ENABLED 4
MODATTR_EVENT_HANDLER_ENABLED 8
MODATTR_FLAP_DETECTION_ENABLED 16
MODATTR_PERFORMANCE_DATA_ENABLED 64
MODATTR_EVENT_HANDLER_COMMAND 256
MODATTR_CHECK_COMMAND 512
MODATTR_NORMAL_CHECK_INTERVAL 1024
MODATTR_RETRY_CHECK_INTERVAL 2048
MODATTR_MAX_CHECK_ATTEMPTS 4096
MODATTR_FRESHNESS_CHECKS_ENABLED 8192
MODATTR_CHECK_TIMEPERIOD 16384
MODATTR_CUSTOM_VARIABLE 32768
MODATTR_NOTIFICATION_TIMEPERIOD 65536
:param service: service to edit
:type service: alignak.objects.service.Service
:param value: new value to set / unset
:type value: str
:return: None | [
"Change",
"service",
"modified",
"attributes",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
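The bit arithmetic above is the crux: each set bit in `value` toggles one boolean attribute, and the stored mask is updated with XOR. A self-contained sketch using two of the flag values from the docstring, with a bare class standing in for the real Service object:

```python
MODATTR_NOTIFICATIONS_ENABLED = 1
MODATTR_ACTIVE_CHECKS_ENABLED = 2

class Service:
    notifications_enabled = True
    active_checks_enabled = True
    modified_attributes = 0

svc = Service()
changes = MODATTR_NOTIFICATIONS_ENABLED | MODATTR_ACTIVE_CHECKS_ENABLED  # 3

if changes & MODATTR_NOTIFICATIONS_ENABLED:
    svc.notifications_enabled = not svc.notifications_enabled
if changes & MODATTR_ACTIVE_CHECKS_ENABLED:
    svc.active_checks_enabled = not svc.active_checks_enabled
svc.modified_attributes ^= changes    # same as previous_value ^ changes

print(svc.notifications_enabled, svc.active_checks_enabled,
      svc.modified_attributes)        # False False 3
```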
axialmarket/fsq | fsq/enqueue.py | https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/enqueue.py#L97-L193 | def venqueue(trg_queue, item_f, args, user=None, group=None, mode=None):
'''Enqueue the contents of a file, or file-like object, file-descriptor or
the contents of a file at an address (e.g. '/my/file') queue with
an argument list, venqueue is to enqueue what vprintf is to printf
If entropy is passed in, failure on duplicates is raised to the caller,
if entropy is not passed in, venqueue will increment entropy until it
can create the queue item.
'''
# setup defaults
trg_fd = name = None
user = _c.FSQ_ITEM_USER if user is None else user
group = _c.FSQ_ITEM_GROUP if group is None else group
mode = _c.FSQ_ITEM_MODE if mode is None else mode
now = fmt_time(datetime.datetime.now(), _c.FSQ_TIMEFMT, _c.FSQ_CHARSET)
pid = coerce_unicode(os.getpid(), _c.FSQ_CHARSET)
host = coerce_unicode(_HOSTNAME, _c.FSQ_CHARSET)
tries = u'0'
entropy = _mkentropy(pid, now, host)
# open source file
try:
src_file = rationalize_file(item_f, _c.FSQ_CHARSET)
except (OSError, IOError, ), e:
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
try:
real_file = True if hasattr(src_file, 'fileno') else False
# get low, so we can use some handy options; man 2 open
try:
item_name = construct(( now, entropy, pid, host,
tries, ) + tuple(args))
tmp_name = os.path.join(fsq_path.tmp(trg_queue), item_name)
trg_fd = os.open(tmp_name, os.O_WRONLY|os.O_CREAT|os.O_EXCL, mode)
except (OSError, IOError, ), e:
if isinstance(e, FSQError):
raise e
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
try:
if user is not None or group is not None:
# set user/group ownership for file; man 2 fchown
os.fchown(trg_fd, *uid_gid(user, group, fd=trg_fd))
with closing(os.fdopen(trg_fd, 'wb', 1)) as trg_file:
# i/o time ... assume line-buffered
while True:
if real_file:
reads, dis, card = select.select([src_file], [], [])
try:
msg = os.read(reads[0].fileno(), 2048)
if 0 == len(msg):
break
except (OSError, IOError, ), e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN,):
continue
raise e
trg_file.write(msg)
else:
line = src_file.readline()
if not line:
break
trg_file.write(line)
# flush buffers, and force write to disk pre mv.
trg_file.flush()
os.fsync(trg_file.fileno())
# hard-link into queue, unlink tmp, failure case here leaves
# cruft in tmp, but no race condition into queue
os.link(tmp_name, os.path.join(fsq_path.item(trg_queue,
item_name)))
os.unlink(tmp_name)
# return the queue item id (filename)
return item_name
except Exception, e:
try:
os.close(trg_fd)
except (OSError, IOError, ), err:
if err.errno != errno.EBADF:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
try:
if tmp_name is not None:
os.unlink(tmp_name)
except (OSError, IOError, ), err:
if err.errno != errno.ENOENT:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
try:
if name is not None:
os.unlink(name)
except OSError, err:
if err.errno != errno.ENOENT:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
if (isinstance(e, OSError) or isinstance(e, IOError)) and\
not isinstance(e, FSQError):
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
raise e
finally:
src_file.close() | [
"def",
"venqueue",
"(",
"trg_queue",
",",
"item_f",
",",
"args",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"mode",
"=",
"None",
")",
":",
"# setup defaults",
"trg_fd",
"=",
"name",
"=",
"None",
"user",
"=",
"_c",
".",
"FSQ_ITEM_USER",
... | Enqueue the contents of a file, or file-like object, file-descriptor or
the contents of a file at an address (e.g. '/my/file') queue with
an argument list, venqueue is to enqueue what vprintf is to printf
If entropy is passed in, failure on duplicates is raised to the caller,
if entropy is not passed in, venqueue will increment entropy until it
can create the queue item. | [
"Enqueue",
"the",
"contents",
"of",
"a",
"file",
"or",
"file",
"-",
"like",
"object",
"file",
"-",
"descriptor",
"or",
"the",
"contents",
"of",
"a",
"file",
"at",
"an",
"address",
"(",
"e",
".",
"g",
".",
"/",
"my",
"/",
"file",
")",
"queue",
"with... | python | train |
inveniosoftware/invenio-access | invenio_access/loaders.py | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/loaders.py#L15-L29 | def load_permissions_on_identity_loaded(sender, identity):
"""Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users get in addition the **authenticated_user** Need.
"""
identity.provides.add(
any_user
)
# if the user is not anonymous
if current_user.is_authenticated:
# Add the need provided to authenticated users
identity.provides.add(
authenticated_user
) | [
"def",
"load_permissions_on_identity_loaded",
"(",
"sender",
",",
"identity",
")",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"any_user",
")",
"# if the user is not anonymous",
"if",
"current_user",
".",
"is_authenticated",
":",
"# Add the need provided to authent... | Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users get in addition the **authenticated_user** Need. | [
"Add",
"system",
"roles",
"Needs",
"to",
"users",
"identities",
"."
] | python | train |
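The receiver above is a Flask-Principal `identity_loaded` handler. A minimal sketch of wiring such a receiver into an app; `RoleNeed` stands in here for invenio's system-role needs, which this snippet does not import:

```python
from flask import Flask
from flask_principal import Principal, identity_loaded, RoleNeed

app = Flask(__name__)
Principal(app)

any_user = RoleNeed("any_user")
authenticated_user = RoleNeed("authenticated_user")

@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
    identity.provides.add(any_user)
    # a real handler would check current_user.is_authenticated here
    identity.provides.add(authenticated_user)
```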
callowayproject/django-categories | categories/templatetags/category_tags.py | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L153-L190 | def display_drilldown_as_ul(category, using='categories.Category'):
"""
Render the category with ancestors and children using the
``categories/ul_tree.html`` template.
Example::
{% display_drilldown_as_ul "/Grandparent/Parent" %}
or ::
{% display_drilldown_as_ul category_obj %}
Returns::
<ul>
<li><a href="/categories/">Top</a>
<ul>
<li><a href="/categories/grandparent/">Grandparent</a>
<ul>
<li><a href="/categories/grandparent/parent/">Parent</a>
<ul>
<li><a href="/categories/grandparent/parent/child1">Child1</a></li>
<li><a href="/categories/grandparent/parent/child2">Child2</a></li>
<li><a href="/categories/grandparent/parent/child3">Child3</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
"""
cat = get_category(category, using)
if cat is None:
return {'category': cat, 'path': []}
else:
return {'category': cat, 'path': drilldown_tree_for_node(cat)} | [
"def",
"display_drilldown_as_ul",
"(",
"category",
",",
"using",
"=",
"'categories.Category'",
")",
":",
"cat",
"=",
"get_category",
"(",
"category",
",",
"using",
")",
"if",
"cat",
"is",
"None",
":",
"return",
"{",
"'category'",
":",
"cat",
",",
"'path'",
... | Render the category with ancestors and children using the
``categories/ul_tree.html`` template.
Example::
{% display_drilldown_as_ul "/Grandparent/Parent" %}
or ::
{% display_drilldown_as_ul category_obj %}
Returns::
<ul>
<li><a href="/categories/">Top</a>
<ul>
<li><a href="/categories/grandparent/">Grandparent</a>
<ul>
<li><a href="/categories/grandparent/parent/">Parent</a>
<ul>
<li><a href="/categories/grandparent/parent/child1">Child1</a></li>
<li><a href="/categories/grandparent/parent/child2">Child2</a></li>
<li><a href="/categories/grandparent/parent/child3">Child3</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul> | [
"Render",
"the",
"category",
"with",
"ancestors",
"and",
"children",
"using",
"the",
"categories",
"/",
"ul_tree",
".",
"html",
"template",
"."
] | python | train |
bartTC/django-wakawaka | wakawaka/forms.py | https://github.com/bartTC/django-wakawaka/blob/95daff9703a1de07d3393e4b2145bcb903f80e72/wakawaka/forms.py#L62-L131 | def delete_wiki(self, request, page, rev):
"""
Deletes the page with all revisions or the revision, based on the
user's choice.
Returns a HttpResponseRedirect.
"""
# Delete the page
if (
self.cleaned_data.get('delete') == 'page'
and request.user.has_perm('wakawaka.delete_revision')
and request.user.has_perm('wakawaka.delete_wikipage')
):
self._delete_page(page)
messages.success(
request, ugettext('The page %s was deleted' % page.slug)
)
return HttpResponseRedirect(reverse('wakawaka_index'))
# Revision handling
if self.cleaned_data.get('delete') == 'rev':
revision_length = len(page.revisions.all())
# Delete the revision if there are more than 1 and the user has permission
if revision_length > 1 and request.user.has_perm(
'wakawaka.delete_revision'
):
self._delete_revision(rev)
messages.success(
request,
ugettext('The revision for %s was deleted' % page.slug),
)
return HttpResponseRedirect(
reverse('wakawaka_page', kwargs={'slug': page.slug})
)
# Do not allow deleting the revision, if it's the only one and the user
# has no permission to delete the page.
if revision_length <= 1 and not request.user.has_perm(
'wakawaka.delete_wikipage'
):
messages.error(
request,
ugettext(
'You cannot delete this revision for %s because it\'s the '
'only one and you have no permission to delete the whole page.'
% page.slug
),
)
return HttpResponseRedirect(
reverse('wakawaka_page', kwargs={'slug': page.slug})
)
# Delete the page and the revision if the user has both permissions
if (
revision_length <= 1
and request.user.has_perm('wakawaka.delete_revision')
and request.user.has_perm('wakawaka.delete_wikipage')
):
self._delete_page(page)
messages.success(
request,
ugettext(
'The page for %s was deleted because you deleted the only revision'
% page.slug
),
)
return HttpResponseRedirect(reverse('wakawaka_index')) | [
"def",
"delete_wiki",
"(",
"self",
",",
"request",
",",
"page",
",",
"rev",
")",
":",
"# Delete the page",
"if",
"(",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'delete'",
")",
"==",
"'page'",
"and",
"request",
".",
"user",
".",
"has_perm",
"(",
"'... | Deletes the page with all revisions or the revision, based on the
user's choice.
Returns a HttpResponseRedirect. | [
"Deletes",
"the",
"page",
"with",
"all",
"revisions",
"or",
"the",
"revision",
"based",
"on",
"the",
"users",
"choice",
"."
] | python | train |
andy-z/ged4py | ged4py/model.py | https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L393-L397 | def mother(self):
"""Parent of this individual"""
if self._mother == []:
self._mother = self.sub_tag("FAMC/WIFE")
return self._mother | [
"def",
"mother",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mother",
"==",
"[",
"]",
":",
"self",
".",
"_mother",
"=",
"self",
".",
"sub_tag",
"(",
"\"FAMC/WIFE\"",
")",
"return",
"self",
".",
"_mother"
] | Parent of this individual | [
"Parent",
"of",
"this",
"individual"
] | python | train |
saltstack/salt | salt/modules/openvswitch.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L301-L327 | def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode) | [
"def",
"port_add",
"(",
"br",
",",
"port",
",",
"may_exist",
"=",
"False",
",",
"internal",
"=",
"False",
")",
":",
"param_may_exist",
"=",
"_param_may_exist",
"(",
"may_exist",
")",
"cmd",
"=",
"'ovs-vsctl {2}add-port {0} {1}'",
".",
"format",
"(",
"br",
",... | Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080 | [
"Creates",
"on",
"bridge",
"a",
"new",
"port",
"named",
"port",
"."
] | python | train |
rosenbrockc/ci | pyci/scripts/ci.py | https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/scripts/ci.py#L137-L158 | def _check_virtualenv():
"""Makes sure that the virtualenv specified in the global settings file
actually exists.
"""
from os import waitpid
from subprocess import Popen, PIPE
penvs = Popen("source /usr/local/bin/virtualenvwrapper.sh; workon",
shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
waitpid(penvs.pid, 0)
envs = penvs.stdout.readlines()
enverr = penvs.stderr.readlines()
result = (settings.venv + '\n') in envs and len(enverr) == 0
vms("Find virtualenv: {}".format(' '.join(envs).replace('\n', '')))
vms("Find virtualenv | stderr: {}".format(' '.join(enverr)))
if not result:
info(envs)
err("The virtualenv '{}' does not exist; can't use CI server.".format(settings.venv))
if len(enverr) > 0:
map(err, enverr)
return result | [
"def",
"_check_virtualenv",
"(",
")",
":",
"from",
"os",
"import",
"waitpid",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"penvs",
"=",
"Popen",
"(",
"\"source /usr/local/bin/virtualenvwrapper.sh; workon\"",
",",
"shell",
"=",
"True",
",",
"executable",
... | Makes sure that the virtualenv specified in the global settings file
actually exists. | [
"Makes",
"sure",
"that",
"the",
"virtualenv",
"specified",
"in",
"the",
"global",
"settings",
"file",
"actually",
"exists",
"."
] | python | train |
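The check above shells out to `workon` through bash with virtualenvwrapper sourced, then tests membership. A condensed version of the same pattern; `communicate()` replaces the waitpid/readlines dance, and the wrapper path and env name are assumptions for the demo:

```python
from subprocess import Popen, PIPE

def virtualenv_exists(name, wrapper="/usr/local/bin/virtualenvwrapper.sh"):
    proc = Popen("source %s; workon" % wrapper,
                 shell=True, executable="/bin/bash",
                 stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()          # waits and drains both pipes
    envs = out.decode().splitlines()
    return name in envs and not err

print(virtualenv_exists("ci"))
```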
HumanBrainProject/hbp-service-client | hbp_service_client/storage_service/api.py | https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/api.py#L175-L225 | def get_entity_by_query(self, uuid=None, path=None, metadata=None):
'''Retrieve entity by query param which can be either uuid/path/metadata.
Args:
uuid (str): The UUID of the requested entity.
path (str): The path of the requested entity.
metadata (dict): A dictionary of one metadata {key: value} of the
requested entity.
Returns:
The details of the entity, if found::
{
u'content_type': u'plain/text',
u'created_by': u'303447',
u'created_on': u'2017-03-13T10:52:23.275087Z',
u'description': u'',
u'entity_type': u'file',
u'modified_by': u'303447',
u'modified_on': u'2017-03-13T10:52:23.275126Z',
u'name': u'myfile',
u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
u'uuid': u'e2c25c1b-f6a9-4cf6-b8d2-271e628a9a56'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not (uuid or path or metadata):
raise StorageArgumentException('No parameter given for the query.')
if uuid and not is_valid_uuid(uuid):
raise StorageArgumentException(
'Invalid UUID for uuid: {0}'.format(uuid))
params = locals().copy()
if metadata:
if not isinstance(metadata, dict):
raise StorageArgumentException('The metadata needs to be provided'
' as a dictionary.')
key, value = next(iter(metadata.items()))
params[key] = value
del params['metadata']
params = self._prep_params(params)
return self._authenticated_request \
.to_endpoint('entity/') \
.with_params(params) \
.return_body() \
.get() | [
"def",
"get_entity_by_query",
"(",
"self",
",",
"uuid",
"=",
"None",
",",
"path",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"if",
"not",
"(",
"uuid",
"or",
"path",
"or",
"metadata",
")",
":",
"raise",
"StorageArgumentException",
"(",
"'No para... | Retrieve entity by query param which can be either uuid/path/metadata.
Args:
uuid (str): The UUID of the requested entity.
path (str): The path of the requested entity.
metadata (dict): A dictionary of one metadata {key: value} of the
requested entity.
Returns:
The details of the entity, if found::
{
u'content_type': u'plain/text',
u'created_by': u'303447',
u'created_on': u'2017-03-13T10:52:23.275087Z',
u'description': u'',
u'entity_type': u'file',
u'modified_by': u'303447',
u'modified_on': u'2017-03-13T10:52:23.275126Z',
u'name': u'myfile',
u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
u'uuid': u'e2c25c1b-f6a9-4cf6-b8d2-271e628a9a56'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | [
"Retrieve",
"entity",
"by",
"query",
"param",
"which",
"can",
"be",
"either",
"uuid",
"/",
"path",
"/",
"metadata",
"."
] | python | test |
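The validation and metadata flattening above are easy to miss in the request plumbing: at least one selector is required, and a one-entry `{key: value}` metadata dict is unpacked into a plain query parameter. A distilled, dependency-free re-implementation of just that parameter handling (not the real client class):

```python
def build_params(uuid=None, path=None, metadata=None):
    # Mirrors get_entity_by_query's argument checks and metadata unpacking.
    if not (uuid or path or metadata):
        raise ValueError("No parameter given for the query.")
    params = {"uuid": uuid, "path": path}
    if metadata:
        key, value = next(iter(metadata.items()))
        params[key] = value
    return {k: v for k, v in params.items() if v is not None}

print(build_params(metadata={"species": "mouse"}))  # {'species': 'mouse'}
print(build_params(path="/my_project/myfile"))      # {'path': '/my_project/myfile'}
```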