| repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) |
|---|---|---|---|---|---|---|---|---|
reingart/gui2py | gui/doc/ext/autosummary/__init__.py | https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/doc/ext/autosummary/__init__.py#L132-L165 | def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is
another Python object (e.g. a module or a class) to which *obj*
belongs.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
# Get the correct documenter class for *obj*
classes = [cls for cls in AutoDirective._registry.values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter | [
"def",
"get_documenter",
"(",
"obj",
",",
"parent",
")",
":",
"from",
"sphinx",
".",
"ext",
".",
"autodoc",
"import",
"AutoDirective",
",",
"DataDocumenter",
",",
"ModuleDocumenter",
"if",
"inspect",
".",
"ismodule",
"(",
"obj",
")",
":",
"# ModuleDocumenter.c... | Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is
another Python object (e.g. a module or a class) to which *obj*
belongs. | [
"Get",
"an",
"autodoc",
".",
"Documenter",
"class",
"suitable",
"for",
"documenting",
"the",
"given",
"object",
"."
] | python | test |
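A toy sketch of the selection logic in the record above, with a hypothetical registry in place of Sphinx's `AutoDirective._registry` (names and priorities are invented for illustration): candidates able to document the object are sorted by priority and the highest wins, with a data-documenter fallback.

```python
# Hypothetical registry: name -> (priority, can_document predicate).
registry = {
    "data": (5, lambda obj: True),   # fallback: documents anything
    "function": (30, callable),      # only documents callables
}

def pick_documenter(obj):
    classes = [(prio, name) for name, (prio, can) in registry.items() if can(obj)]
    if classes:
        classes.sort()               # ascending priority
        return classes[-1][1]        # highest priority wins
    return "data"                    # mirrors the DataDocumenter fallback

print(pick_documenter(len))  # -> 'function'
print(pick_documenter(42))   # -> 'data'
```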
bhmm/bhmm | bhmm/hidden/api.py | https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hidden/api.py#L277-L302 | def sample_path(alpha, A, pobs, T=None):
""" Sample the hidden pathway S from the conditional distribution P ( S | Parameters, Observations )
Parameters
----------
alpha : ndarray((T,N), dtype = float)
alpha[t,i] is the ith forward coefficient of time t.
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
T : int
number of time steps
Returns
-------
S : numpy.array shape (T)
hidden state path sampled from the conditional distribution
"""
if __impl__ == __IMPL_PYTHON__:
return ip.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: '+str(__impl__)) | [
"def",
"sample_path",
"(",
"alpha",
",",
"A",
",",
"pobs",
",",
"T",
"=",
"None",
")",
":",
"if",
"__impl__",
"==",
"__IMPL_PYTHON__",
":",
"return",
"ip",
".",
"sample_path",
"(",
"alpha",
",",
"A",
",",
"pobs",
",",
"T",
"=",
"T",
",",
"dtype",
... | Sample the hidden pathway S from the conditional distribution P ( S | Parameters, Observations )
Parameters
----------
alpha : ndarray((T,N), dtype = float)
alpha[t,i] is the ith forward coefficient of time t.
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
T : int
number of time steps
Returns
-------
S : numpy.array shape (T)
hidden state path sampled from the conditional distribution | [
"Sample",
"the",
"hidden",
"pathway",
"S",
"from",
"the",
"conditional",
"distribution",
"P",
"(",
"S",
"|",
"Parameters",
"Observations",
")"
] | python | train |
threeML/astromodels | astromodels/core/sky_direction.py | https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/sky_direction.py#L216-L229 | def parameters(self):
"""
Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters
"""
if self._coord_type == 'galactic':
return collections.OrderedDict((('l', self.l), ('b', self.b)))
else:
return collections.OrderedDict((('ra', self.ra), ('dec', self.dec))) | [
"def",
"parameters",
"(",
"self",
")",
":",
"if",
"self",
".",
"_coord_type",
"==",
"'galactic'",
":",
"return",
"collections",
".",
"OrderedDict",
"(",
"(",
"(",
"'l'",
",",
"self",
".",
"l",
")",
",",
"(",
"'b'",
",",
"self",
".",
"b",
")",
")",
... | Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters | [
"Get",
"the",
"dictionary",
"of",
"parameters",
"(",
"either",
"ra",
"dec",
"or",
"l",
"b",
")"
] | python | train |
resync/resync | resync/resource_set.py | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/resource_set.py#L30-L36 | def add(self, resource, replace=False):
"""Add just a single resource."""
uri = resource.uri
if (uri in self and not replace):
raise ResourceSetDupeError(
"Attempt to add resource already in this set")
self[uri] = resource | [
"def",
"add",
"(",
"self",
",",
"resource",
",",
"replace",
"=",
"False",
")",
":",
"uri",
"=",
"resource",
".",
"uri",
"if",
"(",
"uri",
"in",
"self",
"and",
"not",
"replace",
")",
":",
"raise",
"ResourceSetDupeError",
"(",
"\"Attempt to add resource alre... | Add just a single resource. | [
"Add",
"just",
"a",
"single",
"resource",
"."
] | python | train |
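A self-contained sketch of how the duplicate guard in the record above behaves; `Resource` and `ResourceSetDupeError` here are minimal stand-ins for resync's real classes.

```python
class ResourceSetDupeError(Exception):
    pass

class Resource:
    def __init__(self, uri):
        self.uri = uri

class ResourceSet(dict):
    """Dict keyed by URI that refuses silent overwrites, as in the record above."""
    def add(self, resource, replace=False):
        if resource.uri in self and not replace:
            raise ResourceSetDupeError("Attempt to add resource already in this set")
        self[resource.uri] = resource

rs = ResourceSet()
rs.add(Resource("http://example.org/a"))
rs.add(Resource("http://example.org/a"), replace=True)   # allowed: explicit replace
try:
    rs.add(Resource("http://example.org/a"))              # raises: duplicate URI
except ResourceSetDupeError as e:
    print(e)
```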
MLAB-project/pymlab | src/pymlab/sensors/gpio.py | https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/src/pymlab/sensors/gpio.py#L236-L240 | def set_pullups(self, port0 = 0x00, port1 = 0x00):
'Sets INPUT (1) or OUTPUT (0) direction on pins. Inversion setting is applicable for input pins 1-inverted 0-noninverted input polarity.'
self.bus.write_byte_data(self.address, self.PULLUP_PORT0, port0)
self.bus.write_byte_data(self.address, self.PULLUP_PORT1, port1)
return | [
"def",
"set_pullups",
"(",
"self",
",",
"port0",
"=",
"0x00",
",",
"port1",
"=",
"0x00",
")",
":",
"self",
".",
"bus",
".",
"write_byte_data",
"(",
"self",
".",
"address",
",",
"self",
".",
"PULLUP_PORT0",
",",
"port0",
")",
"self",
".",
"bus",
".",
... | Sets INPUT (1) or OUTPUT (0) direction on pins. Inversion setting is applicable for input pins 1-inverted 0-noninverted input polarity. | [
"Sets",
"INPUT",
"(",
"1",
")",
"or",
"OUTPUT",
"(",
"0",
")",
"direction",
"on",
"pins",
".",
"Inversion",
"setting",
"is",
"applicable",
"for",
"input",
"pins",
"1",
"-",
"inverted",
"0",
"-",
"noninverted",
"input",
"polarity",
"."
] | python | train |
albu/albumentations | albumentations/augmentations/functional.py | https://github.com/albu/albumentations/blob/b31393cd6126516d37a84e44c879bd92c68ffc93/albumentations/augmentations/functional.py#L949-L957 | def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width) | [
"def",
"crop_bbox_by_coords",
"(",
"bbox",
",",
"crop_coords",
",",
"crop_height",
",",
"crop_width",
",",
"rows",
",",
"cols",
")",
":",
"bbox",
"=",
"denormalize_bbox",
"(",
"bbox",
",",
"rows",
",",
"cols",
")",
"x_min",
",",
"y_min",
",",
"x_max",
",... | Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop. | [
"Crop",
"a",
"bounding",
"box",
"using",
"the",
"provided",
"coordinates",
"of",
"bottom",
"-",
"left",
"and",
"top",
"-",
"right",
"corners",
"in",
"pixels",
"and",
"the",
"required",
"height",
"and",
"width",
"of",
"the",
"crop",
"."
] | python | train |
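To see the arithmetic of the record above end to end, here is a runnable sketch; `normalize_bbox`/`denormalize_bbox` are simplified stand-ins (albumentations' real helpers also carry extra bbox fields), and bboxes are `(x_min, y_min, x_max, y_max)` fractions of the image size.

```python
def denormalize_bbox(bbox, rows, cols):
    x_min, y_min, x_max, y_max = bbox
    return x_min * cols, y_min * rows, x_max * cols, y_max * rows

def normalize_bbox(bbox, rows, cols):
    x_min, y_min, x_max, y_max = bbox
    return x_min / cols, y_min / rows, x_max / cols, y_max / rows

def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
    # To pixel space, shift by the crop origin, renormalize to the crop size.
    x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)
    x1, y1, x2, y2 = crop_coords
    cropped = (x_min - x1, y_min - y1, x_max - x1, y_max - y1)
    return normalize_bbox(cropped, crop_height, crop_width)

# Central 50x50 crop of a 100x100 image:
print(crop_bbox_by_coords((0.3, 0.3, 0.6, 0.6), (25, 25, 75, 75), 50, 50, 100, 100))
# -> (0.1, 0.1, 0.7, 0.7)
```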
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py#L139-L148 | def find_message(self):
'''find the next valid message'''
while True:
self.msg = self.mlog.recv_match(condition=args.condition)
if self.msg is not None and self.msg.get_type() != 'BAD_DATA':
break
if self.mlog.f.tell() > self.filesize - 10:
self.paused = True
break
self.last_timestamp = getattr(self.msg, '_timestamp') | [
"def",
"find_message",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"msg",
"=",
"self",
".",
"mlog",
".",
"recv_match",
"(",
"condition",
"=",
"args",
".",
"condition",
")",
"if",
"self",
".",
"msg",
"is",
"not",
"None",
"and",
"self",
... | find the next valid message | [
"find",
"the",
"next",
"valid",
"message"
] | python | train |
rosenbrockc/fortpy | fortpy/interop/ftypes.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/ftypes.py#L683-L690 | def _py_outvar(parameter, lparams, tab):
"""Returns the code to produce a ctypes output variable for interacting with fortran.
"""
if ("out" in parameter.direction and parameter.D > 0 and ":" in parameter.dimension and
("allocatable" in parameter.modifiers or "pointer" in parameter.modifiers)):
lparams.append("byref({}_o)".format(parameter.lname))
blank = True if parameter.direction == "(inout)" else False
return ("{0}_o = POINTER({1})()".format(parameter.lname, _py_ctype(parameter)), blank) | [
"def",
"_py_outvar",
"(",
"parameter",
",",
"lparams",
",",
"tab",
")",
":",
"if",
"(",
"\"out\"",
"in",
"parameter",
".",
"direction",
"and",
"parameter",
".",
"D",
">",
"0",
"and",
"\":\"",
"in",
"parameter",
".",
"dimension",
"and",
"(",
"\"allocatabl... | Returns the code to produce a ctypes output variable for interacting with fortran. | [
"Returns",
"the",
"code",
"to",
"produce",
"a",
"ctypes",
"output",
"variable",
"for",
"interacting",
"with",
"fortran",
"."
] | python | train |
tanghaibao/goatools | goatools/rpt/rpt_lev_depth.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/rpt_lev_depth.py#L87-L90 | def write_summary_cnts_all(self):
"""Write summary of level and depth counts for all active GO Terms."""
cnts = self.get_cnts_levels_depths_recs(set(self.obo.values()))
self._write_summary_cnts(cnts) | [
"def",
"write_summary_cnts_all",
"(",
"self",
")",
":",
"cnts",
"=",
"self",
".",
"get_cnts_levels_depths_recs",
"(",
"set",
"(",
"self",
".",
"obo",
".",
"values",
"(",
")",
")",
")",
"self",
".",
"_write_summary_cnts",
"(",
"cnts",
")"
] | Write summary of level and depth counts for all active GO Terms. | [
"Write",
"summary",
"of",
"level",
"and",
"depth",
"counts",
"for",
"all",
"active",
"GO",
"Terms",
"."
] | python | train |
openpermissions/koi | koi/commands.py | https://github.com/openpermissions/koi/blob/d721f8e1dfa8f07ad265d9dec32e8aaf80a9f281/koi/commands.py#L202-L214 | def _get_existing_conf(config):
"""
Read existing local.conf and strip out service id and client secret
:param config: Location of config files
:param lines of existing config (excluding service id and client secret)
"""
try:
with open(os.path.join(config, 'local.conf'), 'r') as f:
lines = [line for line in f.readlines()
if not (line.startswith('service_id') or line.startswith('client_secret'))]
except IOError:
lines = []
return lines | [
"def",
"_get_existing_conf",
"(",
"config",
")",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config",
",",
"'local.conf'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"f",
... | Read existing local.conf and strip out service id and client secret
:param config: Location of config files
:return: lines of existing config (excluding service id and client secret) | [
"Read",
"existing",
"local",
".",
"conf",
"and",
"strip",
"out",
"service",
"id",
"and",
"client",
"secret",
":",
"param",
"config",
":",
"Location",
"of",
"config",
"files",
":",
"param",
"lines",
"of",
"existing",
"config",
"(",
"excluding",
"service",
"... | python | train |
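A runnable check of the filtering above against a throwaway config directory (file contents invented for the demo):

```python
import os
import tempfile

def _get_existing_conf(config):
    try:
        with open(os.path.join(config, 'local.conf'), 'r') as f:
            lines = [line for line in f.readlines()
                     if not (line.startswith('service_id') or
                             line.startswith('client_secret'))]
    except IOError:
        lines = []
    return lines

with tempfile.TemporaryDirectory() as d:
    with open(os.path.join(d, 'local.conf'), 'w') as f:
        f.write("host: 0.0.0.0\nservice_id: abc123\nclient_secret: shh\nport: 8080\n")
    print(_get_existing_conf(d))
# -> ['host: 0.0.0.0\n', 'port: 8080\n']
```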
senaite/senaite.core | bika/lims/exportimport/instruments/sysmex/xs/i500.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/sysmex/xs/i500.py#L31-L46 | def getForm(instrument_name, request):
"""
Since 500i and 1000i print the same results structure (https://jira.bikalabs.com/browse/LIMS-1571), this function
will be overridden by the i1000 importer to save code.
:param instrument_name: a string containing the instrument's name with the format: 'sysmex_xs_500i'
:param request: the request object
:returns: a dictionary with the request's results.
"""
d = {'infile': request.form[instrument_name + '_file'],
'fileformat': request.form[instrument_name + '_format'],
'artoapply': request.form[instrument_name + '_artoapply'],
'override': request.form[instrument_name + '_override'],
'sample': request.form.get(instrument_name + '_sample',
'requestid'),
'instrument': request.form.get(instrument_name + '_instrument', None)}
return d | [
"def",
"getForm",
"(",
"instrument_name",
",",
"request",
")",
":",
"d",
"=",
"{",
"'infile'",
":",
"request",
".",
"form",
"[",
"instrument_name",
"+",
"'_file'",
"]",
",",
"'fileformat'",
":",
"request",
".",
"form",
"[",
"instrument_name",
"+",
"'_forma... | Since 500i and 1000i print the same results structure (https://jira.bikalabs.com/browse/LIMS-1571), this function
will be overwrote on i1000 importer to save code.
:param instrument_name: a string containing the instrument's name with the format: 'sysmex_xs_500i'
:param request: the request object
:returns: a dictionary with the request's results. | [
"Since",
"500i",
"and",
"1000i",
"print",
"the",
"same",
"results",
"structure",
"(",
"https",
":",
"//",
"jira",
".",
"bikalabs",
".",
"com",
"/",
"browse",
"/",
"LIMS",
"-",
"1571",
")",
"this",
"function",
"will",
"be",
"overwrote",
"on",
"i1000",
"... | python | train |
cloudera/cm_api | python/src/cm_api/endpoints/clusters.py | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L387-L393 | def delete_host_template(self, name):
"""
Deletes a host template.
@param name: Name of the host template to delete.
@return: An ApiHostTemplate object.
"""
return host_templates.delete_host_template(self._get_resource_root(), name, self.name) | [
"def",
"delete_host_template",
"(",
"self",
",",
"name",
")",
":",
"return",
"host_templates",
".",
"delete_host_template",
"(",
"self",
".",
"_get_resource_root",
"(",
")",
",",
"name",
",",
"self",
".",
"name",
")"
] | Deletes a host template.
@param name: Name of the host template to delete.
@return: An ApiHostTemplate object. | [
"Deletes",
"a",
"host",
"template",
"."
] | python | train |
pgmpy/pgmpy | pgmpy/factors/discrete/CPD.py | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/CPD.py#L354-L464 | def reorder_parents(self, new_order, inplace=True):
"""
Returns a new cpd table according to provided order.
Parameters
----------
new_order: list
list of new ordering of variables
inplace: boolean
If inplace == True it will modify the CPD itself
otherwise new value will be returned without affecting old values
Examples
--------
Consider a CPD P(grade| diff, intel)
>>> cpd = TabularCPD('grade',3,[[0.1,0.1,0.1,0.1,0.1,0.1],
[0.1,0.1,0.1,0.1,0.1,0.1],
[0.8,0.8,0.8,0.8,0.8,0.8]],
evidence=['diff', 'intel'], evidence_card=[2,3])
>>> print(cpd)
+---------+---------+---------+---------+---------+---------+---------+
| diff | diff_0 | diff_0 | diff_0 | diff_1 | diff_1 | diff_1 |
+---------+---------+---------+---------+---------+---------+---------+
| intel | intel_0 | intel_1 | intel_2 | intel_0 | intel_1 | intel_2 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_0 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_2 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
+---------+---------+---------+---------+---------+---------+---------+
>>> cpd.values
array([[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.8, 0.8, 0.8],
[ 0.8, 0.8, 0.8]]])
>>> cpd.variables
['grade', 'diff', 'intel']
>>> cpd.cardinality
array([3, 2, 3])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
>>> cpd.reorder_parents(['intel', 'diff'])
array([[ 0.1, 0.1, 0.2, 0.2, 0.1, 0.1],
[ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[ 0.8, 0.8, 0.7, 0.7, 0.8, 0.8]])
>>> print(cpd)
+---------+---------+---------+---------+---------+---------+---------+
| intel | intel_0 | intel_0 | intel_1 | intel_1 | intel_2 | intel_2 |
+---------+---------+---------+---------+---------+---------+---------+
| diff | diff_0 | diff_1 | diff_0 | diff_1 | diff_0 | diff_1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_0 | 0.1 | 0.1 | 0.2 | 0.2 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_2 | 0.8 | 0.8 | 0.7 | 0.7 | 0.8 | 0.8 |
+---------+---------+---------+---------+---------+---------+---------+
>>> cpd.values
array([[[ 0.1, 0.1],
[ 0.2, 0.2],
[ 0.1, 0.1]],
[[ 0.1, 0.1],
[ 0.1, 0.1],
[ 0.1, 0.1]],
[[ 0.8, 0.8],
[ 0.7, 0.7],
[ 0.8, 0.8]]])
>>> cpd.variables
['grade', 'intel', 'diff']
>>> cpd.cardinality
array([3, 3, 2])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
"""
if (len(self.variables) <= 1 or (set(new_order) - set(self.variables)) or
(set(self.variables[1:]) - set(new_order))):
raise ValueError("New order either has missing or extra arguments")
else:
if new_order != self.variables[1:]:
evidence = self.variables[1:]
evidence_card = self.cardinality[1:]
card_map = dict(zip(evidence, evidence_card))
old_pos_map = dict(zip(evidence, range(len(evidence))))
trans_ord = [0] + [(old_pos_map[letter] + 1) for letter in new_order]
new_values = np.transpose(self.values, trans_ord)
if inplace:
variables = [self.variables[0]] + new_order
cardinality = [self.variable_card] + [card_map[var] for var in new_order]
super(TabularCPD, self).__init__(variables, cardinality, new_values.flatten('C'))
return self.get_values()
else:
return new_values.reshape(self.cardinality[0], np.prod([card_map[var] for var in new_order]))
else:
warn("Same ordering provided as current")
return self.get_values() | [
"def",
"reorder_parents",
"(",
"self",
",",
"new_order",
",",
"inplace",
"=",
"True",
")",
":",
"if",
"(",
"len",
"(",
"self",
".",
"variables",
")",
"<=",
"1",
"or",
"(",
"set",
"(",
"new_order",
")",
"-",
"set",
"(",
"self",
".",
"variables",
")"... | Returns a new cpd table according to provided order.
Parameters
----------
new_order: list
list of new ordering of variables
inplace: boolean
If inplace == True it will modify the CPD itself
otherwise new value will be returned without affecting old values
Examples
--------
Consider a CPD P(grade| diff, intel)
>>> cpd = TabularCPD('grade',3,[[0.1,0.1,0.1,0.1,0.1,0.1],
[0.1,0.1,0.1,0.1,0.1,0.1],
[0.8,0.8,0.8,0.8,0.8,0.8]],
evidence=['diff', 'intel'], evidence_card=[2,3])
>>> print(cpd)
+---------+---------+---------+---------+---------+---------+---------+
| diff | diff_0 | diff_0 | diff_0 | diff_1 | diff_1 | diff_1 |
+---------+---------+---------+---------+---------+---------+---------+
| intel | intel_0 | intel_1 | intel_2 | intel_0 | intel_1 | intel_2 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_0 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_2 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
+---------+---------+---------+---------+---------+---------+---------+
>>> cpd.values
array([[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.8, 0.8, 0.8],
[ 0.8, 0.8, 0.8]]])
>>> cpd.variables
['grade', 'diff', 'intel']
>>> cpd.cardinality
array([3, 2, 3])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
>>> cpd.reorder_parents(['intel', 'diff'])
array([[ 0.1, 0.1, 0.2, 0.2, 0.1, 0.1],
[ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[ 0.8, 0.8, 0.7, 0.7, 0.8, 0.8]])
>>> print(cpd)
+---------+---------+---------+---------+---------+---------+---------+
| intel | intel_0 | intel_0 | intel_1 | intel_1 | intel_2 | intel_2 |
+---------+---------+---------+---------+---------+---------+---------+
| diff | diff_0 | diff_1 | diff_0 | diff_1 | diff_0 | diff_1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_0 | 0.1 | 0.1 | 0.2 | 0.2 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_2 | 0.8 | 0.8 | 0.7 | 0.7 | 0.8 | 0.8 |
+---------+---------+---------+---------+---------+---------+---------+
>>> cpd.values
array([[[ 0.1, 0.1],
[ 0.2, 0.2],
[ 0.1, 0.1]],
[[ 0.1, 0.1],
[ 0.1, 0.1],
[ 0.1, 0.1]],
[[ 0.8, 0.8],
[ 0.7, 0.7],
[ 0.8, 0.8]]])
>>> cpd.variables
['grade', 'intel', 'diff']
>>> cpd.cardinality
array([3, 3, 2])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3 | [
"Returns",
"a",
"new",
"cpd",
"table",
"according",
"to",
"provided",
"order",
"."
] | python | train |
googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/_package.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L550-L592 | def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data) | [
"def",
"predict",
"(",
"data",
",",
"training_dir",
"=",
"None",
",",
"model_name",
"=",
"None",
",",
"model_version",
"=",
"None",
",",
"cloud",
"=",
"False",
")",
":",
"if",
"cloud",
":",
"if",
"not",
"model_version",
"or",
"not",
"model_name",
":",
... | Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame. | [
"Runs",
"prediction",
"locally",
"or",
"on",
"the",
"cloud",
"."
] | python | train |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L28586-L28607 | def on_shared_folder_change(self, global_p):
"""Triggered when a permanent (global or machine) shared folder has been
created or removed.
We don't pass shared folder parameters in this notification because
the order in which parallel notifications are delivered is not defined,
therefore it could happen that these parameters were outdated by the
time of processing this notification.
in global_p of type bool
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(global_p, bool):
raise TypeError("global_p can only be an instance of type bool")
self._call("onSharedFolderChange",
in_p=[global_p]) | [
"def",
"on_shared_folder_change",
"(",
"self",
",",
"global_p",
")",
":",
"if",
"not",
"isinstance",
"(",
"global_p",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"global_p can only be an instance of type bool\"",
")",
"self",
".",
"_call",
"(",
"\"onShared... | Triggered when a permanent (global or machine) shared folder has been
created or removed.
We don't pass shared folder parameters in this notification because
the order in which parallel notifications are delivered is not defined,
therefore it could happen that these parameters were outdated by the
time of processing this notification.
in global_p of type bool
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation. | [
"Triggered",
"when",
"a",
"permanent",
"(",
"global",
"or",
"machine",
")",
"shared",
"folder",
"has",
"been",
"created",
"or",
"removed",
".",
"We",
"don",
"t",
"pass",
"shared",
"folder",
"parameters",
"in",
"this",
"notification",
"because",
"the",
"order... | python | train |
inasafe/inasafe | safe/gui/tools/wizard/step_kw44_fields_mapping.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw44_fields_mapping.py#L117-L133 | def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
Null or empty entries will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
field_mapping = self.field_mapping_widget.get_field_mapping()
for k, v in list(field_mapping['values'].items()):
if not v:
field_mapping['values'].pop(k)
for k, v in list(field_mapping['fields'].items()):
if not v:
field_mapping['fields'].pop(k)
return field_mapping | [
"def",
"get_field_mapping",
"(",
"self",
")",
":",
"field_mapping",
"=",
"self",
".",
"field_mapping_widget",
".",
"get_field_mapping",
"(",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"field_mapping",
"[",
"'values'",
"]",
".",
"items",
"(",
")",
")",
... | Obtain metadata from current state of the widget.
Null or empty entries will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict | [
"Obtain",
"metadata",
"from",
"current",
"state",
"of",
"the",
"widget",
"."
] | python | train |
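The pruning step above is just two dictionary sweeps; a standalone sketch with made-up field data:

```python
field_mapping = {'fields': {'age': ['age_col'], 'gender': []},
                 'values': {'ratio': None, 'count': [1, 2]}}

# list(...) snapshots the items so entries can be popped while iterating.
for k, v in list(field_mapping['values'].items()):
    if not v:
        field_mapping['values'].pop(k)
for k, v in list(field_mapping['fields'].items()):
    if not v:
        field_mapping['fields'].pop(k)

print(field_mapping)
# -> {'fields': {'age': ['age_col']}, 'values': {'count': [1, 2]}}
```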
sorgerlab/indra | indra/databases/cbio_client.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L223-L258 | def get_genetic_profiles(study_id, profile_filter=None):
"""Return all the genetic profiles (data sets) for a given study.
Genetic profiles are different types of data for a given study. For
instance the study 'cellline_ccle_broad' has profiles such as
'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'
for copy number alterations, etc.
Parameters
----------
study_id : str
The ID of the cBio study.
Example: 'paad_icgc'
profile_filter : Optional[str]
A string used to filter the profiles to return.
Will be one of:
- MUTATION
- MUTATION_EXTENDED
- COPY_NUMBER_ALTERATION
- MRNA_EXPRESSION
- METHYLATION
The genetic profiles can include "mutation", "CNA", "rppa",
"methylation", etc.
Returns
-------
genetic_profiles : list[str]
A list of genetic profiles available for the given study.
"""
data = {'cmd': 'getGeneticProfiles',
'cancer_study_id': study_id}
df = send_request(**data)
res = _filter_data_frame(df, ['genetic_profile_id'],
'genetic_alteration_type', profile_filter)
genetic_profiles = list(res['genetic_profile_id'].values())
return genetic_profiles | [
"def",
"get_genetic_profiles",
"(",
"study_id",
",",
"profile_filter",
"=",
"None",
")",
":",
"data",
"=",
"{",
"'cmd'",
":",
"'getGeneticProfiles'",
",",
"'cancer_study_id'",
":",
"study_id",
"}",
"df",
"=",
"send_request",
"(",
"*",
"*",
"data",
")",
"res"... | Return all the genetic profiles (data sets) for a given study.
Genetic profiles are different types of data for a given study. For
instance the study 'cellline_ccle_broad' has profiles such as
'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'
for copy number alterations, etc.
Parameters
----------
study_id : str
The ID of the cBio study.
Example: 'paad_icgc'
profile_filter : Optional[str]
A string used to filter the profiles to return.
Will be one of:
- MUTATION
- MUTATION_EXTENDED
- COPY_NUMBER_ALTERATION
- MRNA_EXPRESSION
- METHYLATION
The genetic profiles can include "mutation", "CNA", "rppa",
"methylation", etc.
Returns
-------
genetic_profiles : list[str]
A list of genetic profiles available for the given study. | [
"Return",
"all",
"the",
"genetic",
"profiles",
"(",
"data",
"sets",
")",
"for",
"a",
"given",
"study",
"."
] | python | train |
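Hypothetical usage of the function above (it needs network access to cBioPortal, actual profile IDs will vary, and the filter value is taken from the docstring above):

```python
from indra.databases import cbio_client

# Restrict to mutation profiles for the pancreatic cancer ICGC study.
profiles = cbio_client.get_genetic_profiles('paad_icgc', profile_filter='MUTATION')
print(profiles)  # e.g. ['paad_icgc_mutations', ...]
```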
pandas-dev/pandas | pandas/core/reshape/concat.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/concat.py#L24-L229 | def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
sort=None, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy, sort=sort)
return op.get_result() | [
"def",
"concat",
"(",
"objs",
",",
"axis",
"=",
"0",
",",
"join",
"=",
"'outer'",
",",
"join_axes",
"=",
"None",
",",
"ignore_index",
"=",
"False",
",",
"keys",
"=",
"None",
",",
"levels",
"=",
"None",
",",
"names",
"=",
"None",
",",
"verify_integrit... | Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a'] | [
"Concatenate",
"pandas",
"objects",
"along",
"a",
"particular",
"axis",
"with",
"optional",
"set",
"logic",
"along",
"the",
"other",
"axes",
"."
] | python | train |
Duke-GCB/DukeDSClient | ddsc/core/parallel.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/parallel.py#L233-L245 | def process_single_message_from_queue(self):
"""
Tries to read a single message from the queue and let the associated task process it.
:return: bool: True if we processed a message, otherwise False
"""
try:
message = self.message_queue.get_nowait()
task_id, data = message
task = self.task_id_to_task[task_id]
task.on_message(data)
return True
except queue.Empty:
return False | [
"def",
"process_single_message_from_queue",
"(",
"self",
")",
":",
"try",
":",
"message",
"=",
"self",
".",
"message_queue",
".",
"get_nowait",
"(",
")",
"task_id",
",",
"data",
"=",
"message",
"task",
"=",
"self",
".",
"task_id_to_task",
"[",
"task_id",
"]"... | Tries to read a single message from the queue and let the associated task process it.
:return: bool: True if we processed a message, otherwise False | [
"Tries",
"to",
"read",
"a",
"single",
"message",
"from",
"the",
"queue",
"and",
"let",
"the",
"associated",
"task",
"process",
"it",
".",
":",
"return",
":",
"bool",
":",
"True",
"if",
"we",
"processed",
"a",
"message",
"otherwise",
"False"
] | python | train |
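The non-blocking drain pattern above, reduced to a standalone sketch (the task objects are replaced by plain callables):

```python
import queue

def process_single_message(message_queue, task_handlers):
    """Handle at most one queued (task_id, data) message; False if queue empty."""
    try:
        task_id, data = message_queue.get_nowait()
    except queue.Empty:
        return False
    task_handlers[task_id](data)
    return True

q = queue.Queue()
q.put(("task-1", {"status": "done"}))
handlers = {"task-1": lambda data: print("task-1 got", data)}

print(process_single_message(q, handlers))   # handles the message -> True
print(process_single_message(q, handlers))   # queue now empty -> False
```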
aquatix/python-utilkit | utilkit/stringutil.py | https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/stringutil.py#L6-L20 | def safe_unicode(obj, *args):
""" return the unicode representation of obj """
try:
return unicode(obj, *args) # noqa for undefined-variable
except UnicodeDecodeError:
# obj is byte string
ascii_text = str(obj).encode('string_escape')
try:
return unicode(ascii_text) # noqa for undefined-variable
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj | [
"def",
"safe_unicode",
"(",
"obj",
",",
"*",
"args",
")",
":",
"try",
":",
"return",
"unicode",
"(",
"obj",
",",
"*",
"args",
")",
"# noqa for undefined-variable",
"except",
"UnicodeDecodeError",
":",
"# obj is byte string",
"ascii_text",
"=",
"str",
"(",
"obj... | return the unicode representation of obj | [
"return",
"the",
"unicode",
"representation",
"of",
"obj"
] | python | train |
QuantEcon/QuantEcon.py | quantecon/quad.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L685-L730 | def _qnwcheb1(n, a, b):
"""
Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
return nodes, weights | [
"def",
"_qnwcheb1",
"(",
"n",
",",
"a",
",",
"b",
")",
":",
"nodes",
"=",
"(",
"b",
"+",
"a",
")",
"/",
"2",
"-",
"(",
"b",
"-",
"a",
")",
"/",
"2",
"*",
"np",
".",
"cos",
"(",
"np",
".",
"pi",
"/",
"n",
"*",
"np",
".",
"linspace",
"(... | Compute univariate Guass-Checbychev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002. | [
"Compute",
"univariate",
"Guass",
"-",
"Checbychev",
"quadrature",
"nodes",
"and",
"weights"
] | python | train |
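A quick numerical check of the quadrature rule above (the function is copied verbatim so the snippet runs standalone); with n = 11 nodes it should reproduce the integral of x**2 over [0, 1], i.e. 1/3, essentially exactly.

```python
import numpy as np

def _qnwcheb1(n, a, b):
    nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
    t1 = np.arange(1, n+1) - 0.5
    t2 = np.arange(0.0, n, 2)
    t3 = np.concatenate((np.array([1.0]),
                         -2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
    weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
    return nodes, weights

nodes, weights = _qnwcheb1(11, 0.0, 1.0)
print(weights @ nodes**2)   # ~ 0.3333333 (exact value is 1/3)
print(weights.sum())        # ~ 1.0 (the weights integrate the constant 1)
```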
wimglenn/wimpy | wimpy/util.py | https://github.com/wimglenn/wimpy/blob/4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e/wimpy/util.py#L61-L68 | def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s | [
"def",
"strip_suffix",
"(",
"s",
",",
"suffix",
",",
"strict",
"=",
"False",
")",
":",
"if",
"s",
".",
"endswith",
"(",
"suffix",
")",
":",
"return",
"s",
"[",
":",
"len",
"(",
"s",
")",
"-",
"len",
"(",
"suffix",
")",
"]",
"elif",
"strict",
":... | Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present | [
"Removes",
"the",
"suffix",
"if",
"it",
"s",
"there",
"otherwise",
"returns",
"input",
"string",
"unchanged",
".",
"If",
"strict",
"is",
"True",
"also",
"ensures",
"the",
"suffix",
"was",
"present"
] | python | test |
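Usage sketch of `strip_suffix` above; `WimpyError` is stubbed so the snippet runs standalone.

```python
class WimpyError(Exception):   # stand-in for wimpy's error type
    pass

def strip_suffix(s, suffix, strict=False):
    if s.endswith(suffix):
        return s[: len(s) - len(suffix)]
    elif strict:
        raise WimpyError("string doesn't end with suffix")
    return s

print(strip_suffix("report.csv", ".csv"))          # -> 'report'
print(strip_suffix("report", ".csv"))              # -> 'report' (unchanged)
try:
    strip_suffix("report", ".csv", strict=True)    # raises: suffix missing
except WimpyError as e:
    print(e)
```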
materialsproject/pymatgen-db | matgendb/builders/core.py | https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/core.py#L45-L80 | def parse_fn_docstring(fn):
"""Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: dict
"""
doc = fn.__doc__
params, return_ = {}, {}
param_order = []
for line in doc.split("\n"):
line = line.strip()
if line.startswith(":param"):
_, name, desc = line.split(":", 2)
name = name[6:].strip() # skip 'param '
params[name] = {'desc': desc.strip()}
param_order.append(name)
elif line.startswith(":type"):
_, name, desc = line.split(":", 2)
name = name[5:].strip() # skip 'type '
if not name in params:
raise ValueError("'type' without 'param' for {}".format(name))
params[name]['type'] = desc.strip()
elif line.startswith(":return"):
_1, _2, desc = line.split(":", 2)
return_['desc'] = desc
elif line.startswith(":rtype"):
_1, _2, desc = line.split(":", 2)
return_['type'] = desc.strip()
return params | [
"def",
"parse_fn_docstring",
"(",
"fn",
")",
":",
"doc",
"=",
"fn",
".",
"__doc__",
"params",
",",
"return_",
"=",
"{",
"}",
",",
"{",
"}",
"param_order",
"=",
"[",
"]",
"for",
"line",
"in",
"doc",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"line",
... | Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: dict
"Get",
"parameter",
"and",
"return",
"types",
"from",
"function",
"s",
"docstring",
"."
] | python | train |
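The parser above only needs a conforming docstring to work, so it can be exercised directly; this is a condensed copy of the parsing loop (the unused return-type handling is dropped) plus a sample function invented for the demo.

```python
def parse_fn_docstring(fn):
    """Condensed copy of the :param/:type parsing loop from the record above."""
    params = {}
    for line in fn.__doc__.split("\n"):
        line = line.strip()
        if line.startswith(":param"):
            _, name, desc = line.split(":", 2)
            params[name[6:].strip()] = {'desc': desc.strip()}
        elif line.startswith(":type"):
            _, name, desc = line.split(":", 2)
            name = name[5:].strip()
            if name not in params:
                raise ValueError("'type' without 'param' for {}".format(name))
            params[name]['type'] = desc.strip()
    return params

def scale(foo):
    """Scale a value.

    :param foo: What is foo
    :type foo: int
    :return: Scaled value
    :rtype: double
    """
    return 2 * foo

print(parse_fn_docstring(scale))
# -> {'foo': {'desc': 'What is foo', 'type': 'int'}}
```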
census-instrumentation/opencensus-python | opencensus/stats/measurement_map.py | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measurement_map.py#L93-L119 | def record(self, tags=None):
"""records all the measures at the same time with a tag_map.
tag_map could either be explicitly passed to the method, or implicitly
read from current runtime context.
"""
if tags is None:
tags = TagContext.get()
if self._invalid:
logger.warning("Measurement map has included negative value "
"measurements, refusing to record")
return
for measure, value in self.measurement_map.items():
if value < 0:
self._invalid = True
logger.warning("Dropping values, value to record must be "
"non-negative")
logger.info("Measure '{}' has negative value ({}), refusing "
"to record measurements from {}"
.format(measure.name, value, self))
return
self.measure_to_view_map.record(
tags=tags,
measurement_map=self.measurement_map,
timestamp=utils.to_iso_str(),
attachments=self.attachments
) | [
"def",
"record",
"(",
"self",
",",
"tags",
"=",
"None",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"TagContext",
".",
"get",
"(",
")",
"if",
"self",
".",
"_invalid",
":",
"logger",
".",
"warning",
"(",
"\"Measurement map has included negativ... | records all the measures at the same time with a tag_map.
tag_map could either be explicitly passed to the method, or implicitly
read from current runtime context. | [
"records",
"all",
"the",
"measures",
"at",
"the",
"same",
"time",
"with",
"a",
"tag_map",
".",
"tag_map",
"could",
"either",
"be",
"explicitly",
"passed",
"to",
"the",
"method",
"or",
"implicitly",
"read",
"from",
"current",
"runtime",
"context",
"."
] | python | train |
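The latching-invalidation behaviour above, reduced to a minimal class (a toy stand-in, not opencensus itself): one negative value poisons the map, and later `record()` calls refuse to emit anything.

```python
class MeasurementMap:
    """Toy stand-in showing the latching guard, not the opencensus API."""
    def __init__(self):
        self.measurements = {}
        self._invalid = False

    def put(self, name, value):
        self.measurements[name] = value

    def record(self):
        if self._invalid:
            print("refusing to record: map already invalid")
            return
        if any(v < 0 for v in self.measurements.values()):
            self._invalid = True
            print("dropping values: measurements must be non-negative")
            return
        print("recorded", self.measurements)

m = MeasurementMap()
m.put("latency_ms", 12)
m.record()                 # recorded {'latency_ms': 12}
m.put("bytes_in", -3)
m.record()                 # dropping values: ...
m.record()                 # refusing to record: map already invalid
```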
apache/incubator-mxnet | example/ssd/symbol/common.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L153-L304 | def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
ratios=[1], normalization=-1, num_channels=[],
clip=False, interm_layer=0, steps=[]):
"""
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalization : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, \
"num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
# Ref for start_offset value:
# https://arxiv.org/abs/1512.02325
start_offset = 0.1
min_sizes = [start_offset] + tmp.tolist()
max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
sizes = list(zip(min_sizes, max_sizes))  # list() so len() works under Python 3
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) <= len(num_channels), \
"must provide number of channels for each normalized layer"
if steps:
assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = mx.symbol.L2Normalization(data=from_layer, \
mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),
shape=(1, num_channels.pop(0), 1, 1),
init=mx.init.Constant(normalization[k]),
attr={'__wd_mult__': '0.1'})
from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=interm_layer, \
name="{}_inter_conv".format(from_name))
from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
num_anchors = len(size) -1 + len(ratio)
# create location prediction layer
num_loc_pred = num_anchors * 4
bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
name="{}_loc_pred_conv".format(from_name))
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = mx.symbol.Flatten(data=loc_pred)
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
name="{}_cls_pred_conv".format(from_name))
cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
cls_pred = mx.symbol.Flatten(data=cls_pred)
cls_pred_layers.append(cls_pred)
# create anchor generation layer
if steps:
step = (steps[k], steps[k])
else:
step = '(-1.0, -1.0)'
anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
clip=clip, name="{}_anchors".format(from_name),
steps=step)
anchors = mx.symbol.Flatten(data=anchors)
anchor_layers.append(anchors)
loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
dim=1, name="multibox_loc_pred")
cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
dim=1)
cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
anchor_boxes = mx.symbol.Concat(*anchor_layers, \
num_args=len(anchor_layers), dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
return [loc_preds, cls_preds, anchor_boxes] | [
"def",
"multibox_layer",
"(",
"from_layers",
",",
"num_classes",
",",
"sizes",
"=",
"[",
".2",
",",
".95",
"]",
",",
"ratios",
"=",
"[",
"1",
"]",
",",
"normalization",
"=",
"-",
"1",
",",
"num_channels",
"=",
"[",
"]",
",",
"clip",
"=",
"False",
"... | the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalization : int or list of int
use one normalization value for all layers or [...] for specific layers;
-1 indicates no normalization or scaling
num_channels : list of int
number of input layer channels, used when normalization is enabled; the
length of the list should equal the number of normalized layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add an intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer; if left empty, steps are
calculated from the layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes | [
"the",
"basic",
"aggregation",
"module",
"for",
"SSD",
"detection",
".",
"Takes",
"in",
"multiple",
"layers",
"generate",
"multiple",
"object",
"detection",
"targets",
"by",
"customized",
"layers"
] | python | train |
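A minimal usage sketch for the `multibox_layer` function above. The backbone feature maps, filter counts, and size/ratio settings below are illustrative assumptions, not values from the original example:

```python
import mxnet as mx
# multibox_layer: the aggregation function defined above (import path assumed).

# Stand-in feature maps; a real SSD network takes these from a backbone.
data = mx.symbol.Variable("data")
feat1 = mx.symbol.Convolution(data=data, kernel=(3, 3), pad=(1, 1),
                              num_filter=512, name="feat1")
feat2 = mx.symbol.Convolution(data=feat1, kernel=(3, 3), stride=(2, 2),
                              pad=(1, 1), num_filter=512, name="feat2")

loc_preds, cls_preds, anchors = multibox_layer(
    from_layers=[feat1, feat2],
    num_classes=20,                       # foreground classes; +1 added inside
    sizes=[[0.2, 0.272], [0.37, 0.447]],  # per-layer [min, max] anchor sizes
    ratios=[[1, 2, 0.5]] * 2,
    normalization=[20, -1],               # L2-normalize feat1 only
    num_channels=[512],                   # channels of each normalized layer
    clip=False)
```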
berkeley-cocosci/Wallace | wallace/custom.py | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L419-L431 | def get_participant(participant_id):
"""Get the participant with the given id."""
try:
ppt = models.Participant.query.filter_by(id=participant_id).one()
except NoResultFound:
return error_response(
error_type="/participant GET: no participant found",
status=403)
# return the data
return success_response(field="participant",
data=ppt.__json__(),
request_type="participant get") | [
"def",
"get_participant",
"(",
"participant_id",
")",
":",
"try",
":",
"ppt",
"=",
"models",
".",
"Participant",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"participant_id",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"return",
"error_r... | Get the participant with the given id. | [
"Get",
"the",
"participant",
"with",
"the",
"given",
"id",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/learning/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L3343-L3368 | def delete_activity(self, activity_id):
"""Deletes the ``Activity`` identified by the given ``Id``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
to delete
raise: NotFound - an ``Activity`` was not found identified by
the given ``Id``
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('learning',
collection='Activity',
runtime=self._runtime)
if not isinstance(activity_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
activity_map = collection.find_one(
dict({'_id': ObjectId(activity_id.get_identifier())},
**self._view_filter()))
objects.Activity(osid_object_map=activity_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(activity_id.get_identifier())}) | [
"def",
"delete_activity",
"(",
"self",
",",
"activity_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceAdminSession.delete_resource_template",
"collection",
"=",
"JSONClientValidated",
"(",
"'learning'",
",",
"collection",
"=",
"'Activity'",
",",
"ru... | Deletes the ``Activity`` identified by the given ``Id``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
to delete
raise: NotFound - an ``Activity`` was not found identified by
the given ``Id``
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Deletes",
"the",
"Activity",
"identified",
"by",
"the",
"given",
"Id",
"."
] | python | train |
briancappello/flask-unchained | flask_unchained/bundles/controller/decorators.py | https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/controller/decorators.py#L5-L57 | def route(rule=None, blueprint=None, defaults=None, endpoint=None,
is_member=False, methods=None, only_if=None, **rule_options):
"""
Decorator to set default route rules for a view function. The arguments this
function accepts are very similar to Flask's :meth:`~flask.Flask.route`,
however, the ``is_member`` argument perhaps deserves an example::
class UserResource(ModelResource):
class Meta:
model = User
member_param = '<int:id>'
include_methods = ['list', 'get']
@route(is_member=True, methods=['POST'])
def set_profile_pic(user):
# do stuff
# registered like so in your ``app_bundle/routes.py``:
routes = lambda: [
resource(UserResource),
]
# results in the following routes:
# UserResource.list => GET /users
# UserResource.get => GET /users/<int:id>
# UserResource.set_profile_pic => POST /users/<int:id>/set-profile-pic
:param rule: The URL rule.
:param defaults: Any default values for parameters in the URL rule.
:param endpoint: The endpoint name of this view. Determined automatically if left
unspecified.
:param is_member: Whether or not this view is for a
:class:`~flask_unchained.bundles.resource.resource.Resource`
member method.
:param methods: A list of HTTP methods supported by this view. Defaults to
``['GET']``.
:param only_if: A boolean or callable to dynamically determine whether or not to
register this route with the app.
:param rule_options: Other kwargs passed on to :class:`~werkzeug.routing.Rule`.
"""
def wrapper(fn):
fn_routes = getattr(fn, FN_ROUTES_ATTR, [])
route = Route(rule, fn, blueprint=blueprint, defaults=defaults,
endpoint=endpoint, is_member=is_member, methods=methods,
only_if=only_if, **rule_options)
setattr(fn, FN_ROUTES_ATTR, fn_routes + [route])
return fn
if callable(rule):
fn = rule
rule = None
return wrapper(fn)
return wrapper | [
"def",
"route",
"(",
"rule",
"=",
"None",
",",
"blueprint",
"=",
"None",
",",
"defaults",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"is_member",
"=",
"False",
",",
"methods",
"=",
"None",
",",
"only_if",
"=",
"None",
",",
"*",
"*",
"rule_option... | Decorator to set default route rules for a view function. The arguments this
function accepts are very similar to Flask's :meth:`~flask.Flask.route`,
however, the ``is_member`` argument perhaps deserves an example::
class UserResource(ModelResource):
class Meta:
model = User
member_param = '<int:id>'
include_methods = ['list', 'get']
@route(is_member=True, methods=['POST'])
def set_profile_pic(user):
# do stuff
# registered like so in your ``app_bundle/routes.py``:
routes = lambda: [
resource(UserResource),
]
# results in the following routes:
# UserResource.list => GET /users
# UserResource.get => GET /users/<int:id>
# UserResource.set_profile_pic => POST /users/<int:id>/set-profile-pic
:param rule: The URL rule.
:param defaults: Any default values for parameters in the URL rule.
:param endpoint: The endpoint name of this view. Determined automatically if left
unspecified.
:param is_member: Whether or not this view is for a
:class:`~flask_unchained.bundles.resource.resource.Resource`
member method.
:param methods: A list of HTTP methods supported by this view. Defaults to
``['GET']``.
:param only_if: A boolean or callable to dynamically determine whether or not to
register this route with the app.
:param rule_options: Other kwargs passed on to :class:`~werkzeug.routing.Rule`. | [
"Decorator",
"to",
"set",
"default",
"route",
"rules",
"for",
"a",
"view",
"function",
".",
"The",
"arguments",
"this",
"function",
"accepts",
"are",
"very",
"similar",
"to",
"Flask",
"s",
":",
"meth",
":",
"~flask",
".",
"Flask",
".",
"route",
"however",
... | python | train |
empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/srl.py | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/srl.py#L4-L16 | def perform_srl(responses, prompt):
""" Perform semantic role labeling on a list of responses, given a prompt."""
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
sentences = [{"sentence": prompt + " " + response} for response in responses]
output = predictor.predict_batch_json(sentences)
full_output = [{"sentence": prompt + response,
"response": response,
"srl": srl} for (response, srl) in zip(responses, output)]
return full_output | [
"def",
"perform_srl",
"(",
"responses",
",",
"prompt",
")",
":",
"predictor",
"=",
"Predictor",
".",
"from_path",
"(",
"\"https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz\"",
")",
"sentences",
"=",
"[",
"{",
"\"sentence\"",
":",
"prompt",
"... | Perform semantic role labeling on a list of responses, given a prompt. | [
"Perform",
"semantic",
"role",
"labeling",
"on",
"a",
"list",
"of",
"responses",
"given",
"a",
"prompt",
"."
] | python | train |
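A hedged usage sketch for `perform_srl`. The prompt and responses are invented, and reading `verbs` from each SRL result reflects the usual AllenNLP predictor output format but is an assumption here:

```python
# perform_srl is assumed imported from quillnlp.srl as defined above.
prompt = "Schools should serve healthy lunches"
responses = ["because students concentrate better when they eat well.",
             "so cafeterias need bigger budgets."]

results = perform_srl(responses, prompt)
for item in results:
    # Each dict pairs the original response with its SRL parse.
    n_predicates = len(item["srl"].get("verbs", []))
    print("{!r} -> {} predicate(s)".format(item["response"], n_predicates))
```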
basho/riak-python-client | riak/datatypes/map.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/datatypes/map.py#L227-L234 | def _check_key(self, key):
"""
Ensures well-formedness of a key.
"""
if not len(key) == 2:
raise TypeError('invalid key: %r' % key)
elif key[1] not in TYPES:
raise TypeError('invalid datatype: %s' % key[1]) | [
"def",
"_check_key",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"len",
"(",
"key",
")",
"==",
"2",
":",
"raise",
"TypeError",
"(",
"'invalid key: %r'",
"%",
"key",
")",
"elif",
"key",
"[",
"1",
"]",
"not",
"in",
"TYPES",
":",
"raise",
"TypeErr... | Ensures well-formedness of a key. | [
"Ensures",
"well",
"-",
"formedness",
"of",
"a",
"key",
"."
] | python | train |
lpomfrey/django-debreach | debreach/context_processors.py | https://github.com/lpomfrey/django-debreach/blob/b425bb719ea5de583fae7db5b7419e5fed569cb0/debreach/context_processors.py#L14-L36 | def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
token = force_bytes(token, encoding='latin-1')
key = force_bytes(
get_random_string(len(token)),
encoding='latin-1'
)
value = b64_encode(xor(token, key))
return force_text(b'$'.join((key, value)), encoding='latin-1')
_get_val = lazy(_get_val, text_type)
return {'csrf_token': _get_val()} | [
"def",
"csrf",
"(",
"request",
")",
":",
"def",
"_get_val",
"(",
")",
":",
"token",
"=",
"get_token",
"(",
"request",
")",
"if",
"token",
"is",
"None",
":",
"# In order to be able to provide debugging info in the",
"# case of misconfiguration, we use a sentinel value",
... | Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware | [
"Context",
"processor",
"that",
"provides",
"a",
"CSRF",
"token",
"or",
"the",
"string",
"NOTPROVIDED",
"if",
"it",
"has",
"not",
"been",
"provided",
"by",
"either",
"a",
"view",
"decorator",
"or",
"the",
"middleware"
] | python | train |
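The interesting part of the processor above is the per-response masking: the token is XOR-ed with a fresh random key of equal length, base64-encoded, and joined to the key with `$`, so the transmitted bytes differ on every response (a BREACH mitigation). A minimal reconstruction of the round trip, assuming debreach's `xor`/`b64_encode` helpers behave like plain byte-wise XOR and standard base64:

```python
from base64 import b64encode, b64decode

def xor(data, key):
    # Byte-wise XOR of two equal-length byte strings.
    return bytes(a ^ b for a, b in zip(data, key))

token = b"s3cr3t-csrf-token"
key = b"fresh-random-key!"  # same length as the token, new on each response
masked = b"$".join((key, b64encode(xor(token, key))))

# Unmasking, as the matching middleware would do on the way back in:
key, value = masked.split(b"$", 1)
assert xor(b64decode(value), key) == token
```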
apache/airflow | airflow/contrib/hooks/salesforce_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/salesforce_hook.py#L186-L293 | def write_object_to_file(self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False):
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited instead of comma delimited like in `json`.
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.
By default, this function will try and leave all values as they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
This can be greatly beneficial as it will make all of your datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:type query_results: list of dict
:param filename: the name of the file where the data should be dumped to
:type filename: str
:param fmt: the format you want the output in. Default: 'csv'
:type fmt: str
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:type coerce_to_timestamp: bool
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:type record_time_added: bool
:return: the dataframe that gets written to the file.
:rtype: pd.DataFrame
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {}".format(fmt))
# this line right here will convert all integers to floats
# if there are any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
# possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
field['name'].lower()
for field in schema['fields']
if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df | [
"def",
"write_object_to_file",
"(",
"self",
",",
"query_results",
",",
"filename",
",",
"fmt",
"=",
"\"csv\"",
",",
"coerce_to_timestamp",
"=",
"False",
",",
"record_time_added",
"=",
"False",
")",
":",
"fmt",
"=",
"fmt",
".",
"lower",
"(",
")",
"if",
"fmt... | Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited instead of comma delimited like in `json`.
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.
By default, this function will try and leave all values as they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
This can be greatly beneficial as it will make all of your datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:type query_results: list of dict
:param filename: the name of the file where the data should be dumped to
:type filename: str
:param fmt: the format you want the output in. Default: 'csv'
:type fmt: str
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:type coerce_to_timestamp: bool
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:type record_time_added: bool
:return: the dataframe that gets written to the file.
:rtype: pd.DataFrame | [
"Write",
"query",
"results",
"to",
"file",
"."
] | python | test |
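A usage sketch for the hook. Only `write_object_to_file` is taken directly from the code above; the connection id and the `make_query` helper are assumptions based on the same contrib module:

```python
from airflow.contrib.hooks.salesforce_hook import SalesforceHook

hook = SalesforceHook(conn_id="salesforce_default")  # conn_id is an assumption
result = hook.make_query("SELECT Id, Name, CreatedDate FROM Account")

# Newline-delimited JSON with DATE/DATETIME fields coerced to epoch seconds,
# plus a column recording when the rows were fetched.
df = hook.write_object_to_file(
    query_results=result["records"],
    filename="/tmp/accounts.ndjson",
    fmt="ndjson",
    coerce_to_timestamp=True,
    record_time_added=True)
```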
sdispater/cachy | cachy/stores/memcached_store.py | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/memcached_store.py#L80-L92 | def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._memcache.decr(self._prefix + key, value) | [
"def",
"decrement",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_memcache",
".",
"decr",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool | [
"Decrement",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | python | train |
musicmetric/mmpy | src/entity.py | https://github.com/musicmetric/mmpy/blob/2b5d975c61f9ea8c7f19f76a90b59771833ef881/src/entity.py#L70-L104 | def response_from(self, ext_endpoint=None, params = {}):
"""
fetches and parses data from the semetric API, returning whatever is
in the 'response' field in the top level dict on success (200)
if the endpoint returns a 204, returns None (no data available for id)
else throws a value error
self should have these attributes as needed:
@entity_type the entity type (eg. list, artist)
@entity_id the semetric UUID or resolvable equivelant for the entity to be retrieved
these can be passed on call:
@ext_endpoint (default: None) if an endpoint beyond the id is required, this is where it should go
@params and key value params *besides* the api token
on success self.response will have the contents of the response
a
"""
params['token'] = API_KEY
base_endpoint="{base_url}/{entity}/{entityID}"
uri = base_endpoint.format(base_url=BASE_URL, entity=self.entity_type, entityID=self.entity_id)
if ext_endpoint:
if ext_endpoint[0] != '/':
ext_endpoint = '/' + ext_endpoint
uri += ext_endpoint
full_uri = uri + '?' + urlencode(params)
log.debug('fetching: {0}'.format(full_uri))
wrapped_resp = loads(urllib2.urlopen(full_uri).read(), encoding='utf-8')
#better error handling should go here
if not wrapped_resp['success']:
if wrapped_resp["error"]["code"] == 204:
self.response = None
return
raise ValueError(\
'Unable to fetch data for {0} entity with id {1}, ext_endpoint was {2}, params-{3}'.format(\
self.entity_type, self.entity_id, ext_endpoint, params)+
' code was {0}, msg {1}'.format(wrapped_resp["error"]["code"],wrapped_resp["error"]["msg"]))
self.response = wrapped_resp["response"] | [
"def",
"response_from",
"(",
"self",
",",
"ext_endpoint",
"=",
"None",
",",
"params",
"=",
"{",
"}",
")",
":",
"params",
"[",
"'token'",
"]",
"=",
"API_KEY",
"base_endpoint",
"=",
"\"{base_url}/{entity}/{entityID}\"",
"uri",
"=",
"base_endpoint",
".",
"format"... | fetches and parses data from the semetric API, returning whatever is
in the 'response' field in the top level dict on success (200)
if the endpoint returns a 204, returns None (no data available for id)
else throws a value error
self should have these attributes as needed:
@entity_type the entity type (eg. list, artist)
@entity_id the semetric UUID or resolvable equivalent for the entity to be retrieved
these can be passed on call:
@ext_endpoint (default: None) if an endpoint beyond the id is required, this is where it should go
@params any key value params *besides* the api token
on success self.response will have the contents of the response | [
"fetches",
"and",
"parses",
"data",
"from",
"the",
"semetric",
"API",
"returning",
"whatever",
"is",
"in",
"the",
"response",
"field",
"in",
"the",
"top",
"level",
"dict",
"on",
"success",
"(",
"200",
")",
"if",
"the",
"endpoint",
"returns",
"a",
"204",
... | python | train |
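A sketch of how an entity class might satisfy the attribute contract the docstring describes. The `Artist` class, the UUID, the `plays` endpoint, and the params below are illustrative assumptions; mmpy's real entity classes live alongside `response_from` in the same module:

```python
class Artist(object):
    entity_type = "artist"

    def __init__(self, entity_id):
        self.entity_id = entity_id

    response_from = response_from  # reuse the fetch/parse helper defined above

artist = Artist("0123456789abcdef0123456789abcdef")  # hypothetical Semetric UUID
artist.response_from(ext_endpoint="plays", params={"granularity": "week"})
print(artist.response)  # None on a 204, the parsed payload otherwise
```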
inasafe/inasafe | safe/gui/tools/wizard/step_kw20_unit.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw20_unit.py#L43-L60 | def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
"""
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
is_raster = is_raster_layer(self.parent.layer)
has_classifications = get_classifications(subcategory['key'])
# Vector
if not is_raster:
return self.parent.step_kw_field
# Raster and has classifications
elif has_classifications:
return self.parent.step_kw_multi_classifications
# else go to source
return self.parent.step_kw_source | [
"def",
"get_next_step",
"(",
"self",
")",
":",
"subcategory",
"=",
"self",
".",
"parent",
".",
"step_kw_subcategory",
".",
"selected_subcategory",
"(",
")",
"is_raster",
"=",
"is_raster_layer",
"(",
"self",
".",
"parent",
".",
"layer",
")",
"has_classifications"... | Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None | [
"Find",
"the",
"proper",
"step",
"when",
"user",
"clicks",
"the",
"Next",
"button",
"."
] | python | train |
trezor/python-trezor | trezorlib/stellar.py | https://github.com/trezor/python-trezor/blob/2813522b05cef4e0e545a101f8b3559a3183b45b/trezorlib/stellar.py#L294-L306 | def _xdr_read_asset(unpacker):
"""Reads a stellar Asset from unpacker"""
asset = messages.StellarAssetType(type=unpacker.unpack_uint())
if asset.type == ASSET_TYPE_ALPHA4:
asset.code = unpacker.unpack_fstring(4)
asset.issuer = _xdr_read_address(unpacker)
if asset.type == ASSET_TYPE_ALPHA12:
asset.code = unpacker.unpack_fstring(12)
asset.issuer = _xdr_read_address(unpacker)
return asset | [
"def",
"_xdr_read_asset",
"(",
"unpacker",
")",
":",
"asset",
"=",
"messages",
".",
"StellarAssetType",
"(",
"type",
"=",
"unpacker",
".",
"unpack_uint",
"(",
")",
")",
"if",
"asset",
".",
"type",
"==",
"ASSET_TYPE_ALPHA4",
":",
"asset",
".",
"code",
"=",
... | Reads a stellar Asset from unpacker | [
"Reads",
"a",
"stellar",
"Asset",
"from",
"unpacker"
] | python | train |
GNS3/gns3-server | gns3server/controller/project.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L844-L875 | def duplicate(self, name=None, location=None):
"""
Duplicate a project
It's the "save as" feature of 1.X. It's implemented on top of the
export / import features. It will generate a gns3p and reimport it.
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project
"""
# If the project was not open we open it temporary
previous_status = self._status
if self._status == "closed":
yield from self.open()
self.dump()
try:
with tempfile.TemporaryDirectory() as tmpdir:
zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
for data in zipstream:
f.write(data)
with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
except (OSError, UnicodeEncodeError) as e:
raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
if previous_status == "closed":
yield from self.close()
return project | [
"def",
"duplicate",
"(",
"self",
",",
"name",
"=",
"None",
",",
"location",
"=",
"None",
")",
":",
"# If the project was not open we open it temporary",
"previous_status",
"=",
"self",
".",
"_status",
"if",
"self",
".",
"_status",
"==",
"\"closed\"",
":",
"yield... | Duplicate a project
It's the "save as" feature of 1.X. It's implemented on top of the
export / import features. It will generate a gns3p and reimport it.
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project | [
"Duplicate",
"a",
"project"
] | python | train |
jakevdp/supersmoother | supersmoother/utils.py | https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L195-L235 | def multinterp(x, y, xquery, slow=False):
"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""
x, y, xquery = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape
# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())
if slow:
from scipy.interpolate import interp1d
return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)])
elif len(x) == 3:
# Most common case: use a faster approach
yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
return np.where(xquery < x[1], yq_lower, yq_upper)
else:
i = np.clip(np.searchsorted(x, xquery, side='right') - 1,
0, len(x) - 2)
j = np.arange(len(xquery))
return y[i, j] + ((xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i])) | [
"def",
"multinterp",
"(",
"x",
",",
"y",
",",
"xquery",
",",
"slow",
"=",
"False",
")",
":",
"x",
",",
"y",
",",
"xquery",
"=",
"map",
"(",
"np",
".",
"asarray",
",",
"(",
"x",
",",
"y",
",",
"xquery",
")",
")",
"assert",
"x",
".",
"ndim",
... | Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query. | [
"Multiple",
"linear",
"interpolations"
] | python | train |
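A quick sanity check of `multinterp`, exercising the fast three-knot branch against the scipy-backed `slow=True` path; the arrays are arbitrary, and the last query point lies outside `x`, so it is clipped to `x.max()`:

```python
import numpy as np

x = np.array([0.0, 1.0, 2.0])
xquery = np.array([0.5, 1.5, 1.0, 2.5])    # 2.5 gets clipped to 2.0
# One y-column per query point: column j holds the line y_j(x) = xquery[j] * x.
y = np.vstack([xq * x for xq in xquery]).T  # shape (3, 4)

fast = multinterp(x, y, xquery)
slow = multinterp(x, y, xquery, slow=True)
assert np.allclose(fast, slow)
print(fast)  # [0.25 2.25 1.   5.  ]
```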
crytic/slither | utils/possible_paths/possible_paths.py | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/utils/possible_paths/possible_paths.py#L4-L26 | def resolve_function(slither, contract_name, function_name):
"""
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
"""
# Obtain the target contract
contract = slither.get_contract_from_name(contract_name)
# Verify the contract was resolved successfully
if contract is None:
raise ResolveFunctionException(f"Could not resolve target contract: {contract_name}")
# Obtain the target function
target_function = next((function for function in contract.functions if function.name == function_name), None)
# Verify we have resolved the function specified.
if target_function is None:
raise ResolveFunctionException(f"Could not resolve target function: {contract_name}.{function_name}")
# Add the resolved function to the new list.
return target_function | [
"def",
"resolve_function",
"(",
"slither",
",",
"contract_name",
",",
"function_name",
")",
":",
"# Obtain the target contract",
"contract",
"=",
"slither",
".",
"get_contract_from_name",
"(",
"contract_name",
")",
"# Verify the contract was resolved successfully",
"if",
"c... | Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise. | [
"Resolves",
"a",
"function",
"instance",
"given",
"a",
"contract",
"name",
"and",
"function",
".",
":",
"param",
"contract_name",
":",
"The",
"name",
"of",
"the",
"contract",
"the",
"function",
"is",
"declared",
"in",
".",
":",
"param",
"function_name",
":",... | python | train |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L162-L166 | def set_total_count(self, value):
"""Setter for count attribute. Set should append only one count per splitted url."""
if not self.total_count_flag and value:
self._count.append(int(value))
self.total_count_flag = True | [
"def",
"set_total_count",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"total_count_flag",
"and",
"value",
":",
"self",
".",
"_count",
".",
"append",
"(",
"int",
"(",
"value",
")",
")",
"self",
".",
"total_count_flag",
"=",
"True"
] | Setter for count attribute. Set should append only one count per split url. | [
"Setter",
"for",
"count",
"attribute",
".",
"Set",
"should",
"append",
"only",
"one",
"count",
"per",
"splitted",
"url",
"."
] | python | train |
rootpy/rootpy | rootpy/tree/tree.py | https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/tree.py#L129-L139 | def create_branches(self, branches):
"""
Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict
"""
if not isinstance(branches, TreeBuffer):
branches = TreeBuffer(branches)
self.set_buffer(branches, create_branches=True) | [
"def",
"create_branches",
"(",
"self",
",",
"branches",
")",
":",
"if",
"not",
"isinstance",
"(",
"branches",
",",
"TreeBuffer",
")",
":",
"branches",
"=",
"TreeBuffer",
"(",
"branches",
")",
"self",
".",
"set_buffer",
"(",
"branches",
",",
"create_branches"... | Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict | [
"Create",
"branches",
"from",
"a",
"TreeBuffer",
"or",
"dict",
"mapping",
"names",
"to",
"type",
"names"
] | python | train |
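A sketch of the dict form, assuming the usual rootpy setup in which trees live inside an open ROOT file; the branch names and the single-letter ROOT type codes ('F' float, 'I' int) are illustrative:

```python
from rootpy.io import root_open
from rootpy.tree import Tree

with root_open("events.root", "recreate"):
    tree = Tree("events")
    tree.create_branches({"pt": "F", "njets": "I"})  # name -> ROOT type code
    for i in range(3):
        tree.pt = 10.0 * i
        tree.njets = i
        tree.fill()
    tree.write()
```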
mushkevych/scheduler | synergy/scheduler/abstract_state_machine.py | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/abstract_state_machine.py#L45-L67 | def _insert_uow(self, process_name, timeperiod, start_timeperiod, end_timeperiod, start_id, end_id):
"""creates unit_of_work and inserts it into the DB
:raise DuplicateKeyError: if unit_of_work with given parameters already exists """
uow = UnitOfWork()
uow.process_name = process_name
uow.timeperiod = timeperiod
uow.start_id = str(start_id)
uow.end_id = str(end_id)
uow.start_timeperiod = start_timeperiod
uow.end_timeperiod = end_timeperiod
uow.created_at = datetime.utcnow()
uow.submitted_at = datetime.utcnow()
uow.source = context.process_context[process_name].source
uow.sink = context.process_context[process_name].sink
uow.state = unit_of_work.STATE_REQUESTED
uow.unit_of_work_type = unit_of_work.TYPE_MANAGED
uow.number_of_retries = 0
uow.arguments = context.process_context[process_name].arguments
uow.db_id = self.uow_dao.insert(uow)
msg = 'Created: UOW {0} for {1}@{2}.'.format(uow.db_id, process_name, start_timeperiod)
self._log_message(INFO, process_name, start_timeperiod, msg)
return uow | [
"def",
"_insert_uow",
"(",
"self",
",",
"process_name",
",",
"timeperiod",
",",
"start_timeperiod",
",",
"end_timeperiod",
",",
"start_id",
",",
"end_id",
")",
":",
"uow",
"=",
"UnitOfWork",
"(",
")",
"uow",
".",
"process_name",
"=",
"process_name",
"uow",
"... | creates unit_of_work and inserts it into the DB
:raise DuplicateKeyError: if unit_of_work with given parameters already exists | [
"creates",
"unit_of_work",
"and",
"inserts",
"it",
"into",
"the",
"DB",
":",
"raise",
"DuplicateKeyError",
":",
"if",
"unit_of_work",
"with",
"given",
"parameters",
"already",
"exists"
] | python | train |
buildbot/buildbot | master/buildbot/schedulers/forcesched.py | https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/schedulers/forcesched.py#L425-L437 | def collectChildProperties(self, kwargs, properties, collector, **kw):
"""Collapse the child values into a dictionary. This is intended to be
called by child classes to fix up the fullName->name conversions."""
childProperties = {}
for field in self.fields: # pylint: disable=not-an-iterable
yield collector.collectValidationErrors(field.fullName,
field.updateFromKwargs,
kwargs=kwargs,
properties=childProperties,
collector=collector,
**kw)
kwargs[self.fullName] = childProperties | [
"def",
"collectChildProperties",
"(",
"self",
",",
"kwargs",
",",
"properties",
",",
"collector",
",",
"*",
"*",
"kw",
")",
":",
"childProperties",
"=",
"{",
"}",
"for",
"field",
"in",
"self",
".",
"fields",
":",
"# pylint: disable=not-an-iterable",
"yield",
... | Collapse the child values into a dictionary. This is intended to be
called by child classes to fix up the fullName->name conversions. | [
"Collapse",
"the",
"child",
"values",
"into",
"a",
"dictionary",
".",
"This",
"is",
"intended",
"to",
"be",
"called",
"by",
"child",
"classes",
"to",
"fix",
"up",
"the",
"fullName",
"-",
">",
"name",
"conversions",
"."
] | python | train |
log2timeline/plaso | plaso/multi_processing/psort.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/psort.py#L635-L648 | def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
# Make a local copy of the PIDs in case the dict is changed by
# the main thread.
for pid in list(self._process_information_per_pid.keys()):
self._CheckStatusAnalysisProcess(pid)
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
time.sleep(self._STATUS_UPDATE_INTERVAL) | [
"def",
"_StatusUpdateThreadMain",
"(",
"self",
")",
":",
"while",
"self",
".",
"_status_update_active",
":",
"# Make a local copy of the PIDs in case the dict is changed by",
"# the main thread.",
"for",
"pid",
"in",
"list",
"(",
"self",
".",
"_process_information_per_pid",
... | Main function of the status update thread. | [
"Main",
"function",
"of",
"the",
"status",
"update",
"thread",
"."
] | python | train |
lehins/python-wepay | wepay/calls/checkout.py | https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/checkout.py#L135-L158 | def __refund(self, checkout_id, refund_reason, **kwargs):
"""Call documentation: `/checkout/refund
<https://www.wepay.com/developer/reference/checkout#refund>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'checkout_id': checkout_id,
'refund_reason': refund_reason
}
return self.make_call(self.__refund, params, kwargs) | [
"def",
"__refund",
"(",
"self",
",",
"checkout_id",
",",
"refund_reason",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'checkout_id'",
":",
"checkout_id",
",",
"'refund_reason'",
":",
"refund_reason",
"}",
"return",
"self",
".",
"make_call",
"(",
... | Call documentation: `/checkout/refund
<https://www.wepay.com/developer/reference/checkout#refund>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | [
"Call",
"documentation",
":",
"/",
"checkout",
"/",
"refund",
"<https",
":",
"//",
"www",
".",
"wepay",
".",
"com",
"/",
"developer",
"/",
"reference",
"/",
"checkout#refund",
">",
"_",
"plus",
"extra",
"keyword",
"parameters",
":",
":",
"keyword",
"str",
... | python | train |
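A hedged call sketch, assuming the name-mangled `__refund` above is exposed publicly as `api.checkout.refund` (mirroring the `/checkout/refund` endpoint); the token and checkout id are placeholders:

```python
from wepay import WePay

api = WePay(production=False, access_token="STAGE_ACCESS_TOKEN")
response = api.checkout.refund(
    12345678,                        # checkout_id (placeholder)
    "customer cancelled the order")  # refund_reason
```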
DLR-RM/RAFCON | source/rafcon/gui/controllers/global_variable_manager.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/global_variable_manager.py#L96-L106 | def global_variable_is_editable(self, gv_name, intro_message='edit'):
"""Check whether global variable is locked
:param str gv_name: Name of global variable to be checked
:param str intro_message: Message which is used form a useful logger error message if needed
:return:
"""
if self.model.global_variable_manager.is_locked(gv_name):
logger.error("{1} of global variable '{0}' is not possible, as it is locked".format(gv_name, intro_message))
return False
return True | [
"def",
"global_variable_is_editable",
"(",
"self",
",",
"gv_name",
",",
"intro_message",
"=",
"'edit'",
")",
":",
"if",
"self",
".",
"model",
".",
"global_variable_manager",
".",
"is_locked",
"(",
"gv_name",
")",
":",
"logger",
".",
"error",
"(",
"\"{1} of glo... | Check whether global variable is locked
:param str gv_name: Name of global variable to be checked
:param str intro_message: Message which is used to form a useful logger error message if needed
:return: | [
"Check",
"whether",
"global",
"variable",
"is",
"locked"
] | python | train |
jtwhite79/pyemu | pyemu/pst/pst_handler.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L175-L186 | def set_res(self,res):
""" reset the private Pst.res attribute
Parameters
----------
res : (varies)
something to use as Pst.res attribute
"""
if isinstance(res,str):
res = pst_utils.read_resfile(res)
self.__res = res | [
"def",
"set_res",
"(",
"self",
",",
"res",
")",
":",
"if",
"isinstance",
"(",
"res",
",",
"str",
")",
":",
"res",
"=",
"pst_utils",
".",
"read_resfile",
"(",
"res",
")",
"self",
".",
"__res",
"=",
"res"
] | reset the private Pst.res attribute
Parameters
----------
res : (varies)
something to use as Pst.res attribute | [
"reset",
"the",
"private",
"Pst",
".",
"res",
"attribute"
] | python | train |
Datary/scrapbag | scrapbag/collections.py | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/collections.py#L132-L218 | def _add_element_by_names(src, names, value, override=False, digit=True):
"""
Internal recursive method to add an element into a list or dict easily using
a path.
============= ============= =======================================
Parameter     Type          Description
============= ============= =======================================
src           list or dict  element where the value is added.
names         list          list of names to navigate in src.
value         any           value to add in src.
override      boolean       override the value at the path in src.
============= ============= =======================================
Returns: src with added value
"""
if src is None:
return False
else:
if names and names[0]:
head, *rest = names
# list and digit head
if isinstance(src, list):
if force_list(digit)[0] and head.isdigit():
head = int(head)
# if src is a list and lenght <= head
if len(src) <= head:
src.extend([""] * (head + 1 - len(src)))
# head not in src :(
elif isinstance(src, dict):
if head not in src:
src[head] = [""] * (int(rest[0]) + 1) if rest and force_list(digit)[0] and rest[0].isdigit() else {}
# more heads in rest
if rest:
# Head find but isn't a dict or list to navigate for it.
if not isinstance(src[head], (dict, list)):
# only could be str for dict or int for list
src[head] = [""] * (int(rest[0]) + 1) if force_list(digit)[0] and rest[0].isdigit() else {}
digit = digit if not digit or not isinstance(digit, list) else digit[1:]
if not force_list(digit)[0] and rest and str(rest[0]).isdigit() and isinstance(src[head], list) and override:
src[head] = {}
_add_element_by_names(src[head], rest, value, override=override, digit=digit)
else:
digit = digit if not digit or not isinstance(digit, list) else digit[1:]
if not force_list(digit)[0] and rest and str(rest[0]).isdigit() and isinstance(src[head], list) and override:
src[head] = {}
_add_element_by_names(src[head], rest, value, override=override, digit=digit)
# it's final head
else:
if not override:
if isinstance(src, list) and isinstance(head, int):
if src[head] == '':
src[head] = value
else:
src.append(value)
elif isinstance(src[head], list):
src[head].append(value)
elif isinstance(src[head], dict) and isinstance(value, dict):
src[head].update(value)
else:
src[head] = value
else:
src[head] = value
return src | [
"def",
"_add_element_by_names",
"(",
"src",
",",
"names",
",",
"value",
",",
"override",
"=",
"False",
",",
"digit",
"=",
"True",
")",
":",
"if",
"src",
"is",
"None",
":",
"return",
"False",
"else",
":",
"if",
"names",
"and",
"names",
"[",
"0",
"]",
... | Internal recursive method to add an element into a list or dict easily using
a path.
============= ============= =======================================
Parameter     Type          Description
============= ============= =======================================
src           list or dict  element where the value is added.
names         list          list of names to navigate in src.
value         any           value to add in src.
override      boolean       override the value at the path in src.
============= ============= =======================================
Returns: src with added value | [
"Internal",
"method",
"recursive",
"to",
"Add",
"element",
"into",
"a",
"list",
"or",
"dict",
"easily",
"using",
"a",
"path",
".",
"=============",
"=============",
"=======================================",
"Parameter",
"Type",
"Description",
"=============",
"========... | python | train |
IDSIA/sacred | sacred/experiment.py | https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/experiment.py#L96-L107 | def main(self, function):
"""
Decorator to define the main function of the experiment.
The main function of an experiment is the default command that is being
run when no command is specified, or when calling the run() method.
Usually it is more convenient to use ``automain`` instead.
"""
captured = self.command(function)
self.default_command = captured.__name__
return captured | [
"def",
"main",
"(",
"self",
",",
"function",
")",
":",
"captured",
"=",
"self",
".",
"command",
"(",
"function",
")",
"self",
".",
"default_command",
"=",
"captured",
".",
"__name__",
"return",
"captured"
] | Decorator to define the main function of the experiment.
The main function of an experiment is the default command that is being
run when no command is specified, or when calling the run() method.
Usually it is more convenient to use ``automain`` instead. | [
"Decorator",
"to",
"define",
"the",
"main",
"function",
"of",
"the",
"experiment",
"."
] | python | train |
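Unlike `automain`, `main` only registers the default command, so a script using it still needs an explicit `run_commandline()` (or `run()`) call; a minimal sketch:

```python
from sacred import Experiment

ex = Experiment("demo")

@ex.config
def config():
    batch_size = 32  # becomes a config entry

@ex.main  # registers 'run' as the default command
def run(batch_size):
    print("training with batch_size =", batch_size)

if __name__ == "__main__":
    ex.run_commandline()  # executes 'run' when no command is named
```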
rstoneback/pysat | pysat/_constellation.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_constellation.py#L64-L69 | def set_bounds(self, start, stop):
"""
Sets boundaries for all instruments in constellation
"""
for instrument in self.instruments:
instrument.bounds = (start, stop) | [
"def",
"set_bounds",
"(",
"self",
",",
"start",
",",
"stop",
")",
":",
"for",
"instrument",
"in",
"self",
".",
"instruments",
":",
"instrument",
".",
"bounds",
"=",
"(",
"start",
",",
"stop",
")"
] | Sets boundaries for all instruments in constellation | [
"Sets",
"boundaries",
"for",
"all",
"instruments",
"in",
"constellation"
] | python | train |
pypa/pipenv | pipenv/vendor/pexpect/spawnbase.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/spawnbase.py#L420-L428 | def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
exp = Expecter(self, searcher, searchwindowsize)
return exp.expect_loop(timeout) | [
"def",
"expect_loop",
"(",
"self",
",",
"searcher",
",",
"timeout",
"=",
"-",
"1",
",",
"searchwindowsize",
"=",
"-",
"1",
")",
":",
"exp",
"=",
"Expecter",
"(",
"self",
",",
"searcher",
",",
"searchwindowsize",
")",
"return",
"exp",
".",
"expect_loop",
... | This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. | [
"This",
"is",
"the",
"common",
"loop",
"used",
"inside",
"expect",
".",
"The",
"searcher",
"should",
"be",
"an",
"instance",
"of",
"searcher_re",
"or",
"searcher_string",
"which",
"describes",
"how",
"and",
"what",
"to",
"search",
"for",
"in",
"the",
"input"... | python | train |
hannorein/rebound | rebound/simulation.py | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L872-L889 | def gravity(self):
"""
Get or set the gravity module.
Available gravity modules are:
- ``'none'``
- ``'basic'`` (default)
- ``'compensated'``
- ``'tree'``
Check the online documentation for a full description of each of the modules.
"""
i = self._gravity
for name, _i in GRAVITIES.items():
if i==_i:
return name
return i | [
"def",
"gravity",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_gravity",
"for",
"name",
",",
"_i",
"in",
"GRAVITIES",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the gravity module.
Available gravity modules are:
- ``'none'``
- ``'basic'`` (default)
- ``'compensated'``
- ``'tree'``
Check the online documentation for a full description of each of the modules. | [
"Get",
"or",
"set",
"the",
"gravity",
"module",
"."
] | python | train |
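A round-trip sketch of the property: the setter (the counterpart of the getter shown above) accepts a module name, and the getter maps the stored integer back to that name:

```python
import rebound

sim = rebound.Simulation()
sim.add(m=1.0)          # star
sim.add(m=1e-3, a=1.0)  # planet

sim.gravity = "compensated"  # set by name
print(sim.gravity)           # -> 'compensated' (int mapped back to its name)
sim.integrate(10.0)
```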
elliterate/capybara.py | capybara/node/actions.py | https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/node/actions.py#L44-L59 | def check(self, locator=None, allow_label_click=None, **kwargs):
"""
Find a check box and mark it as checked. The check box can be found via name, id, or label
text. ::
page.check("German")
Args:
locator (str, optional): Which check box to check.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
"""
self._check_with_label(
"checkbox", True, locator=locator, allow_label_click=allow_label_click, **kwargs) | [
"def",
"check",
"(",
"self",
",",
"locator",
"=",
"None",
",",
"allow_label_click",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_with_label",
"(",
"\"checkbox\"",
",",
"True",
",",
"locator",
"=",
"locator",
",",
"allow_label_click"... | Find a check box and mark it as checked. The check box can be found via name, id, or label
text. ::
page.check("German")
Args:
locator (str, optional): Which check box to check.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. | [
"Find",
"a",
"check",
"box",
"and",
"mark",
"it",
"as",
"checked",
".",
"The",
"check",
"box",
"can",
"be",
"found",
"via",
"name",
"id",
"or",
"label",
"text",
".",
"::"
] | python | test |
pyecore/pyecore | pyecore/resources/resource.py | https://github.com/pyecore/pyecore/blob/22b67ad8799594f8f44fd8bee497583d4f12ed63/pyecore/resources/resource.py#L47-L68 | def create_resource(self, uri):
"""Creates a new Resource.
The created resource type depends on the URI used.
:param uri: the resource URI
:type uri: URI
:return: a new Resource
:rtype: Resource
.. seealso:: URI, Resource, XMIResource
"""
if isinstance(uri, str):
uri = URI(uri)
try:
resource = self.resource_factory[uri.extension](uri)
except KeyError:
resource = self.resource_factory['*'](uri)
self.resources[uri.normalize()] = resource
resource.resource_set = self
resource.decoders.insert(0, self)
return resource | [
"def",
"create_resource",
"(",
"self",
",",
"uri",
")",
":",
"if",
"isinstance",
"(",
"uri",
",",
"str",
")",
":",
"uri",
"=",
"URI",
"(",
"uri",
")",
"try",
":",
"resource",
"=",
"self",
".",
"resource_factory",
"[",
"uri",
".",
"extension",
"]",
... | Creates a new Resource.
The created ressource type depends on the used URI.
:param uri: the resource URI
:type uri: URI
:return: a new Resource
:rtype: Resource
.. seealso:: URI, Resource, XMIResource | [
"Creates",
"a",
"new",
"Resource",
"."
] | python | train |
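A usage sketch: the URI extension selects the factory, so `.xmi` yields an `XMIResource`, while unknown extensions fall back to the `'*'` factory. The model root `some_root` is assumed to be an `EObject` built elsewhere:

```python
from pyecore.resources import ResourceSet, URI

rset = ResourceSet()
resource = rset.create_resource(URI("my_model.xmi"))  # XMIResource by extension

resource.append(some_root)  # some_root: an EObject instance (assumed)
resource.save()
```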
blockstack/blockstack-core | blockstack/lib/client.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L2912-L2976 | def get_block_from_consensus(consensus_hash, hostport=None, proxy=None):
"""
Get a block height from a consensus hash
Returns the block height on success
Returns {'error': ...} on failure
"""
assert hostport or proxy, 'Need hostport or proxy'
if proxy is None:
proxy = connect_hostport(hostport)
consensus_schema = {
'type': 'object',
'properties': {
'block_id': {
'anyOf': [
{
'type': 'integer',
'minimum': 0,
},
{
'type': 'null',
},
],
},
},
'required': [
'block_id'
],
}
schema = json_response_schema( consensus_schema )
resp = {}
try:
resp = proxy.get_block_from_consensus(consensus_hash)
resp = json_validate( schema, resp )
if json_is_error(resp):
log.error("Failed to find block ID for %s" % consensus_hash)
return resp
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
return resp['block_id'] | [
"def",
"get_block_from_consensus",
"(",
"consensus_hash",
",",
"hostport",
"=",
"None",
",",
"proxy",
"=",
"None",
")",
":",
"assert",
"hostport",
"or",
"proxy",
",",
"'Need hostport or proxy'",
"if",
"proxy",
"is",
"None",
":",
"proxy",
"=",
"connect_hostport",... | Get a block height from a consensus hash
Returns the block height on success
Returns {'error': ...} on failure | [
"Get",
"a",
"block",
"height",
"from",
"a",
"consensus",
"hash",
"Returns",
"the",
"block",
"height",
"on",
"success",
"Returns",
"{",
"error",
":",
"...",
"}",
"on",
"failure"
] | python | train |
Bystroushaak/pyDHTMLParser | src/dhtmlparser/htmlelement/html_parser.py | https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/html_parser.py#L162-L169 | def _parseIsTag(self):
"""
Detect whether the element is an HTML tag or not.
Result is saved to the :attr:`_istag` property.
"""
el = self._element
self._istag = el and el[0] == "<" and el[-1] == ">" | [
"def",
"_parseIsTag",
"(",
"self",
")",
":",
"el",
"=",
"self",
".",
"_element",
"self",
".",
"_istag",
"=",
"el",
"and",
"el",
"[",
"0",
"]",
"==",
"\"<\"",
"and",
"el",
"[",
"-",
"1",
"]",
"==",
"\">\""
] | Detect whether the element is an HTML tag or not.
Result is saved to the :attr:`_istag` property. | [
"Detect",
"whether",
"the",
"element",
"is",
"HTML",
"tag",
"or",
"not",
"."
] | python | train |
closeio/tasktiger | tasktiger/task.py | https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L421-L433 | def n_executions(self):
"""
Queries and returns the number of past task executions.
"""
pipeline = self.tiger.connection.pipeline()
pipeline.exists(self.tiger._key('task', self.id))
pipeline.llen(self.tiger._key('task', self.id, 'executions'))
exists, n_executions = pipeline.execute()
if not exists:
raise TaskNotFound('Task {} not found.'.format(
self.id
))
return n_executions | [
"def",
"n_executions",
"(",
"self",
")",
":",
"pipeline",
"=",
"self",
".",
"tiger",
".",
"connection",
".",
"pipeline",
"(",
")",
"pipeline",
".",
"exists",
"(",
"self",
".",
"tiger",
".",
"_key",
"(",
"'task'",
",",
"self",
".",
"id",
")",
")",
"... | Queries and returns the number of past task executions. | [
"Queries",
"and",
"returns",
"the",
"number",
"of",
"past",
"task",
"executions",
"."
] | python | train |
mesowx/MesoPy | MesoPy.py | https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L422-L495 | def timeseries(self, start, end, **kwargs):
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, nwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None.
"""
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['token'] = self.token
return self._get_response('stations/timeseries', kwargs) | [
"def",
"timeseries",
"(",
"self",
",",
"start",
",",
"end",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_geo_param",
"(",
"kwargs",
")",
"kwargs",
"[",
"'start'",
"]",
"=",
"start",
"kwargs",
"[",
"'end'",
"]",
"=",
"end",
"kwargs",
"[",
... | r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, nwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None. | [
"r",
"Returns",
"a",
"time",
"series",
"of",
"observations",
"at",
"a",
"user",
"specified",
"location",
"for",
"a",
"specified",
"time",
".",
"Users",
"must",
"specify",
"at",
"least",
"one",
"geographic",
"search",
"parameter",
"(",
"stid",
"state",
"count... | python | train |
cole/aiosmtplib | src/aiosmtplib/auth.py | https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/auth.py#L84-L122 | async def auth_crammd5(
self, username: str, password: str, timeout: DefaultNumType = _default
) -> SMTPResponse:
"""
CRAM-MD5 auth uses the password as a shared secret to MD5 the server's
response.
Example::
250 AUTH CRAM-MD5
auth cram-md5
334 PDI0NjA5LjEwNDc5MTQwNDZAcG9wbWFpbC5TcGFjZS5OZXQ+
dGltIGI5MTNhNjAyYzdlZGE3YTQ5NWI0ZTZlNzMzNGQzODkw
"""
async with self._command_lock:
initial_response = await self.execute_command(
b"AUTH", b"CRAM-MD5", timeout=timeout
)
if initial_response.code != SMTPStatus.auth_continue:
raise SMTPAuthenticationError(
initial_response.code, initial_response.message
)
password_bytes = password.encode("ascii")
username_bytes = username.encode("ascii")
response_bytes = initial_response.message.encode("ascii")
verification_bytes = crammd5_verify(
username_bytes, password_bytes, response_bytes
)
response = await self.execute_command(verification_bytes)
if response.code != SMTPStatus.auth_successful:
raise SMTPAuthenticationError(response.code, response.message)
return response | [
"async",
"def",
"auth_crammd5",
"(",
"self",
",",
"username",
":",
"str",
",",
"password",
":",
"str",
",",
"timeout",
":",
"DefaultNumType",
"=",
"_default",
")",
"->",
"SMTPResponse",
":",
"async",
"with",
"self",
".",
"_command_lock",
":",
"initial_respon... | CRAM-MD5 auth uses the password as a shared secret to MD5 the server's
response.
Example::
250 AUTH CRAM-MD5
auth cram-md5
334 PDI0NjA5LjEwNDc5MTQwNDZAcG9wbWFpbC5TcGFjZS5OZXQ+
dGltIGI5MTNhNjAyYzdlZGE3YTQ5NWI0ZTZlNzMzNGQzODkw | [
"CRAM",
"-",
"MD5",
"auth",
"uses",
"the",
"password",
"as",
"a",
"shared",
"secret",
"to",
"MD5",
"the",
"server",
"s",
"response",
"."
] | python | train |
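Independent of aiosmtplib, the `crammd5_verify` step above amounts to HMAC-MD5 over the base64-decoded server challenge, keyed by the password; a minimal stdlib sketch (the function name is ours):

```python
import base64
import hmac

def crammd5_response(username, password, b64_challenge):
    challenge = base64.b64decode(b64_challenge)
    digest = hmac.new(password.encode('ascii'), challenge, 'md5').hexdigest()
    # The client replies with base64("<username> <hexdigest>")
    return base64.b64encode('{} {}'.format(username, digest).encode('ascii'))
```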
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/developer_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/developer_api.py#L1569-L1589 | def remove_my_api_key_from_groups(self, body, **kwargs): # noqa: E501
"""Remove API key from groups. # noqa: E501
An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) # noqa: E501
return data | [
"def",
"remove_my_api_key_from_groups",
"(",
"self",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
"... | Remove API key from groups. # noqa: E501
An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread. | [
"Remove",
"API",
"key",
"from",
"groups",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
gabstopper/smc-python | smc-monitoring/smc_monitoring/models/formats.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc-monitoring/smc_monitoring/models/formats.py#L101-L116 | def set_resolving(self, **kw):
"""
Certain log fields can be individually resolved. Use this
method to set these fields. Valid keyword arguments:
:param str timezone: string value to set timezone for audits
:param bool time_show_zone: show the time zone in the audit.
:param bool time_show_millis: show time in milliseconds
:param bool keys: resolve log field keys
:param bool ip_elements: resolve IP's to SMC elements
:param bool ip_dns: resolve IP addresses using DNS
:param bool ip_locations: resolve locations
"""
if 'timezone' in kw and 'time_show_zone' not in kw:
kw.update(time_show_zone=True)
self.data['resolving'].update(**kw) | [
"def",
"set_resolving",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"if",
"'timezone'",
"in",
"kw",
"and",
"'time_show_zone'",
"not",
"in",
"kw",
":",
"kw",
".",
"update",
"(",
"time_show_zone",
"=",
"True",
")",
"self",
".",
"data",
"[",
"'resolving'"... | Certain log fields can be individually resolved. Use this
method to set these fields. Valid keyword arguments:
:param str timezone: string value to set timezone for audits
:param bool time_show_zone: show the time zone in the audit.
:param bool time_show_millis: show time in milliseconds
:param bool keys: resolve log field keys
:param bool ip_elements: resolve IP's to SMC elements
:param bool ip_dns: resolve IP addresses using DNS
:param bool ip_locations: resolve locations | [
"Certain",
"log",
"fields",
"can",
"be",
"individually",
"resolved",
".",
"Use",
"this",
"method",
"to",
"set",
"these",
"fields",
".",
"Valid",
"keyword",
"arguments",
":",
":",
"param",
"str",
"timezone",
":",
"string",
"value",
"to",
"set",
"timezone",
... | python | train |
djgagne/hagelslag | hagelslag/util/output_tree_ensembles.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/output_tree_ensembles.py#L72-L116 | def print_tree_recursive(tree_obj, node_index, attribute_names=None):
"""
Recursively writes a string representation of a decision tree object.
Parameters
----------
tree_obj : sklearn.tree._tree.Tree object
A base decision tree object
node_index : int
Index of the node being printed
attribute_names : list
List of attribute names
Returns
-------
tree_str : str
String representation of decision tree in the same format as the parf library.
"""
tree_str = ""
if node_index == 0:
tree_str += "{0:d}\n".format(tree_obj.node_count)
if tree_obj.feature[node_index] >= 0:
if attribute_names is None:
attr_val = "{0:d}".format(tree_obj.feature[node_index])
else:
attr_val = attribute_names[tree_obj.feature[node_index]]
tree_str += "b {0:d} {1} {2:0.4f} {3:d} {4:1.5e}\n".format(node_index,
attr_val,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index],
tree_obj.threshold[node_index])
else:
if tree_obj.max_n_classes > 1:
leaf_value = "{0:d}".format(tree_obj.value[node_index].argmax())
else:
leaf_value = "{0}".format(tree_obj.value[node_index][0][0])
tree_str += "l {0:d} {1} {2:0.4f} {3:d}\n".format(node_index,
leaf_value,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index])
if tree_obj.children_left[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_left[node_index], attribute_names)
if tree_obj.children_right[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_right[node_index], attribute_names)
return tree_str | [
"def",
"print_tree_recursive",
"(",
"tree_obj",
",",
"node_index",
",",
"attribute_names",
"=",
"None",
")",
":",
"tree_str",
"=",
"\"\"",
"if",
"node_index",
"==",
"0",
":",
"tree_str",
"+=",
"\"{0:d}\\n\"",
".",
"format",
"(",
"tree_obj",
".",
"node_count",
... | Recursively writes a string representation of a decision tree object.
Parameters
----------
tree_obj : sklearn.tree._tree.Tree object
A base decision tree object
node_index : int
Index of the node being printed
attribute_names : list
List of attribute names
Returns
-------
tree_str : str
String representation of decision tree in the same format as the parf library. | [
"Recursively",
"writes",
"a",
"string",
"representation",
"of",
"a",
"decision",
"tree",
"object",
"."
] | python | train |
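A hedged usage sketch for the walker above: fit a small scikit-learn tree and start the recursion at the root (node index 0); `model.tree_` is the low-level `Tree` object the function expects:

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
model = DecisionTreeClassifier(max_depth=2, random_state=0)
model.fit(iris.data, iris.target)
print(print_tree_recursive(model.tree_, 0, attribute_names=iris.feature_names))
```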
adafruit/Adafruit_CircuitPython_MatrixKeypad | adafruit_matrixkeypad.py | https://github.com/adafruit/Adafruit_CircuitPython_MatrixKeypad/blob/f530b1a920a40ef09ec1394b7760f243a243045a/adafruit_matrixkeypad.py#L69-L91 | def pressed_keys(self):
"""An array containing all detected keys that are pressed from the initalized
list-of-lists passed in during creation"""
# make a list of all the keys that are detected
pressed = []
# set all pins to be inputs w/pullups
for pin in self.row_pins+self.col_pins:
pin.direction = Direction.INPUT
pin.pull = Pull.UP
for row in range(len(self.row_pins)):
# set one row low at a time
self.row_pins[row].direction = Direction.OUTPUT
self.row_pins[row].value = False
# check the column pins, which ones are pulled down
for col in range(len(self.col_pins)):
if not self.col_pins[col].value:
pressed.append(self.keys[row][col])
# reset the pin to be an input
self.row_pins[row].direction = Direction.INPUT
self.row_pins[row].pull = Pull.UP
return pressed | [
"def",
"pressed_keys",
"(",
"self",
")",
":",
"# make a list of all the keys that are detected",
"pressed",
"=",
"[",
"]",
"# set all pins pins to be inputs w/pullups",
"for",
"pin",
"in",
"self",
".",
"row_pins",
"+",
"self",
".",
"col_pins",
":",
"pin",
".",
"dire... | An array containing all detected keys that are pressed from the initalized
list-of-lists passed in during creation | [
"An",
"array",
"containing",
"all",
"detected",
"keys",
"that",
"are",
"pressed",
"from",
"the",
"initalized",
"list",
"-",
"of",
"-",
"lists",
"passed",
"in",
"during",
"creation"
] | python | train |
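A typical CircuitPython scan loop for the property above, along the lines of the library's README; the pin assignments are board-specific assumptions:

```python
import time
import board
import digitalio
import adafruit_matrixkeypad

# Hypothetical wiring: four row pins, three column pins
rows = [digitalio.DigitalInOut(p) for p in (board.D1, board.D2, board.D3, board.D4)]
cols = [digitalio.DigitalInOut(p) for p in (board.D5, board.D6, board.D7)]
keys = ((1, 2, 3), (4, 5, 6), (7, 8, 9), ("*", 0, "#"))
keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)

while True:
    pressed = keypad.pressed_keys
    if pressed:
        print("Pressed:", pressed)
    time.sleep(0.1)
```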
qiniu/python-sdk | qiniu/services/compute/app.py | https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/compute/app.py#L180-L192 | def list_apps(self):
"""获得当前账号的应用列表
列出所属应用为当前请求方的应用列表。
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回应用列表,失败返回None
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/apps'.format(self.host)
return http._get_with_qiniu_mac(url, None, self.auth) | [
"def",
"list_apps",
"(",
"self",
")",
":",
"url",
"=",
"'{0}/v3/apps'",
".",
"format",
"(",
"self",
".",
"host",
")",
"return",
"http",
".",
"_get_with_qiniu_mac",
"(",
"url",
",",
"None",
",",
"self",
".",
"auth",
")"
] | Get the list of apps for the current account.
Lists the apps owned by the current requester.
Returns:
A tuple in the form (<result>, <ResponseInfo>)
- result: the app list on success, None on failure
- ResponseInfo: response information for the request | [
"Get the list of apps for the current account"
] | python | train |
thespacedoctor/tastic | tastic/tastic.py | https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/tastic.py#L625-L724 | def sort_tasks(
self,
workflowTags,
indentLevel=1):
"""*order tasks within this taskpaper object via a list of tags*
The order of the tags in the list dictates the order of the sort - first comes first.
**Key Arguments:**
- ``workflowTags`` -- a string of space-separated tags.
**Return:**
- ``None``
**Usage:**
To recursively sort the tasks within a taskpaper document with the following order:
1. *@due*
2. *@flag*
3. *@hold*
4. *@next*
5. *@someday*
6. *@wait*
use the following:
.. code-block:: python
doc.sort_tasks("@due, @flag, @hold, @next, @someday, @wait")
"""
self.refresh
if not isinstance(workflowTags, list):
workflowTagsLists = workflowTags.strip().replace(",", "").replace("@", "")
workflowTagsLists = workflowTagsLists.split(" ")
else:
workflowTagsLists = []
workflowTagsLists[:] = [l.replace("@", "") for l in workflowTags]
matchedTasks = collections.OrderedDict(sorted({}.items()))
unmatchedTasks = []
for wt in workflowTagsLists:
matchedTasks[wt.lower()] = []
for t in self.tasks:
matched = False
for tt in t.tags:
if matched:
break
for wt in workflowTagsLists:
thisTag = tt.lower()
if "(" not in wt:
thisTag = tt.split("(")[0].lower()
if thisTag == wt.lower() and matched == False:
matchedTasks[wt.lower()].append(t)
matched = True
break
if matched == False:
unmatchedTasks.append(t)
sortedTasks = []
for k, v in matchedTasks.iteritems():
sortedTasks += v
oldContent = self.to_string(indentLevel=1)
sortedTasks += unmatchedTasks
self.tasks = sortedTasks
self.content = self.to_string(
title=False, tasks=sortedTasks, indentLevel=0)
hasProjects = False
try:
this = self.projects
hasProjects = True
except:
pass
if hasProjects:
for p in self.projects:
p.tasks = p.sort_tasks(workflowTags, 1)
for t in self.tasks:
t.tasks = t.sort_tasks(workflowTags, 1)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
newContent = self.to_string(
tasks=sortedTasks, indentLevel=1)
if self.parent:
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.content = self.content.replace(self.to_string(indentLevel=0, title=False), self.to_string(
indentLevel=0, title=False, tasks=sortedTasks))
self.refresh
return sortedTasks | [
"def",
"sort_tasks",
"(",
"self",
",",
"workflowTags",
",",
"indentLevel",
"=",
"1",
")",
":",
"self",
".",
"refresh",
"if",
"not",
"isinstance",
"(",
"workflowTags",
",",
"list",
")",
":",
"workflowTagsLists",
"=",
"workflowTags",
".",
"strip",
"(",
")",
... | *order tasks within this taskpaper object via a list of tags*
The order of the tags in the list dictates the order of the sort - first comes first.
**Key Arguments:**
- ``workflowTags`` -- a string of space-separated tags.
**Return:**
- ``None``
**Usage:**
To recursively sort the tasks within a taskpaper document with the following order:
1. *@due*
2. *@flag*
3. *@hold*
4. *@next*
5. *@someday*
6. *@wait*
use the following:
.. code-block:: python
doc.sort_tasks("@due, @flag, @hold, @next, @someday, @wait") | [
"*",
"order",
"tasks",
"within",
"this",
"taskpaper",
"object",
"via",
"a",
"list",
"of",
"tags",
"*"
] | python | train |
openstack/proliantutils | proliantutils/redfish/resources/account_service/account.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/account_service/account.py#L25-L33 | def update_credentials(self, password):
"""Update credentials of a redfish system
:param password: password to be updated
"""
data = {
'Password': password,
}
self._conn.patch(self.path, data=data) | [
"def",
"update_credentials",
"(",
"self",
",",
"password",
")",
":",
"data",
"=",
"{",
"'Password'",
":",
"password",
",",
"}",
"self",
".",
"_conn",
".",
"patch",
"(",
"self",
".",
"path",
",",
"data",
"=",
"data",
")"
] | Update credentials of a redfish system
:param password: password to be updated | [
"Update",
"credentials",
"of",
"a",
"redfish",
"system"
] | python | train |
jsvine/spectra | spectra/grapefruit.py | https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L1824-L1858 | def MonochromeScheme(self):
'''Return 4 colors in the same hue with varying saturation/lightness.
Returns:
A tuple of 4 grapefruit.Color in the same hue as this one,
with varying saturation/lightness.
>>> c = Color.NewFromHsl(30, 0.5, 0.5)
>>> ['(%g, %g, %g)' % clr.hsl for clr in c.MonochromeScheme()]
['(30, 0.2, 0.8)', '(30, 0.5, 0.3)', '(30, 0.2, 0.6)', '(30, 0.5, 0.8)']
'''
def _wrap(x, min, thres, plus):
if (x-min) < thres: return x + plus
else: return x-min
h, s, l = self.__hsl
s1 = _wrap(s, 0.3, 0.1, 0.3)
l1 = _wrap(l, 0.5, 0.2, 0.3)
s2 = s
l2 = _wrap(l, 0.2, 0.2, 0.6)
s3 = s1
l3 = max(0.2, l + (1-l)*0.2)
s4 = s
l4 = _wrap(l, 0.5, 0.2, 0.3)
return (
Color((h, s1, l1), 'hsl', self.__a, self.__wref),
Color((h, s2, l2), 'hsl', self.__a, self.__wref),
Color((h, s3, l3), 'hsl', self.__a, self.__wref),
Color((h, s4, l4), 'hsl', self.__a, self.__wref)) | [
"def",
"MonochromeScheme",
"(",
"self",
")",
":",
"def",
"_wrap",
"(",
"x",
",",
"min",
",",
"thres",
",",
"plus",
")",
":",
"if",
"(",
"x",
"-",
"min",
")",
"<",
"thres",
":",
"return",
"x",
"+",
"plus",
"else",
":",
"return",
"x",
"-",
"min",... | Return 4 colors in the same hue with varying saturation/lightness.
Returns:
A tuple of 4 grapefruit.Color in the same hue as this one,
with varying saturation/lightness.
>>> c = Color.NewFromHsl(30, 0.5, 0.5)
>>> ['(%g, %g, %g)' % clr.hsl for clr in c.MonochromeScheme()]
['(30, 0.2, 0.8)', '(30, 0.5, 0.3)', '(30, 0.2, 0.6)', '(30, 0.5, 0.8)'] | [
"Return",
"4",
"colors",
"in",
"the",
"same",
"hue",
"with",
"varying",
"saturation",
"/",
"lightness",
"."
] | python | train |
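The inner `_wrap` helper drives the doctest values above; a standalone restatement (renaming the `min` parameter, which shadows the builtin) checked against the saturation case from the docstring:

```python
def _wrap(x, floor, thres, plus):
    # Values within `thres` of the floor are bumped up by `plus`;
    # everything else is shifted down by the floor.
    return x + plus if (x - floor) < thres else x - floor

assert abs(_wrap(0.50, 0.3, 0.1, 0.3) - 0.20) < 1e-9  # s=0.5 -> s1=0.2
assert abs(_wrap(0.35, 0.3, 0.1, 0.3) - 0.65) < 1e-9  # near the floor -> bumped up
```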
bear/parsedatetime | parsedatetime/__init__.py | https://github.com/bear/parsedatetime/blob/830775dc5e36395622b41f12317f5e10c303d3a2/parsedatetime/__init__.py#L1644-L1687 | def _partialParseMeridian(self, s, sourceTime):
"""
test if the given C{s} matched CRE_TIMEHMS2, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remaining date/time text, datetime object and
a boolean value describing whether it matched or not
"""
parseStr = None
chunk1 = chunk2 = ''
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
if m.group('minutes') is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
else:
parseStr = m.group('hours')
parseStr += ' ' + m.group('meridian')
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
if parseStr:
debug and log.debug('found (meridian) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalMeridian(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | [
"def",
"_partialParseMeridian",
"(",
"self",
",",
"s",
",",
"sourceTime",
")",
":",
"parseStr",
"=",
"None",
"chunk1",
"=",
"chunk2",
"=",
"''",
"# HH:MM(:SS) am/pm time strings",
"m",
"=",
"self",
".",
"ptc",
".",
"CRE_TIMEHMS2",
".",
"search",
"(",
"s",
... | test if the given C{s} matched CRE_TIMEHMS2, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remaining date/time text, datetime object and
a boolean value describing whether it matched or not | [
"test",
"if",
"giving",
"C",
"{",
"s",
"}",
"matched",
"CRE_TIMEHMS2",
"used",
"by",
"L",
"{",
"parse",
"()",
"}"
] | python | train |
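`_partialParseMeridian` is internal; the public entry point that exercises it is `Calendar.parse()`. A short illustrative call (the resulting date depends on the current time):

```python
import parsedatetime

cal = parsedatetime.Calendar()
time_struct, status = cal.parse("meet me at 8:30 pm")
# status is non-zero when something was parsed; the struct carries 20:30
print(status, time_struct.tm_hour, time_struct.tm_min)
```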
tradenity/python-sdk | tradenity/resources/table_rate_shipping.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_shipping.py#L462-L482 | def create_table_rate_shipping(cls, table_rate_shipping, **kwargs):
"""Create TableRateShipping
Create a new TableRateShipping
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_table_rate_shipping(table_rate_shipping, async=True)
>>> result = thread.get()
:param async bool
:param TableRateShipping table_rate_shipping: Attributes of tableRateShipping to create (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_table_rate_shipping_with_http_info(table_rate_shipping, **kwargs)
else:
(data) = cls._create_table_rate_shipping_with_http_info(table_rate_shipping, **kwargs)
return data | [
"def",
"create_table_rate_shipping",
"(",
"cls",
",",
"table_rate_shipping",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_create_... | Create TableRateShipping
Create a new TableRateShipping
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_table_rate_shipping(table_rate_shipping, async=True)
>>> result = thread.get()
:param async bool
:param TableRateShipping table_rate_shipping: Attributes of tableRateShipping to create (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread. | [
"Create",
"TableRateShipping"
] | python | train |
hammerlab/stancache | stancache/utils.py | https://github.com/hammerlab/stancache/blob/22f2548731d0960c14c0d41f4f64e418d3f22e4c/stancache/utils.py#L45-L56 | def _list_files_in_path(path, pattern="*.stan"):
"""
indexes a directory of stan files
returns a list of matching file paths
"""
results = []
for dirname, subdirs, files in os.walk(path):
for name in files:
if fnmatch(name, pattern):
results.append(os.path.join(dirname, name))
return(results) | [
"def",
"_list_files_in_path",
"(",
"path",
",",
"pattern",
"=",
"\"*.stan\"",
")",
":",
"results",
"=",
"[",
"]",
"for",
"dirname",
",",
"subdirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"name",
"in",
"files",
":",
"if",... | indexes a directory of stan files
returns a list of matching file paths | [
"indexes",
"a",
"directory",
"of",
"stan",
"files",
"returns",
"as",
"dictionary",
"containing",
"contents",
"of",
"files"
] | python | train |
pypa/pipenv | pipenv/vendor/distlib/database.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L407-L439 | def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result | [
"def",
"matches_requirement",
"(",
"self",
",",
"req",
")",
":",
"# Requirement may contain extras - parse to lose those",
"# from what's passed to the matcher",
"r",
"=",
"parse_requirement",
"(",
"req",
")",
"scheme",
"=",
"get_scheme",
"(",
"self",
".",
"metadata",
"... | Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False. | [
"Say",
"if",
"this",
"instance",
"matches",
"(",
"fulfills",
")",
"a",
"requirement",
".",
":",
"param",
"req",
":",
"The",
"requirement",
"to",
"match",
".",
":",
"rtype",
"req",
":",
"str",
":",
"return",
":",
"True",
"if",
"it",
"matches",
"else",
... | python | train |
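A rough equivalent of the provides-matching above written against the `packaging` library — an assumption, since distlib uses its own scheme/matcher objects; `provides` entries are assumed in distlib's "name (version)" form:

```python
from packaging.requirements import Requirement
from packaging.version import Version

def provides_match(provides, req_str):
    req = Requirement(req_str)
    for entry in provides:
        name, _, rest = entry.partition(' ')
        if name.lower() != req.name.lower():
            continue
        version = rest.strip(' ()')
        if version and Version(version) in req.specifier:
            return True
    return False

assert provides_match(['distname (1.2.0)'], 'distname>=1.0')
```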
quantopian/zipline | zipline/utils/pandas_utils.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L48-L63 | def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond | [
"def",
"_time_to_micros",
"(",
"time",
")",
":",
"seconds",
"=",
"time",
".",
"hour",
"*",
"60",
"*",
"60",
"+",
"time",
".",
"minute",
"*",
"60",
"+",
"time",
".",
"second",
"return",
"1000000",
"*",
"seconds",
"+",
"time",
".",
"microsecond"
] | Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings. | [
"Convert",
"a",
"time",
"into",
"microseconds",
"since",
"midnight",
".",
"Parameters",
"----------",
"time",
":",
"datetime",
".",
"time",
"The",
"time",
"to",
"convert",
".",
"Returns",
"-------",
"us",
":",
"int",
"The",
"number",
"of",
"microseconds",
"s... | python | train |
onecodex/onecodex | onecodex/lib/upload.py | https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/lib/upload.py#L782-L861 | def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
"""Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# actually do the upload
client = boto3.client(
"s3",
aws_access_key_id=fields["upload_aws_access_key_id"],
aws_secret_access_key=fields["upload_aws_secret_access_key"],
)
# if boto uses threads, ctrl+c won't work
config = TransferConfig(use_threads=False)
# let boto3 update our progressbar rather than our FASTX wrappers, if applicable
boto_kwargs = {}
if hasattr(file_obj, "progressbar"):
boto_kwargs["Callback"] = file_obj.progressbar.update
file_obj.progressbar = None
try:
client.upload_fileobj(
file_obj,
fields["s3_bucket"],
fields["file_id"],
ExtraArgs={"ServerSideEncryption": "AES256"},
Config=config,
**boto_kwargs
)
except S3UploadFailedError:
raise_connectivity_error(file_name)
# issue a callback
try:
resp = session.post(
callback_url,
json={
"s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
"filename": file_name,
"import_as_document": fields.get("import_as_document", False),
},
)
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
if resp.status_code != 200:
raise_connectivity_error(file_name)
try:
return resp.json()
except ValueError:
return {} | [
"def",
"_s3_intermediate_upload",
"(",
"file_obj",
",",
"file_name",
",",
"fields",
",",
"session",
",",
"callback_url",
")",
":",
"import",
"boto3",
"from",
"boto3",
".",
"s3",
".",
"transfer",
"import",
"TransferConfig",
"from",
"boto3",
".",
"exceptions",
"... | Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL | [
"Uploads",
"a",
"single",
"file",
"-",
"like",
"object",
"to",
"an",
"intermediate",
"S3",
"bucket",
"which",
"One",
"Codex",
"can",
"pull",
"from",
"after",
"receiving",
"a",
"callback",
"."
] | python | train |
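The core boto3 pattern above, reduced to a hedged standalone sketch; the bucket, key, and local file are placeholders, and `use_threads=False` is what keeps Ctrl+C responsive during the transfer:

```python
import boto3
from boto3.s3.transfer import TransferConfig

client = boto3.client("s3")  # credentials taken from the environment
config = TransferConfig(use_threads=False)

with open("reads.fastq.gz", "rb") as fh:
    client.upload_fileobj(
        fh,
        "example-bucket",          # placeholder bucket
        "uploads/reads.fastq.gz",  # placeholder key
        ExtraArgs={"ServerSideEncryption": "AES256"},
        Config=config,
        Callback=lambda sent: print("sent", sent, "bytes"),
    )
```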
blockstack/blockstack-core | blockstack/blockstackd.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1845-L1875 | def rpc_get_zonefiles_by_block( self, from_block, to_block, offset, count, **con_info ):
"""
Get information about zonefiles announced in blocks [@from_block, @to_block]
@offset - offset into result set
@count - max records to return, must be <= 100
Returns {'status': True, 'lastblock' : blockNumber,
'zonefile_info' : [ { 'block_height' : 470000,
'txid' : '0000000',
'zonefile_hash' : '0000000' } ] }
"""
conf = get_blockstack_opts()
if not is_atlas_enabled(conf):
return {'error': 'Not an atlas node', 'http_status': 400}
if not check_block(from_block):
return {'error': 'Invalid from_block height', 'http_status': 400}
if not check_block(to_block):
return {'error': 'Invalid to_block height', 'http_status': 400}
if not check_offset(offset):
return {'error': 'invalid offset', 'http_status': 400}
if not check_count(count, 100):
return {'error': 'invalid count', 'http_status': 400}
zonefile_info = atlasdb_get_zonefiles_by_block(from_block, to_block, offset, count, path=conf['atlasdb_path'])
if 'error' in zonefile_info:
return zonefile_info
return self.success_response( {'zonefile_info': zonefile_info } ) | [
"def",
"rpc_get_zonefiles_by_block",
"(",
"self",
",",
"from_block",
",",
"to_block",
",",
"offset",
",",
"count",
",",
"*",
"*",
"con_info",
")",
":",
"conf",
"=",
"get_blockstack_opts",
"(",
")",
"if",
"not",
"is_atlas_enabled",
"(",
"conf",
")",
":",
"r... | Get information about zonefiles announced in blocks [@from_block, @to_block]
@offset - offset into result set
@count - max records to return, must be <= 100
Returns {'status': True, 'lastblock' : blockNumber,
'zonefile_info' : [ { 'block_height' : 470000,
'txid' : '0000000',
'zonefile_hash' : '0000000' } ] } | [
"Get",
"information",
"about",
"zonefiles",
"announced",
"in",
"blocks",
"["
] | python | train |
collectiveacuity/labPack | labpack/platforms/aws/ssh.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ssh.py#L228-L301 | def script(self, commands, synopsis=True):
'''
a method to run a list of shell command scripts on an AWS instance
:param commands: list of strings with shell commands to pass through connection
:param synopsis: [optional] boolean to simplify progress messages to one line
:return: string with response to last command
'''
title = '%s.script' % self.__class__.__name__
# validate inputs
if isinstance(commands, str):
commands = [commands]
input_fields = {
'commands': commands
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# run commands through paramiko on Windows
response = ''
if self.localhost.os.sysname in ('Windows'):
ssh_key = paramiko.RSAKey.from_private_key_file(self.pem_file)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=self.instance_ip, username=self.ec2.iam.user_name, pkey=ssh_key)
for i in range(len(commands)):
self.ec2.iam.printer('[%s@%s]: %s ... ' % (self.login_name, self.instance_ip, commands[i]), flush=True)
std_in, std_out, std_err = client.exec_command(commands[i], get_pty=True)
if std_err:
self.ec2.iam.printer('ERROR.')
raise Exception('Failure running [%s@%s]: %s\n%s' % (self.login_name, self.instance_ip, commands[i], std_err.decode('utf-8').strip()))
else:
response = std_out.decode('utf-8')
if synopsis:
self.ec2.iam.printer('done.')
else:
if response:
self.ec2.iam.printer('\n%s' % response)
else:
self.ec2.iam.printer('done.')
client.close()
# run command through ssh on other platforms
else:
from subprocess import Popen, PIPE, STDOUT
for i in range(len(commands)):
self.ec2.iam.printer('[%s@%s]: %s ... ' % (self.login_name, self.instance_ip, commands[i]), flush=True)
sys_command = 'ssh -i %s %s@%s %s' % (self.pem_file, self.login_name, self.instance_ip, commands[i])
# DEBUG print(sys_command)
pipes = Popen(sys_command.split(), stdout=PIPE, stderr=STDOUT)
# automatically accept keys
std_out, std_err = pipes.communicate('yes\n'.encode('utf-8'))
if pipes.returncode != 0:
self.ec2.iam.printer('ERROR.')
raise Exception('Failure running [%s@%s]: %s\n%s' % (self.login_name, self.instance_ip, commands[i], std_out.decode('utf-8').strip()))
# report response to individual commands
else:
response = std_out.decode('utf-8')
if synopsis:
self.ec2.iam.printer('done.')
else:
if response:
self.ec2.iam.printer('\n%s' % response)
else:
self.ec2.iam.printer('done.')
# close connection and return last response
return response | [
"def",
"script",
"(",
"self",
",",
"commands",
",",
"synopsis",
"=",
"True",
")",
":",
"title",
"=",
"'%s.script'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"if",
"isinstance",
"(",
"commands",
",",
"str",
")",
":",
"commands",
... | a method to run a list of shell command scripts on an AWS instance
:param commands: list of strings with shell commands to pass through connection
:param synopsis: [optional] boolean to simplify progress messages to one line
:return: string with response to last command | [
"a",
"method",
"to",
"run",
"a",
"list",
"of",
"shell",
"command",
"scripts",
"on",
"AWS",
"instance"
] | python | train |
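Note that the paramiko branch above calls `.decode()` on the channel objects returned by `exec_command()`; those are file-like and need to be `read()` first, and the stderr channel object is always truthy. A corrected minimal pattern (host, user, and key path are placeholders):

```python
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("198.51.100.10", username="ubuntu",
               key_filename="/path/to/key.pem")
stdin, stdout, stderr = client.exec_command("uname -a")
print(stdout.read().decode("utf-8"))           # read() the channel, then decode
exit_code = stdout.channel.recv_exit_status()  # 0 on success
client.close()
```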
PythonCharmers/python-future | src/future/backports/email/_header_value_parser.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2380-L2446 | def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version | [
"def",
"parse_mime_version",
"(",
"value",
")",
":",
"# The [CFWS] is implicit in the RFC 2045 BNF.",
"# XXX: This routine is a bit verbose, should factor out a get_int method.",
"mime_version",
"=",
"MIMEVersion",
"(",
")",
"if",
"not",
"value",
":",
"mime_version",
".",
"defe... | mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS] | [
"mime",
"-",
"version",
"=",
"[",
"CFWS",
"]",
"1",
"*",
"digit",
"[",
"CFWS",
"]",
".",
"[",
"CFWS",
"]",
"1",
"*",
"digit",
"[",
"CFWS",
"]"
] | python | train |
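A hedged usage sketch of the parser above: per the code, the returned `MIMEVersion` token exposes `major`/`minor` and records problems as defects instead of raising:

```python
mv = parse_mime_version("1.0")
print(mv.major, mv.minor)  # -> 1 0

bad = parse_mime_version("1.")
print(bad.defects)         # incomplete version recorded as a defect
```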
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/pool.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/pool.py#L551-L571 | def check_auth(self, all_credentials):
"""Update this socket's authentication.
Log in or out to bring this socket's credentials up to date with
those provided. Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `all_credentials`: dict, maps auth source to MongoCredential.
"""
if all_credentials or self.authset:
cached = set(itervalues(all_credentials))
authset = self.authset.copy()
# Logout any credentials that no longer exist in the cache.
for credentials in authset - cached:
auth.logout(credentials.source, self)
self.authset.discard(credentials)
for credentials in cached - authset:
auth.authenticate(credentials, self)
self.authset.add(credentials) | [
"def",
"check_auth",
"(",
"self",
",",
"all_credentials",
")",
":",
"if",
"all_credentials",
"or",
"self",
".",
"authset",
":",
"cached",
"=",
"set",
"(",
"itervalues",
"(",
"all_credentials",
")",
")",
"authset",
"=",
"self",
".",
"authset",
".",
"copy",
... | Update this socket's authentication.
Log in or out to bring this socket's credentials up to date with
those provided. Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `all_credentials`: dict, maps auth source to MongoCredential. | [
"Update",
"this",
"socket",
"s",
"authentication",
"."
] | python | train |
Hackerfleet/hfos | hfos/schemata/base.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/schemata/base.py#L42-L62 | def uuid_object(title="Reference", description="Select an object", default=None, display=True):
"""Generates a regular expression controlled UUID field"""
uuid = {
'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{'
'4}-['
'a-fA-F0-9]{4}-[a-fA-F0-9]{12}$',
'type': 'string',
'title': title,
'description': description,
}
if not display:
uuid['x-schema-form'] = {
'condition': "false"
}
if default is not None:
uuid['default'] = default
return uuid | [
"def",
"uuid_object",
"(",
"title",
"=",
"\"Reference\"",
",",
"description",
"=",
"\"Select an object\"",
",",
"default",
"=",
"None",
",",
"display",
"=",
"True",
")",
":",
"uuid",
"=",
"{",
"'pattern'",
":",
"'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{'",
"'4}... | Generates a regular expression controlled UUID field | [
"Generates",
"a",
"regular",
"expression",
"controlled",
"UUID",
"field"
] | python | train |
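A quick self-check of the generated schema fragment: any canonical UUID string should satisfy the pattern returned by `uuid_object()` above:

```python
import re
import uuid

field = uuid_object(title="Owner", description="Select an owner object")
assert re.match(field['pattern'], str(uuid.uuid4()))
assert re.match(field['pattern'], "not-a-uuid") is None
```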
theno/fabsetup | fabsetup/fabfile/setup/__init__.py | https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/__init__.py#L428-L445 | def telegram():
'''Install Telegram desktop client for linux (x64).
More info:
https://telegram.org
https://desktop.telegram.org/
'''
if not exists('~/bin/Telegram', msg='Download and install Telegram:'):
run('mkdir -p /tmp/telegram')
run('cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux')
run('cd /tmp/telegram && tar xf linux')
with warn_only():
run('mv /tmp/telegram/Telegram ~/bin')
run('rm -rf /tmp/telegram')
else:
print('skip download, dir ~/bin/Telegram already exists')
run('ln -snf ~/bin/Telegram/Telegram ~/bin/telegram',
msg="\nCreate executable 'telegram':") | [
"def",
"telegram",
"(",
")",
":",
"if",
"not",
"exists",
"(",
"'~/bin/Telegram'",
",",
"msg",
"=",
"'Download and install Telegram:'",
")",
":",
"run",
"(",
"'mkdir -p /tmp/telegram'",
")",
"run",
"(",
"'cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux'",
... | Install Telegram desktop client for linux (x64).
More info:
https://telegram.org
https://desktop.telegram.org/ | [
"Install",
"Telegram",
"desktop",
"client",
"for",
"linux",
"(",
"x64",
")",
"."
] | python | train |
Felspar/django-fost-authn | fost_authn/signature.py | https://github.com/Felspar/django-fost-authn/blob/31623fa9f77570fe9b99962595da12f67f24c409/fost_authn/signature.py#L23-L33 | def fost_hmac_url_signature(
key, secret, host, path, query_string, expires):
"""
Return a signature that corresponds to the signed URL.
"""
if query_string:
document = '%s%s?%s\n%s' % (host, path, query_string, expires)
else:
document = '%s%s\n%s' % (host, path, expires)
signature = sha1_hmac(secret, document)
return signature | [
"def",
"fost_hmac_url_signature",
"(",
"key",
",",
"secret",
",",
"host",
",",
"path",
",",
"query_string",
",",
"expires",
")",
":",
"if",
"query_string",
":",
"document",
"=",
"'%s%s?%s\\n%s'",
"%",
"(",
"host",
",",
"path",
",",
"query_string",
",",
"ex... | Return a signature that corresponds to the signed URL. | [
"Return",
"a",
"signature",
"that",
"corresponds",
"to",
"the",
"signed",
"URL",
"."
] | python | train |
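`sha1_hmac` is imported elsewhere in that module; a stdlib sketch of what such a helper plausibly looks like (an assumption — the real helper may encode its inputs differently):

```python
import hashlib
import hmac

def sha1_hmac(secret, document):
    # HMAC-SHA1 over the signed document, hex-encoded
    return hmac.new(secret.encode('utf-8'), document.encode('utf-8'),
                    hashlib.sha1).hexdigest()

document = 'example.com/fost/signed?page=1\n1400000000'
print(sha1_hmac('shared-secret', document))
```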
justquick/python-varnish | varnish.py | https://github.com/justquick/python-varnish/blob/8f114c74898e6c5ade2ce49c8b595040bd150465/varnish.py#L289-L302 | def run(addr, *commands, **kwargs):
"""
Non-threaded batch command runner returning output results
"""
results = []
handler = VarnishHandler(addr, **kwargs)
for cmd in commands:
if isinstance(cmd, tuple) and len(cmd)>1:
results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])
else:
results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))
break
handler.close()
return results | [
"def",
"run",
"(",
"addr",
",",
"*",
"commands",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"[",
"]",
"handler",
"=",
"VarnishHandler",
"(",
"addr",
",",
"*",
"*",
"kwargs",
")",
"for",
"cmd",
"in",
"commands",
":",
"if",
"isinstance",
"(",... | Non-threaded batch command runner returning output results | [
"Non",
"-",
"threaded",
"batch",
"command",
"runner",
"returning",
"output",
"results"
] | python | train |
googleapis/google-cloud-python | spanner/google/cloud/spanner_v1/keyset.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/keyset.py#L68-L88 | def _to_pb(self):
"""Construct a KeyRange protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeyRange`
:returns: protobuf corresponding to this instance.
"""
kwargs = {}
if self.start_open is not None:
kwargs["start_open"] = _make_list_value_pb(self.start_open)
if self.start_closed is not None:
kwargs["start_closed"] = _make_list_value_pb(self.start_closed)
if self.end_open is not None:
kwargs["end_open"] = _make_list_value_pb(self.end_open)
if self.end_closed is not None:
kwargs["end_closed"] = _make_list_value_pb(self.end_closed)
return KeyRangePB(**kwargs) | [
"def",
"_to_pb",
"(",
"self",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"self",
".",
"start_open",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"start_open\"",
"]",
"=",
"_make_list_value_pb",
"(",
"self",
".",
"start_open",
")",
"if",
"self",
".",
"start... | Construct a KeyRange protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeyRange`
:returns: protobuf corresponding to this instance. | [
"Construct",
"a",
"KeyRange",
"protobuf",
"."
] | python | train |
gem/oq-engine | openquake/commonlib/readinput.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L536-L558 | def check_nonparametric_sources(fname, smodel, investigation_time):
"""
:param fname:
full path to a source model file
:param smodel:
source model object
:param investigation_time:
investigation_time to compare with in the case of
nonparametric sources
:returns:
the nonparametric sources in the model
:raises:
a ValueError if the investigation_time is different from the expected
"""
# NonParametricSeismicSources
np = [src for sg in smodel.src_groups for src in sg
if hasattr(src, 'data')]
if np and smodel.investigation_time != investigation_time:
raise ValueError(
'The source model %s contains an investigation_time '
'of %s, while the job.ini has %s' % (
fname, smodel.investigation_time, investigation_time))
return np | [
"def",
"check_nonparametric_sources",
"(",
"fname",
",",
"smodel",
",",
"investigation_time",
")",
":",
"# NonParametricSeismicSources",
"np",
"=",
"[",
"src",
"for",
"sg",
"in",
"smodel",
".",
"src_groups",
"for",
"src",
"in",
"sg",
"if",
"hasattr",
"(",
"src... | :param fname:
full path to a source model file
:param smodel:
source model object
:param investigation_time:
investigation_time to compare with in the case of
nonparametric sources
:returns:
the nonparametric sources in the model
:raises:
a ValueError if the investigation_time is different from the expected | [
":",
"param",
"fname",
":",
"full",
"path",
"to",
"a",
"source",
"model",
"file",
":",
"param",
"smodel",
":",
"source",
"model",
"object",
":",
"param",
"investigation_time",
":",
"investigation_time",
"to",
"compare",
"with",
"in",
"the",
"case",
"of",
"... | python | train |
Opentrons/opentrons | api/src/opentrons/protocol_api/contexts.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L1085-L1103 | def consolidate(self,
volume: float,
source: List[Well],
dest: Well,
*args, **kwargs) -> 'InstrumentContext':
"""
Move liquid from multiple wells (sources) to a single well (destination)
:param volume: The amount of volume to consolidate from each source
well.
:param source: List of wells from where liquid will be aspirated.
:param dest: The single well into which liquid will be dispensed.
:param kwargs: See :py:meth:`transfer`.
:returns: This instance
"""
self._log.debug("Consolidate {} from {} to {}"
.format(volume, source, dest))
kwargs['mode'] = 'consolidate'
return self.transfer(volume, source, dest, **kwargs) | [
"def",
"consolidate",
"(",
"self",
",",
"volume",
":",
"float",
",",
"source",
":",
"List",
"[",
"Well",
"]",
",",
"dest",
":",
"Well",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"'InstrumentContext'",
":",
"self",
".",
"_log",
".",
"deb... | Move liquid from multiple wells (sources) to a single well(destination)
:param volume: The amount of volume to consolidate from each source
well.
:param source: List of wells from where liquid will be aspirated.
:param dest: The single well into which liquid will be dispensed.
:param kwargs: See :py:meth:`transfer`.
:returns: This instance | [
"Move",
"liquid",
"from",
"multiple",
"wells",
"(",
"sources",
")",
"to",
"a",
"single",
"well",
"(",
"destination",
")"
] | python | train |
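An illustrative Protocol API (v2) fragment calling `consolidate()`; the labware names, slots, and pipette model are assumptions for the sketch, and a real protocol would also declare `metadata` with an `apiLevel`:

```python
from opentrons import protocol_api

def run(protocol: protocol_api.ProtocolContext):
    plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '1')
    tips = protocol.load_labware('opentrons_96_tiprack_300ul', '2')
    p300 = protocol.load_instrument('p300_single', 'right', tip_racks=[tips])
    # Pool 30 uL from every well in column 1 into well H12
    p300.consolidate(30, plate.columns_by_name()['1'],
                     plate.wells_by_name()['H12'])
```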
prompt-toolkit/ptpython | ptpython/ipython.py | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/ipython.py#L227-L243 | def initialize_extensions(shell, extensions):
"""
Partial copy of `InteractiveShellApp.init_extensions` from IPython.
"""
try:
iter(extensions)
except TypeError:
pass # no extensions found
else:
for ext in extensions:
try:
shell.extension_manager.load_extension(ext)
except:
ipy_utils.warn.warn(
"Error in loading extension: %s" % ext +
"\nCheck your config files in %s" % ipy_utils.path.get_ipython_dir())
shell.showtraceback() | [
"def",
"initialize_extensions",
"(",
"shell",
",",
"extensions",
")",
":",
"try",
":",
"iter",
"(",
"extensions",
")",
"except",
"TypeError",
":",
"pass",
"# no extensions found",
"else",
":",
"for",
"ext",
"in",
"extensions",
":",
"try",
":",
"shell",
".",
... | Partial copy of `InteractiveShellApp.init_extensions` from IPython. | [
"Partial",
"copy",
"of",
"InteractiveShellApp",
".",
"init_extensions",
"from",
"IPython",
"."
] | python | train |
wakatime/wakatime | wakatime/packages/pygments/lexers/data.py | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L120-L135 | def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback | [
"def",
"parse_block_scalar_empty_line",
"(",
"indent_token_class",
",",
"content_token_class",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"if",
"(",
"context",
".",
"block_... | Process an empty line in a block scalar. | [
"Process",
"an",
"empty",
"line",
"in",
"a",
"block",
"scalar",
"."
] | python | train |
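`parse_block_scalar_empty_line` is a callback factory: the outer call captures the token classes, and the returned callback splits each matched line at the block-scalar indent. A framework-free sketch of just that split (the Pygments token bookkeeping is omitted):

```python
def make_splitter(indent_width):
    # Closure mirroring how the lexer callback divides a matched line
    # into (indentation, content) at the recorded block-scalar indent.
    def split(text):
        if indent_width is None or len(text) <= indent_width:
            return (text, "")
        return (text[:indent_width], text[indent_width:])
    return split

split = make_splitter(4)
print(split("    hello"))  # ('    ', 'hello')
print(split("  "))         # ('  ', '')
```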
domainaware/parsedmarc | parsedmarc/__init__.py | https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L636-L676 | def parsed_forensic_reports_to_csv(reports):
"""
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
"""
fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
"original_mail_from", "original_rcpt_to", "arrival_date",
"arrival_date_utc", "subject", "message_id",
"authentication_results", "dkim_domain", "source_ip_address",
"source_country", "source_reverse_dns", "source_base_domain",
"delivery_result", "auth_failure", "reported_domain",
"authentication_mechanisms", "sample_headers_only"]
if type(reports) == OrderedDict:
reports = [reports]
csv_file = StringIO()
csv_writer = DictWriter(csv_file, fieldnames=fields)
csv_writer.writeheader()
for report in reports:
row = report.copy()
row["source_ip_address"] = report["source"]["ip_address"]
row["source_reverse_dns"] = report["source"]["reverse_dns"]
row["source_base_domain"] = report["source"]["base_domain"]
row["source_country"] = report["source"]["country"]
del row["source"]
row["subject"] = report["parsed_sample"]["subject"]
row["auth_failure"] = ",".join(report["auth_failure"])
authentication_mechanisms = report["authentication_mechanisms"]
row["authentication_mechanisms"] = ",".join(
authentication_mechanisms)
del row["sample"]
del row["parsed_sample"]
csv_writer.writerow(row)
return csv_file.getvalue() | [
"def",
"parsed_forensic_reports_to_csv",
"(",
"reports",
")",
":",
"fields",
"=",
"[",
"\"feedback_type\"",
",",
"\"user_agent\"",
",",
"\"version\"",
",",
"\"original_envelope_id\"",
",",
"\"original_mail_from\"",
",",
"\"original_rcpt_to\"",
",",
"\"arrival_date\"",
","... | Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers | [
"Converts",
"one",
"or",
"more",
"parsed",
"forensic",
"reports",
"to",
"flat",
"CSV",
"format",
"including",
"headers"
] | python | test |
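The flattening strategy above — hoist nested `source` fields into top-level columns, then serialize with `DictWriter` — works the same at any scale. A condensed runnable sketch with the field list trimmed for brevity:

```python
from csv import DictWriter
from io import StringIO

def reports_to_csv(reports):
    fields = ["subject", "source_ip_address", "source_country"]
    out = StringIO()
    writer = DictWriter(out, fieldnames=fields)
    writer.writeheader()
    for report in reports:
        # Flatten the nested "source" mapping into flat CSV columns.
        writer.writerow({
            "subject": report["subject"],
            "source_ip_address": report["source"]["ip_address"],
            "source_country": report["source"]["country"],
        })
    return out.getvalue()

print(reports_to_csv([{"subject": "delivery failure",
                       "source": {"ip_address": "198.51.100.7",
                                  "country": "US"}}]))
```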
dswah/pyGAM | pygam/terms.py | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L767-L795 | def build_columns(self, X, verbose=False):
"""construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
"""
X[:, self.feature][:, np.newaxis]
splines = b_spline_basis(X[:, self.feature],
edge_knots=self.edge_knots_,
spline_order=self.spline_order,
n_splines=self.n_splines,
sparse=True,
periodic=self.basis in ['cp'],
verbose=verbose)
if self.by is not None:
splines = splines.multiply(X[:, self.by][:, np.newaxis])
return splines | [
"def",
"build_columns",
"(",
"self",
",",
"X",
",",
"verbose",
"=",
"False",
")",
":",
"X",
"[",
":",
",",
"self",
".",
"feature",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"splines",
"=",
"b_spline_basis",
"(",
"X",
"[",
":",
",",
"self",
... | construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows | [
"construct",
"the",
"model",
"matrix",
"columns",
"for",
"the",
"term"
] | python | train |
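The `by` interaction in `build_columns` multiplies every row of the sparse spline basis by the matching value of another feature. A small scipy/numpy sketch of that row-wise scaling, with an identity matrix standing in for the real B-spline basis:

```python
import numpy as np
from scipy import sparse

basis = sparse.csr_matrix(np.eye(3))          # stand-in for the spline basis
by = np.array([0.5, 2.0, 1.0])                # the "by" feature column
weighted = basis.multiply(by[:, np.newaxis])  # row-wise scaling, stays sparse
print(weighted.toarray())
```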
blackecho/Deep-Learning-TensorFlow | yadlt/models/autoencoders/deep_autoencoder.py | https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/autoencoders/deep_autoencoder.py#L180-L206 | def build_model(self, n_features, encoding_w=None, encoding_b=None):
"""Create the computational graph for the reconstruction task.
:param n_features: Number of features
:param encoding_w: list of weights for the encoding layers.
:param encoding_b: list of biases for the encoding layers.
:return: self
"""
self._create_placeholders(n_features, n_features)
if encoding_w and encoding_b:
self.encoding_w_ = encoding_w
self.encoding_b_ = encoding_b
else:
self._create_variables(n_features)
self._create_encoding_layers()
self._create_decoding_layers()
variables = []
variables.extend(self.encoding_w_)
variables.extend(self.encoding_b_)
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
self.cost = self.loss.compile(
self.reconstruction, self.input_labels, regterm=regterm)
self.train_step = self.trainer.compile(self.cost) | [
"def",
"build_model",
"(",
"self",
",",
"n_features",
",",
"encoding_w",
"=",
"None",
",",
"encoding_b",
"=",
"None",
")",
":",
"self",
".",
"_create_placeholders",
"(",
"n_features",
",",
"n_features",
")",
"if",
"encoding_w",
"and",
"encoding_b",
":",
"sel... | Create the computational graph for the reconstruction task.
:param n_features: Number of features
:param encoding_w: list of weights for the encoding layers.
:param encoding_b: list of biases for the encoding layers.
:return: self | [
"Create",
"the",
"computational",
"graph",
"for",
"the",
"reconstruction",
"task",
"."
] | python | train |
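Stripped of the TensorFlow plumbing, `build_model` follows a reuse-or-initialize pattern: accept pretrained layer parameters when both are supplied, otherwise create fresh ones. A generic sketch of that pattern:

```python
import numpy as np

def build_params(n_features, weights=None, biases=None):
    # Reuse pretrained parameters if both are supplied...
    if weights is not None and biases is not None:
        return weights, biases
    # ...otherwise initialize fresh ones.
    rng = np.random.default_rng(0)
    return rng.normal(size=(n_features, n_features)), np.zeros(n_features)

w, b = build_params(4)           # fresh initialization
w2, b2 = build_params(4, w, b)   # reuse, e.g. from layer-wise pretraining
print(w2 is w, b2 is b)          # True True
```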
gbowerman/azurerm | azurerm/networkrp.py | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/networkrp.py#L608-L621 | def list_nsgs_all(access_token, subscription_id):
'''List all network security groups in a subscription.
Args:
access_token (str): a valid Azure Authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of all network security groups in a subscription.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'networkSecurityGroups?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | [
"def",
"list_nsgs_all",
"(",
"access_token",
",",
"subscription_id",
")",
":",
"endpoint",
"=",
"''",
".",
"join",
"(",
"[",
"get_rm_endpoint",
"(",
")",
",",
"'/subscriptions/'",
",",
"subscription_id",
",",
"'/providers/Microsoft.Network/'",
",",
"'networkSEcurity... | List all network security groups in a subscription.
Args:
access_token (str): a valid Azure Authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of all network security groups in a subscription. | [
"List",
"all",
"network",
"security",
"groups",
"in",
"a",
"subscription",
".",
"Args",
":",
"access_token",
"(",
"str",
")",
":",
"a",
"valid",
"Azure",
"Authentication",
"token",
".",
"subscription_id",
"(",
"str",
")",
":",
"Azure",
"subscription",
"id",
... | python | train |
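`list_nsgs_all` is a URL builder plus an authenticated GET. A sketch of just the URL construction; the endpoint and `NETWORK_API` version below are placeholder assumptions, and the real helper also signs the request with the bearer token:

```python
NETWORK_API = '2018-08-01'  # assumed API version, for illustration only

def build_nsg_list_url(rm_endpoint, subscription_id):
    return ''.join([rm_endpoint,
                    '/subscriptions/', subscription_id,
                    '/providers/Microsoft.Network/',
                    'networkSecurityGroups?api-version=', NETWORK_API])

print(build_nsg_list_url('https://management.azure.com',
                         '00000000-0000-0000-0000-000000000000'))
```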
ambitioninc/django-entity | entity/sync.py | https://github.com/ambitioninc/django-entity/blob/ebc61f34313c52f4ef5819eb1da25b2ad837e80c/entity/sync.py#L120-L135 | def _get_model_objs_to_sync(model_ids_to_sync, model_objs_map, sync_all):
"""
Given the model IDs to sync, fetch all model objects to sync
"""
model_objs_to_sync = {}
for ctype, model_ids_to_sync_for_ctype in model_ids_to_sync.items():
model_qset = entity_registry.entity_registry.get(ctype.model_class()).queryset
if not sync_all:
model_objs_to_sync[ctype] = model_qset.filter(id__in=model_ids_to_sync_for_ctype)
else:
model_objs_to_sync[ctype] = [
model_objs_map[ctype, model_id] for model_id in model_ids_to_sync_for_ctype
]
return model_objs_to_sync | [
"def",
"_get_model_objs_to_sync",
"(",
"model_ids_to_sync",
",",
"model_objs_map",
",",
"sync_all",
")",
":",
"model_objs_to_sync",
"=",
"{",
"}",
"for",
"ctype",
",",
"model_ids_to_sync_for_ctype",
"in",
"model_ids_to_sync",
".",
"items",
"(",
")",
":",
"model_qset... | Given the model IDs to sync, fetch all model objects to sync | [
"Given",
"the",
"model",
"IDs",
"to",
"sync",
"fetch",
"all",
"model",
"objects",
"to",
"sync"
] | python | train |
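A framework-free sketch of the branching in `_get_model_objs_to_sync`: when syncing everything, pull pre-fetched objects from the lookup map; otherwise filter the backing store by id. Plain dicts stand in for Django content types and querysets:

```python
def objs_to_sync(ids_by_type, obj_map, store, sync_all):
    result = {}
    for ctype, ids in ids_by_type.items():
        if sync_all:
            # Objects were already fetched; look them up by (type, id).
            result[ctype] = [obj_map[(ctype, i)] for i in ids]
        else:
            # Fetch only the requested ids from the backing store.
            result[ctype] = [o for o in store[ctype] if o["id"] in ids]
    return result

store = {"book": [{"id": 1}, {"id": 2}, {"id": 3}]}
print(objs_to_sync({"book": {1, 3}}, {}, store, sync_all=False))
```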
Phlya/adjustText | adjustText/__init__.py | https://github.com/Phlya/adjustText/blob/bebc4925dffb24508af6e371d4961850fe815fe8/adjustText/__init__.py#L206-L253 | def repel_text(texts, renderer=None, ax=None, expand=(1.2, 1.2),
only_use_max_min=False, move=False):
"""
Repel texts from each other while expanding their bounding boxes by expand
(x, y), e.g. (1.2, 1.2) would multiply width and height by 1.2.
Requires a renderer to get the actual sizes of the text, and to that end
either one needs to be directly provided, or the axes have to be specified,
and the renderer is then got from the axes object.
"""
if ax is None:
ax = plt.gca()
if renderer is None:
r = get_renderer(ax.get_figure())
else:
r = renderer
bboxes = get_bboxes(texts, r, expand, ax=ax)
xmins = [bbox.xmin for bbox in bboxes]
xmaxs = [bbox.xmax for bbox in bboxes]
ymaxs = [bbox.ymax for bbox in bboxes]
ymins = [bbox.ymin for bbox in bboxes]
overlaps_x = np.zeros((len(bboxes), len(bboxes)))
overlaps_y = np.zeros_like(overlaps_x)
overlap_directions_x = np.zeros_like(overlaps_x)
overlap_directions_y = np.zeros_like(overlaps_y)
for i, bbox1 in enumerate(bboxes):
overlaps = get_points_inside_bbox(xmins*2+xmaxs*2, (ymins+ymaxs)*2,
bbox1) % len(bboxes)
overlaps = np.unique(overlaps)
for j in overlaps:
bbox2 = bboxes[j]
x, y = bbox1.intersection(bbox1, bbox2).size
overlaps_x[i, j] = x
overlaps_y[i, j] = y
direction = np.sign(bbox1.extents - bbox2.extents)[:2]
overlap_directions_x[i, j] = direction[0]
overlap_directions_y[i, j] = direction[1]
move_x = overlaps_x*overlap_directions_x
move_y = overlaps_y*overlap_directions_y
delta_x = move_x.sum(axis=1)
delta_y = move_y.sum(axis=1)
q = np.sum(overlaps_x), np.sum(overlaps_y)
if move:
move_texts(texts, delta_x, delta_y, bboxes, ax=ax)
return delta_x, delta_y, q | [
"def",
"repel_text",
"(",
"texts",
",",
"renderer",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"expand",
"=",
"(",
"1.2",
",",
"1.2",
")",
",",
"only_use_max_min",
"=",
"False",
",",
"move",
"=",
"False",
")",
":",
"if",
"ax",
"is",
"None",
":",
"... | Repel texts from each other while expanding their bounding boxes by expand
(x, y), e.g. (1.2, 1.2) would multiply width and height by 1.2.
Requires a renderer to get the actual sizes of the text, and to that end
either one needs to be directly provided, or the axes have to be specified,
and the renderer is then obtained from the axes object. | [
"Repel",
"texts",
"from",
"each",
"other",
"while",
"expanding",
"their",
"bounding",
"boxes",
"by",
"expand",
"(",
"x",
"y",
")",
"e",
".",
"g",
".",
"(",
"1",
".",
"2",
"1",
".",
"2",
")",
"would",
"multiply",
"width",
"and",
"height",
"by",
"1",... | python | train |
aws/sagemaker-python-sdk | src/sagemaker/session.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/session.py#L937-L985 | def endpoint_from_job(self, job_name, initial_instance_count, instance_type,
deployment_image=None, name=None, role=None, wait=True,
model_environment_vars=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT,
accelerator_type=None):
"""Create an ``Endpoint`` using the results of a successful training job.
Specify the job name, Docker image containing the inference code, and hardware configuration to deploy
the model. Internally, the API creates an Amazon SageMaker model (that describes the model artifacts and
the Docker image containing inference code), endpoint configuration (describing the hardware to deploy
for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them).
In response, the API returns the endpoint name to which you can send requests for inferences.
Args:
job_name (str): Name of the training job to deploy the results of.
initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of
active instances for an endpoint at any given time varies due to autoscaling.
instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
for example, 'ml.c4.xlarge'.
deployment_image (str): The Docker image which defines the inference code to be used as the entry point for
accepting prediction requests. If not specified, uses the image used for the training job.
name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name.
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).
model_environment_vars (dict[str, str]): Environment variables to set on the model container
(default: None).
vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model.
Default: use VpcConfig from training job.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
str: Name of the ``Endpoint`` that is created.
"""
job_desc = self.sagemaker_client.describe_training_job(TrainingJobName=job_name)
output_url = job_desc['ModelArtifacts']['S3ModelArtifacts']
deployment_image = deployment_image or job_desc['AlgorithmSpecification']['TrainingImage']
role = role or job_desc['RoleArn']
name = name or job_name
vpc_config_override = _vpc_config_from_training_job(job_desc, vpc_config_override)
return self.endpoint_from_model_data(model_s3_location=output_url, deployment_image=deployment_image,
initial_instance_count=initial_instance_count, instance_type=instance_type,
name=name, role=role, wait=wait,
model_environment_vars=model_environment_vars,
model_vpc_config=vpc_config_override, accelerator_type=accelerator_type) | [
"def",
"endpoint_from_job",
"(",
"self",
",",
"job_name",
",",
"initial_instance_count",
",",
"instance_type",
",",
"deployment_image",
"=",
"None",
",",
"name",
"=",
"None",
",",
"role",
"=",
"None",
",",
"wait",
"=",
"True",
",",
"model_environment_vars",
"=... | Create an ``Endpoint`` using the results of a successful training job.
Specify the job name, Docker image containing the inference code, and hardware configuration to deploy
the model. Internally, the API creates an Amazon SageMaker model (that describes the model artifacts and
the Docker image containing inference code), endpoint configuration (describing the hardware to deploy
for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them).
In response, the API returns the endpoint name to which you can send requests for inferences.
Args:
job_name (str): Name of the training job to deploy the results of.
initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of
active instances for an endpoint at any given time varies due to autoscaling.
instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
for example, 'ml.c4.xlarge'.
deployment_image (str): The Docker image which defines the inference code to be used as the entry point for
accepting prediction requests. If not specified, uses the image used for the training job.
name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name.
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).
model_environment_vars (dict[str, str]): Environment variables to set on the model container
(default: None).
vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model.
Default: use VpcConfig from training job.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
str: Name of the ``Endpoint`` that is created. | [
"Create",
"an",
"Endpoint",
"using",
"the",
"results",
"of",
"a",
"successful",
"training",
"job",
"."
] | python | train |
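Before delegating to `endpoint_from_model_data`, the method resolves every optional argument against the training-job description. A plain-Python sketch of that fallback logic; the `job_desc` keys below match the `DescribeTrainingJob` fields read in the body above, and the sample values are hypothetical:

```python
def resolve_deploy_args(job_desc, job_name, deployment_image=None,
                        name=None, role=None):
    # Each unset argument falls back to the value recorded on the job.
    return {
        "model_s3_location": job_desc["ModelArtifacts"]["S3ModelArtifacts"],
        "deployment_image": deployment_image
        or job_desc["AlgorithmSpecification"]["TrainingImage"],
        "role": role or job_desc["RoleArn"],
        "name": name or job_name,
    }

job_desc = {  # abbreviated, hypothetical response
    "ModelArtifacts": {"S3ModelArtifacts": "s3://bucket/model.tar.gz"},
    "AlgorithmSpecification": {"TrainingImage": "123.dkr.ecr/image:1"},
    "RoleArn": "arn:aws:iam::123456789012:role/SageMakerRole",
}
print(resolve_deploy_args(job_desc, "my-training-job"))
```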
yoavaviram/python-amazon-simple-product-api | amazon/api.py | https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L390-L424 | def cart_modify(self, items, CartId=None, HMAC=None, **kwargs):
"""CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`.
"""
if not CartId or not HMAC:
raise CartException('CartId required for CartModify call')
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
cart_item_id_key_template = 'Item.{0}.CartItemId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[cart_item_id_key_template.format(i)] = item['cart_item_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartModify(CartId=CartId, HMAC=HMAC, **kwargs)
root = objectify.fromstring(response)
new_cart = AmazonCart(root)
self._check_for_cart_error(new_cart)
return new_cart | [
"def",
"cart_modify",
"(",
"self",
",",
"items",
",",
"CartId",
"=",
"None",
",",
"HMAC",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"CartId",
"or",
"not",
"HMAC",
":",
"raise",
"CartException",
"(",
"'CartId required for CartModify call'... | CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`. | [
"CartAdd",
".",
":",
"param",
"items",
":",
"A",
"dictionary",
"containing",
"the",
"items",
"to",
"be",
"added",
"to",
"the",
"cart",
".",
"Or",
"a",
"list",
"containing",
"these",
"dictionaries",
".",
"example",
":",
"[",
"{",
"cart_item_id",
":",
"rt2... | python | train |
pyrogram/pyrogram | pyrogram/client/client.py | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L410-L431 | def idle(self, stop_signals: tuple = (SIGINT, SIGTERM, SIGABRT)):
"""Blocks the program execution until one of the signals are received,
then gently stop the Client by closing the underlying connection.
Args:
stop_signals (``tuple``, *optional*):
Iterable containing signals the signal handler will listen to.
Defaults to (SIGINT, SIGTERM, SIGABRT).
"""
def signal_handler(*args):
self.is_idle = False
for s in stop_signals:
signal(s, signal_handler)
self.is_idle = True
while self.is_idle:
time.sleep(1)
self.stop() | [
"def",
"idle",
"(",
"self",
",",
"stop_signals",
":",
"tuple",
"=",
"(",
"SIGINT",
",",
"SIGTERM",
",",
"SIGABRT",
")",
")",
":",
"def",
"signal_handler",
"(",
"*",
"args",
")",
":",
"self",
".",
"is_idle",
"=",
"False",
"for",
"s",
"in",
"stop_signa... | Blocks the program execution until one of the signals are received,
then gently stop the Client by closing the underlying connection.
Args:
stop_signals (``tuple``, *optional*):
Iterable containing signals the signal handler will listen to.
Defaults to (SIGINT, SIGTERM, SIGABRT). | [
"Blocks",
"the",
"program",
"execution",
"until",
"one",
"of",
"the",
"signals",
"are",
"received",
"then",
"gently",
"stop",
"the",
"Client",
"by",
"closing",
"the",
"underlying",
"connection",
"."
] | python | train |
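A self-contained sketch of the same idle pattern: a signal handler flips a flag that a sleep loop polls. `ToyClient` is hypothetical; calling `idle()` blocks until Ctrl+C or SIGTERM arrives:

```python
import time
from signal import SIGINT, SIGTERM, signal

class ToyClient:
    """Hypothetical client, for illustration only."""

    def stop(self):
        print("connection closed")

    def idle(self, stop_signals=(SIGINT, SIGTERM)):
        def handler(*_args):
            self.is_idle = False        # flipped from the signal handler

        for s in stop_signals:
            signal(s, handler)
        self.is_idle = True
        while self.is_idle:             # poll until a signal arrives
            time.sleep(1)
        self.stop()

# ToyClient().idle()  # uncomment to block until SIGINT/SIGTERM
```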
Opentrons/opentrons | api/src/opentrons/legacy_api/instruments/pipette.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L1256-L1371 | def transfer(self, volume, source, dest, **kwargs):
"""
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`.) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
kwargs['mode'] = kwargs.get('mode', 'transfer')
touch_tip = kwargs.get('touch_tip', False)
if touch_tip is True:
touch_tip = -1
kwargs['touch_tip'] = touch_tip
tip_options = {
'once': 1,
'never': 0,
'always': float('inf')
}
tip_option = kwargs.get('new_tip', 'once')
tips = tip_options.get(tip_option)
if tips is None:
raise ValueError('Unknown "new_tip" option: {}'.format(tip_option))
plan = self._create_transfer_plan(volume, source, dest, **kwargs)
self._run_transfer_plan(tips, plan, **kwargs)
return self | [
"def",
"transfer",
"(",
"self",
",",
"volume",
",",
"source",
",",
"dest",
",",
"*",
"*",
"kwargs",
")",
":",
"# Note: currently it varies whether the pipette should have a tip on",
"# or not depending on the parameters for this call, so we cannot",
"# create a very reliable asse... | Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`.) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP | [
"Transfer",
"will",
"move",
"a",
"volume",
"of",
"liquid",
"from",
"a",
"source",
"location",
"(",
"s",
")",
"to",
"a",
"dest",
"location",
"(",
"s",
")",
".",
"It",
"is",
"a",
"higher",
"-",
"level",
"command",
"incorporating",
"other",
":",
"any",
... | python | train |
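One detail worth isolating from the long body above is the `new_tip` handling: the user-facing strings map onto a tip budget, and unknown values fail fast. A minimal sketch:

```python
def tips_for(new_tip='once'):
    tip_options = {'once': 1, 'never': 0, 'always': float('inf')}
    tips = tip_options.get(new_tip)
    if tips is None:
        raise ValueError('Unknown "new_tip" option: {}'.format(new_tip))
    return tips

print(tips_for('always'))   # inf: a fresh tip for every transfer
print(tips_for('never'))    # 0: the caller manages tips
```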
ZELLMECHANIK-DRESDEN/fcswrite | fcswrite/fcswrite.py | https://github.com/ZELLMECHANIK-DRESDEN/fcswrite/blob/5584983aa1eb927660183252039e73285c0724b3/fcswrite/fcswrite.py#L13-L201 | def write_fcs(filename, chn_names, data,
endianness="big",
compat_chn_names=True,
compat_copy=True,
compat_negative=True,
compat_percent=True,
compat_max_int16=10000):
"""Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
chn_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format.
"""
filename = pathlib.Path(filename)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=float)
# remove rows with nan values
nanrows = np.isnan(data).any(axis=1)
if np.sum(nanrows):
msg = "Rows containing NaNs are not written to {}!".format(filename)
warnings.warn(msg)
data = data[~nanrows]
if endianness not in ["little", "big"]:
raise ValueError("`endianness` must be 'little' or 'big'!")
msg = "length of `chn_names` must match length of 2nd axis of `data`"
assert len(chn_names) == data.shape[1], msg
rpl = [["µ", "u"],
["²", "2"],
]
if compat_chn_names:
# Compatibility mode: Clean up headers.
rpl += [[" ", ""],
["?", ""],
["_", ""],
]
for ii in range(len(chn_names)):
for (a, b) in rpl:
chn_names[ii] = chn_names[ii].replace(a, b)
# Data with values between 0 and 1
pcnt_cands = []
for ch in range(data.shape[1]):
if data[:, ch].min() >= 0 and data[:, ch].max() <= 1:
pcnt_cands.append(ch)
if compat_percent and pcnt_cands:
# Compatibility mode: Scale values b/w 0 and 1 to percent
if compat_copy:
# copy if requested
data = data.copy()
for ch in pcnt_cands:
data[:, ch] *= 100
if compat_negative:
toflip = []
for ch in range(data.shape[1]):
if np.mean(data[:, ch]) < 0:
toflip.append(ch)
if len(toflip):
if compat_copy:
# copy if requested
data = data.copy()
for ch in toflip:
data[:, ch] *= -1
# DATA segment
data1 = data.flatten().tolist()
DATA = struct.pack('>%sf' % len(data1), *data1)
# TEXT segment
header_size = 256
if endianness == "little":
# use little endian
byteord = '1,2,3,4'
else:
# use big endian
byteord = '4,3,2,1'
TEXT = '/$BEGINANALYSIS/0/$ENDANALYSIS/0'
TEXT += '/$BEGINSTEXT/0/$ENDSTEXT/0'
# Add placeholders for $BEGINDATA and $ENDDATA, because we don't
# know yet how long TEXT is.
TEXT += '/$BEGINDATA/{data_start_byte}/$ENDDATA/{data_end_byte}'
TEXT += '/$BYTEORD/{0}/$DATATYPE/F'.format(byteord)
TEXT += '/$MODE/L/$NEXTDATA/0/$TOT/{0}'.format(data.shape[0])
TEXT += '/$PAR/{0}'.format(data.shape[1])
# Check for content of data columns and set range
for jj in range(data.shape[1]):
# Set data maximum to that of int16
if (compat_max_int16 and
np.max(data[:, jj]) > compat_max_int16 and
np.max(data[:, jj]) < 2**15):
pnrange = int(2**15)
# Set range for data with values between 0 and 1
elif jj in pcnt_cands:
if compat_percent: # scaled to 100%
pnrange = 100
else: # not scaled
pnrange = 1
# default: set range to maximum value found in column
else:
pnrange = int(abs(np.max(data[:, jj])))
# TODO:
# - Set log/lin
fmt_str = '/$P{0}B/32/$P{0}E/0,0/$P{0}N/{1}/$P{0}R/{2}/$P{0}D/Linear'
TEXT += fmt_str.format(jj+1, chn_names[jj], pnrange)
TEXT += '/'
# SET $BEGINDATA and $ENDDATA using the current size of TEXT plus padding.
text_padding = 47 # for visual separation and safety
data_start_byte = header_size + len(TEXT) + text_padding
data_end_byte = data_start_byte + len(DATA) - 1
TEXT = TEXT.format(data_start_byte=data_start_byte,
data_end_byte=data_end_byte)
lentxt = len(TEXT)
# Pad TEXT segment with spaces until data_start_byte
TEXT = TEXT.ljust(data_start_byte - header_size, " ")
# HEADER segment
ver = 'FCS3.0'
textfirst = '{0: >8}'.format(header_size)
textlast = '{0: >8}'.format(lentxt + header_size - 1)
# Starting with FCS 3.0, data segment can end beyond byte 99,999,999,
# in which case a zero is written in each of the two header fields (the
# values are given in the text segment keywords $BEGINDATA and $ENDDATA)
if data_end_byte <= 99999999:
datafirst = '{0: >8}'.format(data_start_byte)
datalast = '{0: >8}'.format(data_end_byte)
else:
datafirst = '{0: >8}'.format(0)
datalast = '{0: >8}'.format(0)
anafirst = '{0: >8}'.format(0)
analast = '{0: >8}'.format(0)
HEADER = '{0: <256}'.format(ver + ' '
+ textfirst
+ textlast
+ datafirst
+ datalast
+ anafirst
+ analast)
# Write data
with filename.open("wb") as fd:
fd.write(HEADER.encode("ascii", "replace"))
fd.write(TEXT.encode("ascii", "replace"))
fd.write(DATA)
fd.write(b'00000000') | [
"def",
"write_fcs",
"(",
"filename",
",",
"chn_names",
",",
"data",
",",
"endianness",
"=",
"\"big\"",
",",
"compat_chn_names",
"=",
"True",
",",
"compat_copy",
"=",
"True",
",",
"compat_negative",
"=",
"True",
",",
"compat_percent",
"=",
"True",
",",
"compa... | Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
chn_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format. | [
"Write",
"numpy",
"data",
"to",
"an",
".",
"fcs",
"file",
"(",
"FCS3",
".",
"0",
"file",
"format",
")"
] | python | test |
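The DATA segment written above is just the event matrix flattened and packed as big-endian 32-bit floats. A small runnable sketch of that packing step:

```python
import struct
import numpy as np

data = np.array([[1.0, 2.0], [3.0, 4.0]])
flat = data.flatten().tolist()
blob = struct.pack('>%sf' % len(flat), *flat)   # '>' = big-endian float32
print(len(blob))                                # 16 bytes: four float32 values
print(struct.unpack('>4f', blob))               # round-trips to the input
```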
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L457-L468 | def get_vnetwork_dvpgs_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_vnetwork_dvpgs_output_instance_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_vnetwork_dvpgs",
"=",
"ET",
".",
"Element",
"(",
"\"get_vnetwork_dvpgs\"",
")",
"config",
"=",
"ge... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
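These auto-generated helpers all follow one ElementTree pattern: build the request tree, set the leaf's text, and hand the tree to a callback. A minimal sketch of the tree construction alone:

```python
import xml.etree.ElementTree as ET

def build_request(instance_id):
    # Mirrors the generated structure: a root element, an output child,
    # and an instance-id leaf carrying the payload.
    root = ET.Element("get_vnetwork_dvpgs")
    output = ET.SubElement(root, "output")
    leaf = ET.SubElement(output, "instance-id")
    leaf.text = instance_id
    return ET.tostring(root)

print(build_request("42"))
```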
chaoss/grimoirelab-sortinghat | sortinghat/parsing/mozilla.py | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/parsing/mozilla.py#L84-L160 | def __parse_identities(self, json):
"""Parse identities using Mozillians format.
The Mozillians identities format is a JSON document under the
"results" key. The document should follow the next schema:
{
"results" : [
{
"_url": "https://example.com/api/v2/users/1/",
"alternate_emails": [{
"email": "jsmith@example.net",
"privacy": "Public"
}],
"email": {
"privacy": "Public",
"value": "jsmith@example.com"
},
"full_name": {
"privacy": "Public",
"value": "John Smith"
},
"ircname": {
"privacy": "Public",
"value": "jsmith"
},
"url": "https://mozillians.org/en-US/u/2apreety18/",
"username": "2apreety18"
}
]
}
:param json: JSON object to parse
:raise InvalidFormatError: raised when the format of the JSON is
not valid.
"""
try:
for mozillian in json['results']:
name = self.__encode(mozillian['full_name']['value'])
email = self.__encode(mozillian['email']['value'])
username = self.__encode(mozillian['username'])
uuid = username
uid = UniqueIdentity(uuid=uuid)
identity = Identity(name=name, email=email, username=username,
source=self.source, uuid=uuid)
uid.identities.append(identity)
# Alternate emails
for alt_email in mozillian['alternate_emails']:
alt_email = self.__encode(alt_email['email'])
if alt_email == email:
continue
identity = Identity(name=name, email=alt_email, username=username,
source=self.source, uuid=uuid)
uid.identities.append(identity)
# IRC account
ircname = self.__encode(mozillian['ircname']['value'])
if ircname and ircname != username:
identity = Identity(name=None, email=None, username=ircname,
source=self.source, uuid=uuid)
uid.identities.append(identity)
# Mozilla affiliation
affiliation = mozillian['date_mozillian']
rol = self.__parse_mozillian_affiliation(affiliation)
uid.enrollments.append(rol)
self._identities[uuid] = uid
except KeyError as e:
msg = "invalid json format. Attribute %s not found" % e.args
raise InvalidFormatError(cause=msg) | [
"def",
"__parse_identities",
"(",
"self",
",",
"json",
")",
":",
"try",
":",
"for",
"mozillian",
"in",
"json",
"[",
"'results'",
"]",
":",
"name",
"=",
"self",
".",
"__encode",
"(",
"mozillian",
"[",
"'full_name'",
"]",
"[",
"'value'",
"]",
")",
"email... | Parse identities using Mozillians format.
The Mozillians identities format is a JSON document under the
"results" key. The document should follow the next schema:
{
"results" : [
{
"_url": "https://example.com/api/v2/users/1/",
"alternate_emails": [{
"email": "jsmith@example.net",
"privacy": "Public"
}],
"email": {
"privacy": "Public",
"value": "jsmith@example.com"
},
"full_name": {
"privacy": "Public",
"value": "John Smith"
},
"ircname": {
"privacy": "Public",
"value": "jsmith"
},
"url": "https://mozillians.org/en-US/u/2apreety18/",
"username": "2apreety18"
}
]
}
:param json: JSON object to parse
:raise InvalidFormatError: raised when the format of the JSON is
not valid. | [
"Parse",
"identities",
"using",
"Mozillians",
"format",
"."
] | python | train |
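A stripped-down sketch of the same parsing loop over the `results` array: one `KeyError` guard converts any missing attribute into a clear format error (alternate emails, IRC names, and enrollments omitted):

```python
def parse_identities(doc):
    identities = {}
    try:
        for person in doc['results']:
            uuid = person['username']    # the username doubles as the uuid
            identities[uuid] = {
                'name': person['full_name']['value'],
                'email': person['email']['value'],
            }
    except KeyError as e:
        raise ValueError("invalid json format. Attribute %s not found" % e.args)
    return identities

doc = {'results': [{'username': 'jsmith',
                    'full_name': {'value': 'John Smith'},
                    'email': {'value': 'jsmith@example.com'}}]}
print(parse_identities(doc))
```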
pmelchior/proxmin | proxmin/operators.py | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/operators.py#L91-L94 | def prox_hard_plus(X, step, thresh=0):
"""Hard thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_hard(X, step, thresh=thresh), step) | [
"def",
"prox_hard_plus",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"0",
")",
":",
"return",
"prox_plus",
"(",
"prox_hard",
"(",
"X",
",",
"step",
",",
"thresh",
"=",
"thresh",
")",
",",
"step",
")"
] | Hard thresholding with projection onto non-negative numbers | [
"Hard",
"thresholding",
"with",
"projection",
"onto",
"non",
"-",
"negative",
"numbers"
] | python | train |
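A toy NumPy version of this proximal-operator composition — hard-threshold first, then project onto the non-negative orthant. The `step` argument is dropped because neither toy operator here depends on it:

```python
import numpy as np

def prox_plus(x):
    return np.maximum(x, 0.0)                      # projection onto x >= 0

def prox_hard(x, thresh):
    return np.where(np.abs(x) >= thresh, x, 0.0)   # hard thresholding

x = np.array([-2.0, -0.1, 0.2, 1.5])
print(prox_plus(prox_hard(x, 0.5)))                # [0.  0.  0.  1.5]
```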