repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
ownport/scrapy-dblite | dblite/__init__.py | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L318-L330 | def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL) | [
"def",
"delete",
"(",
"self",
",",
"criteria",
"=",
"None",
",",
"_all",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"criteria",
",",
"self",
".",
"_item_class",
")",
":",
"criteria",
"=",
"{",
"'_id'",
":",
"criteria",
"[",
"'_id'",
"]",
"}",
... | delete dictionary(ies) in sqlite database
_all = True - delete all items | [
"delete",
"dictionary",
"(",
"ies",
")",
"in",
"sqlite",
"database"
] | python | train |
pygeobuf/pygeobuf | geobuf/scripts/cli.py | https://github.com/pygeobuf/pygeobuf/blob/c9e055ab47532781626cfe2c931a8444820acf05/geobuf/scripts/cli.py#L46-L61 | def encode(precision, with_z):
"""Given GeoJSON on stdin, writes a geobuf file to stdout."""
logger = logging.getLogger('geobuf')
stdin = click.get_text_stream('stdin')
sink = click.get_binary_stream('stdout')
try:
data = json.load(stdin)
pbf = geobuf.encode(
data,
precision if precision >= 0 else 6,
3 if with_z else 2)
sink.write(pbf)
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1) | [
"def",
"encode",
"(",
"precision",
",",
"with_z",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'geobuf'",
")",
"stdin",
"=",
"click",
".",
"get_text_stream",
"(",
"'stdin'",
")",
"sink",
"=",
"click",
".",
"get_binary_stream",
"(",
"'stdout'... | Given GeoJSON on stdin, writes a geobuf file to stdout. | [
"Given",
"GeoJSON",
"on",
"stdin",
"writes",
"a",
"geobuf",
"file",
"to",
"stdout",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/linode.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1318-L1360 | def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
'''
if call != 'function':
raise SaltCloudException(
'The show_instance action must be called with -f or --function.'
)
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
raise SaltCloudNotFound(
'The requested profile was not found.'
)
# Make sure the profile belongs to Linode
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'linode':
raise SaltCloudException(
'The requested profile does not belong to Linode.'
)
plan_id = get_plan_id(kwargs={'label': profile['size']})
response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
ret = {}
ret['per_hour'] = response['HOURLY']
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = response['PRICE']
ret['per_year'] = ret['per_month'] * 12
return {profile['profile']: ret} | [
"def",
"show_pricing",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudException",
"(",
"'The show_instance action must be called with -f or --function.'",
")",
"profile",
"=",
"__opts__",
"["... | Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f show_pricing my-linode-config profile=my-linode-profile | [
"Show",
"pricing",
"for",
"a",
"particular",
"profile",
".",
"This",
"is",
"only",
"an",
"estimate",
"based",
"on",
"unofficial",
"pricing",
"sources",
"."
] | python | train |
eleme/meepo | meepo/utils.py | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/utils.py#L57-L64 | def cast_str(s, encoding='utf8', errors='strict'):
"""cast bytes or str to str"""
if isinstance(s, bytes):
return s.decode(encoding, errors)
elif isinstance(s, str):
return s
else:
raise TypeError("Expected unicode or bytes, got %r" % s) | [
"def",
"cast_str",
"(",
"s",
",",
"encoding",
"=",
"'utf8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
",",
"errors",
")",
"elif",
"isinstance",
"(",... | cast bytes or str to str | [
"cast",
"bytes",
"or",
"str",
"to",
"str"
] | python | train |
bukun/TorCMS | torcms/core/tools.py | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L251-L288 | def gen_pager_purecss(cat_slug, page_num, current):
'''
Generate pager of purecss.
'''
if page_num == 1:
return ''
pager_shouye = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}"><< 首页</a></li>'''.format(
'hidden' if current <= 1 else '', cat_slug
)
pager_pre = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">< 前页</a>
</li>'''.format('hidden' if current <= 1 else '',
cat_slug,
current - 1)
pager_mid = ''
for ind in range(0, page_num):
tmp_mid = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">{2}</a></li>
'''.format('selected' if ind + 1 == current else '',
cat_slug,
ind + 1)
pager_mid += tmp_mid
pager_next = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">后页 ></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
current + 1)
pager_last = '''<li class="pure-menu-item {0}">
<a hclass="pure-menu-link" ref="{1}/{2}">末页
>></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
page_num)
pager = pager_shouye + pager_pre + pager_mid + pager_next + pager_last
return pager | [
"def",
"gen_pager_purecss",
"(",
"cat_slug",
",",
"page_num",
",",
"current",
")",
":",
"if",
"page_num",
"==",
"1",
":",
"return",
"''",
"pager_shouye",
"=",
"'''<li class=\"pure-menu-item {0}\">\n <a class=\"pure-menu-link\" href=\"{1}\"><< 首页</a></li>'''.for",
"m"... | Generate pager of purecss. | [
"Generate",
"pager",
"of",
"purecss",
"."
] | python | train |
DataDog/integrations-core | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py#L666-L679 | def count_objects_by_tags(self, metric, scraper_config):
""" Count objects by whitelisted tags and submit counts as gauges. """
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags)) | [
"def",
"count_objects_by_tags",
"(",
"self",
",",
"metric",
",",
"scraper_config",
")",
":",
"config",
"=",
"self",
".",
"object_count_params",
"[",
"metric",
".",
"name",
"]",
"metric_name",
"=",
"\"{}.{}\"",
".",
"format",
"(",
"scraper_config",
"[",
"'names... | Count objects by whitelisted tags and submit counts as gauges. | [
"Count",
"objects",
"by",
"whitelisted",
"tags",
"and",
"submit",
"counts",
"as",
"gauges",
"."
] | python | train |
pearu/pyvtk | pyvtk/common.py | https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/common.py#L208-L233 | def get_3_3_tuple(self,obj,default=None):
"""Return tuple of 3-tuples
"""
if is_sequence2(obj):
ret = []
for i in range(3):
if i<len(obj):
ret.append(self.get_3_tuple(obj[i],default))
else:
ret.append(self.get_3_tuple(default,default))
return tuple(ret)
if is_sequence(obj):
if len(obj)>9:
log.warning('ignoring elements obj[i], i>=9')
r = obj[:9]
r = [self.get_3_tuple(r[j:j+3],default) for j in range(0,len(r),3)]
if len(r)<3:
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
while len(r)<3:
r.append(self.get_3_tuple(default,default))
return tuple(r)
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
r1 = self.get_3_tuple(obj,default)
r2 = self.get_3_tuple(default,default)
r3 = self.get_3_tuple(default,default)
return (r1,r2,r3) | [
"def",
"get_3_3_tuple",
"(",
"self",
",",
"obj",
",",
"default",
"=",
"None",
")",
":",
"if",
"is_sequence2",
"(",
"obj",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"if",
"i",
"<",
"len",
"(",
"obj",
")",
"... | Return tuple of 3-tuples | [
"Return",
"tuple",
"of",
"3",
"-",
"tuples"
] | python | train |
has2k1/mizani | mizani/bounds.py | https://github.com/has2k1/mizani/blob/312d0550ee0136fd1b0384829b33f3b2065f47c8/mizani/bounds.py#L39-L70 | def rescale(x, to=(0, 1), _from=None):
"""
Rescale numeric vector to have specified minimum and maximum.
Parameters
----------
x : array_like | numeric
1D vector of values to manipulate.
to : tuple
output range (numeric vector of length two)
_from : tuple
input range (numeric vector of length two).
If not given, is calculated from the range of x
Returns
-------
out : array_like
Rescaled values
Examples
--------
>>> x = [0, 2, 4, 6, 8, 10]
>>> rescale(x)
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> rescale(x, to=(0, 2))
array([0. , 0.4, 0.8, 1.2, 1.6, 2. ])
>>> rescale(x, to=(0, 2), _from=(0, 20))
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
if _from is None:
_from = np.min(x), np.max(x)
return np.interp(x, _from, to) | [
"def",
"rescale",
"(",
"x",
",",
"to",
"=",
"(",
"0",
",",
"1",
")",
",",
"_from",
"=",
"None",
")",
":",
"if",
"_from",
"is",
"None",
":",
"_from",
"=",
"np",
".",
"min",
"(",
"x",
")",
",",
"np",
".",
"max",
"(",
"x",
")",
"return",
"np... | Rescale numeric vector to have specified minimum and maximum.
Parameters
----------
x : array_like | numeric
1D vector of values to manipulate.
to : tuple
output range (numeric vector of length two)
_from : tuple
input range (numeric vector of length two).
If not given, is calculated from the range of x
Returns
-------
out : array_like
Rescaled values
Examples
--------
>>> x = [0, 2, 4, 6, 8, 10]
>>> rescale(x)
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> rescale(x, to=(0, 2))
array([0. , 0.4, 0.8, 1.2, 1.6, 2. ])
>>> rescale(x, to=(0, 2), _from=(0, 20))
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ]) | [
"Rescale",
"numeric",
"vector",
"to",
"have",
"specified",
"minimum",
"and",
"maximum",
"."
] | python | valid |
bcbio/bcbio-nextgen | bcbio/pipeline/qcsummary.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L38-L58 | def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
to_analyze, extras = _split_samples_by_qc(samples)
qced = run_parallel("pipeline_summary", to_analyze)
samples = _combine_qc_samples(qced) + extras
qsign_info = run_parallel("qsignature_summary", [samples])
metadata_file = _merge_metadata([samples])
summary_file = write_project_summary(samples, qsign_info)
out = []
for data in samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
data[0]["summary"]["metadata"] = metadata_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
out.append(data)
out = _add_researcher_summary(out, summary_file)
# MultiQC must be run after all file outputs are set:
return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])] | [
"def",
"generate_parallel",
"(",
"samples",
",",
"run_parallel",
")",
":",
"to_analyze",
",",
"extras",
"=",
"_split_samples_by_qc",
"(",
"samples",
")",
"qced",
"=",
"run_parallel",
"(",
"\"pipeline_summary\"",
",",
"to_analyze",
")",
"samples",
"=",
"_combine_qc... | Provide parallel preparation of summary information for alignment and variant calling. | [
"Provide",
"parallel",
"preparation",
"of",
"summary",
"information",
"for",
"alignment",
"and",
"variant",
"calling",
"."
] | python | train |
bronto/javasphinx | javasphinx/compiler.py | https://github.com/bronto/javasphinx/blob/cd1df27f1d70efaae079b74573efdd8e069ff02d/javasphinx/compiler.py#L95-L106 | def __output_see(self, see):
""" Convert the argument to a @see tag to rest """
if see.startswith('<a href'):
# HTML link -- <a href="...">...</a>
return self.__html_to_rst(see)
elif '"' in see:
# Plain text
return see
else:
# Type reference (default)
return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),) | [
"def",
"__output_see",
"(",
"self",
",",
"see",
")",
":",
"if",
"see",
".",
"startswith",
"(",
"'<a href'",
")",
":",
"# HTML link -- <a href=\"...\">...</a>",
"return",
"self",
".",
"__html_to_rst",
"(",
"see",
")",
"elif",
"'\"'",
"in",
"see",
":",
"# Plai... | Convert the argument to a @see tag to rest | [
"Convert",
"the",
"argument",
"to",
"a"
] | python | train |
robehickman/simple-http-file-sync | shttpfs/crypto.py | https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/crypto.py#L4-L10 | def prompt_for_new_password():
""" Prompt the user to enter a new password, with confirmation """
while True:
passw = getpass.getpass()
passw2 = getpass.getpass()
if passw == passw2: return passw
print 'Passwords do not match' | [
"def",
"prompt_for_new_password",
"(",
")",
":",
"while",
"True",
":",
"passw",
"=",
"getpass",
".",
"getpass",
"(",
")",
"passw2",
"=",
"getpass",
".",
"getpass",
"(",
")",
"if",
"passw",
"==",
"passw2",
":",
"return",
"passw",
"print",
"'Passwords do not... | Prompt the user to enter a new password, with confirmation | [
"Prompt",
"the",
"user",
"to",
"enter",
"a",
"new",
"password",
"with",
"confirmation"
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/interval.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L925-L974 | def subset(self, interval: Interval,
flexibility: int = 2) -> "IntervalList":
"""
Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y
"""
if flexibility not in [0, 1, 2]:
raise ValueError("subset: bad flexibility value")
permitted = []
for i in self.intervals:
if flexibility == 0:
ok = i.start > interval.start and i.end < interval.end
elif flexibility == 1:
ok = i.end > interval.start and i.start < interval.end
else:
ok = i.end >= interval.start and i.start <= interval.end
if ok:
permitted.append(i)
return IntervalList(permitted) | [
"def",
"subset",
"(",
"self",
",",
"interval",
":",
"Interval",
",",
"flexibility",
":",
"int",
"=",
"2",
")",
"->",
"\"IntervalList\"",
":",
"if",
"flexibility",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"(",
"\"sub... | Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y | [
"Returns",
"an",
"IntervalList",
"that",
"s",
"a",
"subset",
"of",
"this",
"one",
"only",
"containing",
"intervals",
"that",
"meet",
"the",
"interval",
"parameter",
"criterion",
".",
"What",
"meet",
"means",
"is",
"defined",
"by",
"the",
"flexibility",
"parame... | python | train |
rodluger/everest | everest/missions/k2/k2.py | https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1847-L1867 | def GetTargetCBVs(model):
'''
Returns the design matrix of CBVs for the given target.
:param model: An instance of the :py:obj:`everest` model for the target
'''
# Get the info
season = model.season
name = model.name
# We use the LC light curves as CBVs; there aren't
# enough SC light curves to get a good set
if name.endswith('.sc'):
name = name[:-3]
model.XCBV = sysrem.GetCBVs(season, model=name,
niter=model.cbv_niter,
sv_win=model.cbv_win,
sv_order=model.cbv_order) | [
"def",
"GetTargetCBVs",
"(",
"model",
")",
":",
"# Get the info",
"season",
"=",
"model",
".",
"season",
"name",
"=",
"model",
".",
"name",
"# We use the LC light curves as CBVs; there aren't",
"# enough SC light curves to get a good set",
"if",
"name",
".",
"endswith",
... | Returns the design matrix of CBVs for the given target.
:param model: An instance of the :py:obj:`everest` model for the target | [
"Returns",
"the",
"design",
"matrix",
"of",
"CBVs",
"for",
"the",
"given",
"target",
"."
] | python | train |
saltstack/salt | salt/modules/file.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L3650-L3672 | def readdir(path):
'''
.. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Dir path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
dirents = ['.', '..']
dirents.extend(os.listdir(path))
return dirents | [
"def",
"readdir",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'Dir path must be absolute.'",
")",
... | .. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/ | [
"..",
"versionadded",
"::",
"2014",
".",
"1",
".",
"0"
] | python | train |
cwacek/python-jsonschema-objects | python_jsonschema_objects/classbuilder.py | https://github.com/cwacek/python-jsonschema-objects/blob/54c82bfaec9c099c472663742abfc7de373a5e49/python_jsonschema_objects/classbuilder.py#L557-L736 | def _build_object(self, nm, clsdata, parents,**kw):
logger.debug(util.lazy_format("Building object {0}", nm))
# To support circular references, we tag objects that we're
# currently building as "under construction"
self.under_construction.add(nm)
props = {}
defaults = set()
properties = {}
for p in parents:
properties = util.propmerge(properties, p.__propinfo__)
if 'properties' in clsdata:
properties = util.propmerge(properties, clsdata['properties'])
name_translation = {}
for prop, detail in properties.items():
logger.debug(util.lazy_format("Handling property {0}.{1}",nm, prop))
properties[prop]['raw_name'] = prop
name_translation[prop] = prop.replace('@', '')
prop = name_translation[prop]
if detail.get('default', None) is not None:
defaults.add(prop)
if detail.get('type', None) == 'object':
uri = "{0}/{1}_{2}".format(nm,
prop, "<anonymous>")
self.resolved[uri] = self.construct(
uri,
detail,
(ProtocolBase,))
props[prop] = make_property(prop,
{'type': self.resolved[uri]},
self.resolved[uri].__doc__)
properties[prop]['type'] = self.resolved[uri]
elif 'type' not in detail and '$ref' in detail:
ref = detail['$ref']
uri = util.resolve_ref_uri(self.resolver.resolution_scope, ref)
logger.debug(util.lazy_format(
"Resolving reference {0} for {1}.{2}",
ref, nm, prop
))
if uri in self.resolved:
typ = self.resolved[uri]
else:
typ = self.construct(uri, detail, (ProtocolBase,))
props[prop] = make_property(prop,
{'type': typ},
typ.__doc__)
properties[prop]['$ref'] = uri
properties[prop]['type'] = typ
elif 'oneOf' in detail:
potential = self.resolve_classes(detail['oneOf'])
logger.debug(util.lazy_format("Designating {0} as oneOf {1}", prop, potential))
desc = detail[
'description'] if 'description' in detail else ""
props[prop] = make_property(prop,
{'type': potential}, desc
)
elif 'type' in detail and detail['type'] == 'array':
if 'items' in detail and isinstance(detail['items'], dict):
if '$ref' in detail['items']:
uri = util.resolve_ref_uri(
self.resolver.resolution_scope,
detail['items']['$ref'])
typ = self.construct(uri, detail['items'])
constraints = copy.copy(detail)
constraints['strict'] = kw.get('strict')
propdata = {
'type': 'array',
'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
uri,
item_constraint=typ,
**constraints)}
else:
uri = "{0}/{1}_{2}".format(nm,
prop, "<anonymous_field>")
try:
if 'oneOf' in detail['items']:
typ = TypeProxy([
self.construct(uri + "_%s" % i, item_detail)
if '$ref' not in item_detail else
self.construct(util.resolve_ref_uri(
self.resolver.resolution_scope,
item_detail['$ref']),
item_detail)
for i, item_detail in enumerate(detail['items']['oneOf'])]
)
else:
typ = self.construct(uri, detail['items'])
constraints = copy.copy(detail)
constraints['strict'] = kw.get('strict')
propdata = {'type': 'array',
'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
uri,
item_constraint=typ,
**constraints)}
except NotImplementedError:
typ = detail['items']
constraints = copy.copy(detail)
constraints['strict'] = kw.get('strict')
propdata = {'type': 'array',
'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
uri,
item_constraint=typ,
**constraints)}
props[prop] = make_property(prop,
propdata,
typ.__doc__)
elif 'items' in detail:
typs = []
for i, elem in enumerate(detail['items']):
uri = "{0}/{1}/<anonymous_{2}>".format(nm, prop, i)
typ = self.construct(uri, elem)
typs.append(typ)
props[prop] = make_property(prop,
{'type': typs},
)
else:
desc = detail[
'description'] if 'description' in detail else ""
uri = "{0}/{1}".format(nm, prop)
typ = self.construct(uri, detail)
props[prop] = make_property(prop, {'type': typ}, desc)
""" If this object itself has a 'oneOf' designation, then
make the validation 'type' the list of potential objects.
"""
if 'oneOf' in clsdata:
klasses = self.resolve_classes(clsdata['oneOf'])
# Need a validation to check that it meets one of them
props['__validation__'] = {'type': klasses}
props['__extensible__'] = pattern_properties.ExtensibleValidator(
nm,
clsdata,
self)
props['__prop_names__'] = name_translation
props['__propinfo__'] = properties
required = set.union(*[p.__required__ for p in parents])
if 'required' in clsdata:
for prop in clsdata['required']:
required.add(prop)
invalid_requires = [req for req in required if req not in props['__propinfo__']]
if len(invalid_requires) > 0:
raise validators.ValidationError("Schema Definition Error: {0} schema requires "
"'{1}', but properties are not defined"
.format(nm, invalid_requires))
props['__required__'] = required
props['__has_default__'] = defaults
if required and kw.get("strict"):
props['__strict__'] = True
props['__title__'] = clsdata.get('title')
cls = type(str(nm.split('/')[-1]), tuple(parents), props)
self.under_construction.remove(nm)
return cls | [
"def",
"_build_object",
"(",
"self",
",",
"nm",
",",
"clsdata",
",",
"parents",
",",
"*",
"*",
"kw",
")",
":",
"logger",
".",
"debug",
"(",
"util",
".",
"lazy_format",
"(",
"\"Building object {0}\"",
",",
"nm",
")",
")",
"# To support circular references, we... | If this object itself has a 'oneOf' designation, then
make the validation 'type' the list of potential objects. | [
"If",
"this",
"object",
"itself",
"has",
"a",
"oneOf",
"designation",
"then",
"make",
"the",
"validation",
"type",
"the",
"list",
"of",
"potential",
"objects",
"."
] | python | train |
BernardFW/bernard | src/bernard/utils.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L234-L259 | def dict_is_subset(subset: Any, full_set: Any) -> bool:
"""
Checks that all keys present in `subset` are present and have the same
value in `full_set`. If a key is in `full_set` but not in `subset` then
True will be returned anyways.
"""
if not isinstance(subset, full_set.__class__):
return False
elif isinstance(subset, dict):
for k, v in subset.items():
if k not in full_set or not dict_is_subset(v, full_set[k]):
return False
return True
elif isinstance(subset, list):
if len(subset) != len(full_set):
return False
for a, b in zip(subset, full_set):
if not dict_is_subset(a, b):
return False
return True
else:
return subset == full_set | [
"def",
"dict_is_subset",
"(",
"subset",
":",
"Any",
",",
"full_set",
":",
"Any",
")",
"->",
"bool",
":",
"if",
"not",
"isinstance",
"(",
"subset",
",",
"full_set",
".",
"__class__",
")",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"subset",
",",
... | Checks that all keys present in `subset` are present and have the same
value in `full_set`. If a key is in `full_set` but not in `subset` then
True will be returned anyways. | [
"Checks",
"that",
"all",
"keys",
"present",
"in",
"subset",
"are",
"present",
"and",
"have",
"the",
"same",
"value",
"in",
"full_set",
".",
"If",
"a",
"key",
"is",
"in",
"full_set",
"but",
"not",
"in",
"subset",
"then",
"True",
"will",
"be",
"returned",
... | python | train |
bslatkin/dpxdt | dpxdt/client/pdiff_worker.py | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/pdiff_worker.py#L226-L240 | def register(coordinator):
"""Registers this module as a worker with the given coordinator."""
utils.verify_binary('pdiff_compare_binary', ['-version'])
utils.verify_binary('pdiff_composite_binary', ['-version'])
assert FLAGS.pdiff_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.PDIFF_QUEUE_NAME,
DoPdiffQueueWorkflow,
max_tasks=FLAGS.pdiff_threads,
wait_seconds=FLAGS.pdiff_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | [
"def",
"register",
"(",
"coordinator",
")",
":",
"utils",
".",
"verify_binary",
"(",
"'pdiff_compare_binary'",
",",
"[",
"'-version'",
"]",
")",
"utils",
".",
"verify_binary",
"(",
"'pdiff_composite_binary'",
",",
"[",
"'-version'",
"]",
")",
"assert",
"FLAGS",
... | Registers this module as a worker with the given coordinator. | [
"Registers",
"this",
"module",
"as",
"a",
"worker",
"with",
"the",
"given",
"coordinator",
"."
] | python | train |
awslabs/sockeye | sockeye/evaluate.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/evaluate.py#L94-L103 | def raw_corpus_length_ratio(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around length ratio implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: Length ratio score as float.
"""
ratios = [len(h.split())/len(r.split()) for h, r in zip(hypotheses, references)]
return sum(ratios)/len(ratios) if len(ratios) else 0.0 | [
"def",
"raw_corpus_length_ratio",
"(",
"hypotheses",
":",
"Iterable",
"[",
"str",
"]",
",",
"references",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"float",
":",
"ratios",
"=",
"[",
"len",
"(",
"h",
".",
"split",
"(",
")",
")",
"/",
"len",
"(",
... | Simple wrapper around length ratio implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: Length ratio score as float. | [
"Simple",
"wrapper",
"around",
"length",
"ratio",
"implementation",
"."
] | python | train |
modin-project/modin | modin/pandas/general.py | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/general.py#L13-L25 | def isna(obj):
"""
Detect missing values for an array-like object.
Args:
obj: Object to check for null or missing values.
Returns:
bool or array-like of bool
"""
if isinstance(obj, BasePandasDataset):
return obj.isna()
else:
return pandas.isna(obj) | [
"def",
"isna",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"BasePandasDataset",
")",
":",
"return",
"obj",
".",
"isna",
"(",
")",
"else",
":",
"return",
"pandas",
".",
"isna",
"(",
"obj",
")"
] | Detect missing values for an array-like object.
Args:
obj: Object to check for null or missing values.
Returns:
bool or array-like of bool | [
"Detect",
"missing",
"values",
"for",
"an",
"array",
"-",
"like",
"object",
".",
"Args",
":",
"obj",
":",
"Object",
"to",
"check",
"for",
"null",
"or",
"missing",
"values",
"."
] | python | train |
postlund/pyatv | pyatv/mrp/srp.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/srp.py#L145-L153 | def step1(self, pin):
"""First pairing step."""
context = SRPContext(
'Pair-Setup', str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512)
self._session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode()) | [
"def",
"step1",
"(",
"self",
",",
"pin",
")",
":",
"context",
"=",
"SRPContext",
"(",
"'Pair-Setup'",
",",
"str",
"(",
"pin",
")",
",",
"prime",
"=",
"constants",
".",
"PRIME_3072",
",",
"generator",
"=",
"constants",
".",
"PRIME_3072_GEN",
",",
"hash_fu... | First pairing step. | [
"First",
"pairing",
"step",
"."
] | python | train |
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L64-L67 | def visitNodeConstraintValueSet(self, ctx: ShExDocParser.NodeConstraintValueSetContext):
""" nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet """
self.nodeconstraint.values = []
self.visitChildren(ctx) | [
"def",
"visitNodeConstraintValueSet",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"NodeConstraintValueSetContext",
")",
":",
"self",
".",
"nodeconstraint",
".",
"values",
"=",
"[",
"]",
"self",
".",
"visitChildren",
"(",
"ctx",
")"
] | nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet | [
"nodeConstraint",
":",
"valueSet",
"xsFacet",
"*",
"#nodeConstraintValueSet"
] | python | train |
qualisys/qualisys_python_sdk | qtm/packet.py | https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/packet.py#L484-L490 | def get_3d_markers_residual(
self, component_info=None, data=None, component_position=None
):
"""Get 3D markers with residual."""
return self._get_3d_markers(
RT3DMarkerPositionResidual, component_info, data, component_position
) | [
"def",
"get_3d_markers_residual",
"(",
"self",
",",
"component_info",
"=",
"None",
",",
"data",
"=",
"None",
",",
"component_position",
"=",
"None",
")",
":",
"return",
"self",
".",
"_get_3d_markers",
"(",
"RT3DMarkerPositionResidual",
",",
"component_info",
",",
... | Get 3D markers with residual. | [
"Get",
"3D",
"markers",
"with",
"residual",
"."
] | python | valid |
NeuroanatomyAndConnectivity/surfdist | surfdist/utils.py | https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L31-L46 | def triangles_keep_cortex(triangles, cortex):
"""
Remove triangles with nodes not contained in the cortex label array
"""
# for or each face/triangle keep only those that only contain nodes within the list of cortex nodes
input_shape = triangles.shape
triangle_is_in_cortex = np.all(np.reshape(np.in1d(triangles.ravel(), cortex), input_shape), axis=1)
cortex_triangles_old = np.array(triangles[triangle_is_in_cortex], dtype=np.int32)
# reassign node index before outputting triangles
new_index = np.digitize(cortex_triangles_old.ravel(), cortex, right=True)
cortex_triangles = np.array(np.arange(len(cortex))[new_index].reshape(cortex_triangles_old.shape), dtype=np.int32)
return cortex_triangles | [
"def",
"triangles_keep_cortex",
"(",
"triangles",
",",
"cortex",
")",
":",
"# for or each face/triangle keep only those that only contain nodes within the list of cortex nodes",
"input_shape",
"=",
"triangles",
".",
"shape",
"triangle_is_in_cortex",
"=",
"np",
".",
"all",
"(",
... | Remove triangles with nodes not contained in the cortex label array | [
"Remove",
"triangles",
"with",
"nodes",
"not",
"contained",
"in",
"the",
"cortex",
"label",
"array"
] | python | train |
NuGrid/NuGridPy | nugridpy/h5T.py | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/h5T.py#L543-L980 | def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
'''
Get Data from HDF5 files.
There are three ways to call this function
1. get(dataitem)
Fetches the datatiem for all cycles. If dataitem is a header
attribute or list of attributes then the data is retured.
If detaitem an individulal or list of column attributes,
data columns or isotopes/elements the data is returned for
all cycles.
2. get(cycle_list, dataitem)
Fetches the dataitem or list of dataitems for the cycle
or list of cycles. The variable dataitems can contain column
attributes, data columns, and isotopes/elemnts.
3. get(cycle_list, dataitem, isotope)
Fetches the dataitems like the seccond method except that
one of the dataitems must be either "iso_massf" or "yps",
and in the data returned "iso_massf" and "yps" are replaced
with the data from the isotopes. The isotopes must be in
the form given by se.isotopes or se.elements.
Parameters
----------
cycle_list : list, integer or string
If cycle_list is a list or string and all of the entries
are header attributes then the attributes are returned.
If cycle_list is a list or string of dataitems then the
dataitems are fetched for all cycles.
If cycle_list is a list, integer or string of cycle numbers
then data is returned for those cycles.
dataitem: list or string, optional
If dataitem is not None then the data for each item is
returned for the cycle or list of cycles. dataitem may be an
individual or a mixed list of column attributes, column
data or isotopes/elements. If dataitem is None then
cycle_list must be a string. The default is None.
isotope: list or string, optional
If one of the dataitems is "iso_massf" or "yps" then it is
replaced with the data from the individual isotopes/elements
listed in isotope. The default is None.
sparse : int
Implements a sparsity factor on the fetched data i.e. only
the i th cycle in cycle_list data is returned,
where i = sparse.
'''
# Check out the inputs
t1=time.time()
isotopes_of_interest = []
nested_list = False
# if one of cycle_list, dataitem or isotope is given as a string convert it to a list
if isinstance(cycle_list, basestring):
cycle_list = [cycle_list]
else:
try:
if len(cycle_list) == 1:
nested_list = True
except TypeError:
pass #leave nested_list as false
if isinstance(dataitem, basestring):
dataitem = [dataitem]
if isinstance(isotope, basestring):
isotope = [isotope]
if dataitem==None and isotope==None:
option_ind = 1
dataitem = cycle_list
if not any([item in self.hattrs for item in dataitem]):
cycle_list = self.cycles
else:
first_file = mrT.File(self.h5s[0].filename,'r')
dat = []
# get all dataitems from header attributes
for item in dataitem:
tmp = first_file.attrs.get(item, None)
try:
if len(tmp) == 1:
tmp = tmp[0]
except TypeError: #if a scaler is returned do nothing
pass
dat.append(tmp)
# if only one header attribute is required dont return as a list
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
first_file.close()
return dat
if any([item.split('-')[0] in self.isos for item in dataitem]):
return self.get(cycle_list,dataitem,sparse=sparse)
elif isotope==None:
option_ind = 2
cycle_list = cycle_list
dataitem = dataitem
# if one dataitem is given as a string convert it to a list
if isinstance(dataitem, basestring):
dataitem = [dataitem]
new_dataitem = []
new_isotopes = []
for item in dataitem:
if item.split('-')[0] in self.isos:
new_isotopes.append(item)
else:
new_dataitem.append(item)
if len(new_isotopes) != 0:
tmp = []
try:
tmp = self.get(cycle_list,new_dataitem + ['iso_massf'],new_isotopes,sparse=sparse)
except: # in some old se files there maybe still yps as the name for the abundance arrays
tmp = self.get(cycle_list,new_dataitem + ['yps'],new_isotopes,sparse=sparse)
# modify the dat list so dat is structured like dataitems
dat = []
#make sure tmp containes the data as a list of cycles
if isinstance(cycle_list, basestring):
tmp = [tmp]
else:
try:
if len(cycle_list) == 1:
tmp = [tmp]
except TypeError:
tmp = [tmp]
for cyc in tmp:
temp_dataitem = []
for item in dataitem:
if item in new_dataitem:
temp_dataitem.append(cyc[new_dataitem.index(item)])
else:
if len(new_dataitem) == 0:
temp_dataitem = cyc
else:
if len(new_isotopes) == 1:
temp_dataitem.append(cyc[-1])
else:
temp_dataitem.append(cyc[-1][new_isotopes.index(item)])
dat.append(temp_dataitem)
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
return dat
else:
# there is an implicite rule here that if you want 2D arrays you have
# to give 3 args, or, in other words you have to give a cycle or cycle
# array; there is no good reason for that, except the programmers
# laziness
option_ind = 3
cycle_list = cycle_list
dataitem = dataitem
isotopes_of_interest = isotope
# we need to find out the shellnb to know if any yps array may just be
# a one row array, as - for example- in the surf.h5 files
# SJONES: I think here we only need to look at the first shellnb(!)
#shellnb=self.get(cycle_list,'shellnb')
try: #check if cycle_list is not a list
cycle_list[0]
except (TypeError,IndexError):
cycle_list = [cycle_list]
shellnb=self.get(cycle_list[0],'shellnb')
if sparse <1:
sparse=1
# Just in case the user inputs integers
try:
for x in range(len(cycle_list)):
cycle_list[x] = str(cycle_list[x])
except TypeError:
cycle_list = [str(cycle_list)]
if option_ind != 1:
try: #if it is a single cycle make sure its formatted correctly
if cycle_list.isdigit():
cycle_list = [cycle_list]
for cycle in cycle_list:
if len(cycle) != len(self.cycles[0]):
#print "a"
diff = len(self.cycles[0])-len(cycle)
OO = ''
while diff >=1:
OO+='0'
cycle = OO+cycle
except AttributeError: ##if it is a list of cycles make sure its formatted correctly
if cycle_list[0].isdigit():
for x in range(len(cycle_list)):
if len(str(cycle_list[x])) != len(str(self.cycles[0])):
#print "b"
diff = len(str(self.cycles[0]))-len(str(cycle_list[x]))
OO = ''
while diff >=1:
OO+='0'
diff-=1
try:
cycle_list[x] = OO+cycle_list[x]
except TypeError:
cycle_list[0] = OO+cycle_list[0]
dat = []
cycle_list.sort()
cyclelist=np.array(list(map(int, cycle_list)))
# cycles_requested is a list of indices from cyclelist
# The index of the larges and smallest indices should be stored
# in sorted order. As new requests are made if the requests
# border or over lap then only keep the index of the larges and
# smallest indices.
cycles_requested = []
# Sometimes bad data or last restart.h5 files contain no cycles,
# causing the code to crash. Do a simple try/except here:
file_min=[]
file_max=[]
try:
for h5 in self.h5s:
file_min.append(int(h5.cycle[0]))
file_max.append(int(h5.cycle[-1]))
except IndexError:
print('File '+h5.filename+' contains no data, please remove or rename it')
print('Once the file has been removed or renamed, the preprocessor file must be re-written. Do this by either removing the file h5Preproc.txt from the data directory or by invoking the se instance with rewrite=True')
print('At present, h5T cannot check for empty files since the overhead using the mounted VOSpace would be too great.')
raise IOError('Cycle-less file encountered')
file_min.sort()
file_max.sort()
for h5 in self.h5s:
#initalize file metadata
min_file = int(h5.cycle[0])
max_file = int(h5.cycle[-1])
min_list = int(cyclelist[0])
max_list = int(cyclelist[-1])
index_min = None #if None start at begining
index_max = None #if None finish at end
# SJONES Now we need to add the case that the set only contains one file:
if len(file_min) == 1:
min_file = min_list - 1
max_file = max_list + 1
else:
file_index = file_min.index(min_file)
if file_index == 0:
if min_list - 1 < min_file:
min_file = min_list - 1
max_file = (file_min[file_index + 1] + max_file)//2
elif file_index == len(file_min) - 1:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
if max_list + 1 > max_file:
max_file = max_list + 1
else:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
max_file = (file_min[file_index + 1] + max_file)//2
# calculate the left and right limits of the intersection
# of the lists h5.cycle and cyclelist
if (max_list < min_file) or (max_file < min_list):
# the lists do not intersect
continue
elif (min_list <= min_file) and (max_file <= max_list):
# all of h5.cycle is within cyclelist
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = bisect.bisect_right(cyclelist, max_file)
elif (min_file <= min_list) and (max_list <= max_file):
# all of cyclelist is within h5.cycle
index_min = None
index_max = None
else:
if min_list > min_file:
# cyclelist overlaps the right edge of h5.cycle
index_min = None
index_max = bisect.bisect_right(cyclelist, max_file)
else:
# cyclelist overlaps the left edge of h5.cylce
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = None
# maintin list of all requested cycles by keeping trak of
# the maximum and minimum indices
imin = index_min
if index_min == None:
imin = 0
imax = index_max
if index_max == None:
imax = len(cyclelist)
request_min = bisect.bisect_left(cycles_requested, imin)
request_max = bisect.bisect_right(cycles_requested, imax)
# if the new request overlabs older request remove them
del cycles_requested[request_min:request_max]
if ((request_max-request_min) % 2) ==1:
# new and old request overlaped on one edge only
if request_min % 2 == 0:
# add new starting index
cycles_requested.insert(request_min, imin)
else:
# add new ending index
cycles_requested.insert(request_min, imax)
else:
# new and old requests overlaped on two edges
if request_min % 2 == 0:
# old request was contained with in new request
cycles_requested.insert(request_min, imin)
cycles_requested.insert(request_min + 1, imax)
else:
# new request wat contained within old request
pass
if not self.h5sStarted[self.h5s.index(h5)]:
h5.start()
h5.join()
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
self.h5sStarted[self.h5s.index(h5)]=True
else:
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
temp_dat = []
for temp_num, temp_cycle in enumerate(temp):
temp_dataforcycle = []
for dataitem_num, temp_dataitem in enumerate(temp_cycle):
# identify what cycle the temp data was collected from
temp_dataitem=self.red_dim(temp_dataitem)
# if option_ind == 3 and isotopes_of_interest != []:
if (dataitem[dataitem_num] == 'iso_massf' or dataitem[dataitem_num] == 'yps') and isotopes_of_interest != []:
# Figure out the index
index = []
iso_tmp = []
if 'iso' in dataitem[dataitem_num]: #if we are looking at an isotope
iso_tmp = self.isotopes
else:
iso_tmp = self.elements
for iso in isotopes_of_interest: #finds the location of the isotope
x = iso_tmp.index(iso)
index.append(x)
if index == []:
# if none of the isotopes of interest are found
# then the index defaults to [0], so that the loop
# will still try to acess the data in t.
index = [0]
islist=True
if len(cycle_list)==1:
islist=False
# shellnb_index = 0
# if index_min == None:
# shellnb_index = temp_num
# else:
# shellnb_index = index_min + temp_num
temp_multicyc = []
for i in index:
# if islist:
# if shellnb[shellnb_index] == 1: # again take care of 1-row 2D arrays
if shellnb == 1: # again take care of 1-row 2D arrays
temp_multicyc.append(temp_dataitem[i])
else:
temp_multicyc.append(temp_dataitem[:,i])
# else:
# if shellnb == 1: # again take care of 1-row 2D arrays
# temp_multicyc.append(temp_dataitem[i])
# else:
# temp_multicyc.append(temp_dataitem[:,i])
if len(temp_multicyc) == 1: # agian take care of 1-row arrays
temp_multicyc = temp_multicyc[0]
temp_dataitem = temp_multicyc
temp_dataforcycle.append(temp_dataitem)
if len(temp_dataforcycle) == 1: # agian take care of 1-row arrays
temp_dataforcycle = temp_dataforcycle[0]
# Now add the information to the list we pass back
temp_dat.append(temp_dataforcycle)
# calculate the proper insertion point for the data colected from
# the file h5 in self.h5s
insert_pnt = 0
if index_min is not None: #alex: in py2: x < None == False
for i in range(len(cycles_requested)):
if i % 2 == 1:
if cycles_requested[i] < index_min:
insert_pnt += cycles_requested[i] - cycles_requested[i-1]
elif cycles_requested[i - 1] < index_min:
insert_pnt += index_min - cycles_requested[i - 1]
# insert the cycle data from the current file into the apropiat place
# in the output data.
dat[insert_pnt:insert_pnt] = temp_dat
#check if cycles were not requested from the file
# SJONES comment
# missing_cycles = np.array([])
# if len(cycles_requested) != 2:
# if len(cycles_requested) == 0:
# missing_cycles = np.array([cycle_list])
# else:
# cycles_requested = [None] + cycles_requested + [None]
# for i in xrange(0, len(cycles_requested), 2):
# min = cycles_requested[i]
# max = cycles_requested[i + 1]
# missing_cycles = np.append(missing_cycles, cycle_list[min:max])
# print "The requested cycles: " + str(missing_cycles) + " are not available in this data set"
# elif (cycles_requested[0] != 0) or (cycles_requested[1] != len(cyclelist)):
# min = cycles_requested[0]
# max = cycles_requested[1]
# missing_cycles = np.append(missing_cycles, cycle_list[0:min])
# missing_cycles = np.append(missing_cycles, cycle_list[max:])
# print "The requested cycles: " + str(missing_cycles) + " are not available in this data set"
if len(dat) < 2 and option_ind != 3 and (not nested_list):
try:
dat = dat[0]
except IndexError:
None
except TypeError:
None
try:
if len(dat) < 2 and isotopes_of_interest != []:
dat = dat[0]
except TypeError:
None
except IndexError:
None
t2=time.time()
return dat | [
"def",
"get",
"(",
"self",
",",
"cycle_list",
",",
"dataitem",
"=",
"None",
",",
"isotope",
"=",
"None",
",",
"sparse",
"=",
"1",
")",
":",
"# Check out the inputs",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"isotopes_of_interest",
"=",
"[",
"]",
"n... | Get Data from HDF5 files.
There are three ways to call this function
1. get(dataitem)
Fetches the datatiem for all cycles. If dataitem is a header
attribute or list of attributes then the data is retured.
If detaitem an individulal or list of column attributes,
data columns or isotopes/elements the data is returned for
all cycles.
2. get(cycle_list, dataitem)
Fetches the dataitem or list of dataitems for the cycle
or list of cycles. The variable dataitems can contain column
attributes, data columns, and isotopes/elemnts.
3. get(cycle_list, dataitem, isotope)
Fetches the dataitems like the seccond method except that
one of the dataitems must be either "iso_massf" or "yps",
and in the data returned "iso_massf" and "yps" are replaced
with the data from the isotopes. The isotopes must be in
the form given by se.isotopes or se.elements.
Parameters
----------
cycle_list : list, integer or string
If cycle_list is a list or string and all of the entries
are header attributes then the attributes are returned.
If cycle_list is a list or string of dataitems then the
dataitems are fetched for all cycles.
If cycle_list is a list, integer or string of cycle numbers
then data is returned for those cycles.
dataitem: list or string, optional
If dataitem is not None then the data for each item is
returned for the cycle or list of cycles. dataitem may be an
individual or a mixed list of column attributes, column
data or isotopes/elements. If dataitem is None then
cycle_list must be a string. The default is None.
isotope: list or string, optional
If one of the dataitems is "iso_massf" or "yps" then it is
replaced with the data from the individual isotopes/elements
listed in isotope. The default is None.
sparse : int
Implements a sparsity factor on the fetched data i.e. only
the i th cycle in cycle_list data is returned,
where i = sparse. | [
"Get",
"Data",
"from",
"HDF5",
"files",
"."
] | python | train |
fr33jc/bang | bang/providers/aws.py | https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/aws.py#L30-L42 | def server_to_dict(server):
"""
Returns the :class:`dict` representation of a server object.
The returned :class:`dict` is meant to be consumed by
:class:`~bang.deployers.cloud.ServerDeployer` objects.
"""
return {
A.server.ID: server.id,
A.server.PUBLIC_IPS: [server.public_dns_name],
A.server.PRIVATE_IPS: [server.private_dns_name],
} | [
"def",
"server_to_dict",
"(",
"server",
")",
":",
"return",
"{",
"A",
".",
"server",
".",
"ID",
":",
"server",
".",
"id",
",",
"A",
".",
"server",
".",
"PUBLIC_IPS",
":",
"[",
"server",
".",
"public_dns_name",
"]",
",",
"A",
".",
"server",
".",
"PR... | Returns the :class:`dict` representation of a server object.
The returned :class:`dict` is meant to be consumed by
:class:`~bang.deployers.cloud.ServerDeployer` objects. | [
"Returns",
"the",
":",
"class",
":",
"dict",
"representation",
"of",
"a",
"server",
"object",
"."
] | python | train |
dtmilano/AndroidViewClient | src/com/dtmilano/android/viewclient.py | https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L3188-L3212 | def traverse(self, root="ROOT", indent="", transform=None, stream=sys.stdout):
'''
Traverses the C{View} tree and prints its nodes.
The nodes are printed converting them to string but other transformations can be specified
by providing a method name as the C{transform} parameter.
@type root: L{View}
@param root: the root node from where the traverse starts
@type indent: str
@param indent: the indentation string to use to print the nodes
@type transform: method
@param transform: a method to use to transform the node before is printed
'''
if transform is None:
# this cannot be a default value, otherwise
# TypeError: 'staticmethod' object is not callable
# is raised
transform = ViewClient.TRAVERSE_CIT
if type(root) == types.StringType and root == "ROOT":
root = self.root
return ViewClient.__traverse(root, indent, transform, stream) | [
"def",
"traverse",
"(",
"self",
",",
"root",
"=",
"\"ROOT\"",
",",
"indent",
"=",
"\"\"",
",",
"transform",
"=",
"None",
",",
"stream",
"=",
"sys",
".",
"stdout",
")",
":",
"if",
"transform",
"is",
"None",
":",
"# this cannot be a default value, otherwise",
... | Traverses the C{View} tree and prints its nodes.
The nodes are printed converting them to string but other transformations can be specified
by providing a method name as the C{transform} parameter.
@type root: L{View}
@param root: the root node from where the traverse starts
@type indent: str
@param indent: the indentation string to use to print the nodes
@type transform: method
@param transform: a method to use to transform the node before is printed | [
"Traverses",
"the",
"C",
"{",
"View",
"}",
"tree",
"and",
"prints",
"its",
"nodes",
"."
] | python | train |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/common.py | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/common.py#L49-L67 | def normalize_columns(features):
"""
This performs column normalization of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix.
"""
# Calculate inverse document frequency.
features = features.tocsc()
for j in range(features.shape[1]):
document_frequency = features.getcol(j).data.size
if document_frequency > 1:
features.data[features.indptr[j]: features.indptr[j + 1]] =\
features.data[features.indptr[j]: features.indptr[j + 1]]/np.sqrt(np.log(document_frequency))
features = features.tocsr()
return features | [
"def",
"normalize_columns",
"(",
"features",
")",
":",
"# Calculate inverse document frequency.",
"features",
"=",
"features",
".",
"tocsc",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"features",
".",
"shape",
"[",
"1",
"]",
")",
":",
"document_frequency",
"=",... | This performs column normalization of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix. | [
"This",
"performs",
"column",
"normalization",
"of",
"community",
"embedding",
"features",
"."
] | python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py#L57-L87 | def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
descriptor_name = descriptor.name
if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(
descriptor_name,
(message.Message,),
{'DESCRIPTOR': descriptor, '__module__': None})
# If module not set, it wrongly points to the reflection.py module.
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return self._classes[descriptor.full_name] | [
"def",
"GetPrototype",
"(",
"self",
",",
"descriptor",
")",
":",
"if",
"descriptor",
".",
"full_name",
"not",
"in",
"self",
".",
"_classes",
":",
"descriptor_name",
"=",
"descriptor",
".",
"name",
"if",
"str",
"is",
"bytes",
":",
"# PY2",
"descriptor_name",
... | Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor. | [
"Builds",
"a",
"proto2",
"message",
"class",
"based",
"on",
"the",
"passed",
"in",
"descriptor",
"."
] | python | train |
ensime/ensime-vim | ensime_shared/editor.py | https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/editor.py#L298-L303 | def get_error_at(self, cursor):
"""Return error at position `cursor`."""
for error in self._errors:
if error.includes(self._vim.eval("expand('%:p')"), cursor):
return error
return None | [
"def",
"get_error_at",
"(",
"self",
",",
"cursor",
")",
":",
"for",
"error",
"in",
"self",
".",
"_errors",
":",
"if",
"error",
".",
"includes",
"(",
"self",
".",
"_vim",
".",
"eval",
"(",
"\"expand('%:p')\"",
")",
",",
"cursor",
")",
":",
"return",
"... | Return error at position `cursor`. | [
"Return",
"error",
"at",
"position",
"cursor",
"."
] | python | train |
hfaran/slack-export-viewer | slackviewer/reader.py | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/reader.py#L89-L111 | def compile_mpim_users(self):
"""
Gets the info for the members within the multiple person instant message
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
name: <name>
users: [<user_id>]
}
"""
mpim_data = self._read_from_json("mpims.json")
mpims = [c for c in mpim_data.values()]
all_mpim_users = []
for mpim in mpims:
mpim_members = {"name": mpim["name"], "users": [self.__USER_DATA[m] for m in mpim["members"]]}
all_mpim_users.append(mpim_members)
return all_mpim_users | [
"def",
"compile_mpim_users",
"(",
"self",
")",
":",
"mpim_data",
"=",
"self",
".",
"_read_from_json",
"(",
"\"mpims.json\"",
")",
"mpims",
"=",
"[",
"c",
"for",
"c",
"in",
"mpim_data",
".",
"values",
"(",
")",
"]",
"all_mpim_users",
"=",
"[",
"]",
"for",... | Gets the info for the members within the multiple person instant message
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
name: <name>
users: [<user_id>]
} | [
"Gets",
"the",
"info",
"for",
"the",
"members",
"within",
"the",
"multiple",
"person",
"instant",
"message"
] | python | train |
jazzband/sorl-thumbnail | sorl/thumbnail/engines/base.py | https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/base.py#L15-L28 | def create(self, image, geometry, options):
"""
Processing conductor, returns the thumbnail as an image engine instance
"""
image = self.cropbox(image, geometry, options)
image = self.orientation(image, geometry, options)
image = self.colorspace(image, geometry, options)
image = self.remove_border(image, options)
image = self.scale(image, geometry, options)
image = self.crop(image, geometry, options)
image = self.rounded(image, geometry, options)
image = self.blur(image, geometry, options)
image = self.padding(image, geometry, options)
return image | [
"def",
"create",
"(",
"self",
",",
"image",
",",
"geometry",
",",
"options",
")",
":",
"image",
"=",
"self",
".",
"cropbox",
"(",
"image",
",",
"geometry",
",",
"options",
")",
"image",
"=",
"self",
".",
"orientation",
"(",
"image",
",",
"geometry",
... | Processing conductor, returns the thumbnail as an image engine instance | [
"Processing",
"conductor",
"returns",
"the",
"thumbnail",
"as",
"an",
"image",
"engine",
"instance"
] | python | train |
materialsproject/pymatgen | pymatgen/io/vasp/outputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L859-L877 | def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint | [
"def",
"eigenvalue_band_properties",
"(",
"self",
")",
":",
"vbm",
"=",
"-",
"float",
"(",
"\"inf\"",
")",
"vbm_kpoint",
"=",
"None",
"cbm",
"=",
"float",
"(",
"\"inf\"",
")",
"cbm_kpoint",
"=",
"None",
"for",
"spin",
",",
"d",
"in",
"self",
".",
"eige... | Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct). | [
"Band",
"properties",
"from",
"the",
"eigenvalues",
"as",
"a",
"tuple",
"(",
"band",
"gap",
"cbm",
"vbm",
"is_band_gap_direct",
")",
"."
] | python | train |
drslump/pyshould | pyshould/expectation.py | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L51-L57 | def clone(self):
""" Clone this expression """
from copy import copy
clone = copy(self)
clone.expr = copy(self.expr)
clone.factory = False
return clone | [
"def",
"clone",
"(",
"self",
")",
":",
"from",
"copy",
"import",
"copy",
"clone",
"=",
"copy",
"(",
"self",
")",
"clone",
".",
"expr",
"=",
"copy",
"(",
"self",
".",
"expr",
")",
"clone",
".",
"factory",
"=",
"False",
"return",
"clone"
] | Clone this expression | [
"Clone",
"this",
"expression"
] | python | train |
lemieuxl/pyGenClean | pyGenClean/Misc/compare_gold_standard.py | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/Misc/compare_gold_standard.py#L436-L474 | def read_same_samples_file(filename, out_prefix):
"""Reads a file containing same samples."""
# The same samples
same_samples = []
# Creating the extraction files
gold_file = None
try:
gold_file = open(out_prefix + ".gold_samples2keep", 'w')
except IOError:
msg = "{}: can't create file".format(out_prefix + ".gold_samples2keep")
raise ProgramError(msg)
source_file = None
try:
source_file = open(out_prefix + ".source_panel_samples2keep", 'w')
except IOError:
msg = ("{}: can't create "
"file".format(out_prefix + ".source_panel_samples2keep"))
raise ProgramError(msg)
with open(filename, 'r') as input_file:
for line in input_file:
row = line.rstrip("\r\n").split("\t")
# Getting the samples
gold_sample = tuple(row[:2])
source_sample = tuple(row[2:])
same_samples.append((gold_sample, source_sample))
# Printing files
print >>gold_file, "\t".join(gold_sample)
print >>source_file, "\t".join(source_sample)
# Closing the files
gold_file.close()
source_file.close()
return same_samples | [
"def",
"read_same_samples_file",
"(",
"filename",
",",
"out_prefix",
")",
":",
"# The same samples",
"same_samples",
"=",
"[",
"]",
"# Creating the extraction files",
"gold_file",
"=",
"None",
"try",
":",
"gold_file",
"=",
"open",
"(",
"out_prefix",
"+",
"\".gold_sa... | Reads a file containing same samples. | [
"Reads",
"a",
"file",
"containing",
"same",
"samples",
"."
] | python | train |
coursera-dl/coursera-dl | coursera/cookies.py | https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/cookies.py#L209-L217 | def do_we_have_enough_cookies(cj, class_name):
"""
Check whether we have all the required cookies
to authenticate on class.coursera.org.
"""
domain = 'class.coursera.org'
path = "/" + class_name
return cj.get('csrf_token', domain=domain, path=path) is not None | [
"def",
"do_we_have_enough_cookies",
"(",
"cj",
",",
"class_name",
")",
":",
"domain",
"=",
"'class.coursera.org'",
"path",
"=",
"\"/\"",
"+",
"class_name",
"return",
"cj",
".",
"get",
"(",
"'csrf_token'",
",",
"domain",
"=",
"domain",
",",
"path",
"=",
"path... | Check whether we have all the required cookies
to authenticate on class.coursera.org. | [
"Check",
"whether",
"we",
"have",
"all",
"the",
"required",
"cookies",
"to",
"authenticate",
"on",
"class",
".",
"coursera",
".",
"org",
"."
] | python | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L131-L150 | def smooth(x, y, weights):
'''
in case the NLF cannot be described by
a square root function
commit bounded polynomial interpolation
'''
# Spline hard to smooth properly, therefore solfed with
# bounded polynomal interpolation
# ext=3: no extrapolation, but boundary value
# return UnivariateSpline(x, y, w=weights,
# s=len(y)*weights.max()*100, ext=3)
# return np.poly1d(np.polyfit(x,y,w=weights,deg=2))
p = np.polyfit(x, y, w=weights, deg=2)
if np.any(np.isnan(p)):
# couldn't even do polynomial fit
# as last option: assume constant noise
my = np.average(y, weights=weights)
return lambda x: my
return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1])) | [
"def",
"smooth",
"(",
"x",
",",
"y",
",",
"weights",
")",
":",
"# Spline hard to smooth properly, therefore solfed with\r",
"# bounded polynomal interpolation\r",
"# ext=3: no extrapolation, but boundary value\r",
"# return UnivariateSpline(x, y, w=weights,\r",
"# ... | in case the NLF cannot be described by
a square root function
commit bounded polynomial interpolation | [
"in",
"case",
"the",
"NLF",
"cannot",
"be",
"described",
"by",
"a",
"square",
"root",
"function",
"commit",
"bounded",
"polynomial",
"interpolation"
] | python | train |
Telefonica/toolium | toolium/utils.py | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L524-L538 | def _download_video(self, video_url, video_name):
"""Download a video from the remote node
:param video_url: video url
:param video_name: video name
"""
filename = '{0:0=2d}_{1}'.format(DriverWrappersPool.videos_number, video_name)
filename = '{}.mp4'.format(get_valid_filename(filename))
filepath = os.path.join(DriverWrappersPool.videos_directory, filename)
if not os.path.exists(DriverWrappersPool.videos_directory):
os.makedirs(DriverWrappersPool.videos_directory)
response = requests.get(video_url)
open(filepath, 'wb').write(response.content)
self.logger.info("Video saved in '%s'", filepath)
DriverWrappersPool.videos_number += 1 | [
"def",
"_download_video",
"(",
"self",
",",
"video_url",
",",
"video_name",
")",
":",
"filename",
"=",
"'{0:0=2d}_{1}'",
".",
"format",
"(",
"DriverWrappersPool",
".",
"videos_number",
",",
"video_name",
")",
"filename",
"=",
"'{}.mp4'",
".",
"format",
"(",
"g... | Download a video from the remote node
:param video_url: video url
:param video_name: video name | [
"Download",
"a",
"video",
"from",
"the",
"remote",
"node"
] | python | train |
pypa/pipenv | pipenv/patched/notpip/_internal/configuration.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/configuration.py#L139-L146 | def get_value(self, key):
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key)) | [
"def",
"get_value",
"(",
"self",
",",
"key",
")",
":",
"# type: (str) -> Any",
"try",
":",
"return",
"self",
".",
"_dictionary",
"[",
"key",
"]",
"except",
"KeyError",
":",
"raise",
"ConfigurationError",
"(",
"\"No such key - {}\"",
".",
"format",
"(",
"key",
... | Get a value from the configuration. | [
"Get",
"a",
"value",
"from",
"the",
"configuration",
"."
] | python | train |
cdgriffith/puremagic | puremagic/main.py | https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L116-L126 | def _file_details(filename):
""" Grab the start and end of the file"""
max_head, max_foot = _max_lengths()
with open(filename, "rb") as fin:
head = fin.read(max_head)
try:
fin.seek(-max_foot, os.SEEK_END)
except IOError:
fin.seek(0)
foot = fin.read()
return head, foot | [
"def",
"_file_details",
"(",
"filename",
")",
":",
"max_head",
",",
"max_foot",
"=",
"_max_lengths",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"fin",
":",
"head",
"=",
"fin",
".",
"read",
"(",
"max_head",
")",
"try",
":",
"... | Grab the start and end of the file | [
"Grab",
"the",
"start",
"and",
"end",
"of",
"the",
"file"
] | python | train |
aichaos/rivescript-python | rivescript/rivescript.py | https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L640-L653 | def set_variable(self, name, value):
"""Set a bot variable.
Equivalent to ``! var`` in RiveScript code.
:param str name: The name of the variable to set.
:param str value: The value of the variable.
Set this to ``None`` to delete the variable.
"""
if value is None:
# Unset the variable.
if name in self._var:
del self._var[name]
self._var[name] = value | [
"def",
"set_variable",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"# Unset the variable.",
"if",
"name",
"in",
"self",
".",
"_var",
":",
"del",
"self",
".",
"_var",
"[",
"name",
"]",
"self",
".",
"_var",
"[",
... | Set a bot variable.
Equivalent to ``! var`` in RiveScript code.
:param str name: The name of the variable to set.
:param str value: The value of the variable.
Set this to ``None`` to delete the variable. | [
"Set",
"a",
"bot",
"variable",
"."
] | python | train |
pantsbuild/pants | src/python/pants/pantsd/service/fs_event_service.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/service/fs_event_service.py#L105-L124 | def run(self):
"""Main service entrypoint. Called via Thread.start() via PantsDaemon.run()."""
if not (self._watchman and self._watchman.is_alive()):
raise PantsService.ServiceError('watchman is not running, bailing!')
# Enable watchman for the build root.
self._watchman.watch_project(self._build_root)
subscriptions = list(self._handlers.values())
# Setup subscriptions and begin the main event firing loop.
for handler_name, event_data in self._watchman.subscribed(self._build_root, subscriptions):
self._state.maybe_pause()
if self._state.is_terminating:
break
if event_data:
# As we receive events from watchman, trigger the relevant handlers.
self.fire_callback(handler_name, event_data) | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"not",
"(",
"self",
".",
"_watchman",
"and",
"self",
".",
"_watchman",
".",
"is_alive",
"(",
")",
")",
":",
"raise",
"PantsService",
".",
"ServiceError",
"(",
"'watchman is not running, bailing!'",
")",
"# Enable wa... | Main service entrypoint. Called via Thread.start() via PantsDaemon.run(). | [
"Main",
"service",
"entrypoint",
".",
"Called",
"via",
"Thread",
".",
"start",
"()",
"via",
"PantsDaemon",
".",
"run",
"()",
"."
] | python | train |
inasafe/inasafe | extras/data_audit.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/extras/data_audit.py#L237-L405 | def license_file_is_valid(license_filename, data_filename,
dirpath='.', verbose=False):
"""Check that XML license file for given filename_to_verify is valid.
Input:
license_filename: XML license file (must be an absolute path name)
data_filename: The data filename that is being audited
dir_path: Where the files live
verbose: Optional verbosity
Check for each datafile listed that
* Datafile tags are there and match the one specified
* Fields are non empty (except IP_info which can be left blank)
* Datafile exists
* Checksum is correct
* Datafile is flagged as publishable
If anything is violated an appropriate exception is raised.
If everything is honky dory the function will return True.
"""
if verbose:
print 'Parsing', license_filename
doc = xml2object(license_filename)
# Check that file is valid (e.g. all elements there)
if not doc.has_key('ga_license_file'):
msg = 'License file %s must have two elements' %license_filename
msg += ' at the root level. They are\n'
msg += ' <?xml version="1.0" encoding="iso-8859-1"?>\n'
msg += ' <ga_license_file>\n'
msg += 'The second element was found to be %s' %doc.keys()
raise WrongTags, msg
# Validate elements: metadata, datafile, datafile, ...
# FIXME (Ole): I'd like this to verified by the parser
# using a proper DTD template one day....
# For not, let's check the main ones.
elements = doc['ga_license_file']
if not elements.has_key('metadata'):
msg = 'Tag %s must have the element "metadata"'\
%doc.keys()[0]
msg += 'The element found was %s' %elements[0].nodeName
raise WrongTags, msg
if not elements.has_key('datafile'):
msg = 'Tag %s must have the element "datafile"'\
%doc.keys()[0]
msg += 'The element found was %s' %elements[0].nodeName
raise WrongTags, msg
for key in elements.keys():
msg = 'Invalid tag: %s' %key
if not key in ['metadata', 'datafile']:
raise WrongTags, msg
# Extract information for metadata section
if verbose: print
metadata = elements['metadata']
author = metadata['author']
if verbose: print 'Author: ', author
if author == '':
msg = 'Missing author'
raise Exception, msg
#svn_keywords = metadata['svn_keywords']
#if verbose: print 'SVN keywords: ', svn_keywords
# Extract information for datafile sections
datafile = elements['datafile']
if isinstance(datafile, XML_element):
datafile = [datafile]
# Check that filename to verify is listed in license file
found = False
for data in datafile:
if data['filename'] == data_filename:
found = True
break
if not found:
msg = 'Specified filename to verify %s ' %data_filename
msg += 'did not appear in license file %s' %license_filename
raise FilenameMismatch, msg
# Check contents for selected data_filename
#for data in datafile:
# if verbose: print
# Filename
if data['filename'] == '':
msg = 'Missing filename'
raise FilenameMismatch, msg
else:
filename = join(dirpath, data['filename'])
if verbose: print 'Filename: "%s"' %filename
try:
fid = open(filename, 'r')
except:
msg = 'Specified filename %s could not be opened'\
%filename
raise FilenameMismatch, msg
# CRC
reported_crc = data['checksum']
if verbose: print 'Checksum: "%s"' %reported_crc
file_crc = str(compute_checksum(filename))
if reported_crc != file_crc:
msg = 'Bad checksum (CRC).\n'
msg += ' The CRC reported in license file "%s" is "%s"\n'\
%(license_filename, reported_crc)
msg += ' The CRC computed from file "%s" is "%s"'\
%(filename, file_crc)
raise CRCMismatch, msg
# Accountable
accountable = data['accountable']
if verbose: print 'Accountable: "%s"' %accountable
if accountable == '':
msg = 'No accountable person specified'
raise Empty, msg
# Source
source = data['source']
if verbose: print 'Source: "%s"' %source
if source == '':
msg = 'No source specified'
raise Empty, msg
# IP owner
ip_owner = data['IP_owner']
if verbose: print 'IP owner: "%s"' %ip_owner
if ip_owner == '':
msg = 'No IP owner specified'
raise Empty, msg
# IP info
ip_info = data['IP_info']
if verbose: print 'IP info: "%s"' %ip_info
#if ip_info == '':
# msg = 'No IP info specified'
# raise Empty, msg
# Publishable
publishable = data['publishable']
if verbose: print 'Publishable: "%s"' %publishable
if publishable == '':
msg = 'No publishable value specified'
raise NotPublishable, msg
if publishable.upper() != 'YES':
msg = 'Data file %s is not flagged as publishable'\
%fid.name
raise NotPublishable, msg
# If we get this far, the license file is OK
return True | [
"def",
"license_file_is_valid",
"(",
"license_filename",
",",
"data_filename",
",",
"dirpath",
"=",
"'.'",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"'Parsing'",
",",
"license_filename",
"doc",
"=",
"xml2object",
"(",
"license_filena... | Check that XML license file for given filename_to_verify is valid.
Input:
license_filename: XML license file (must be an absolute path name)
data_filename: The data filename that is being audited
dir_path: Where the files live
verbose: Optional verbosity
Check for each datafile listed that
* Datafile tags are there and match the one specified
* Fields are non empty (except IP_info which can be left blank)
* Datafile exists
* Checksum is correct
* Datafile is flagged as publishable
If anything is violated an appropriate exception is raised.
If everything is honky dory the function will return True. | [
"Check",
"that",
"XML",
"license",
"file",
"for",
"given",
"filename_to_verify",
"is",
"valid",
"."
] | python | train |
Gandi/gandi.cli | gandi/cli/modules/domain.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/domain.py#L30-L77 | def create(cls, fqdn, duration, owner, admin, tech, bill, nameserver,
extra_parameter, background):
"""Create a domain."""
fqdn = fqdn.lower()
if not background and not cls.intty():
background = True
result = cls.call('domain.available', [fqdn])
while result[fqdn] == 'pending':
time.sleep(1)
result = cls.call('domain.available', [fqdn])
if result[fqdn] == 'unavailable':
raise DomainNotAvailable('%s is not available' % fqdn)
# retrieve handle of user and save it to configuration
user_handle = cls.call('contact.info')['handle']
cls.configure(True, 'api.handle', user_handle)
owner_ = owner or user_handle
admin_ = admin or user_handle
tech_ = tech or user_handle
bill_ = bill or user_handle
domain_params = {
'duration': duration,
'owner': owner_,
'admin': admin_,
'tech': tech_,
'bill': bill_,
}
if nameserver:
domain_params['nameservers'] = nameserver
if extra_parameter:
domain_params['extra'] = {}
for extra in extra_parameter:
domain_params['extra'][extra[0]] = extra[1]
result = cls.call('domain.create', fqdn, domain_params)
if background:
return result
# interactive mode, run a progress bar
cls.echo('Creating your domain.')
cls.display_progress(result)
cls.echo('Your domain %s has been created.' % fqdn) | [
"def",
"create",
"(",
"cls",
",",
"fqdn",
",",
"duration",
",",
"owner",
",",
"admin",
",",
"tech",
",",
"bill",
",",
"nameserver",
",",
"extra_parameter",
",",
"background",
")",
":",
"fqdn",
"=",
"fqdn",
".",
"lower",
"(",
")",
"if",
"not",
"backgr... | Create a domain. | [
"Create",
"a",
"domain",
"."
] | python | train |
pywbem/pywbem | pywbem/cim_obj.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L466-L577 | def _mof_escaped(strvalue):
# Note: This is a raw docstring because it shows many backslashes, and
# that avoids having to double them.
r"""
Return a MOF-escaped string from the input string.
Parameters:
strvalue (:term:`unicode string`): The string value. Must not be `None`.
Special characters must not be backslash-escaped.
Details on backslash-escaping:
`DSP0004` defines that the character repertoire for MOF string constants
is the entire repertoire for the CIM string datatype. That is, the entire
Unicode character repertoire except for U+0000.
The only character for which `DSP0004` requires the use of a MOF escape
sequence in a MOF string constant, is the double quote (because a MOF
string constant is enclosed in double quotes).
`DSP0004` defines MOF escape sequences for several more characters, but it
does not require their use in MOF. For example, it is valid for a MOF
string constant to contain the (unescaped) characters U+000D (newline) or
U+0009 (horizontal tab), and others.
Processing the MOF escape sequences as unescaped characters may not be
supported by MOF-related tools, and therefore this function plays it safe
and uses the MOF escape sequences defined in `DSP0004` as much as possible.
The following table shows the MOF escape sequences defined in `DSP0004`
and whether they are used (i.e. generated) by this function:
========== ==== ===========================================================
MOF escape Used Character
sequence
========== ==== ===========================================================
\b yes U+0008: Backspace
\t yes U+0009: Horizontal tab
\n yes U+000A: Line feed
\f yes U+000C: Form feed
\r yes U+000D: Carriage return
\" yes U+0022: Double quote (") (required to be used)
\' yes U+0027: Single quote (')
\\ yes U+005C: Backslash (\)
\x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four
hex digits, representing its UCS code position (this form
is limited to the UCS-2 character repertoire)
\X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four
hex digits, representing its UCS code position (this form
is limited to the UCS-2 character repertoire)
========== ==== ===========================================================
(1) Yes, for all other characters in the so called "control range"
U+0001..U+001F.
"""
escaped_str = strvalue
# Escape backslash (\)
escaped_str = escaped_str.replace('\\', '\\\\')
# Escape \b, \t, \n, \f, \r
# Note, the Python escape sequences happen to be the same as in MOF
escaped_str = escaped_str.\
replace('\b', '\\b').\
replace('\t', '\\t').\
replace('\n', '\\n').\
replace('\f', '\\f').\
replace('\r', '\\r')
# Escape remaining control characters (U+0001...U+001F), skipping
# U+0008, U+0009, U+000A, U+000C, U+000D that are already handled.
# We hard code it to be faster, plus we can easily skip already handled
# chars.
# The generic code would be (not skipping already handled chars):
# for cp in range(1, 32):
# c = six.unichr(cp)
# esc = '\\x{0:04X}'.format(cp)
# escaped_str = escaped_str.replace(c, esc)
escaped_str = escaped_str.\
replace(u'\u0001', '\\x0001').\
replace(u'\u0002', '\\x0002').\
replace(u'\u0003', '\\x0003').\
replace(u'\u0004', '\\x0004').\
replace(u'\u0005', '\\x0005').\
replace(u'\u0006', '\\x0006').\
replace(u'\u0007', '\\x0007').\
replace(u'\u000B', '\\x000B').\
replace(u'\u000E', '\\x000E').\
replace(u'\u000F', '\\x000F').\
replace(u'\u0010', '\\x0010').\
replace(u'\u0011', '\\x0011').\
replace(u'\u0012', '\\x0012').\
replace(u'\u0013', '\\x0013').\
replace(u'\u0014', '\\x0014').\
replace(u'\u0015', '\\x0015').\
replace(u'\u0016', '\\x0016').\
replace(u'\u0017', '\\x0017').\
replace(u'\u0018', '\\x0018').\
replace(u'\u0019', '\\x0019').\
replace(u'\u001A', '\\x001A').\
replace(u'\u001B', '\\x001B').\
replace(u'\u001C', '\\x001C').\
replace(u'\u001D', '\\x001D').\
replace(u'\u001E', '\\x001E').\
replace(u'\u001F', '\\x001F')
# Escape single and double quote
escaped_str = escaped_str.replace('"', '\\"')
escaped_str = escaped_str.replace("'", "\\'")
return escaped_str | [
"def",
"_mof_escaped",
"(",
"strvalue",
")",
":",
"# Note: This is a raw docstring because it shows many backslashes, and",
"# that avoids having to double them.",
"escaped_str",
"=",
"strvalue",
"# Escape backslash (\\)",
"escaped_str",
"=",
"escaped_str",
".",
"replace",
"(",
"... | r"""
Return a MOF-escaped string from the input string.
Parameters:
strvalue (:term:`unicode string`): The string value. Must not be `None`.
Special characters must not be backslash-escaped.
Details on backslash-escaping:
`DSP0004` defines that the character repertoire for MOF string constants
is the entire repertoire for the CIM string datatype. That is, the entire
Unicode character repertoire except for U+0000.
The only character for which `DSP0004` requires the use of a MOF escape
sequence in a MOF string constant, is the double quote (because a MOF
string constant is enclosed in double quotes).
`DSP0004` defines MOF escape sequences for several more characters, but it
does not require their use in MOF. For example, it is valid for a MOF
string constant to contain the (unescaped) characters U+000D (newline) or
U+0009 (horizontal tab), and others.
Processing the MOF escape sequences as unescaped characters may not be
supported by MOF-related tools, and therefore this function plays it safe
and uses the MOF escape sequences defined in `DSP0004` as much as possible.
The following table shows the MOF escape sequences defined in `DSP0004`
and whether they are used (i.e. generated) by this function:
========== ==== ===========================================================
MOF escape Used Character
sequence
========== ==== ===========================================================
\b yes U+0008: Backspace
\t yes U+0009: Horizontal tab
\n yes U+000A: Line feed
\f yes U+000C: Form feed
\r yes U+000D: Carriage return
\" yes U+0022: Double quote (") (required to be used)
\' yes U+0027: Single quote (')
\\ yes U+005C: Backslash (\)
\x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four
hex digits, representing its UCS code position (this form
is limited to the UCS-2 character repertoire)
\X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four
hex digits, representing its UCS code position (this form
is limited to the UCS-2 character repertoire)
========== ==== ===========================================================
(1) Yes, for all other characters in the so called "control range"
U+0001..U+001F. | [
"r",
"Return",
"a",
"MOF",
"-",
"escaped",
"string",
"from",
"the",
"input",
"string",
"."
] | python | train |
jenanwise/codequality | codequality/main.py | https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/main.py#L152-L174 | def _resolve_paths(self, *paths):
"""
Resolve paths into a set of filenames (no directories) to check.
External tools will handle directories as arguments differently, so for
consistency we just want to pass them filenames.
This method will recursively walk all directories and filter out
any paths that match self.options.ignores.
"""
result = set()
for path in paths:
if os.path.isdir(path):
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
path = os.path.join(dirpath, filename)
if path.startswith('.'):
path = path[1:].lstrip('/')
if not self._should_ignore(path):
result.add(path)
else:
result.add(path)
return result | [
"def",
"_resolve_paths",
"(",
"self",
",",
"*",
"paths",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"path",
"in",
"paths",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"for",
"dirpath",
",",
"_",
",",
"filenames",
"in"... | Resolve paths into a set of filenames (no directories) to check.
External tools will handle directories as arguments differently, so for
consistency we just want to pass them filenames.
This method will recursively walk all directories and filter out
any paths that match self.options.ignores. | [
"Resolve",
"paths",
"into",
"a",
"set",
"of",
"filenames",
"(",
"no",
"directories",
")",
"to",
"check",
"."
] | python | train |
hotdoc/hotdoc | hotdoc/utils/setup_utils.py | https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/setup_utils.py#L116-L133 | def symlink(source, link_name):
"""
Method to allow creating symlinks on Windows
"""
if os.path.islink(link_name) and os.readlink(link_name) == source:
return
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(source, link_name)
else:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(source) else 0
if csl(link_name, source, flags) == 0:
raise ctypes.WinError() | [
"def",
"symlink",
"(",
"source",
",",
"link_name",
")",
":",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"link_name",
")",
"and",
"os",
".",
"readlink",
"(",
"link_name",
")",
"==",
"source",
":",
"return",
"os_symlink",
"=",
"getattr",
"(",
"os",
"... | Method to allow creating symlinks on Windows | [
"Method",
"to",
"allow",
"creating",
"symlinks",
"on",
"Windows"
] | python | train |
rfverbruggen/rachiopy | rachiopy/schedulerule.py | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L33-L36 | def get(self, sched_rule_id):
"""Retrieve the information for a scheduleRule entity."""
path = '/'.join(['schedulerule', sched_rule_id])
return self.rachio.get(path) | [
"def",
"get",
"(",
"self",
",",
"sched_rule_id",
")",
":",
"path",
"=",
"'/'",
".",
"join",
"(",
"[",
"'schedulerule'",
",",
"sched_rule_id",
"]",
")",
"return",
"self",
".",
"rachio",
".",
"get",
"(",
"path",
")"
] | Retrieve the information for a scheduleRule entity. | [
"Retrieve",
"the",
"information",
"for",
"a",
"scheduleRule",
"entity",
"."
] | python | train |
Spinmob/spinmob | _plotting_mess.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_plotting_mess.py#L441-L524 | def realimag_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', rscale='linear', iscale='linear', rlabel='Real', ilabel='Imaginary', figure='gcf', clear=1, draw=True, **kwargs):
"""
Plots the real and imaginary parts of complex ydata vs xdata.
Parameters
----------
xdata
Real-valued x-axis data
ydata
Complex-valued y-axis data
eydata=None
Complex-valued y-error
exdata=None
Real-valued x-error
xscale='linear'
'log' or 'linear' scale of the x axis
rscale='linear'
'log' or 'linear' scale of the real axis
iscale='linear'
'log' or 'linear' scale of the imaginary axis
rlabel='Magnitude'
y-axis label for real value plot
ilabel='Phase'
y-axis label for imaginary value plot
figure='gcf'
Plot on the specified figure instance or 'gcf' for current figure.
clear=1
Clear the figure?
draw=True
Draw the figure when completed?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
_pylab.ioff()
# Make sure the dimensionality of the data sets matches
xdata, ydata = _match_data_sets(xdata, ydata)
exdata = _match_error_to_data_set(xdata, exdata)
eydata = _match_error_to_data_set(ydata, eydata)
# convert to real imag, and get error bars
rdata = []
idata = []
erdata = []
eidata = []
for l in range(len(ydata)):
rdata.append(_n.real(ydata[l]))
idata.append(_n.imag(ydata[l]))
if eydata[l] is None:
erdata.append(None)
eidata.append(None)
else:
erdata.append(_n.real(eydata[l]))
eidata.append(_n.imag(eydata[l]))
# set up the figure and axes
if figure == 'gcf': f = _pylab.gcf()
if clear: f.clear()
axes1 = _pylab.subplot(211)
axes2 = _pylab.subplot(212,sharex=axes1)
if 'xlabel' in kwargs : xlabel=kwargs.pop('xlabel')
else: xlabel=''
if 'ylabel' in kwargs : kwargs.pop('ylabel')
if 'tall' not in kwargs: kwargs['tall'] = False
if 'autoformat' not in kwargs: kwargs['autoformat'] = True
autoformat = kwargs['autoformat']
kwargs['autoformat'] = False
kwargs['xlabel'] = ''
xy_data(xdata, rdata, eydata=erdata, exdata=exdata, ylabel=rlabel, axes=axes1, clear=0, xscale=xscale, yscale=rscale, draw=False, **kwargs)
kwargs['autoformat'] = autoformat
kwargs['xlabel'] = xlabel
xy_data(xdata, idata, eydata=eidata, exdata=exdata, ylabel=ilabel, axes=axes2, clear=0, xscale=xscale, yscale=iscale, draw=False, **kwargs)
axes2.set_title('')
if draw:
_pylab.ion()
_pylab.draw()
_pylab.show() | [
"def",
"realimag_data",
"(",
"xdata",
",",
"ydata",
",",
"eydata",
"=",
"None",
",",
"exdata",
"=",
"None",
",",
"xscale",
"=",
"'linear'",
",",
"rscale",
"=",
"'linear'",
",",
"iscale",
"=",
"'linear'",
",",
"rlabel",
"=",
"'Real'",
",",
"ilabel",
"="... | Plots the real and imaginary parts of complex ydata vs xdata.
Parameters
----------
xdata
Real-valued x-axis data
ydata
Complex-valued y-axis data
eydata=None
Complex-valued y-error
exdata=None
Real-valued x-error
xscale='linear'
'log' or 'linear' scale of the x axis
rscale='linear'
'log' or 'linear' scale of the real axis
iscale='linear'
'log' or 'linear' scale of the imaginary axis
rlabel='Magnitude'
y-axis label for real value plot
ilabel='Phase'
y-axis label for imaginary value plot
figure='gcf'
Plot on the specified figure instance or 'gcf' for current figure.
clear=1
Clear the figure?
draw=True
Draw the figure when completed?
See spinmob.plot.xy.data() for additional optional keyword arguments. | [
"Plots",
"the",
"real",
"and",
"imaginary",
"parts",
"of",
"complex",
"ydata",
"vs",
"xdata",
"."
] | python | train |
kolypto/py-smsframework | smsframework/providers/loopback.py | https://github.com/kolypto/py-smsframework/blob/4f3d812711f5e2e037dc80c4014c815fe2d68a0b/smsframework/providers/loopback.py#L46-L66 | def received(self, src, body):
""" Simulate an incoming message
:type src: str
:param src: Message source
:type boby: str | unicode
:param body: Message body
:rtype: IncomingMessage
"""
# Create the message
self._msgid += 1
message = IncomingMessage(src, body, self._msgid)
# Log traffic
self._traffic.append(message)
# Handle it
self._receive_message(message)
# Finish
return message | [
"def",
"received",
"(",
"self",
",",
"src",
",",
"body",
")",
":",
"# Create the message",
"self",
".",
"_msgid",
"+=",
"1",
"message",
"=",
"IncomingMessage",
"(",
"src",
",",
"body",
",",
"self",
".",
"_msgid",
")",
"# Log traffic",
"self",
".",
"_traf... | Simulate an incoming message
:type src: str
:param src: Message source
:type boby: str | unicode
:param body: Message body
:rtype: IncomingMessage | [
"Simulate",
"an",
"incoming",
"message"
] | python | test |
CxAalto/gtfspy | gtfspy/stats.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L48-L64 | def get_median_lat_lon_of_stops(gtfs):
"""
Get median latitude AND longitude of stops
Parameters
----------
gtfs: GTFS
Returns
-------
median_lat : float
median_lon : float
"""
stops = gtfs.get_table("stops")
median_lat = numpy.percentile(stops['lat'].values, 50)
median_lon = numpy.percentile(stops['lon'].values, 50)
return median_lat, median_lon | [
"def",
"get_median_lat_lon_of_stops",
"(",
"gtfs",
")",
":",
"stops",
"=",
"gtfs",
".",
"get_table",
"(",
"\"stops\"",
")",
"median_lat",
"=",
"numpy",
".",
"percentile",
"(",
"stops",
"[",
"'lat'",
"]",
".",
"values",
",",
"50",
")",
"median_lon",
"=",
... | Get median latitude AND longitude of stops
Parameters
----------
gtfs: GTFS
Returns
-------
median_lat : float
median_lon : float | [
"Get",
"median",
"latitude",
"AND",
"longitude",
"of",
"stops"
] | python | valid |
RusticiSoftware/TinCanPython | tincan/remote_lrs.py | https://github.com/RusticiSoftware/TinCanPython/blob/424eedaa6d19221efb1108edb915fc332abbb317/tincan/remote_lrs.py#L738-L779 | def retrieve_agent_profile(self, agent, profile_id):
"""Retrieve agent profile with the specified parameters
:param agent: Agent object of the desired agent profile
:type agent: :class:`tincan.agent.Agent`
:param profile_id: UUID of the desired agent profile
:type profile_id: str | unicode
:return: LRS Response object with an agent profile doc as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if not isinstance(agent, Agent):
agent = Agent(agent)
request = HTTPRequest(
method="GET",
resource="agents/profile",
ignore404=True
)
request.query_params = {
"profileId": profile_id,
"agent": agent.to_json(self.version)
}
lrs_response = self._send_request(request)
if lrs_response.success:
doc = AgentProfileDocument(
id=profile_id,
content=lrs_response.data,
agent=agent
)
headers = lrs_response.response.getheaders()
if "lastModified" in headers and headers["lastModified"] is not None:
doc.timestamp = headers["lastModified"]
if "contentType" in headers and headers["contentType"] is not None:
doc.content_type = headers["contentType"]
if "etag" in headers and headers["etag"] is not None:
doc.etag = headers["etag"]
lrs_response.content = doc
return lrs_response | [
"def",
"retrieve_agent_profile",
"(",
"self",
",",
"agent",
",",
"profile_id",
")",
":",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
":",
"agent",
"=",
"Agent",
"(",
"agent",
")",
"request",
"=",
"HTTPRequest",
"(",
"method",
"=",
"\"GET\... | Retrieve agent profile with the specified parameters
:param agent: Agent object of the desired agent profile
:type agent: :class:`tincan.agent.Agent`
:param profile_id: UUID of the desired agent profile
:type profile_id: str | unicode
:return: LRS Response object with an agent profile doc as content
:rtype: :class:`tincan.lrs_response.LRSResponse` | [
"Retrieve",
"agent",
"profile",
"with",
"the",
"specified",
"parameters"
] | python | train |
saltstack/salt | salt/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L586-L618 | def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
elif chunk['order'] == 'first':
chunk['order'] = 0
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunk['name'] = salt.utils.data.decode(chunk['name'])
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks | [
"def",
"order_chunks",
"(",
"self",
",",
"chunks",
")",
":",
"cap",
"=",
"1",
"for",
"chunk",
"in",
"chunks",
":",
"if",
"'order'",
"in",
"chunk",
":",
"if",
"not",
"isinstance",
"(",
"chunk",
"[",
"'order'",
"]",
",",
"int",
")",
":",
"continue",
... | Sort the chunk list verifying that the chunks follow the order
specified in the order options. | [
"Sort",
"the",
"chunk",
"list",
"verifying",
"that",
"the",
"chunks",
"follow",
"the",
"order",
"specified",
"in",
"the",
"order",
"options",
"."
] | python | train |
wtolson/pysis | pysis/util/file_manipulation.py | https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/util/file_manipulation.py#L11-L26 | def write_file_list(filename, file_list=[], glob=None):
"""Write a list of files to a file.
:param filename: the name of the file to write the list to
:param file_list: a list of filenames to write to a file
:param glob: if glob is specified, it will ignore file_list and instead
create a list of files based on the pattern provide by glob (ex. *.cub)
"""
if glob:
file_list = iglob(glob)
with open(filename, 'w') as f:
for line in file_list:
f.write(line + '\n') | [
"def",
"write_file_list",
"(",
"filename",
",",
"file_list",
"=",
"[",
"]",
",",
"glob",
"=",
"None",
")",
":",
"if",
"glob",
":",
"file_list",
"=",
"iglob",
"(",
"glob",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"for",... | Write a list of files to a file.
:param filename: the name of the file to write the list to
:param file_list: a list of filenames to write to a file
:param glob: if glob is specified, it will ignore file_list and instead
create a list of files based on the pattern provide by glob (ex. *.cub) | [
"Write",
"a",
"list",
"of",
"files",
"to",
"a",
"file",
"."
] | python | train |
trentm/cmdln | examples/svn.py | https://github.com/trentm/cmdln/blob/55e980cf52c9b03e62d2349a7e62c9101d08ae10/examples/svn.py#L1067-L1086 | def do_switch(self, subcmd, opts, *args):
"""Update the working copy to a different URL.
usage:
1. switch URL [PATH]
2. switch --relocate FROM TO [PATH...]
1. Update the working copy to mirror a new URL within the repository.
This behaviour is similar to 'svn update', and is the way to
move a working copy to a branch or tag within the same repository.
2. Rewrite working copy URL metadata to reflect a syntactic change only.
This is used when repository's root URL changes (such as a schema
or hostname change) but your working copy still reflects the same
directory within the same repository.
${cmd_option_list}
"""
print "'svn %s' opts: %s" % (subcmd, opts)
print "'svn %s' args: %s" % (subcmd, args) | [
"def",
"do_switch",
"(",
"self",
",",
"subcmd",
",",
"opts",
",",
"*",
"args",
")",
":",
"print",
"\"'svn %s' opts: %s\"",
"%",
"(",
"subcmd",
",",
"opts",
")",
"print",
"\"'svn %s' args: %s\"",
"%",
"(",
"subcmd",
",",
"args",
")"
] | Update the working copy to a different URL.
usage:
1. switch URL [PATH]
2. switch --relocate FROM TO [PATH...]
1. Update the working copy to mirror a new URL within the repository.
This behaviour is similar to 'svn update', and is the way to
move a working copy to a branch or tag within the same repository.
2. Rewrite working copy URL metadata to reflect a syntactic change only.
This is used when repository's root URL changes (such as a schema
or hostname change) but your working copy still reflects the same
directory within the same repository.
${cmd_option_list} | [
"Update",
"the",
"working",
"copy",
"to",
"a",
"different",
"URL",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L25-L36 | def system_monitor_fan_threshold_down_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
fan = ET.SubElement(system_monitor, "fan")
threshold = ET.SubElement(fan, "threshold")
down_threshold = ET.SubElement(threshold, "down-threshold")
down_threshold.text = kwargs.pop('down_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"system_monitor_fan_threshold_down_threshold",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"system_monitor",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"system-monitor\"",
",",
"x... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
hyperledger/indy-plenum | plenum/server/node.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L637-L648 | def execute_pool_txns(self, three_pc_batch) -> List:
"""
Execute a transaction that involves consensus pool management, like
adding a node, client or a steward.
:param ppTime: PrePrepare request time
:param reqs_keys: requests keys to be committed
"""
committed_txns = self.default_executer(three_pc_batch)
for txn in committed_txns:
self.poolManager.onPoolMembershipChange(txn)
return committed_txns | [
"def",
"execute_pool_txns",
"(",
"self",
",",
"three_pc_batch",
")",
"->",
"List",
":",
"committed_txns",
"=",
"self",
".",
"default_executer",
"(",
"three_pc_batch",
")",
"for",
"txn",
"in",
"committed_txns",
":",
"self",
".",
"poolManager",
".",
"onPoolMembers... | Execute a transaction that involves consensus pool management, like
adding a node, client or a steward.
:param ppTime: PrePrepare request time
:param reqs_keys: requests keys to be committed | [
"Execute",
"a",
"transaction",
"that",
"involves",
"consensus",
"pool",
"management",
"like",
"adding",
"a",
"node",
"client",
"or",
"a",
"steward",
"."
] | python | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L927-L988 | def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
"""
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) | [
"def",
"get_similar_items",
"(",
"self",
",",
"items",
"=",
"None",
",",
"k",
"=",
"10",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"items",
"is",
"None",
":",
"get_all_items",
"=",
"True",
"items",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_... | Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items() | [
"Get",
"the",
"k",
"most",
"similar",
"items",
"for",
"each",
"item",
"in",
"items",
"."
] | python | train |
jim-easterbrook/pyctools | src/pyctools/core/frame.py | https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/frame.py#L324-L342 | def get(self, tag, default=None):
"""Get a metadata value.
Each metadata value is referenced by a ``tag`` -- a short
string such as ``'xlen'`` or ``'audit'``. In the sidecar file
these tag names are prepended with ``'Xmp.pyctools.'``, which
corresponds to a custom namespace in the XML file.
:param str tag: The tag name.
:returns: The metadata value associated with ``tag``.
:rtype: :py:class:`str`
"""
full_tag = 'Xmp.pyctools.' + tag
if full_tag in self.data:
return self.data[full_tag]
return default | [
"def",
"get",
"(",
"self",
",",
"tag",
",",
"default",
"=",
"None",
")",
":",
"full_tag",
"=",
"'Xmp.pyctools.'",
"+",
"tag",
"if",
"full_tag",
"in",
"self",
".",
"data",
":",
"return",
"self",
".",
"data",
"[",
"full_tag",
"]",
"return",
"default"
] | Get a metadata value.
Each metadata value is referenced by a ``tag`` -- a short
string such as ``'xlen'`` or ``'audit'``. In the sidecar file
these tag names are prepended with ``'Xmp.pyctools.'``, which
corresponds to a custom namespace in the XML file.
:param str tag: The tag name.
:returns: The metadata value associated with ``tag``.
:rtype: :py:class:`str` | [
"Get",
"a",
"metadata",
"value",
"."
] | python | train |
ska-sa/spead2 | spead2/__init__.py | https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L81-L97 | def parse_range_list(ranges):
"""Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity.
"""
if not ranges:
return []
parts = ranges.split(',')
out = []
for part in parts:
fields = part.split('-', 1)
if len(fields) == 2:
start = int(fields[0])
end = int(fields[1])
out.extend(range(start, end + 1))
else:
out.append(int(fields[0]))
return out | [
"def",
"parse_range_list",
"(",
"ranges",
")",
":",
"if",
"not",
"ranges",
":",
"return",
"[",
"]",
"parts",
"=",
"ranges",
".",
"split",
"(",
"','",
")",
"out",
"=",
"[",
"]",
"for",
"part",
"in",
"parts",
":",
"fields",
"=",
"part",
".",
"split",... | Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity. | [
"Split",
"a",
"string",
"like",
"2",
"3",
"-",
"5",
"8",
"9",
"-",
"11",
"into",
"a",
"list",
"of",
"integers",
".",
"This",
"is",
"intended",
"to",
"ease",
"adding",
"command",
"-",
"line",
"options",
"for",
"dealing",
"with",
"affinity",
"."
] | python | train |
softlayer/softlayer-python | SoftLayer/managers/block.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/block.py#L306-L333 | def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
"""Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
mask_items = [
'id',
'billingItem',
'storageType[keyName]',
'capacityGb',
'provisionedIops',
'storageTierLevel',
'staasVersion',
'hasEncryptionAtRest',
]
block_mask = ','.join(mask_items)
volume = self.get_block_volume_details(volume_id, mask=block_mask)
order = storage_utils.prepare_modify_order_object(
self, volume, new_iops, new_tier_level, new_size
)
return self.client.call('Product_Order', 'placeOrder', order) | [
"def",
"order_modified_volume",
"(",
"self",
",",
"volume_id",
",",
"new_size",
"=",
"None",
",",
"new_iops",
"=",
"None",
",",
"new_tier_level",
"=",
"None",
")",
":",
"mask_items",
"=",
"[",
"'id'",
",",
"'billingItem'",
",",
"'storageType[keyName]'",
",",
... | Places an order for modifying an existing block volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt | [
"Places",
"an",
"order",
"for",
"modifying",
"an",
"existing",
"block",
"volume",
"."
] | python | train |
krukas/Trionyx | trionyx/quickstart/__init__.py | https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/quickstart/__init__.py#L47-L66 | def create_app(self, apps_path, name):
"""
Create Trionyx app in given path
:param str path: path to create app in.
:param str name: name of app
:raises FileExistsError:
"""
app_path = os.path.join(apps_path, name.lower())
shutil.copytree(self.app_path, app_path)
self.update_file(app_path, '__init__.py', {
'name': name.lower()
})
self.update_file(app_path, 'apps.py', {
'name': name.lower(),
'verbose_name': name.capitalize()
}) | [
"def",
"create_app",
"(",
"self",
",",
"apps_path",
",",
"name",
")",
":",
"app_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"apps_path",
",",
"name",
".",
"lower",
"(",
")",
")",
"shutil",
".",
"copytree",
"(",
"self",
".",
"app_path",
",",
"a... | Create Trionyx app in given path
:param str path: path to create app in.
:param str name: name of app
:raises FileExistsError: | [
"Create",
"Trionyx",
"app",
"in",
"given",
"path"
] | python | train |
peopledoc/workalendar | workalendar/core.py | https://github.com/peopledoc/workalendar/blob/d044d5dfc1709ec388db34dab583dd554cc66c4e/workalendar/core.py#L304-L318 | def get_first_weekday_after(day, weekday):
"""Get the first weekday after a given day. If the day is the same
weekday, the same day will be returned.
>>> # the first monday after Apr 1 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 1), MON)
datetime.date(2015, 4, 6)
>>> # the first tuesday after Apr 14 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 14), TUE)
datetime.date(2015, 4, 14)
"""
day_delta = (weekday - day.weekday()) % 7
day = day + timedelta(days=day_delta)
return day | [
"def",
"get_first_weekday_after",
"(",
"day",
",",
"weekday",
")",
":",
"day_delta",
"=",
"(",
"weekday",
"-",
"day",
".",
"weekday",
"(",
")",
")",
"%",
"7",
"day",
"=",
"day",
"+",
"timedelta",
"(",
"days",
"=",
"day_delta",
")",
"return",
"day"
] | Get the first weekday after a given day. If the day is the same
weekday, the same day will be returned.
>>> # the first monday after Apr 1 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 1), MON)
datetime.date(2015, 4, 6)
>>> # the first tuesday after Apr 14 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 14), TUE)
datetime.date(2015, 4, 14) | [
"Get",
"the",
"first",
"weekday",
"after",
"a",
"given",
"day",
".",
"If",
"the",
"day",
"is",
"the",
"same",
"weekday",
"the",
"same",
"day",
"will",
"be",
"returned",
"."
] | python | train |
Duke-GCB/DukeDSClient | ddsc/core/ignorefile.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ignorefile.py#L47-L55 | def add_filename_pattern(self, dir_name, pattern):
"""
Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern
"""
full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern)
filename_regex = fnmatch.translate(full_pattern)
self.regex_list.append(re.compile(filename_regex)) | [
"def",
"add_filename_pattern",
"(",
"self",
",",
"dir_name",
",",
"pattern",
")",
":",
"full_pattern",
"=",
"'{}{}{}'",
".",
"format",
"(",
"dir_name",
",",
"os",
".",
"sep",
",",
"pattern",
")",
"filename_regex",
"=",
"fnmatch",
".",
"translate",
"(",
"fu... | Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern | [
"Adds",
"a",
"Unix",
"shell",
"-",
"style",
"wildcard",
"pattern",
"underneath",
"the",
"specified",
"directory",
":",
"param",
"dir_name",
":",
"str",
":",
"directory",
"that",
"contains",
"the",
"pattern",
":",
"param",
"pattern",
":",
"str",
":",
"Unix",
... | python | train |
tdryer/hangups | hangups/client.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L547-L552 | async def get_suggested_entities(self, get_suggested_entities_request):
"""Return suggested contacts."""
response = hangouts_pb2.GetSuggestedEntitiesResponse()
await self._pb_request('contacts/getsuggestedentities',
get_suggested_entities_request, response)
return response | [
"async",
"def",
"get_suggested_entities",
"(",
"self",
",",
"get_suggested_entities_request",
")",
":",
"response",
"=",
"hangouts_pb2",
".",
"GetSuggestedEntitiesResponse",
"(",
")",
"await",
"self",
".",
"_pb_request",
"(",
"'contacts/getsuggestedentities'",
",",
"get... | Return suggested contacts. | [
"Return",
"suggested",
"contacts",
"."
] | python | valid |
blue-yonder/tsfresh | tsfresh/utilities/string_manipulation.py | https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/utilities/string_manipulation.py#L47-L71 | def convert_to_output_format(param):
"""
Helper function to convert parameters to a valid string, that can be used in a column name.
Does the opposite which is used in the from_columns function.
The parameters are sorted by their name and written out in the form
<param name>_<param value>__<param name>_<param value>__ ...
If a <param_value> is a string, this method will wrap it with parenthesis ", so "<param_value>"
:param param: The dictionary of parameters to write out
:type param: dict
:return: The string of parsed parameters
:rtype: str
"""
def add_parenthesis_if_string_value(x):
if isinstance(x, string_types):
return '"' + str(x) + '"'
else:
return str(x)
return "__".join(str(key) + "_" + add_parenthesis_if_string_value(param[key]) for key in sorted(param.keys())) | [
"def",
"convert_to_output_format",
"(",
"param",
")",
":",
"def",
"add_parenthesis_if_string_value",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"string_types",
")",
":",
"return",
"'\"'",
"+",
"str",
"(",
"x",
")",
"+",
"'\"'",
"else",
":",
"... | Helper function to convert parameters to a valid string, that can be used in a column name.
Does the opposite which is used in the from_columns function.
The parameters are sorted by their name and written out in the form
<param name>_<param value>__<param name>_<param value>__ ...
If a <param_value> is a string, this method will wrap it with parenthesis ", so "<param_value>"
:param param: The dictionary of parameters to write out
:type param: dict
:return: The string of parsed parameters
:rtype: str | [
"Helper",
"function",
"to",
"convert",
"parameters",
"to",
"a",
"valid",
"string",
"that",
"can",
"be",
"used",
"in",
"a",
"column",
"name",
".",
"Does",
"the",
"opposite",
"which",
"is",
"used",
"in",
"the",
"from_columns",
"function",
"."
] | python | train |
nadirizr/json-logic-py | json_logic/__init__.py | https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L59-L63 | def less_or_equal(a, b, *args):
"""Implements the '<=' operator with JS-style type coertion."""
return (
less(a, b) or soft_equals(a, b)
) and (not args or less_or_equal(b, *args)) | [
"def",
"less_or_equal",
"(",
"a",
",",
"b",
",",
"*",
"args",
")",
":",
"return",
"(",
"less",
"(",
"a",
",",
"b",
")",
"or",
"soft_equals",
"(",
"a",
",",
"b",
")",
")",
"and",
"(",
"not",
"args",
"or",
"less_or_equal",
"(",
"b",
",",
"*",
"... | Implements the '<=' operator with JS-style type coertion. | [
"Implements",
"the",
"<",
"=",
"operator",
"with",
"JS",
"-",
"style",
"type",
"coertion",
"."
] | python | valid |
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/billing/apis/default_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/billing/apis/default_api.py#L414-L435 | def get_service_package_quota_history(self, **kwargs): # noqa: E501
"""Service package quota history. # noqa: E501
Get your quota usage history. This API is available for commercial accounts. Aggregator accounts can see own and subtenant quota usage data. History data is ordered in ascending order based on the added timestamp. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/service-packages-quota-history -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_service_package_quota_history(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: Maximum amount of quota history entries contained in one paged response.
:param str after: To fetch after which quota history ID. The results will contain entries after specified entry.
:return: ServicePackageQuotaHistoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_service_package_quota_history_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_service_package_quota_history_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"get_service_package_quota_history",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"get_se... | Service package quota history. # noqa: E501
Get your quota usage history. This API is available for commercial accounts. Aggregator accounts can see own and subtenant quota usage data. History data is ordered in ascending order based on the added timestamp. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/service-packages-quota-history -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_service_package_quota_history(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: Maximum amount of quota history entries contained in one paged response.
:param str after: To fetch after which quota history ID. The results will contain entries after specified entry.
:return: ServicePackageQuotaHistoryResponse
If the method is called asynchronously,
returns the request thread. | [
"Service",
"package",
"quota",
"history",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
mlavin/django-all-access | allaccess/clients.py | https://github.com/mlavin/django-all-access/blob/4b15b6c9dedf8080a7c477e0af1142c609ec5598/allaccess/clients.py#L99-L109 | def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = force_text(request.build_absolute_uri(callback))
raw_token = self.get_request_token(request, callback)
token, secret = self.parse_raw_token(raw_token)
if token is not None and secret is not None:
request.session[self.session_key] = raw_token
return {
'oauth_token': token,
'oauth_callback': callback,
} | [
"def",
"get_redirect_args",
"(",
"self",
",",
"request",
",",
"callback",
")",
":",
"callback",
"=",
"force_text",
"(",
"request",
".",
"build_absolute_uri",
"(",
"callback",
")",
")",
"raw_token",
"=",
"self",
".",
"get_request_token",
"(",
"request",
",",
... | Get request parameters for redirect url. | [
"Get",
"request",
"parameters",
"for",
"redirect",
"url",
"."
] | python | train |
vtkiorg/vtki | vtki/qt_plotting.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/qt_plotting.py#L472-L501 | def update_app_icon(self):
"""
Update the app icon if the user is not trying to resize the window.
"""
if os.name == 'nt' or not hasattr(self, '_last_window_size'): # pragma: no cover
# DO NOT EVEN ATTEMPT TO UPDATE ICON ON WINDOWS
return
cur_time = time.time()
if self._last_window_size != self.window_size: # pragma: no cover
# Window size hasn't remained constant since last render.
# This means the user is resizing it so ignore update.
pass
elif ((cur_time - self._last_update_time > BackgroundPlotter.ICON_TIME_STEP)
and self._last_camera_pos != self.camera_position):
# its been a while since last update OR
# the camera position has changed and its been at leat one second
# Update app icon as preview of the window
img = pad_image(self.image)
qimage = QtGui.QImage(img.copy(), img.shape[1],
img.shape[0], QtGui.QImage.Format_RGB888)
icon = QtGui.QIcon(QtGui.QPixmap.fromImage(qimage))
self.app.setWindowIcon(icon)
# Update trackers
self._last_update_time = cur_time
self._last_camera_pos = self.camera_position
# Update trackers
self._last_window_size = self.window_size | [
"def",
"update_app_icon",
"(",
"self",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
"or",
"not",
"hasattr",
"(",
"self",
",",
"'_last_window_size'",
")",
":",
"# pragma: no cover",
"# DO NOT EVEN ATTEMPT TO UPDATE ICON ON WINDOWS",
"return",
"cur_time",
"=",
"... | Update the app icon if the user is not trying to resize the window. | [
"Update",
"the",
"app",
"icon",
"if",
"the",
"user",
"is",
"not",
"trying",
"to",
"resize",
"the",
"window",
"."
] | python | train |
facelessuser/soupsieve | soupsieve/__meta__.py | https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__meta__.py#L157-L186 | def parse_version(ver, pre=False):
"""Parse version into a comparable Version tuple."""
m = RE_VER.match(ver)
# Handle major, minor, micro
major = int(m.group('major'))
minor = int(m.group('minor')) if m.group('minor') else 0
micro = int(m.group('micro')) if m.group('micro') else 0
# Handle pre releases
if m.group('type'):
release = PRE_REL_MAP[m.group('type')]
pre = int(m.group('pre'))
else:
release = "final"
pre = 0
# Handle development releases
dev = m.group('dev') if m.group('dev') else 0
if m.group('dev'):
dev = int(m.group('dev'))
release = '.dev-' + release if pre else '.dev'
else:
dev = 0
# Handle post
post = int(m.group('post')) if m.group('post') else 0
return Version(major, minor, micro, release, pre, post, dev) | [
"def",
"parse_version",
"(",
"ver",
",",
"pre",
"=",
"False",
")",
":",
"m",
"=",
"RE_VER",
".",
"match",
"(",
"ver",
")",
"# Handle major, minor, micro",
"major",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"'major'",
")",
")",
"minor",
"=",
"int",
"("... | Parse version into a comparable Version tuple. | [
"Parse",
"version",
"into",
"a",
"comparable",
"Version",
"tuple",
"."
] | python | train |
mbj4668/pyang | pyang/translators/dsdl.py | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L666-L680 | def install_def(self, name, dstmt, def_map, interleave=False):
"""Install definition `name` into the appropriate dictionary.
`dstmt` is the definition statement ('typedef' or 'grouping')
that is to be mapped to a RELAX NG named pattern '<define
name="`name`">'. `def_map` must be either `self.local_defs` or
`self.global_defs`. `interleave` determines the interleave
status inside the definition.
"""
delem = SchemaNode.define(name, interleave=interleave)
delem.attr["name"] = name
def_map[name] = delem
if def_map is self.global_defs: self.gg_level += 1
self.handle_substmts(dstmt, delem)
if def_map is self.global_defs: self.gg_level -= 1 | [
"def",
"install_def",
"(",
"self",
",",
"name",
",",
"dstmt",
",",
"def_map",
",",
"interleave",
"=",
"False",
")",
":",
"delem",
"=",
"SchemaNode",
".",
"define",
"(",
"name",
",",
"interleave",
"=",
"interleave",
")",
"delem",
".",
"attr",
"[",
"\"na... | Install definition `name` into the appropriate dictionary.
`dstmt` is the definition statement ('typedef' or 'grouping')
that is to be mapped to a RELAX NG named pattern '<define
name="`name`">'. `def_map` must be either `self.local_defs` or
`self.global_defs`. `interleave` determines the interleave
status inside the definition. | [
"Install",
"definition",
"name",
"into",
"the",
"appropriate",
"dictionary",
"."
] | python | train |
studionow/pybrightcove | pybrightcove/config.py | https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/config.py#L40-L46 | def has_option(section, name):
"""
Wrapper around ConfigParser's ``has_option`` method.
"""
cfg = ConfigParser.SafeConfigParser({"working_dir": "/tmp", "debug": "0"})
cfg.read(CONFIG_LOCATIONS)
return cfg.has_option(section, name) | [
"def",
"has_option",
"(",
"section",
",",
"name",
")",
":",
"cfg",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
"{",
"\"working_dir\"",
":",
"\"/tmp\"",
",",
"\"debug\"",
":",
"\"0\"",
"}",
")",
"cfg",
".",
"read",
"(",
"CONFIG_LOCATIONS",
")",
"retu... | Wrapper around ConfigParser's ``has_option`` method. | [
"Wrapper",
"around",
"ConfigParser",
"s",
"has_option",
"method",
"."
] | python | train |
klen/zeta-library | zetalibrary/main.py | https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/main.py#L42-L58 | def watch(args):
" Watch directory for changes and auto pack sources "
assert op.isdir(args.source), "Watch mode allowed only for directories."
print 'Zeta-library v. %s watch mode' % VERSION
print '================================'
print 'Ctrl+C for exit\n'
observer = Observer()
handler = ZetaTrick(args=args)
observer.schedule(handler, args.source, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
print "\nWatch mode stoped."
observer.join() | [
"def",
"watch",
"(",
"args",
")",
":",
"assert",
"op",
".",
"isdir",
"(",
"args",
".",
"source",
")",
",",
"\"Watch mode allowed only for directories.\"",
"print",
"'Zeta-library v. %s watch mode'",
"%",
"VERSION",
"print",
"'================================'",
"print",... | Watch directory for changes and auto pack sources | [
"Watch",
"directory",
"for",
"changes",
"and",
"auto",
"pack",
"sources"
] | python | train |
def update_config(self, config):
    """Update the group's configuration.

    @param config: Dictionary with configuration to update.
    @return: Dictionary with updated configuration.
    """
    endpoint = self._path() + '/config'
    payload = config_to_json(config)
    response = self._get_resource_root().put(endpoint, data=payload)
    return json_to_config(response)
"def",
"update_config",
"(",
"self",
",",
"config",
")",
":",
"path",
"=",
"self",
".",
"_path",
"(",
")",
"+",
"'/config'",
"resp",
"=",
"self",
".",
"_get_resource_root",
"(",
")",
".",
"put",
"(",
"path",
",",
"data",
"=",
"config_to_json",
"(",
"... | Update the group's configuration.
@param config: Dictionary with configuration to update.
@return: Dictionary with updated configuration. | [
"Update",
"the",
"group",
"s",
"configuration",
"."
] | python | train |
def get_graphviz_dirtree(self, engine="automatic", **kwargs):
    """
    Generate a graph of the files and directories in the node workdir,
    expressed in the DOT language.

    Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
    """
    # "automatic" maps to the fdp layout engine.
    chosen_engine = "fdp" if engine == "automatic" else engine
    return Dirviz(self.workdir).get_cluster_graph(engine=chosen_engine, **kwargs)
"def",
"get_graphviz_dirtree",
"(",
"self",
",",
"engine",
"=",
"\"automatic\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"engine",
"==",
"\"automatic\"",
":",
"engine",
"=",
"\"fdp\"",
"return",
"Dirviz",
"(",
"self",
".",
"workdir",
")",
".",
"get_cluste... | Generate directory graph in the DOT language. The graph shows the files and directories
in the node workdir.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> | [
"Generate",
"directory",
"graph",
"in",
"the",
"DOT",
"language",
".",
"The",
"graph",
"show",
"the",
"files",
"and",
"directories",
"in",
"the",
"node",
"workdir",
"."
] | python | train |
def structure_results(res):
    """Format Elasticsearch result as Python dictionary"""
    # Only this fixed set of geonames fields is copied out of each hit;
    # anything else in the source records is dropped.
    keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code',
            u'alternativenames', u'asciiname', u'cc2', u'coordinates',
            u'country_code2', u'country_code3', u'dem', u'elevation',
            u'feature_class', u'feature_code', u'geonameid',
            u'modification_date', u'name', u'population', u'timezone']
    hits = [{field: entry[field] for field in keys} for entry in res]
    return {'hits': {'hits': hits}}
"def",
"structure_results",
"(",
"res",
")",
":",
"out",
"=",
"{",
"'hits'",
":",
"{",
"'hits'",
":",
"[",
"]",
"}",
"}",
"keys",
"=",
"[",
"u'admin1_code'",
",",
"u'admin2_code'",
",",
"u'admin3_code'",
",",
"u'admin4_code'",
",",
"u'alternativenames'",
"... | Format Elasticsearch result as Python dictionary | [
"Format",
"Elasticsearch",
"result",
"as",
"Python",
"dictionary"
] | python | train |
def to_query_parameters(parameters):
    """Converts DB-API parameter values into query parameters.

    :type parameters: Mapping[str, Any] or Sequence[Any]
    :param parameters: A dictionary or sequence of query parameter values.

    :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
    :returns: A list of query parameters.
    """
    if parameters is None:
        return []
    # Mappings become named parameters; any other sequence becomes
    # positional parameters.
    converter = (
        to_query_parameters_dict
        if isinstance(parameters, collections_abc.Mapping)
        else to_query_parameters_list
    )
    return converter(parameters)
"def",
"to_query_parameters",
"(",
"parameters",
")",
":",
"if",
"parameters",
"is",
"None",
":",
"return",
"[",
"]",
"if",
"isinstance",
"(",
"parameters",
",",
"collections_abc",
".",
"Mapping",
")",
":",
"return",
"to_query_parameters_dict",
"(",
"parameters"... | Converts DB-API parameter values into query parameters.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters: A dictionary or sequence of query parameter values.
:rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
:returns: A list of query parameters. | [
"Converts",
"DB",
"-",
"API",
"parameter",
"values",
"into",
"query",
"parameters",
"."
] | python | train |
def get_app_name(self):
    """
    Return the appname of the APK

    :rtype: string
    """
    # Prefer the label declared on the main activity; fall back to the
    # application-wide label when the activity has none.
    main_activity_name = self.get_main_activity()
    app_name = self.get_element('activity', 'label', name=main_activity_name)
    if not app_name:
        app_name = self.get_element('application', 'label')
    # A label of the form "@7f0b0000" is a hex resource reference that must
    # be resolved through the APK's resource table.
    # NOTE(review): assumes get_element always returns a string here; a None
    # fallback would crash on .startswith — confirm against get_element.
    if app_name.startswith("@"):
        res_id = int(app_name[1:], 16)
        res_parser = self.get_android_resources()
        try:
            # Resolve with the default resource configuration and take the
            # first (config, value) match.
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            # Resolution failures (bad id, empty result list, ...) are
            # logged and an empty name is returned instead of raising.
            anconf.warning("Exception selecting app name: %s" % e)
            app_name = ""
    return app_name
"def",
"get_app_name",
"(",
"self",
")",
":",
"main_activity_name",
"=",
"self",
".",
"get_main_activity",
"(",
")",
"app_name",
"=",
"self",
".",
"get_element",
"(",
"'activity'",
",",
"'label'",
",",
"name",
"=",
"main_activity_name",
")",
"if",
"not",
"ap... | Return the appname of the APK
:rtype: string | [
"Return",
"the",
"appname",
"of",
"the",
"APK"
] | python | train |
def send_signal(self, s):
    """
    Send a signal to the daemon process.

    The signal must have been enabled using the ``signals``
    parameter of :py:meth:`Service.__init__`. Otherwise, a
    ``ValueError`` is raised. A ``ValueError`` is also raised when
    the daemon is not currently running.
    """
    # Raises if the signal was not enabled at construction time.
    self._get_signal_event(s)
    daemon_pid = self.get_pid()
    if not daemon_pid:
        raise ValueError('Daemon is not running.')
    os.kill(daemon_pid, s)
"def",
"send_signal",
"(",
"self",
",",
"s",
")",
":",
"self",
".",
"_get_signal_event",
"(",
"s",
")",
"# Check if signal has been enabled",
"pid",
"=",
"self",
".",
"get_pid",
"(",
")",
"if",
"not",
"pid",
":",
"raise",
"ValueError",
"(",
"'Daemon is not r... | Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised. | [
"Send",
"a",
"signal",
"to",
"the",
"daemon",
"process",
"."
] | python | train |
def invoice_items(self, **params):
    """Return a deferred."""
    # Scope the listing to this customer before delegating.
    query = dict(params, customer=self.id)
    return InvoiceItem.all(self.api_key, **query)
"def",
"invoice_items",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"params",
"[",
"'customer'",
"]",
"=",
"self",
".",
"id",
"return",
"InvoiceItem",
".",
"all",
"(",
"self",
".",
"api_key",
",",
"*",
"*",
"params",
")"
] | Return a deferred. | [
"Return",
"a",
"deferred",
"."
] | python | train |
def get_details(self, language=None):
    """
    Retrieves full information on the place matching the place_id.

    Stores the response in the `place` property. Does nothing if the
    details have already been fetched.
    """
    if self._place is not None:
        return
    if language is None:
        # Reuse the language of the originating query, defaulting to English.
        language = self._query_instance._request_params.get(
            'language', lang.ENGLISH)
    place = _get_place_details(
        self.place_id, self._query_instance.api_key,
        self._query_instance.sensor, language=language)
    self._place = Place(self._query_instance, place)
"def",
"get_details",
"(",
"self",
",",
"language",
"=",
"None",
")",
":",
"if",
"self",
".",
"_place",
"is",
"None",
":",
"if",
"language",
"is",
"None",
":",
"try",
":",
"language",
"=",
"self",
".",
"_query_instance",
".",
"_request_params",
"[",
"'... | Retrieves full information on the place matching the place_id.
Stores the response in the `place` property. | [
"Retrieves",
"full",
"information",
"on",
"the",
"place",
"matching",
"the",
"place_id",
"."
] | python | train |
def _first_block(dask_object):
    """Extract the first block / partition from a dask object
    """
    if isinstance(dask_object, dd._Frame):
        # Dask dataframes expose their partitions directly.
        return dask_object.get_partition(0)
    if not isinstance(dask_object, da.Array):
        # Anything that is not a dask collection passes through untouched.
        return dask_object
    if dask_object.ndim > 1 and dask_object.numblocks[-1] != 1:
        raise NotImplementedError(
            "IID estimators require that the array "
            "blocked only along the first axis. "
            "Rechunk your array before fitting."
        )
    # Shape of the leading chunk: first chunk along axis 0 (and axis 1
    # for 2-D arrays).
    first_shape = (dask_object.chunks[0][0],)
    if dask_object.ndim > 1:
        first_shape += (dask_object.chunks[1][0],)
    return da.from_delayed(
        dask_object.to_delayed().flatten()[0], first_shape, dask_object.dtype
    )
"def",
"_first_block",
"(",
"dask_object",
")",
":",
"if",
"isinstance",
"(",
"dask_object",
",",
"da",
".",
"Array",
")",
":",
"if",
"dask_object",
".",
"ndim",
">",
"1",
"and",
"dask_object",
".",
"numblocks",
"[",
"-",
"1",
"]",
"!=",
"1",
":",
"r... | Extract the first block / partition from a dask object | [
"Extract",
"the",
"first",
"block",
"/",
"partition",
"from",
"a",
"dask",
"object"
] | python | train |
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
  """
  This is the core function for coursier resolve.
  Validation strategy:
  1. All targets are going through the `invalidated` to get fingerprinted in the target level.
  No cache is fetched at this stage because it is disabled.
  2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
  are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
  Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
  each coursier run happened within that context.
  Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
  Currently it is disabled due to absolute paths in the coursier results.
  :param targets: a collection of targets to do 3rdparty resolve against
  :param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
  :param sources: if True, fetch sources for 3rdparty
  :param javadoc: if True, fetch javadoc for 3rdparty
  :param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
  :return: n/a
  """
  manager = JarDependencyManagement.global_instance()
  # Targets are grouped by their managed-dependency artifact set; each group
  # is resolved independently below.
  jar_targets = manager.targets_by_artifact_set(targets)
  executor = executor or SubprocessExecutor(DistributionLocator.cached())
  if not isinstance(executor, Executor):
    raise ValueError('The executor argument must be an Executor instance, given {} of type {}'.format(
      executor, type(executor)))
  for artifact_set, target_subset in jar_targets.items():
    # TODO(wisechengyi): this is the only place we are using IvyUtil method, which isn't specific to ivy really.
    raw_jar_deps, global_excludes = IvyUtils.calculate_classpath(target_subset)
    # ['sources'] * False = [], ['sources'] * True = ['sources']
    confs_for_fingerprint = ['sources'] * sources + ['javadoc'] * javadoc
    fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)
    compile_classpath.add_excludes_for_targets(target_subset)
    with self.invalidated(target_subset,
                          invalidate_dependents=False,
                          silent=False,
                          fingerprint_strategy=fp_strategy) as invalidation_check:
      if not invalidation_check.all_vts:
        continue
      # Fingerprint the whole subset together: resolution is context
      # sensitive, so artifacts live under the VersionedTargetSet's
      # results_dir rather than per-target.
      resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
      vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
      pants_jar_base_dir = self._prepare_workdir()
      coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir
      # If a report is requested, do not proceed with loading validated result.
      if not self.get_options().report:
        # Check each individual target without context first
        # If the individuals are valid, check them as a VersionedTargetSet
        if not invalidation_check.invalid_vts and resolve_vts.valid:
          # Load up from the results dir
          success = self._load_from_results_dir(compile_classpath, vt_set_results_dir,
                                                coursier_cache_dir, invalidation_check, pants_jar_base_dir)
          if success:
            return
      jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(raw_jar_deps,
                                                                             artifact_set,
                                                                             manager)
      results = self._get_result_from_coursier(jars_to_resolve, global_excludes, pinned_coords,
                                               coursier_cache_dir, sources, javadoc, executor)
      for conf, result_list in results.items():
        for result in result_list:
          self._load_json_result(conf, compile_classpath, coursier_cache_dir, invalidation_check,
                                 pants_jar_base_dir, result, self._override_classifiers_for_conf(conf))
      # Persist this run's raw results so a later valid run can short-circuit
      # through _load_from_results_dir above.
      self._populate_results_dir(vt_set_results_dir, results)
      resolve_vts.update()
"def",
"resolve",
"(",
"self",
",",
"targets",
",",
"compile_classpath",
",",
"sources",
",",
"javadoc",
",",
"executor",
")",
":",
"manager",
"=",
"JarDependencyManagement",
".",
"global_instance",
"(",
")",
"jar_targets",
"=",
"manager",
".",
"targets_by_artif... | This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
each coursier run happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a | [
"This",
"is",
"the",
"core",
"function",
"for",
"coursier",
"resolve",
"."
] | python | train |
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
    """
    Apply a function to each element in an iterable and return a result list.

    Fix: the worker pool is now always closed (and its processes joined)
    even when ``func`` raises inside ``map``; previously a failing map
    leaked the worker processes.

    :param func: A function that returns a value
    :param iterable: A list or set of elements to be passed to the func as the singular parameter
    :param process_name: Name of the process, for printing purposes only
    :param cpus: Number of CPUs
    :return: Result list
    """
    with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
        pool = Pool(cpus)
        try:
            vals = pool.map(func, iterable)
        finally:
            # Release worker processes whether or not mapping succeeded.
            pool.close()
            pool.join()
        return vals
"def",
"pool_process",
"(",
"func",
",",
"iterable",
",",
"process_name",
"=",
"'Pool processing'",
",",
"cpus",
"=",
"cpu_count",
"(",
")",
")",
":",
"with",
"Timer",
"(",
"'\\t{0} ({1}) completed in'",
".",
"format",
"(",
"process_name",
",",
"str",
"(",
"... | Apply a function to each element in an iterable and return a result list.
:param func: A function that returns a value
:param iterable: A list or set of elements to be passed to the func as the singular parameter
:param process_name: Name of the process, for printing purposes only
:param cpus: Number of CPUs
:return: Result list | [
"Apply",
"a",
"function",
"to",
"each",
"element",
"in",
"an",
"iterable",
"and",
"return",
"a",
"result",
"list",
"."
] | python | train |
def encry_decry_chunk(chunk, key, algo, bool_encry, assoc_data):
    """
    When bool_encry is True, encrypt a chunk of the file with the key and a randomly generated nonce. When it is False,
    the function extract the nonce from the cipherchunk (first 16 bytes), and decrypt the rest of the chunk.

    :param chunk: a chunk in bytes to encrypt or decrypt.
    :param key: a 32 bytes key in bytes.
    :param algo: a string of algorithm. Can be "srp" , "AES" or "twf"
    :param bool_encry: if bool_encry is True, chunk is encrypted. Else, it will be decrypted.
    :param assoc_data: bytes string of additional data for GCM Authentication.
    :return: if bool_encry is True, corresponding nonce + cipherchunk else, a decrypted chunk.
    """
    # Botan provides the authenticated cipher; `encrypt` selects direction.
    engine = botan.cipher(algo=algo, encrypt=bool_encry)
    engine.set_key(key=key)
    engine.set_assoc_data(assoc_data)
    if bool_encry is True:
        # Fresh nonce per chunk, prepended so decryption can recover it.
        nonce = generate_nonce_timestamp()
        engine.start(nonce=nonce)
        return nonce + engine.finish(chunk)
    else:
        # Encrypted chunk layout: nonce || ciphertext + GCM tag.
        nonce = chunk[:__nonce_length__]
        encryptedchunk = chunk[__nonce_length__:__nonce_length__ + __gcmtag_length__ + __chunk_size__]
        engine.start(nonce=nonce)
        decryptedchunk = engine.finish(encryptedchunk)
        # NOTE(review): relies on botan returning b"" when the GCM tag does
        # not authenticate — confirm against the botan binding in use.
        if decryptedchunk == b"":
            raise Exception("Integrity failure: Invalid passphrase or corrupted data")
        return decryptedchunk
"def",
"encry_decry_chunk",
"(",
"chunk",
",",
"key",
",",
"algo",
",",
"bool_encry",
",",
"assoc_data",
")",
":",
"engine",
"=",
"botan",
".",
"cipher",
"(",
"algo",
"=",
"algo",
",",
"encrypt",
"=",
"bool_encry",
")",
"engine",
".",
"set_key",
"(",
"... | When bool_encry is True, encrypt a chunk of the file with the key and a randomly generated nonce. When it is False,
the function extract the nonce from the cipherchunk (first 16 bytes), and decrypt the rest of the chunk.
:param chunk: a chunk in bytes to encrypt or decrypt.
:param key: a 32 bytes key in bytes.
:param algo: a string of algorithm. Can be "srp" , "AES" or "twf"
:param bool_encry: if bool_encry is True, chunk is encrypted. Else, it will be decrypted.
:param assoc_data: bytes string of additional data for GCM Authentication.
:return: if bool_encry is True, corresponding nonce + cipherchunk else, a decrypted chunk. | [
"When",
"bool_encry",
"is",
"True",
"encrypt",
"a",
"chunk",
"of",
"the",
"file",
"with",
"the",
"key",
"and",
"a",
"randomly",
"generated",
"nonce",
".",
"When",
"it",
"is",
"False",
"the",
"function",
"extract",
"the",
"nonce",
"from",
"the",
"cipherchun... | python | train |
def do_args(self, modargs, send, nick, target, source, name, msgtype):
    """Handle the various args that modules need."""
    # Everything a module hook is allowed to request, keyed by name.
    available = {
        'nick': nick,
        'handler': self,
        'db': None,
        'config': self.config,
        'source': source,
        'name': name,
        'type': msgtype,
        'botnick': self.connection.real_nickname,
        'target': target if target[0] == "#" else "private",
        'do_kick': lambda target, nick, msg: self.do_kick(send, target, nick, msg),
        'is_admin': lambda nick: self.is_admin(send, nick),
        'abuse': lambda nick, limit, cmd: self.abusecheck(send, nick, target, limit, cmd)
    }
    for requested in modargs:
        if requested not in available:
            raise Exception("Invalid Argument: %s" % requested)
    return {requested: available[requested] for requested in modargs}
"def",
"do_args",
"(",
"self",
",",
"modargs",
",",
"send",
",",
"nick",
",",
"target",
",",
"source",
",",
"name",
",",
"msgtype",
")",
":",
"realargs",
"=",
"{",
"}",
"args",
"=",
"{",
"'nick'",
":",
"nick",
",",
"'handler'",
":",
"self",
",",
... | Handle the various args that modules need. | [
"Handle",
"the",
"various",
"args",
"that",
"modules",
"need",
"."
] | python | train |
def get_vnetwork_portgroups_output_instance_id(self, **kwargs):
    """Auto Generated Code
    """
    # Build <get_vnetwork_portgroups><output><instance-id>..</instance-id>
    # and hand the tree to the callback (defaults to self._callback).
    callback = kwargs.pop('callback', self._callback)
    root = ET.Element("get_vnetwork_portgroups")
    output = ET.SubElement(root, "output")
    instance_node = ET.SubElement(output, "instance-id")
    instance_node.text = kwargs.pop('instance_id')
    return callback(root)
"def",
"get_vnetwork_portgroups_output_instance_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_vnetwork_portgroups",
"=",
"ET",
".",
"Element",
"(",
"\"get_vnetwork_portgroups\"",
")",
"config... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
def _query(self, data_category, resource_category, field, request, step, isotime=None):
    """
    Request and return data from DataPoint RESTful API.
    """
    # Optional time filter; empty string when no isotime is supplied.
    time_param = "time=" + isotime if isotime is not None else ""
    query_string = "?" + "&".join(["res=" + step, time_param, "key=" + self.key])
    rest_url = "/".join([HOST, data_category, resource_category, field, DATA_TYPE, request])
    response = url_lib.urlopen(rest_url + query_string)
    return response.read()
"def",
"_query",
"(",
"self",
",",
"data_category",
",",
"resource_category",
",",
"field",
",",
"request",
",",
"step",
",",
"isotime",
"=",
"None",
")",
":",
"rest_url",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"HOST",
",",
"data_category",
",",
"resource_... | Request and return data from DataPoint RESTful API. | [
"Request",
"and",
"return",
"data",
"from",
"DataPoint",
"RESTful",
"API",
"."
] | python | train |
def hugoniot_p(rho, rho0, c0, s):
    """
    calculate pressure along a Hugoniot

    :param rho: density in g/cm^3
    :param rho0: density at 1 bar in g/cm^3
    :param c0: velocity at 1 bar in km/s
    :param s: slope of the velocity change
    :return: pressure in GPa
    """
    # Rankine-Hugoniot pressure: rho0 * c0^2 * eta / (1 - s*eta)^2
    strain = 1. - (rho0 / rho)
    denominator = np.power((1. - s * strain), 2.)
    return rho0 * c0 * c0 * strain / denominator
"def",
"hugoniot_p",
"(",
"rho",
",",
"rho0",
",",
"c0",
",",
"s",
")",
":",
"eta",
"=",
"1.",
"-",
"(",
"rho0",
"/",
"rho",
")",
"Ph",
"=",
"rho0",
"*",
"c0",
"*",
"c0",
"*",
"eta",
"/",
"np",
".",
"power",
"(",
"(",
"1.",
"-",
"s",
"*",... | calculate pressure along a Hugoniot
:param rho: density in g/cm^3
:param rho0: density at 1 bar in g/cm^3
:param c0: velocity at 1 bar in km/s
:param s: slope of the velocity change
:return: pressure in GPa | [
"calculate",
"pressure",
"along",
"a",
"Hugoniot"
] | python | train |
def classification(request):
    """
    Adds classification context to views.
    """
    def setting(attr, default):
        # Each entry falls back to a sane default when unset in settings.
        return getattr(settings, attr, default)

    return {
        'classification_text': setting('CLASSIFICATION_TEXT', 'UNCLASSIFIED'),
        'classification_text_color': setting('CLASSIFICATION_TEXT_COLOR', 'white'),
        'classification_background_color': setting('CLASSIFICATION_BACKGROUND_COLOR', 'green'),
        'classification_banner_enabled': setting('CLASSIFICATION_BANNER_ENABLED', True),
        'classification_link': setting('CLASSIFICATION_LINK', None),
    }
"def",
"classification",
"(",
"request",
")",
":",
"ctx",
"=",
"{",
"'classification_text'",
":",
"getattr",
"(",
"settings",
",",
"'CLASSIFICATION_TEXT'",
",",
"'UNCLASSIFIED'",
")",
",",
"'classification_text_color'",
":",
"getattr",
"(",
"settings",
",",
"'CLAS... | Adds classification context to views. | [
"Adds",
"classification",
"context",
"to",
"views",
"."
] | python | train |
def conversation(self, scope=None):
    """
    Get a single conversation, by scope, that this relation is currently handling.

    If the scope is not given, the correct scope is inferred by the current
    hook execution context. If there is no current hook execution context, it
    is assumed that there is only a single global conversation scope for this
    relation. If this relation's scope is not global and there is no current
    hook execution context, then an error is raised.
    """
    if scope is None:
        if self.scope is scopes.UNIT:
            scope = hookenv.remote_unit()
        elif self.scope is scopes.SERVICE:
            scope = hookenv.remote_service_name()
        else:
            scope = self.scope
    if scope is None:
        raise ValueError('Unable to determine default scope: no current hook or global scope')
    # Linear scan; conversation scopes are unique within a relation.
    match = next((c for c in self._conversations if c.scope == scope), None)
    if match is None:
        raise ValueError("Conversation with scope '%s' not found" % scope)
    return match
"def",
"conversation",
"(",
"self",
",",
"scope",
"=",
"None",
")",
":",
"if",
"scope",
"is",
"None",
":",
"if",
"self",
".",
"scope",
"is",
"scopes",
".",
"UNIT",
":",
"scope",
"=",
"hookenv",
".",
"remote_unit",
"(",
")",
"elif",
"self",
".",
"sc... | Get a single conversation, by scope, that this relation is currently handling.
If the scope is not given, the correct scope is inferred by the current
hook execution context. If there is no current hook execution context, it
is assume that there is only a single global conversation scope for this
relation. If this relation's scope is not global and there is no current
hook execution context, then an error is raised. | [
"Get",
"a",
"single",
"conversation",
"by",
"scope",
"that",
"this",
"relation",
"is",
"currently",
"handling",
"."
] | python | train |
def check_token(request):
    """
    Resource check is token valid.
    ---
    request_serializer: serializers.CheckToken
    type:
      username:
        required: true
        type: string
        description: token related user
    responseMessages:
        - code: 200
          message: Token is valid
        - code: 400
          message: Token is not valid
        - code: 401
          message: Unauthorized
    """
    # NOTE: the docstring above is swagger/YAML API metadata — keep intact.
    # Validation raises a DRF ValidationError (HTTP 400) when the token is
    # missing or unknown.
    serializer = serializers.CheckToken(data=request.data)
    serializer.is_valid(raise_exception=True)
    # The serializer resolves the raw token string into a token object with
    # an associated user.
    token = serializer.validated_data['token']
    logger.debug('Token correct', extra={'token': token, 'username': token.user.username})
    return Response({'username': token.user.username})
"def",
"check_token",
"(",
"request",
")",
":",
"serializer",
"=",
"serializers",
".",
"CheckToken",
"(",
"data",
"=",
"request",
".",
"data",
")",
"serializer",
".",
"is_valid",
"(",
"raise_exception",
"=",
"True",
")",
"token",
"=",
"serializer",
".",
"v... | Resource check is token valid.
---
request_serializer: serializers.CheckToken
type:
username:
required: true
type: string
description: token related user
responseMessages:
- code: 200
message: Token is valid
- code: 400
message: Token is not valid
- code: 401
message: Unauthorized | [
"Resource",
"check",
"is",
"token",
"valid",
".",
"---",
"request_serializer",
":",
"serializers",
".",
"CheckToken",
"type",
":",
"username",
":",
"required",
":",
"true",
"type",
":",
"string",
"description",
":",
"token",
"related",
"user"
] | python | train |
def chunk(self, seek=None, lenient=False):
    """
    Read the next PNG chunk from the input file

    returns a (*chunk_type*, *data*) tuple. *chunk_type* is the chunk's
    type as a byte string (all PNG chunk types are 4 bytes long).
    *data* is the chunk's data content, as a byte string.

    If the optional `seek` argument is
    specified then it will keep reading chunks until it either runs
    out of file or finds the chunk_type specified by the argument. Note
    that in general the order of chunks in PNGs is unspecified, so
    using `seek` can cause you to miss chunks.

    If the optional `lenient` argument evaluates to `True`,
    checksum failures will raise warnings rather than exceptions.
    """
    self.validate_signature()
    while True:
        # http://www.w3.org/TR/PNG/#5Chunk-layout
        if not self.atchunk:
            self.atchunk = self.chunklentype()
        length, chunk_type = self.atchunk
        self.atchunk = None
        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError('Chunk %s too short for required %i octets.'
                             % (chunk_type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            # BUG FIX: the chunk type was previously passed as a second
            # positional argument instead of being %-interpolated, so the
            # message never contained the chunk type.
            raise ChunkError('Chunk %s too short for checksum.' % chunk_type)
        if seek and chunk_type != seek:
            # NOTE(review): chunks skipped while seeking are not CRC-checked.
            continue
        verify = zlib.crc32(strtobytes(chunk_type))
        verify = zlib.crc32(data, verify)
        # Whether the output from zlib.crc32 is signed or not varies
        # according to hideous implementation details, see
        # http://bugs.python.org/issue1202 .
        # We coerce it to be positive here (in a way which works on
        # Python 2.3 and older).
        verify &= 2**32 - 1
        verify = struct.pack('!I', verify)
        if checksum != verify:
            (a, ) = struct.unpack('!I', checksum)
            (b, ) = struct.unpack('!I', verify)
            message = "Checksum error in %s chunk: 0x%08X != 0x%08X." %\
                      (chunk_type, a, b)
            if lenient:
                warnings.warn(message, RuntimeWarning)
            else:
                raise ChunkError(message)
        return chunk_type, data
"def",
"chunk",
"(",
"self",
",",
"seek",
"=",
"None",
",",
"lenient",
"=",
"False",
")",
":",
"self",
".",
"validate_signature",
"(",
")",
"while",
"True",
":",
"# http://www.w3.org/TR/PNG/#5Chunk-layout",
"if",
"not",
"self",
".",
"atchunk",
":",
"self",
... | Read the next PNG chunk from the input file
returns a (*chunk_type*, *data*) tuple. *chunk_type* is the chunk's
type as a byte string (all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the chunk_type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions. | [
"Read",
"the",
"next",
"PNG",
"chunk",
"from",
"the",
"input",
"file"
] | python | train |
def get_status(self):
    """Get the details from the bulb."""
    endpoint = '{}/{}/'.format(self.resource, URI)
    try:
        response = requests.get(endpoint, timeout=self.timeout)
        raw_data = response.json()
        # Doesn't always work !!!!!
        # self._mac = next(iter(self.raw_data))
        self.data = raw_data[self._mac]
    except (requests.exceptions.ConnectionError, ValueError):
        raise exceptions.MyStromConnectionError()
    return self.data
"def",
"get_status",
"(",
"self",
")",
":",
"try",
":",
"request",
"=",
"requests",
".",
"get",
"(",
"'{}/{}/'",
".",
"format",
"(",
"self",
".",
"resource",
",",
"URI",
")",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"raw_data",
"=",
"request... | Get the details from the bulb. | [
"Get",
"the",
"details",
"from",
"the",
"bulb",
"."
] | python | train |
def find_by_text(text, _connection=None, page_size=100, page_number=0,
        sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
    """
    List videos whose title or description matches ``text``.
    """
    result_set = connection.ItemResultSet(
        'find_videos_by_text',
        Video,
        _connection,
        page_size,
        page_number,
        sort_by,
        sort_order,
        text=text,
    )
    return result_set
"def",
"find_by_text",
"(",
"text",
",",
"_connection",
"=",
"None",
",",
"page_size",
"=",
"100",
",",
"page_number",
"=",
"0",
",",
"sort_by",
"=",
"enums",
".",
"DEFAULT_SORT_BY",
",",
"sort_order",
"=",
"enums",
".",
"DEFAULT_SORT_ORDER",
")",
":",
"re... | List videos that match the ``text`` in title or description. | [
"List",
"videos",
"that",
"match",
"the",
"text",
"in",
"title",
"or",
"description",
"."
] | python | train |
def update(self, id, body):
    """Modify an existing connection.

    Args:
        id: Id of the connection to modify.
        body (dict): Fields to change and their new values.

    See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id

    Returns:
        The modified connection object.
    """
    endpoint = self._url(id)
    return self.client.patch(endpoint, data=body)
"def",
"update",
"(",
"self",
",",
"id",
",",
"body",
")",
":",
"return",
"self",
".",
"client",
".",
"patch",
"(",
"self",
".",
"_url",
"(",
"id",
")",
",",
"data",
"=",
"body",
")"
] | Modifies a connection.
Args:
id: Id of the connection.
body (dict): Specifies which fields are to be modified, and to what
values.
See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id
Returns:
The modified connection object. | [
"Modifies",
"a",
"connection",
"."
] | python | train |
def image(self, name, x=None, y=None, w=0, h=0, type='', link=''):
    """Put an image on the page.

    The image is parsed on first use and cached in ``self.images``;
    later calls reuse the cached info.  If ``w``/``h`` are 0 the
    missing dimension is computed from the image's pixel size (placed
    at 72 dpi) or from the other dimension, keeping the aspect ratio.
    When ``y`` is None the image participates in the page flow and may
    trigger an automatic page break.
    """
    if not name in self.images:
        # First use of image: pick a parser from the explicit type or,
        # failing that, from the file extension.
        if type == '':
            pos = name.rfind('.')
            if not pos:
                self.error('image file has no extension and no type was specified: ' + name)
            type = substr(name, pos + 1)
        type = type.lower()
        if type == 'jpg' or type == 'jpeg':
            info = self._parsejpg(name)
        elif type == 'png':
            info = self._parsepng(name)
        else:
            # The extension may be wrong while the header is fine, so
            # probe every known parser before giving up.
            succeed_parsing = False
            parsing_functions = [self._parsejpg, self._parsepng, self._parsegif]
            for pf in parsing_functions:
                try:
                    info = pf(name)
                    succeed_parsing = True
                    break
                except Exception:
                    # Probe failed; try the next parser.
                    pass
            # Last resort: look for a dedicated _parse<type> method.
            if not succeed_parsing:
                mtd = '_parse' + type
                if not hasattr(self, mtd):
                    self.error('Unsupported image type: ' + type)
                info = getattr(self, mtd)(name)
            # BUGFIX: this fallback lookup used to be repeated
            # unconditionally after the guard above, re-parsing (or
            # erroring on) images that a probe parser had already
            # decoded successfully.
        info['i'] = len(self.images) + 1
        self.images[name] = info
    else:
        info = self.images[name]
    # Automatic width and height calculation if needed.
    if w == 0 and h == 0:
        # Put image at 72 dpi.
        w = info['w'] / self.k
        h = info['h'] / self.k
    elif w == 0:
        w = h * info['w'] / info['h']
    elif h == 0:
        h = w * info['h'] / info['w']
    # Flowing mode: place at the cursor and advance it.
    if y is None:
        if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()):
            # Automatic page break: keep the horizontal position.
            x = self.x
            self.add_page(self.cur_orientation)
            self.x = x
        y = self.y
        self.y += h
    if x is None:
        x = self.x
    self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q', w * self.k, h * self.k, x * self.k, (self.h - (y + h)) * self.k, info['i']))
    if link:
        self.link(x, y, w, h, link)
"def",
"image",
"(",
"self",
",",
"name",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"w",
"=",
"0",
",",
"h",
"=",
"0",
",",
"type",
"=",
"''",
",",
"link",
"=",
"''",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"images",
":... | Put an image on the page | [
"Put",
"an",
"image",
"on",
"the",
"page"
] | python | train |
async def handle_client_hello(self, client_addr, _: ClientHello):
    """Register a newly connected client and push it the list of
    available containers.

    Args:
        client_addr: address of the client that sent the hello.
        _: the ClientHello message itself (its content is unused).
    """
    self._logger.info("New client connected %s", client_addr)
    registered = self._registered_clients
    registered.add(client_addr)
    await self.send_container_update_to_client([client_addr])
"async",
"def",
"handle_client_hello",
"(",
"self",
",",
"client_addr",
",",
"_",
":",
"ClientHello",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"New client connected %s\"",
",",
"client_addr",
")",
"self",
".",
"_registered_clients",
".",
"add",
"(... | Handle an ClientHello message. Send available containers to the client | [
"Handle",
"an",
"ClientHello",
"message",
".",
"Send",
"available",
"containers",
"to",
"the",
"client"
] | python | train |
def create_index(self, indexname=None, index_conf=None):
    '''Create the index.

    If `indexname` is omitted the class default
    (:py:attr:`DB.index_name`) is used as the new index name.

    :param indexname: name for the new index; defaults to
        :py:attr:`DB.index_name`.
    :param index_conf: configuration to be used in index creation;
        the default index configuration is used when omitted.
    :raises Exception: if the index already exists.
    '''
    target = self.index_name if indexname is None else indexname
    log.debug("Creating new index: '{0}'".format(target))
    conf = index_conf
    if conf is None:
        conf = {
            'settings': self.settings,
            'mappings': {'book': {'properties': self.properties}},
        }
    try:
        self.es.indices.create(index=target, body=conf)
    except TransportError as te:
        # Any transport failure other than "already exists" is
        # propagated untouched.
        if not te.error.startswith("IndexAlreadyExistsException"):
            raise
        raise Exception("Cannot create index '{}', already exists".format(target))
"def",
"create_index",
"(",
"self",
",",
"indexname",
"=",
"None",
",",
"index_conf",
"=",
"None",
")",
":",
"if",
"indexname",
"is",
"None",
":",
"indexname",
"=",
"self",
".",
"index_name",
"log",
".",
"debug",
"(",
"\"Creating new index: '{0}'\"",
".",
... | Create the index
Create the index with given configuration.
If `indexname` is provided it will be used as the new index name
instead of the class one (:py:attr:`DB.index_name`)
:param index_conf: configuration to be used in index creation. If this
is not specified the default index configuration will be used.
:raises Exception: if the index already exists. | [
"Create",
"the",
"index"
] | python | train |
def cache(opts, serial):
    '''
    Return the cache modules.

    .. note::
        The docstring previously claimed this returned the *returner*
        modules (copy-paste from the returner loader); it loads the
        ``cache`` modules.

    :param opts: the Salt options dictionary
    :param serial: serializer handed to loaded modules via
        ``__context__['serial']``
    '''
    pack = {'__opts__': opts, '__context__': {'serial': serial}}
    module_dirs = _module_dirs(opts, 'cache', 'cache')
    return LazyLoader(module_dirs, opts, tag='cache', pack=pack)
"def",
"cache",
"(",
"opts",
",",
"serial",
")",
":",
"return",
"LazyLoader",
"(",
"_module_dirs",
"(",
"opts",
",",
"'cache'",
",",
"'cache'",
")",
",",
"opts",
",",
"tag",
"=",
"'cache'",
",",
"pack",
"=",
"{",
"'__opts__'",
":",
"opts",
",",
"'__c... | Returns the returner modules | [
"Returns",
"the",
"returner",
"modules"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.