| repo (string, 7–54 chars) | path (string, 4–192 chars) | url (string, 87–284 chars) | code (string, 78–104k chars) | code_tokens (list) | docstring (string, 1–46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) |
|---|---|---|---|---|---|---|---|---|
vallis/libstempo
|
libstempo/plot.py
|
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L7-L38
|
def plotres(psr,deleted=False,group=None,**kwargs):
"""Plot residuals, compute unweighted rms residual."""
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
|
[code_tokens omitted: token-level duplicate of the code above] |
Plot residuals, compute unweighted rms residual.
|
[docstring_tokens omitted: token-level duplicate of the docstring above] |
python
|
train
|
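A minimal sketch of the rms figure that `plotres()` reports in the plot title, using only numpy; the sample residual values are illustrative:

```python
import numpy as np

# Residuals in seconds (illustrative values); plotres() gets these from psr.residuals().
residuals = np.array([1.2e-6, -0.8e-6, 0.5e-6])

# Unweighted root-mean-square residual, converted to microseconds,
# mirroring the expression math.sqrt(N.mean(res**2)) / 1e-6 above.
rms_us = np.sqrt(np.mean(residuals ** 2)) / 1e-6
print("rms res = {0:.2f} us".format(rms_us))
```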
openai/baselines
|
baselines/common/misc_util.py
|
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L123-L134
|
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
|
[code_tokens omitted] |
Update the estimate.
Parameters
----------
new_val: float
new observed value of the estimated quantity.
|
[docstring_tokens omitted] |
python
|
valid
|
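`update()` implements an exponential moving average with smoothing factor `_gamma`. A self-contained sketch; the `__init__` shown here is an assumption, since only `update` appears in the row above:

```python
class RunningAverage:
    """Exponential moving average; __init__ is assumed, only update() is shown above."""

    def __init__(self, gamma):
        self._gamma = gamma   # weight kept for the old estimate
        self._value = None    # no estimate until the first observation

    def update(self, new_val):
        if self._value is None:
            self._value = new_val
        else:
            self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val

avg = RunningAverage(gamma=0.9)
for v in [1.0, 2.0, 3.0]:
    avg.update(v)
print(avg._value)  # 0.9 * (0.9 * 1.0 + 0.1 * 2.0) + 0.1 * 3.0 = 1.29
```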
romanz/trezor-agent
|
libagent/gpg/keyring.py
|
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L84-L94
|
def unescape(s):
"""Unescape ASSUAN message (0xAB <-> '%AB')."""
s = bytearray(s)
i = 0
while i < len(s):
if s[i] == ord('%'):
hex_bytes = bytes(s[i+1:i+3])
value = int(hex_bytes.decode('ascii'), 16)
s[i:i+3] = [value]
i += 1
return bytes(s)
|
[code_tokens omitted] |
Unescape ASSUAN message (0xAB <-> '%AB').
|
[docstring_tokens omitted] |
python
|
train
|
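A hedged round-trip check for `unescape()`: percent escapes collapse back to raw bytes (assumes the function above is in scope):

```python
# '%25' is the escaped form of '%', '%0A' of the newline byte.
assert unescape(b'foo%25bar') == b'foo%bar'
assert unescape(b'a%0Ab') == b'a\nb'
print(unescape(b'OK%20data'))  # b'OK data'
```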
alpha-xone/xbbg
|
xbbg/core/utils.py
|
https://github.com/alpha-xone/xbbg/blob/70226eb19a72a08144b5d8cea9db4913200f7bc5/xbbg/core/utils.py#L12-L47
|
def flatten(iterable, maps=None, unique=False) -> list:
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
|
[code_tokens omitted] |
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
|
[docstring_tokens omitted] |
python
|
valid
|
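`flatten()` leans on a helper `_to_gen_` that this row does not include. A plausible sketch, an assumption rather than the library's actual code, is a recursive generator over nested iterables:

```python
def _to_gen_(iterable):
    """Yield leaf items from arbitrarily nested iterables (hypothetical helper)."""
    for item in iterable:
        if hasattr(item, '__iter__') and not isinstance(item, str):
            # Recurse into nested containers, but treat strings as leaves.
            yield from _to_gen_(item)
        else:
            yield item

print(list(_to_gen_(['ab', 'cd', ['xy', 'zz']])))  # ['ab', 'cd', 'xy', 'zz']
```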
ajenhl/tacl
|
tacl/jitc.py
|
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/jitc.py#L279-L307
|
def _process_intersection(self, yes_work, maybe_work, work_dir,
ym_results_path, stats):
"""Returns statistics on the intersection between `yes_work` and
`maybe_work`.
:param yes_work: name of work for which stats are collected
:type yes_work: `str`
:param maybe_work: name of work being compared with `yes_work`
:type maybe_work: `str`
:param work_dir: directory where generated files are saved
:type work_dir: `str`
:param ym_results_path: path to results intersecting
`yes_work` with `maybe_work`
:type ym_results_path: `str`
:param stats: data structure to hold the statistical data
:type stats: `dict`
:rtype: `dict`
"""
catalogue = {yes_work: self._no_label, maybe_work: self._maybe_label}
self._run_query(ym_results_path, self._store.intersection, [catalogue],
False)
# Though this is the intersection only between "yes" and
# "maybe", the percentage of overlap is added to the "common"
# stat rather than "shared". Then, in _process_diff, the
# percentage of difference between "yes" and "no" can be
# removed from "common" and added to "shared".
return self._update_stats('intersect', work_dir, ym_results_path,
yes_work, maybe_work, stats, COMMON, UNIQUE)
|
[code_tokens omitted] |
Returns statistics on the intersection between `yes_work` and
`maybe_work`.
:param yes_work: name of work for which stats are collected
:type yes_work: `str`
:param maybe_work: name of work being compared with `yes_work`
:type maybe_work: `str`
:param work_dir: directory where generated files are saved
:type work_dir: `str`
:param ym_results_path: path to results intersecting
`yes_work` with `maybe_work`
:type ym_results_path: `str`
:param stats: data structure to hold the statistical data
:type stats: `dict`
:rtype: `dict`
|
[docstring_tokens omitted] |
python
|
train
|
apache/spark
|
python/pyspark/sql/functions.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1597-L1610
|
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
    .. note:: The position is not zero-based, but a 1-based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
|
[code_tokens omitted] |
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero-based, but a 1-based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
|
[docstring_tokens omitted] |
python
|
train
|
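The 1-based indexing called out in the note above, compared against plain Python slicing (which is 0-based); a small sketch independent of Spark:

```python
s = 'abcd'
pos, length = 1, 2                     # Spark: substring(df.s, 1, 2) -> 'ab'
print(s[pos - 1 : pos - 1 + length])   # 'ab' -- Python needs the 0-based shift
```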
PaulHancock/Aegean
|
AegeanTools/regions.py
|
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/regions.py#L137-L151
|
def add_pixels(self, pix, depth):
"""
Add one or more HEALPix pixels to this region.
Parameters
----------
pix : int or iterable
The pixels to be added
depth : int
The depth at which the pixels are added.
"""
if depth not in self.pixeldict:
self.pixeldict[depth] = set()
self.pixeldict[depth].update(set(pix))
|
[code_tokens omitted] |
Add one or more HEALPix pixels to this region.
Parameters
----------
pix : int or iterable
The pixels to be added
depth : int
The depth at which the pixels are added.
|
[docstring_tokens omitted] |
python
|
train
|
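A hedged usage sketch for `add_pixels()`; the minimal stand-in class is an assumption, since only the method appears above. Note that despite the docstring's "int or iterable", `set(pix)` requires an iterable, so a bare int would raise TypeError:

```python
class Region:
    """Minimal stand-in holding only the pixeldict used by add_pixels()."""

    def __init__(self):
        self.pixeldict = {}

    def add_pixels(self, pix, depth):
        if depth not in self.pixeldict:
            self.pixeldict[depth] = set()
        self.pixeldict[depth].update(set(pix))

r = Region()
r.add_pixels([1, 2, 2, 3], depth=5)  # duplicates collapse into the set
print(r.pixeldict)                   # {5: {1, 2, 3}}
```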
GuiltyTargets/ppi-network-annotation
|
src/ppi_network_annotation/model/attribute_network.py
|
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L74-L85
|
def _add_disease_association_attributes(self, att_ind_start, att_mappings):
"""Add disease association information to the attribute mapping dictionary.
:param int att_ind_start: Start index for enumerating the attributes.
:param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes.
"""
disease_mappings = self.get_disease_mappings(att_ind_start)
for vertex in self.graph.vs:
assoc_diseases = vertex["associated_diseases"]
if assoc_diseases is not None:
assoc_disease_ids = [disease_mappings[disease] for disease in assoc_diseases]
att_mappings[vertex.index].extend(assoc_disease_ids)
|
[code_tokens omitted] |
Add disease association information to the attribute mapping dictionary.
:param int att_ind_start: Start index for enumerating the attributes.
:param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes.
|
[docstring_tokens omitted] |
python
|
train
|
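A hedged sketch of the mapping step inside `_add_disease_association_attributes`, with plain dicts standing in for the igraph vertices and for the helper's output (all values illustrative):

```python
disease_mappings = {'asthma': 100, 'copd': 101}  # hypothetical enumerated ids
att_mappings = {0: [], 1: []}
vertices = [
    {'index': 0, 'associated_diseases': ['asthma']},
    {'index': 1, 'associated_diseases': None},   # skipped by the None check
]
for vertex in vertices:
    assoc_diseases = vertex['associated_diseases']
    if assoc_diseases is not None:
        ids = [disease_mappings[d] for d in assoc_diseases]
        att_mappings[vertex['index']].extend(ids)
print(att_mappings)  # {0: [100], 1: []}
```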
yakupadakli/python-unsplash
|
unsplash/models.py
|
https://github.com/yakupadakli/python-unsplash/blob/6e43dce3225237e1b8111fd475fb98b1ea33972c/unsplash/models.py#L18-L25
|
def parse_list(cls, data):
"""Parse a list of JSON objects into a result set of model instances."""
results = ResultSet()
data = data or []
for obj in data:
if obj:
results.append(cls.parse(obj))
return results
|
[code_tokens omitted] |
Parse a list of JSON objects into a result set of model instances.
|
[docstring_tokens omitted] |
python
|
train
|
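A hedged usage sketch for `parse_list()`: falsy entries are skipped and the rest pass through `cls.parse`. The `Photo` stand-in and the plain list in place of `ResultSet` are assumptions:

```python
class Photo:
    @classmethod
    def parse(cls, obj):
        inst = cls()
        inst.__dict__.update(obj)  # minimal stand-in for the real parser
        return inst

data = [{'id': 1}, None, {'id': 2}]          # None is dropped by the `if obj` guard
photos = [Photo.parse(o) for o in data if o]
print([p.id for p in photos])                # [1, 2]
```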
rootpy/rootpy
|
rootpy/plotting/hist.py
|
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L1260-L1273
|
def set_sum_w2(self, w, ix, iy=0, iz=0):
"""
Sets the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
self.GetSumw2().SetAt(w, idx)
|
[code_tokens omitted] |
Sets the true number of entries in the bin weighted by w^2
|
[docstring_tokens omitted] |
python
|
train
|
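`set_sum_w2()` linearizes the 3D bin coordinates over axis lengths that include overflow bins: idx = xl*yl*iz + xl*iy + ix. A standalone sketch of that layout (sizes illustrative):

```python
def bin_index(ix, iy, iz, xl, yl):
    # Row-major layout over (iz, iy, ix) with per-axis lengths xl, yl.
    return xl * yl * iz + xl * iy + ix

xl, yl = 12, 7  # e.g. 10 x-bins and 5 y-bins, each plus 2 overflow bins
print(bin_index(ix=3, iy=2, iz=0, xl=xl, yl=yl))  # 12*7*0 + 12*2 + 3 = 27
```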
clab/dynet
|
python/dynet_viz.py
|
https://github.com/clab/dynet/blob/21cc62606b74f81bb4b11a9989a6c2bd0caa09c5/python/dynet_viz.py#L775-L947
|
def make_network_graph(compact, expression_names, lookup_names):
"""
    Make a network graph, represented as a set of nodes and a set of edges.
    The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
    The edges are represented as a dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
"""
nodes = set()
# edges = defaultdict(set) # parent -> (child, extra)
var_name_dict = dict()
if expression_names:
for e in graphviz_items: # e: Expression
if e in expression_names:
var_name_dict[e.vindex] = expression_names[e]
rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A')))
def vidx2str(vidx): return '%s%s' % ('N', vidx)
for e in graphviz_items: # e: Expression
vidx = e.vindex
f_name = e.name
args = e.args
output_dim = e.dim
input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim
children = set()
node_type = '2_regular'
if f_name == 'vecInput':
[_dim] = args
arg_strs = []
elif f_name == 'inputVector':
[_v] = args
arg_strs = []
elif f_name == 'matInput':
[_d1, _d2] = args
arg_strs = []
elif f_name == 'inputMatrix':
[_v, _d] = args
arg_strs = []
elif f_name == 'parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup_parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup':
[p, idx, update] = args
[_dim] = p.args
if vidx in var_name_dict:
name = var_name_dict[vidx]
else:
name = None
item_name = None
if lookup_names and p in expression_names:
param_name = expression_names[p]
if param_name in lookup_names:
item_name = '\\"%s\\"' % (lookup_names[param_name][idx],)
if compact:
if item_name is not None:
f_name = item_name
elif name is not None:
f_name = '%s[%s]' % (name, idx)
else:
f_name = 'lookup(%s)' % (idx)
arg_strs = []
else:
arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))]
if item_name is not None:
arg_strs.append(item_name)
vocab_size = _dim[0]
arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed'])
#children.add(vidx2str(p.vindex))
#node_type = '1_param'
elif f_name == 'RNNState':
[arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e
rnn_name = rnn_bldr_name[bldr_num]
        if bldr_type.endswith('Builder'):
            bldr_type = bldr_type[:-len('Builder')]  # strip the 'Builder' suffix
f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx)
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs = [s]
else:
arg_strs = []
children.add(vidx2str(arg.vindex))
node_type = '3_rnn_state'
else:
arg_strs = []
for arg in args:
if isinstance(arg, Expression):
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs.append(s)
children.add(vidx2str(arg.vindex))
elif isinstance(arg, float) and compact:
s = re.sub('0+$', '', '%.3f' % (arg))
if s == '0.':
s = str(arg)
arg_strs.append(s)
else:
arg_strs.append(str(arg))
# f_name = { ,
# }.get(f_name, f_name)
if compact:
f_name = { 'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'cadd': '+',
'cmul': '*',
'cdiv': '/',
'scalarsub': '-',
'concatenate': 'cat',
'esum': 'sum',
'emax': 'max',
'emin': 'min',
}.get(f_name, f_name)
if arg_strs:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
elif f_name == 'add':
[a,b] = arg_strs
str_repr = '%s + %s' % (a,b)
elif f_name == 'sub':
[a,b] = arg_strs
str_repr = '%s - %s' % (a,b)
elif f_name == 'mul':
[a,b] = arg_strs
str_repr = '%s * %s' % (a,b)
elif f_name == 'div':
[a,b] = arg_strs
str_repr = '%s / %s' % (a,b)
elif f_name == 'neg':
[a,] = arg_strs
str_repr = '-%s' % (a)
elif f_name == 'affine_transform':
str_repr = arg_strs[0]
for i in xrange(1, len(arg_strs), 2):
str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2])
else:
if arg_strs is not None:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
name = vidx2str(vidx)
var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else ''
# if show_dims:
# str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr)
label = str_repr
if not compact:
label = '%s = %s' % (var_name, label)
features = ''
# if output_dim.invalid():
# features += " [color=red,style=filled,fillcolor=red]"
# node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))
expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None
nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name))
return nodes
|
[code_tokens omitted] |
Make a network graph, represented as a set of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
The edges are represented as a dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
|
[docstring_tokens omitted] |
python
|
valid
|
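`make_network_graph()` returns a set of GVNode records whose definition is not included in this row. Judging by the constructor call, a namedtuple with these eight fields is a plausible sketch (an assumption, not necessarily the project's actual definition):

```python
from collections import namedtuple

GVNode = namedtuple(
    'GVNode',
    'name input_dim label output_dim children features node_type expr_name')

node = GVNode('N0', None, 'v0 = parameters', (3, 1), frozenset(), '', '1_param', None)
print(node.name, node.node_type)  # N0 1_param
```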
jazzband/django-axes
|
axes/attempts.py
|
https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/attempts.py#L61-L73
|
def clean_expired_user_attempts(attempt_time: datetime = None) -> int:
"""
Clean expired user attempts from the database.
"""
if settings.AXES_COOLOFF_TIME is None:
log.debug('AXES: Skipping clean for expired access attempts because no AXES_COOLOFF_TIME is configured')
return 0
threshold = get_cool_off_threshold(attempt_time)
count, _ = AccessAttempt.objects.filter(attempt_time__lt=threshold).delete()
log.info('AXES: Cleaned up %s expired access attempts from database that were older than %s', count, threshold)
return count
|
[code_tokens omitted] |
Clean expired user attempts from the database.
|
[docstring_tokens omitted] |
python
|
train
|
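`clean_expired_user_attempts()` delegates the cutoff to `get_cool_off_threshold()`, which is not shown in this row. A hedged sketch of what such a helper typically computes (an assumption, not django-axes' actual code):

```python
from datetime import datetime, timedelta

def get_cool_off_threshold(attempt_time=None, cool_off=timedelta(minutes=30)):
    # Attempts strictly older than this moment count as expired.
    reference = attempt_time or datetime.now()
    return reference - cool_off

print(get_cool_off_threshold(datetime(2020, 1, 1, 12, 0)))  # 2020-01-01 11:30:00
```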
dpursehouse/pygerrit2
|
pygerrit2/rest/__init__.py
|
https://github.com/dpursehouse/pygerrit2/blob/141031469603b33369d89c38c703390eb3786bd0/pygerrit2/rest/__init__.py#L46-L75
|
def _decode_response(response):
"""Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
"""
content_type = response.headers.get('content-type', '')
logger.debug("status[%s] content_type[%s] encoding[%s]" %
(response.status_code, content_type, response.encoding))
response.raise_for_status()
content = response.content.strip()
if response.encoding:
content = content.decode(response.encoding)
if not content:
logger.debug("no content in response")
return content
if content_type.split(';')[0] != 'application/json':
return content
if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
try:
return json.loads(content)
except ValueError:
logger.error('Invalid json content: %s', content)
raise
|
[code_tokens omitted] |
Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
|
[docstring_tokens omitted] |
python
|
train
|
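The GERRIT_MAGIC_JSON_PREFIX stripped above is Gerrit's anti-XSSI guard, the literal `)]}'` prepended to every JSON body. A standalone sketch (the exact constant, including the trailing newline, is an assumption):

```python
import json

GERRIT_MAGIC_JSON_PREFIX = ")]}'\n"  # assumed value of the constant used above

raw = GERRIT_MAGIC_JSON_PREFIX + '{"change_id": "I123"}'
if raw.startswith(GERRIT_MAGIC_JSON_PREFIX):
    raw = raw[len(GERRIT_MAGIC_JSON_PREFIX):]
print(json.loads(raw))  # {'change_id': 'I123'}
```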
shoebot/shoebot
|
lib/beziereditor/__init__.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/beziereditor/__init__.py#L318-L635
|
def update(self):
""" Update runs each frame to check for mouse interaction.
Alters the path by allowing the user to add new points,
drag point handles and move their location.
Updates are automatically stored as SVG
in the given filename.
"""
x, y = mouse()
if self.show_grid:
x, y = self.grid.snap(x, y)
if _ctx._ns["mousedown"] \
and not self.freehand:
self._dirty = True
# Handle buttons first.
# When pressing down on a button, all other action halts.
# Buttons appear near a point being edited.
# Once clicked, actions are resolved.
if self.edit != None \
and not self.drag_point \
and not self.drag_handle1 \
and not self.drag_handle2:
pt = self._points[self.edit]
dx = pt.x+self.btn_x
dy = pt.y+self.btn_y
# The delete button
if self.overlap(dx, dy, x, y, r=self.btn_r):
self.delete = self.edit
return
# The moveto button,
# active on the last point in the path.
dx += self.btn_r*2 + 2
if self.edit == len(self._points) -1 and \
self.overlap(dx, dy, x, y, r=self.btn_r):
self.moveto = self.edit
return
if self.insert:
self.inserting = True
return
# When not dragging a point or the handle of a point,
# i.e. the mousebutton was released and then pressed again,
# check to see if a point on the path is pressed.
# When this point is not the last new point,
# enter edit mode.
if not self.drag_point and \
not self.drag_handle1 and \
not self.drag_handle2:
self.editing = False
indices = range(len(self._points))
indices.reverse()
for i in indices:
pt = self._points[i]
if pt != self.new \
and self.overlap(x, y, pt.x, pt.y) \
and self.new == None:
# Don't select a point if in fact
                    # it is at the same location as the first handle
# of the point we are currently editing.
if self.edit == i+1 \
and self.overlap(self._points[i+1].ctrl1.x,
self._points[i+1].ctrl1.y, x, y):
continue
else:
self.edit = i
self.editing = True
break
# When the mouse button is down,
# edit mode continues as long as
# a point or handle is dragged.
# Else, stop editing and switch to add-mode
# (the user is clicking somewhere on the canvas).
if not self.editing:
if self.edit != None:
pt = self._points[self.edit]
if self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, y) or \
self.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y):
self.editing = True
else:
self.edit = None
# When not in edit mode, there are two options.
# Either no new point is defined and the user is
# clicking somewhere on the canvas (add a new point)
# or the user is dragging the handle of the new point.
# Adding a new point is a fluid click-to-locate and
# drag-to-curve action.
if self.edit == None:
if self.new == None:
                # A special case is when the user clicked
# the moveto button on the last point in the path.
# This indicates a gap (i.e. MOVETO) in the path.
self.new = PathElement()
if self.moveto == True \
or len(self._points) == 0:
cmd = MOVETO
self.moveto = None
self.last_moveto = self.new
else:
cmd = CURVETO
self.new.cmd = cmd
self.new.x = x
self.new.y = y
self.new.ctrl1 = Point(x, y)
self.new.ctrl2 = Point(x, y)
self.new.freehand = False
# Don't forget to map the point's ctrl1 handle
# to the ctrl2 handle of the previous point.
# This makes for smooth, continuous paths.
if len(self._points) > 0:
prev = self._points[-1]
rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)
self.new.ctrl1 = Point(rx, ry)
self._points.append(self.new)
else:
# Illustrator-like behavior:
# when the handle is dragged downwards,
# the path bulges upwards.
rx, ry = self.reflect(self.new.x, self.new.y, x, y)
self.new.ctrl2 = Point(rx, ry)
# Edit mode
elif self.new == None:
pt = self._points[self.edit]
# The user is pressing the mouse on a point,
# enter drag-point mode.
if self.overlap(pt.x, pt.y, x, y) \
and not self.drag_handle1 \
and not self.drag_handle2 \
and not self.new != None:
self.drag_point = True
self.drag_handle1 = False
self.drag_handle2 = False
# The user is pressing the mouse on a point's handle,
# enter drag-handle mode.
if self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, y) \
and pt.cmd == CURVETO \
and not self.drag_point \
and not self.drag_handle2:
self.drag_point = False
self.drag_handle1 = True
self.drag_handle2 = False
if self.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y) \
and pt.cmd == CURVETO \
and not self.drag_point \
and not self.drag_handle1:
self.drag_point = False
self.drag_handle1 = False
self.drag_handle2 = True
# In drag-point mode,
# the point is located at the mouse coordinates.
# The handles move relatively to the new location
# (e.g. they are retained, the path does not distort).
# Modify the ctrl1 handle of the next point as well.
if self.drag_point == True:
dx = x - pt.x
dy = y - pt.y
pt.x = x
pt.y = y
pt.ctrl2.x += dx
pt.ctrl2.y += dy
if self.edit < len(self._points)-1:
rx, ry = self.reflect(pt.x, pt.y, x, y)
next = self._points[self.edit+1]
next.ctrl1.x += dx
next.ctrl1.y += dy
# In drag-handle mode,
# set the path's handle to the mouse location.
# Rotate the handle of the next or previous point
# to keep paths smooth - unless the user is pressing "x".
if self.drag_handle1 == True:
pt.ctrl1 = Point(x, y)
if self.edit > 0 \
and self.last_key != "x":
prev = self._points[self.edit-1]
d = self.distance(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)
a = self.angle(prev.x, prev.y, pt.ctrl1.x, pt.ctrl1.y)
prev.ctrl2 = self.coordinates(prev.x, prev.y, d, a+180)
if self.drag_handle2 == True:
pt.ctrl2 = Point(x, y)
if self.edit < len(self._points)-1 \
and self.last_key != "x":
next = self._points[self.edit+1]
d = self.distance(pt.x, pt.y, next.ctrl1.x, next.ctrl1.y)
a = self.angle(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)
next.ctrl1 = self.coordinates(pt.x, pt.y, d, a+180)
elif not self.freehand:
# The mouse button is released
# so we are not dragging anything around.
self.new = None
self.drag_point = False
self.drag_handle1 = False
self.drag_handle2 = False
# The delete button for a point was clicked.
if self.delete != None and len(self._points) > 0:
i = self.delete
cmd = self._points[i].cmd
del self._points[i]
if 0 < i < len(self._points):
prev = self._points[i-1]
rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)
self._points[i].ctrl1 = Point(rx, ry)
# Also delete all the freehand points
# prior to this point.
start_i = i
while i > 1:
i -= 1
pt = self._points[i]
if pt.freehand:
del self._points[i]
elif i < start_i-1 and pt.freehand == False:
if pt.cmd == MOVETO:
del self._points[i]
break
# When you delete a MOVETO point,
# the last moveto (the one where the dashed line points to)
# needs to be updated.
if len(self._points) > 0 \
and (cmd == MOVETO or i == 0):
self.last_moveto = self._points[0]
for pt in self._points:
if pt.cmd == MOVETO:
self.last_moveto = pt
self.delete = None
self.edit = None
# The moveto button for the last point
# in the path was clicked.
elif isinstance(self.moveto, int):
self.moveto = True
self.edit = None
# We are not editing a node and
# the mouse is hovering over the path outline stroke:
# it is possible to insert a point here.
elif self.edit == None \
and self.contains_point(x, y, d=2):
self.insert = True
else:
self.insert = False
# Commit insert of new point.
if self.inserting \
and self.contains_point(x, y, d=2):
self.insert_point(x, y)
self.insert = False
self.inserting = False
# No modifications are being made right now
# and the SVG file needs to be updated.
if self._dirty == True:
self.export_svg()
self._dirty = False
# Keyboard interaction.
if _ctx._ns["keydown"]:
self.last_key = _ctx._ns["key"]
self.last_keycode = _ctx._ns["keycode"]
if not _ctx._ns["keydown"] and self.last_key != None:
# If the TAB-key is pressed,
# switch the magnetic grid either on or off.
if self.last_keycode == KEY_TAB:
self.show_grid = not self.show_grid
# When "f" is pressed, switch freehand mode.
if self.last_key == "f":
self.edit = None
self.freehand = not self.freehand
if self.freehand:
self.msg = "freehand"
else:
self.msg = "curves"
# When ESC is pressed exit edit mode.
if self.last_keycode == KEY_ESC:
self.edit = None
# When BACKSPACE is pressed, delete current point.
if self.last_keycode == _ctx.KEY_BACKSPACE \
and self.edit != None:
self.delete = self.edit
self.last_key = None
self.last_code = None
# Using the keypad you can scroll the screen.
if _ctx._ns["keydown"]:
dx = 0
dy = 0
keycode = _ctx._ns["keycode"]
if keycode == _ctx.KEY_LEFT:
dx = -10
elif keycode == _ctx.KEY_RIGHT:
dx = 10
if keycode == _ctx.KEY_UP:
dy = -10
elif keycode == _ctx.KEY_DOWN:
dy = 10
if dx != 0 or dy != 0:
for pt in self._points:
pt.x += dx
pt.y += dy
pt.ctrl1.x += dx
pt.ctrl1.y += dy
pt.ctrl2.x += dx
pt.ctrl2.y += dy
|
[code_tokens omitted] |
Update runs each frame to check for mouse interaction.
Alters the path by allowing the user to add new points,
drag point handles and move their location.
Updates are automatically stored as SVG
in the given filename.
|
[
"Update",
"runs",
"each",
"frame",
"to",
"check",
"for",
"mouse",
"interaction",
".",
"Alters",
"the",
"path",
"by",
"allowing",
"the",
"user",
"to",
"add",
"new",
"points",
"drag",
"point",
"handles",
"and",
"move",
"their",
"location",
".",
"Updates",
"are",
"automatically",
"stored",
"as",
"SVG",
"in",
"the",
"given",
"filename",
"."
] |
python
|
valid
|
log2timeline/plaso
|
plaso/engine/worker.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/worker.py#L338-L374
|
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
"""Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
"""
# Do not extract metadata from the root file entry when it is virtual.
if file_entry.IsRoot() and file_entry.type_indicator not in (
self._TYPES_WITH_ROOT_METADATA):
return
# We always want to extract the file entry metadata but we only want
# to parse it once per file entry, so we only use it if we are
# processing the default data stream of regular files.
if data_stream and not data_stream.IsDefault():
return
display_name = mediator.GetDisplayName()
logger.debug(
'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(
display_name))
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
|
[
"def",
"_ExtractMetadataFromFileEntry",
"(",
"self",
",",
"mediator",
",",
"file_entry",
",",
"data_stream",
")",
":",
"# Do not extract metadata from the root file entry when it is virtual.",
"if",
"file_entry",
".",
"IsRoot",
"(",
")",
"and",
"file_entry",
".",
"type_indicator",
"not",
"in",
"(",
"self",
".",
"_TYPES_WITH_ROOT_METADATA",
")",
":",
"return",
"# We always want to extract the file entry metadata but we only want",
"# to parse it once per file entry, so we only use it if we are",
"# processing the default data stream of regular files.",
"if",
"data_stream",
"and",
"not",
"data_stream",
".",
"IsDefault",
"(",
")",
":",
"return",
"display_name",
"=",
"mediator",
".",
"GetDisplayName",
"(",
")",
"logger",
".",
"debug",
"(",
"'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'",
".",
"format",
"(",
"display_name",
")",
")",
"self",
".",
"processing_status",
"=",
"definitions",
".",
"STATUS_INDICATOR_EXTRACTING",
"if",
"self",
".",
"_processing_profiler",
":",
"self",
".",
"_processing_profiler",
".",
"StartTiming",
"(",
"'extracting'",
")",
"self",
".",
"_event_extractor",
".",
"ParseFileEntryMetadata",
"(",
"mediator",
",",
"file_entry",
")",
"if",
"self",
".",
"_processing_profiler",
":",
"self",
".",
"_processing_profiler",
".",
"StopTiming",
"(",
"'extracting'",
")",
"self",
".",
"processing_status",
"=",
"definitions",
".",
"STATUS_INDICATOR_RUNNING"
] |
Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
|
[
"Extracts",
"metadata",
"from",
"a",
"file",
"entry",
"."
] |
python
|
train
|
olitheolix/qtmacs
|
qtmacs/base_macro.py
|
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/base_macro.py#L418-L469
|
def qtePrepareToRun(self):
"""
This method is called by Qtmacs to prepare the macro for
execution.
It is probably a bad idea to overload this method as it only
administers the macro execution and calls the ``qteRun``
method (which *should* be overloaded by the macro programmer
in order for the macro to do something).
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
"""
# Report the execution attempt.
msgObj = QtmacsMessage((self.qteMacroName(), self.qteWidget), None)
msgObj.setSignalName('qtesigMacroStart')
self.qteMain.qtesigMacroStart.emit(msgObj)
# Try to run the macro and radio the success via the
# ``qtesigMacroFinished`` signal.
try:
self.qteRun()
self.qteMain.qtesigMacroFinished.emit(msgObj)
except Exception as err:
if self.qteApplet is None:
appID = appSig = None
else:
appID = self.qteApplet.qteAppletID()
appSig = self.qteApplet.qteAppletSignature()
msg = ('Macro <b>{}</b> (called from the <b>{}</b> applet'
' with ID <b>{}</b>) did not execute properly.')
msg = msg.format(self.qteMacroName(), appSig, appID)
if isinstance(err, QtmacsArgumentError):
msg += '<br/>' + str(err)
# Irrespective of the error, log it, enable macro
# processing (in case it got disabled), and trigger the
# error signal.
self.qteMain.qteEnableMacroProcessing()
self.qteMain.qtesigMacroError.emit(msgObj)
self.qteLogger.exception(msg, exc_info=True, stack_info=True)
|
[
"def",
"qtePrepareToRun",
"(",
"self",
")",
":",
"# Report the execution attempt.",
"msgObj",
"=",
"QtmacsMessage",
"(",
"(",
"self",
".",
"qteMacroName",
"(",
")",
",",
"self",
".",
"qteWidget",
")",
",",
"None",
")",
"msgObj",
".",
"setSignalName",
"(",
"'qtesigMacroStart'",
")",
"self",
".",
"qteMain",
".",
"qtesigMacroStart",
".",
"emit",
"(",
"msgObj",
")",
"# Try to run the macro and radio the success via the",
"# ``qtesigMacroFinished`` signal.",
"try",
":",
"self",
".",
"qteRun",
"(",
")",
"self",
".",
"qteMain",
".",
"qtesigMacroFinished",
".",
"emit",
"(",
"msgObj",
")",
"except",
"Exception",
"as",
"err",
":",
"if",
"self",
".",
"qteApplet",
"is",
"None",
":",
"appID",
"=",
"appSig",
"=",
"None",
"else",
":",
"appID",
"=",
"self",
".",
"qteApplet",
".",
"qteAppletID",
"(",
")",
"appSig",
"=",
"self",
".",
"qteApplet",
".",
"qteAppletSignature",
"(",
")",
"msg",
"=",
"(",
"'Macro <b>{}</b> (called from the <b>{}</b> applet'",
"' with ID <b>{}</b>) did not execute properly.'",
")",
"msg",
"=",
"msg",
".",
"format",
"(",
"self",
".",
"qteMacroName",
"(",
")",
",",
"appSig",
",",
"appID",
")",
"if",
"isinstance",
"(",
"err",
",",
"QtmacsArgumentError",
")",
":",
"msg",
"+=",
"'<br/>'",
"+",
"str",
"(",
"err",
")",
"# Irrespective of the error, log it, enable macro",
"# processing (in case it got disabled), and trigger the",
"# error signal.",
"self",
".",
"qteMain",
".",
"qteEnableMacroProcessing",
"(",
")",
"self",
".",
"qteMain",
".",
"qtesigMacroError",
".",
"emit",
"(",
"msgObj",
")",
"self",
".",
"qteLogger",
".",
"exception",
"(",
"msg",
",",
"exc_info",
"=",
"True",
",",
"stack_info",
"=",
"True",
")"
] |
This method is called by Qtmacs to prepare the macro for
execution.
It is probably a bad idea to overload this method as it only
administers the macro execution and calls the ``qteRun``
method (which *should* be overloaded by the macro programmer
in order for the macro to do something).
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
|
[
"This",
"method",
"is",
"called",
"by",
"Qtmacs",
"to",
"prepare",
"the",
"macro",
"for",
"execution",
"."
] |
python
|
train
|
Fantomas42/mots-vides
|
mots_vides/factory.py
|
https://github.com/Fantomas42/mots-vides/blob/eaeccf73bdb415d0c5559ccd74de360b37a2bbac/mots_vides/factory.py#L111-L118
|
def write_collection(self, filename, collection):
"""
Writes a collection of stop words into a file.
"""
collection = sorted(list(collection))
with open(filename, 'wb+') as fd:
fd.truncate()
fd.write('\n'.join(collection).encode('utf-8'))
|
[
"def",
"write_collection",
"(",
"self",
",",
"filename",
",",
"collection",
")",
":",
"collection",
"=",
"sorted",
"(",
"list",
"(",
"collection",
")",
")",
"with",
"open",
"(",
"filename",
",",
"'wb+'",
")",
"as",
"fd",
":",
"fd",
".",
"truncate",
"(",
")",
"fd",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"collection",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] |
Writes a collection of stop words into a file.
|
[
"Writes",
"a",
"collection",
"of",
"stop",
"words",
"into",
"a",
"file",
"."
] |
python
|
train
|
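A quick usage sketch of write_collection; the bare StopWordFactory shim below is hypothetical (the real class in mots_vides.factory carries more state), but it shows why the method sorts first: the file contents become deterministic regardless of input order.

import os
import tempfile

class StopWordFactory:
    # Hypothetical shim exposing only the write_collection method above.
    def write_collection(self, filename, collection):
        collection = sorted(list(collection))
        with open(filename, 'wb+') as fd:
            fd.truncate()
            fd.write('\n'.join(collection).encode('utf-8'))

path = os.path.join(tempfile.mkdtemp(), 'stop_words.txt')
StopWordFactory().write_collection(path, {'the', 'an', 'a'})
with open(path, encoding='utf-8') as fd:
    print(fd.read().splitlines())   # ['a', 'an', 'the']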
ergoithz/browsepy
|
browsepy/manager.py
|
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L395-L409
|
def get_mimetype(self, path):
'''
Get mimetype of given path calling all registered mime functions (and
default ones).
:param path: filesystem path of file
:type path: str
:returns: mimetype
:rtype: str
'''
for fnc in self._mimetype_functions:
mime = fnc(path)
if mime:
return mime
return mimetype.by_default(path)
|
[
"def",
"get_mimetype",
"(",
"self",
",",
"path",
")",
":",
"for",
"fnc",
"in",
"self",
".",
"_mimetype_functions",
":",
"mime",
"=",
"fnc",
"(",
"path",
")",
"if",
"mime",
":",
"return",
"mime",
"return",
"mimetype",
".",
"by_default",
"(",
"path",
")"
] |
Get mimetype of given path calling all registered mime functions (and
default ones).
:param path: filesystem path of file
:type path: str
:returns: mimetype
:rtype: str
|
[
"Get",
"mimetype",
"of",
"given",
"path",
"calling",
"all",
"registered",
"mime",
"functions",
"(",
"and",
"default",
"ones",
")",
"."
] |
python
|
train
|
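The first-match-wins chain above is easy to reuse outside browsepy; a minimal self-contained sketch (both resolver functions are invented for illustration, not part of the browsepy API):

def by_extension(path):
    # Toy resolver: only recognises Python sources.
    return 'text/x-python' if path.endswith('.py') else None

def by_default(path):
    return 'application/octet-stream'

mimetype_functions = [by_extension]

def get_mimetype(path):
    for fnc in mimetype_functions:
        mime = fnc(path)
        if mime:
            return mime
    return by_default(path)

print(get_mimetype('manager.py'))   # text/x-python
print(get_mimetype('data.bin'))     # application/octet-stream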
bsolomon1124/pyfinance
|
pyfinance/returns.py
|
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L449-L468
|
def drawdown_recov(self, return_int=False):
"""Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.recov_date() - self.drawdown_end()
if return_int:
return td.days
return td
|
[
"def",
"drawdown_recov",
"(",
"self",
",",
"return_int",
"=",
"False",
")",
":",
"td",
"=",
"self",
".",
"recov_date",
"(",
")",
"-",
"self",
".",
"drawdown_end",
"(",
")",
"if",
"return_int",
":",
"return",
"td",
".",
"days",
"return",
"td"
] |
Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
|
[
"Length",
"of",
"drawdown",
"recovery",
"in",
"days",
"."
] |
python
|
train
|
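A hedged sketch of the Timedelta arithmetic the method relies on, with made-up dates standing in for drawdown_end() (the trough) and recov_date():

import pandas as pd

trough = pd.Timestamp('2020-03-23')      # hypothetical drawdown_end()
recovery = pd.Timestamp('2020-08-10')    # hypothetical recov_date()
td = recovery - trough
print(td)        # 140 days 00:00:00 (a pandas Timedelta)
print(td.days)   # 140 -- what return_int=True would return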
thesharp/htpasswd
|
htpasswd/basic.py
|
https://github.com/thesharp/htpasswd/blob/8bf5cee0bd5362af586729f4c9cea8131eedd74f/htpasswd/basic.py#L88-L97
|
def _encrypt_password(self, password):
"""encrypt the password for given mode """
if self.encryption_mode.lower() == 'crypt':
return self._crypt_password(password)
elif self.encryption_mode.lower() == 'md5':
return self._md5_password(password)
elif self.encryption_mode.lower() == 'md5-base':
return self._md5_base_password(password)
else:
raise UnknownEncryptionMode(self.encryption_mode)
|
[
"def",
"_encrypt_password",
"(",
"self",
",",
"password",
")",
":",
"if",
"self",
".",
"encryption_mode",
".",
"lower",
"(",
")",
"==",
"'crypt'",
":",
"return",
"self",
".",
"_crypt_password",
"(",
"password",
")",
"elif",
"self",
".",
"encryption_mode",
".",
"lower",
"(",
")",
"==",
"'md5'",
":",
"return",
"self",
".",
"_md5_password",
"(",
"password",
")",
"elif",
"self",
".",
"encryption_mode",
".",
"lower",
"(",
")",
"==",
"'md5-base'",
":",
"return",
"self",
".",
"_md5_base_password",
"(",
"password",
")",
"else",
":",
"raise",
"UnknownEncryptionMode",
"(",
"self",
".",
"encryption_mode",
")"
] |
Encrypt the password for the given mode.
|
[
"encrypt",
"the",
"password",
"for",
"given",
"mode"
] |
python
|
train
|
ihmeuw/vivarium
|
src/vivarium/framework/configuration.py
|
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/configuration.py#L25-L36
|
def validate_model_specification_file(file_path: str) -> str:
"""Ensures the provided file is a yaml file"""
if not os.path.isfile(file_path):
raise ConfigurationError('If you provide a model specification file, it must be a file. '
f'You provided {file_path}')
extension = file_path.split('.')[-1]
if extension not in ['yaml', 'yml']:
raise ConfigurationError(f'Model specification files must be in a yaml format. You provided {extension}')
# Attempt to load (full_load expects YAML text or a stream, not a path)
with open(file_path) as f:
    yaml.full_load(f)
return file_path
|
[
"def",
"validate_model_specification_file",
"(",
"file_path",
":",
"str",
")",
"->",
"str",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"raise",
"ConfigurationError",
"(",
"'If you provide a model specification file, it must be a file. '",
"f'You provided {file_path}'",
")",
"extension",
"=",
"file_path",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"extension",
"not",
"in",
"[",
"'yaml'",
",",
"'yml'",
"]",
":",
"raise",
"ConfigurationError",
"(",
"f'Model specification files must be in a yaml format. You provided {extension}'",
")",
"# Attempt to load",
"yaml",
".",
"full_load",
"(",
"file_path",
")",
"return",
"file_path"
] |
Ensures the provided file is a yaml file
|
[
"Ensures",
"the",
"provided",
"file",
"is",
"a",
"yaml",
"file"
] |
python
|
train
|
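The load step above was adjusted to pass a file object: yaml.full_load consumes YAML text or a stream, so handing it the path string would parse the path characters rather than the file contents. A minimal sketch of the corrected pattern (pyyaml assumed installed):

import tempfile
import yaml

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write('components: []\n')
    path = f.name

with open(path) as f:
    print(yaml.full_load(f))   # {'components': []}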
foutaise/texttable
|
texttable.py
|
https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L294-L306
|
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
|
[
"def",
"set_cols_valign",
"(",
"self",
",",
"array",
")",
":",
"self",
".",
"_check_row_size",
"(",
"array",
")",
"self",
".",
"_valign",
"=",
"array",
"return",
"self"
] |
Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
|
[
"Set",
"the",
"desired",
"columns",
"vertical",
"alignment"
] |
python
|
train
|
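A usage sketch, assuming the texttable package is installed: one letter per column, set before rows are added so the row-size check can fix the column count.

from texttable import Texttable

table = Texttable()
table.set_cols_valign(['t', 'm', 'b'])   # top, middle, bottom
table.add_rows([['h1', 'h2', 'h3'],
                ['only\nthis\ncell\nis\ntall', 'mid', 'bottom']])
print(table.draw())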
ungarj/mapchete
|
mapchete/formats/default/raster_file.py
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/raster_file.py#L90-L119
|
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
out_crs = self.pyramid.crs if out_crs is None else out_crs
with rasterio.open(self.path) as inp:
inp_crs = inp.crs
out_bbox = bbox = box(*inp.bounds)
# If source and target CRSes differ, segmentize and reproject
if inp_crs != out_crs:
# estimate segmentize value (raster pixel size * tile size)
# and get reprojected bounding box
return reproject_geometry(
segmentize_geometry(
bbox, inp.transform[0] * self.pyramid.tile_size
),
src_crs=inp_crs, dst_crs=out_crs
)
else:
return out_bbox
|
[
"def",
"bbox",
"(",
"self",
",",
"out_crs",
"=",
"None",
")",
":",
"out_crs",
"=",
"self",
".",
"pyramid",
".",
"crs",
"if",
"out_crs",
"is",
"None",
"else",
"out_crs",
"with",
"rasterio",
".",
"open",
"(",
"self",
".",
"path",
")",
"as",
"inp",
":",
"inp_crs",
"=",
"inp",
".",
"crs",
"out_bbox",
"=",
"bbox",
"=",
"box",
"(",
"*",
"inp",
".",
"bounds",
")",
"# If soucre and target CRSes differ, segmentize and reproject",
"if",
"inp_crs",
"!=",
"out_crs",
":",
"# estimate segmentize value (raster pixel size * tile size)",
"# and get reprojected bounding box",
"return",
"reproject_geometry",
"(",
"segmentize_geometry",
"(",
"bbox",
",",
"inp",
".",
"transform",
"[",
"0",
"]",
"*",
"self",
".",
"pyramid",
".",
"tile_size",
")",
",",
"src_crs",
"=",
"inp_crs",
",",
"dst_crs",
"=",
"out_crs",
")",
"else",
":",
"return",
"out_bbox"
] |
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
|
[
"Return",
"data",
"bounding",
"box",
"."
] |
python
|
valid
|
genialis/resolwe
|
resolwe/elastic/builder.py
|
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/builder.py#L505-L510
|
def destroy(self):
"""Delete all indexes from Elasticsearch and index builder."""
self.unregister_signals()
for index in self.indexes:
index.destroy()
self.indexes = []
|
[
"def",
"destroy",
"(",
"self",
")",
":",
"self",
".",
"unregister_signals",
"(",
")",
"for",
"index",
"in",
"self",
".",
"indexes",
":",
"index",
".",
"destroy",
"(",
")",
"self",
".",
"indexes",
"=",
"[",
"]"
] |
Delete all indexes from Elasticsearch and index builder.
|
[
"Delete",
"all",
"indexes",
"from",
"Elasticsearch",
"and",
"index",
"builder",
"."
] |
python
|
train
|
assamite/creamas
|
creamas/rules/rule.py
|
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/rule.py#L153-L167
|
def add_subrule(self, subrule, weight):
"""Add subrule to the rule.
:param subrule:
Subrule to add to this rule, an instance of :class:`Rule` or
:class:`RuleLeaf`.
:param float weight: Weight of the subrule
"""
if not issubclass(subrule.__class__, (Rule, RuleLeaf)):
raise TypeError("Rule's class must be (subclass of) {} or {}, got "
"{}.".format(Rule, RuleLeaf, subrule.__class__))
self.__domains = set.union(self.__domains, subrule.domains)
self.R.append(subrule)
self.W.append(weight)
|
[
"def",
"add_subrule",
"(",
"self",
",",
"subrule",
",",
"weight",
")",
":",
"if",
"not",
"issubclass",
"(",
"subrule",
".",
"__class__",
",",
"(",
"Rule",
",",
"RuleLeaf",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Rule's class must be (subclass of) {} or {}, got \"",
"\"{}.\"",
".",
"format",
"(",
"Rule",
",",
"RuleLeaf",
",",
"subrule",
".",
"__class__",
")",
")",
"self",
".",
"__domains",
"=",
"set",
".",
"union",
"(",
"self",
".",
"__domains",
",",
"subrule",
".",
"domains",
")",
"self",
".",
"R",
".",
"append",
"(",
"subrule",
")",
"self",
".",
"W",
".",
"append",
"(",
"weight",
")"
] |
Add subrule to the rule.
:param subrule:
Subrule to add to this rule, an instance of :class:`Rule` or
:class:`RuleLeaf`.
:param float weight: Weight of the subrule
|
[
"Add",
"subrule",
"to",
"the",
"rule",
"."
] |
python
|
train
|
ansible/molecule
|
molecule/provisioner/ansible.py
|
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/provisioner/ansible.py#L641-L656
|
def converge(self, playbook=None, **kwargs):
"""
Executes ``ansible-playbook`` against the converge playbook unless
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
:param kwargs: Optional keyword arguments.
:return: str
"""
if playbook is None:
pb = self._get_ansible_playbook(self.playbooks.converge, **kwargs)
else:
pb = self._get_ansible_playbook(playbook, **kwargs)
return pb.execute()
|
[
"def",
"converge",
"(",
"self",
",",
"playbook",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"playbook",
"is",
"None",
":",
"pb",
"=",
"self",
".",
"_get_ansible_playbook",
"(",
"self",
".",
"playbooks",
".",
"converge",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"pb",
"=",
"self",
".",
"_get_ansible_playbook",
"(",
"playbook",
",",
"*",
"*",
"kwargs",
")",
"return",
"pb",
".",
"execute",
"(",
")"
] |
Executes ``ansible-playbook`` against the converge playbook unless
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
:param kwargs: Optional keyword arguments.
:return: str
|
[
"Executes",
"ansible",
"-",
"playbook",
"against",
"the",
"converge",
"playbook",
"unless",
"specified",
"otherwise",
"and",
"returns",
"a",
"string",
"."
] |
python
|
train
|
jilljenn/tryalgo
|
tryalgo/convex_hull.py
|
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/convex_hull.py#L20-L38
|
def andrew(S):
"""Convex hull by Andrew
:param S: list of points as coordinate pairs
:requires: S has at least 2 points
:returns: list of points of the convex hull
:complexity: `O(n log n)`
"""
S.sort()
top = []
bot = []
for p in S:
while len(top) >= 2 and not left_turn(p, top[-1], top[-2]):
top.pop()
top.append(p)
while len(bot) >= 2 and not left_turn(bot[-2], bot[-1], p):
bot.pop()
bot.append(p)
return bot[:-1] + top[:0:-1]
|
[
"def",
"andrew",
"(",
"S",
")",
":",
"S",
".",
"sort",
"(",
")",
"top",
"=",
"[",
"]",
"bot",
"=",
"[",
"]",
"for",
"p",
"in",
"S",
":",
"while",
"len",
"(",
"top",
")",
">=",
"2",
"and",
"not",
"left_turn",
"(",
"p",
",",
"top",
"[",
"-",
"1",
"]",
",",
"top",
"[",
"-",
"2",
"]",
")",
":",
"top",
".",
"pop",
"(",
")",
"top",
".",
"append",
"(",
"p",
")",
"while",
"len",
"(",
"bot",
")",
">=",
"2",
"and",
"not",
"left_turn",
"(",
"bot",
"[",
"-",
"2",
"]",
",",
"bot",
"[",
"-",
"1",
"]",
",",
"p",
")",
":",
"bot",
".",
"pop",
"(",
")",
"bot",
".",
"append",
"(",
"p",
")",
"return",
"bot",
"[",
":",
"-",
"1",
"]",
"+",
"top",
"[",
":",
"0",
":",
"-",
"1",
"]"
] |
Convex hull by Andrew
:param S: list of points as coordinate pairs
:requires: S has at least 2 points
:returns: list of points of the convex hull
:complexity: `O(n log n)`
|
[
"Convex",
"hull",
"by",
"Andrew"
] |
python
|
train
|
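To run the record above stand-alone, left_turn only needs a cross-product predicate; the reimplementation below is assumed equivalent to tryalgo's helper (with tryalgo installed, `from tryalgo.convex_hull import andrew` skips all of this):

def left_turn(a, b, c):
    # True when a -> b -> c makes a strict counterclockwise turn.
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]) > 0

def andrew(S):   # body copied from the record above
    S.sort()
    top, bot = [], []
    for p in S:
        while len(top) >= 2 and not left_turn(p, top[-1], top[-2]):
            top.pop()
        top.append(p)
        while len(bot) >= 2 and not left_turn(bot[-2], bot[-1], p):
            bot.pop()
        bot.append(p)
    return bot[:-1] + top[:0:-1]

points = [(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (1, 3)]
print(andrew(points))   # [(0, 0), (2, 0), (2, 2), (1, 3), (0, 2)] -- interior (1, 1) dropped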
AllTheWayDown/turgles
|
turgles/buffer.py
|
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L79-L86
|
def resize(self, new_size):
"""Create a new larger array, and copy data over"""
assert new_size > self.size
new_data = self._allocate(new_size)
# copy
new_data[0:self.size * self.chunk_size] = self.data
self.size = new_size
self.data = new_data
|
[
"def",
"resize",
"(",
"self",
",",
"new_size",
")",
":",
"assert",
"new_size",
">",
"self",
".",
"size",
"new_data",
"=",
"self",
".",
"_allocate",
"(",
"new_size",
")",
"# copy",
"new_data",
"[",
"0",
":",
"self",
".",
"size",
"*",
"self",
".",
"chunk_size",
"]",
"=",
"self",
".",
"data",
"self",
".",
"size",
"=",
"new_size",
"self",
".",
"data",
"=",
"new_data"
] |
Create a new larger array, and copy data over
|
[
"Create",
"a",
"new",
"larger",
"array",
"and",
"copy",
"data",
"over"
] |
python
|
train
|
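A self-contained sketch of the grow-and-copy pattern with array('f') standing in for whatever _allocate really returns (the Buffer shim and chunk_size value are hypothetical):

from array import array

class Buffer:
    def __init__(self, size, chunk_size=4):
        self.chunk_size = chunk_size   # floats per logical slot
        self.size = size
        self.data = self._allocate(size)

    def _allocate(self, size):
        return array('f', [0.0] * (size * self.chunk_size))

    def resize(self, new_size):   # as in the record above
        assert new_size > self.size
        new_data = self._allocate(new_size)
        new_data[0:self.size * self.chunk_size] = self.data   # copy old contents
        self.size = new_size
        self.data = new_data

buf = Buffer(2)
buf.data[0] = 1.5
buf.resize(8)
print(buf.size, len(buf.data), buf.data[0])   # 8 32 1.5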
OpenTreeOfLife/peyotl
|
tutorials/ot-oti-find-tree.py
|
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-tree.py#L12-L24
|
def ot_find_tree(arg_dict, exact=True, verbose=False, oti_wrapper=None):
"""Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on `porperty`.
The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used.
All other arguments correspond to the arguments of the web-service call.
"""
if oti_wrapper is None:
from peyotl.sugar import oti
oti_wrapper = oti
return oti_wrapper.find_trees(arg_dict,
exact=exact,
verbose=verbose,
wrap_response=True)
|
[
"def",
"ot_find_tree",
"(",
"arg_dict",
",",
"exact",
"=",
"True",
",",
"verbose",
"=",
"False",
",",
"oti_wrapper",
"=",
"None",
")",
":",
"if",
"oti_wrapper",
"is",
"None",
":",
"from",
"peyotl",
".",
"sugar",
"import",
"oti",
"oti_wrapper",
"=",
"oti",
"return",
"oti_wrapper",
".",
"find_trees",
"(",
"arg_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
",",
"wrap_response",
"=",
"True",
")"
] |
Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on `property`.
The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used).
All other arguments correspond to the arguments of the web-service call.
|
[
"Uses",
"a",
"peyotl",
"wrapper",
"around",
"an",
"Open",
"Tree",
"web",
"service",
"to",
"get",
"a",
"list",
"of",
"trees",
"including",
"values",
"value",
"for",
"a",
"given",
"property",
"to",
"be",
"searched",
"on",
"porperty",
"."
] |
python
|
train
|
openstack/networking-cisco
|
networking_cisco/apps/saf/server/services/firewall/native/drivers/native.py
|
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/drivers/native.py#L445-L472
|
def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Tenant Network create Notification.
Restart is not supported currently for this. fixme(padkrish).
"""
router_id = self.get_router_id(tenant_id, tenant_name)
if not router_id:
LOG.error("Rout ID not present for tenant")
return False
ret = self._program_dcnm_static_route(tenant_id, tenant_name)
if not ret:
LOG.error("Program DCNM with static routes failed "
"for router %s", router_id)
return False
# Program router namespace to have this network to be routed
# to IN service network
in_ip_dict = self.get_in_ip_addr(tenant_id)
in_gw = in_ip_dict.get('gateway')
if in_gw is None:
LOG.error("No FW service GW present")
return False
ret = self.os_helper.program_rtr_nwk_next_hop(router_id, in_gw, cidr)
if not ret:
LOG.error("Unable to program default router next hop %s",
router_id)
return False
return True
|
[
"def",
"network_create_notif",
"(",
"self",
",",
"tenant_id",
",",
"tenant_name",
",",
"cidr",
")",
":",
"router_id",
"=",
"self",
".",
"get_router_id",
"(",
"tenant_id",
",",
"tenant_name",
")",
"if",
"not",
"router_id",
":",
"LOG",
".",
"error",
"(",
"\"Rout ID not present for tenant\"",
")",
"return",
"False",
"ret",
"=",
"self",
".",
"_program_dcnm_static_route",
"(",
"tenant_id",
",",
"tenant_name",
")",
"if",
"not",
"ret",
":",
"LOG",
".",
"error",
"(",
"\"Program DCNM with static routes failed \"",
"\"for router %s\"",
",",
"router_id",
")",
"return",
"False",
"# Program router namespace to have this network to be routed",
"# to IN service network",
"in_ip_dict",
"=",
"self",
".",
"get_in_ip_addr",
"(",
"tenant_id",
")",
"in_gw",
"=",
"in_ip_dict",
".",
"get",
"(",
"'gateway'",
")",
"if",
"in_gw",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"\"No FW service GW present\"",
")",
"return",
"False",
"ret",
"=",
"self",
".",
"os_helper",
".",
"program_rtr_nwk_next_hop",
"(",
"router_id",
",",
"in_gw",
",",
"cidr",
")",
"if",
"not",
"ret",
":",
"LOG",
".",
"error",
"(",
"\"Unable to program default router next hop %s\"",
",",
"router_id",
")",
"return",
"False",
"return",
"True"
] |
Tenant Network create Notification.
Restart is not supported currently for this. fixme(padkrish).
|
[
"Tenant",
"Network",
"create",
"Notification",
"."
] |
python
|
train
|
mrstephenneal/mysql-toolkit
|
mysql/toolkit/commands/execute.py
|
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L43-L75
|
def commands(self):
"""
Fetch individual SQL commands from a SQL script containing many commands.
:return: List of commands
"""
# Retrieve all commands via split function or splitting on ';'
print('\tRetrieving commands from', self.sql_script)
print('\tUsing command splitter algorithm {0}'.format(self.split_algo))
with Timer('\tRetrieved commands in'):
# Split commands
# sqlparse package's split function combined with sql_split function
if self.split_algo == 'sql_parse':
commands = SplitCommands(self.sql_script).sql_parse
# Split on every ';' (unreliable)
elif self.split_algo == 'simple_split':
commands = SplitCommands(self.sql_script).simple_split()
# sqlparse package without additional splitting
elif self.split_algo == 'sql_parse_nosplit':
commands = SplitCommands(self.sql_script).sql_parse_nosplit
# Parse every char of the SQL commands and determine breakpoints
elif self.split_algo == 'sql_split':
commands = SplitCommands(self.sql_script).sql_split(disable_tqdm=False)
else:
commands = SplitCommands(self.sql_script).sql_split(disable_tqdm=False)
# remove dbo. prefixes from table names
cleaned_commands = [com.replace("dbo.", '') for com in commands]
return cleaned_commands
|
[
"def",
"commands",
"(",
"self",
")",
":",
"# Retrieve all commands via split function or splitting on ';'",
"print",
"(",
"'\\tRetrieving commands from'",
",",
"self",
".",
"sql_script",
")",
"print",
"(",
"'\\tUsing command splitter algorithm {0}'",
".",
"format",
"(",
"self",
".",
"split_algo",
")",
")",
"with",
"Timer",
"(",
"'\\tRetrieved commands in'",
")",
":",
"# Split commands",
"# sqlparse packages split function combined with sql_split function",
"if",
"self",
".",
"split_algo",
"is",
"'sql_parse'",
":",
"commands",
"=",
"SplitCommands",
"(",
"self",
".",
"sql_script",
")",
".",
"sql_parse",
"# Split on every ';' (unreliable)",
"elif",
"self",
".",
"split_algo",
"is",
"'simple_split'",
":",
"commands",
"=",
"SplitCommands",
"(",
"self",
".",
"sql_script",
")",
".",
"simple_split",
"(",
")",
"# sqlparse package without additional splitting",
"elif",
"self",
".",
"split_algo",
"is",
"'sql_parse_nosplit'",
":",
"commands",
"=",
"SplitCommands",
"(",
"self",
".",
"sql_script",
")",
".",
"sql_parse_nosplit",
"# Parse every char of the SQL commands and determine breakpoints",
"elif",
"self",
".",
"split_algo",
"is",
"'sql_split'",
":",
"commands",
"=",
"SplitCommands",
"(",
"self",
".",
"sql_script",
")",
".",
"sql_split",
"(",
"disable_tqdm",
"=",
"False",
")",
"else",
":",
"commands",
"=",
"SplitCommands",
"(",
"self",
".",
"sql_script",
")",
".",
"sql_split",
"(",
"disable_tqdm",
"=",
"False",
")",
"# remove dbo. prefixes from table names",
"cleaned_commands",
"=",
"[",
"com",
".",
"replace",
"(",
"\"dbo.\"",
",",
"''",
")",
"for",
"com",
"in",
"commands",
"]",
"return",
"cleaned_commands"
] |
Fetch individual SQL commands from a SQL script containing many commands.
:return: List of commands
|
[
"Fetch",
"individual",
"SQL",
"commands",
"from",
"a",
"SQL",
"commands",
"containing",
"many",
"commands",
"."
] |
python
|
train
|
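The branch comparisons above were switched from `is` to `==`: `is` tests object identity, so the original dispatch only worked because CPython happens to intern short string literals (recent interpreters emit a SyntaxWarning for it). A quick illustration:

algo = ''.join(['sql', '_', 'parse'])   # equal to 'sql_parse', but a distinct object
print(algo == 'sql_parse')              # True  -- value equality, what dispatch needs
print(algo is 'sql_parse')              # False -- identity comparison is unreliable here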
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1390-L1437
|
def _process_genes(self, limit=None):
"""
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['gene']['file']))
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, ncbi_gene_id
# , empty # till next time
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()
self.id_label_map[gene_id] = gene_symbol
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
geno.addGene(gene_id, gene_symbol)
model.addEquivalentClass(gene_id, ncbi_gene_id)
LOG.info("Done with genes")
return
|
[
"def",
"_process_genes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing genes\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'gene'",
"]",
"[",
"'file'",
"]",
")",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"ncbi_gene_id",
"# , empty # till next time",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'gene'",
"]",
":",
"continue",
"gene_id",
"=",
"'ZFIN:'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"ncbi_gene_id",
"=",
"'NCBIGene:'",
"+",
"ncbi_gene_id",
".",
"strip",
"(",
")",
"self",
".",
"id_label_map",
"[",
"gene_id",
"]",
"=",
"gene_symbol",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"pass",
"else",
":",
"geno",
".",
"addGene",
"(",
"gene_id",
",",
"gene_symbol",
")",
"model",
".",
"addEquivalentClass",
"(",
"gene_id",
",",
"ncbi_gene_id",
")",
"LOG",
".",
"info",
"(",
"\"Done with genes\"",
")",
"return"
] |
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
|
[
"This",
"table",
"provides",
"the",
"ZFIN",
"gene",
"id",
"the",
"SO",
"type",
"of",
"the",
"gene",
"the",
"gene",
"symbol",
"and",
"the",
"NCBI",
"Gene",
"ID",
"."
] |
python
|
train
|
googledatalab/pydatalab
|
google/datalab/bigquery/commands/_bigquery.py
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L683-L754
|
def _table_cell(args, cell_body):
"""Implements the BigQuery table magic subcommand used to operate on tables
The supported syntax is:
%%bq tables <command> <args>
Commands:
{list, create, delete, describe, view}
Args:
args: the optional arguments following '%%bq tables command'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The HTML rendering for the table of datasets.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
if args['dataset']:
if args['project'] is None:
datasets = [bigquery.Dataset(args['dataset'])]
else:
context = google.datalab.Context(args['project'],
google.datalab.Context.default().credentials)
datasets = [bigquery.Dataset(args['dataset'], context)]
else:
default_context = google.datalab.Context.default()
context = google.datalab.Context(default_context.project_id, default_context.credentials)
if args['project']:
context.set_project_id(args['project'])
datasets = bigquery.Datasets(context)
tables = []
for dataset in datasets:
tables.extend([table.full_name
for table in dataset if fnmatch.fnmatch(table.full_name, filter_)])
return _render_list(tables)
elif args['command'] == 'create':
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e))
elif args['command'] == 'describe':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
html = _repr_html_table_schema(table.schema)
return IPython.core.display.HTML(html)
elif args['command'] == 'delete':
try:
bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e))
elif args['command'] == 'view':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
return table
|
[
"def",
"_table_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'list'",
":",
"filter_",
"=",
"args",
"[",
"'filter'",
"]",
"if",
"args",
"[",
"'filter'",
"]",
"else",
"'*'",
"if",
"args",
"[",
"'dataset'",
"]",
":",
"if",
"args",
"[",
"'project'",
"]",
"is",
"None",
":",
"datasets",
"=",
"[",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'dataset'",
"]",
")",
"]",
"else",
":",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
"(",
"args",
"[",
"'project'",
"]",
",",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
".",
"credentials",
")",
"datasets",
"=",
"[",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'dataset'",
"]",
",",
"context",
")",
"]",
"else",
":",
"default_context",
"=",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
"(",
"default_context",
".",
"project_id",
",",
"default_context",
".",
"credentials",
")",
"if",
"args",
"[",
"'project'",
"]",
":",
"context",
".",
"set_project_id",
"(",
"args",
"[",
"'project'",
"]",
")",
"datasets",
"=",
"bigquery",
".",
"Datasets",
"(",
"context",
")",
"tables",
"=",
"[",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"tables",
".",
"extend",
"(",
"[",
"table",
".",
"full_name",
"for",
"table",
"in",
"dataset",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"table",
".",
"full_name",
",",
"filter_",
")",
"]",
")",
"return",
"_render_list",
"(",
"tables",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'create'",
":",
"if",
"cell_body",
"is",
"None",
":",
"print",
"(",
"'Failed to create %s: no schema specified'",
"%",
"args",
"[",
"'name'",
"]",
")",
"else",
":",
"try",
":",
"record",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"jsonschema",
".",
"validate",
"(",
"record",
",",
"BigQuerySchema",
".",
"TABLE_SCHEMA_SCHEMA",
")",
"schema",
"=",
"bigquery",
".",
"Schema",
"(",
"record",
"[",
"'schema'",
"]",
")",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"schema",
"=",
"schema",
",",
"overwrite",
"=",
"args",
"[",
"'overwrite'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'describe'",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"not",
"table",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"name",
")",
"html",
"=",
"_repr_html_table_schema",
"(",
"table",
".",
"schema",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'delete'",
":",
"try",
":",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'view'",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"not",
"table",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"name",
")",
"return",
"table"
] |
Implements the BigQuery table magic subcommand used to operate on tables
The supported syntax is:
%%bq tables <command> <args>
Commands:
{list, create, delete, describe, view}
Args:
args: the optional arguments following '%%bq tables command'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The HTML rendering for the table of datasets.
|
[
"Implements",
"the",
"BigQuery",
"table",
"magic",
"subcommand",
"used",
"to",
"operate",
"on",
"tables"
] |
python
|
train
|
mabuchilab/QNET
|
src/qnet/algebra/core/indexed_operations.py
|
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/indexed_operations.py#L100-L124
|
def doit(
self, classes=None, recursive=True, indices=None, max_terms=None,
**kwargs):
"""Write out the indexed sum explicitly
If `classes` is None or :class:`IndexedSum` is in `classes`,
(partially) write out the indexed sum in to an explicit sum of terms.
If `recursive` is True, write out each of the new sum's summands by
calling its :meth:`doit` method.
Args:
classes (None or list): see :meth:`.Expression.doit`
recursive (bool): see :meth:`.Expression.doit`
indices (list): List of :class:`IdxSym` indices for which the sum
should be expanded. If `indices` is a subset of the indices
over which the sum runs, it will be partially expanded. If not
given, expand the sum completely
max_terms (int): Number of terms after which to truncate the sum.
This is particularly useful for infinite sums. If not given,
expand all terms of the sum. Cannot be combined with `indices`
kwargs: keyword arguments for recursive calls to
:meth:`doit`. See :meth:`.Expression.doit`
"""
return super().doit(
classes, recursive, indices=indices, max_terms=max_terms, **kwargs)
|
[
"def",
"doit",
"(",
"self",
",",
"classes",
"=",
"None",
",",
"recursive",
"=",
"True",
",",
"indices",
"=",
"None",
",",
"max_terms",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
")",
".",
"doit",
"(",
"classes",
",",
"recursive",
",",
"indices",
"=",
"indices",
",",
"max_terms",
"=",
"max_terms",
",",
"*",
"*",
"kwargs",
")"
] |
Write out the indexed sum explicitly
If `classes` is None or :class:`IndexedSum` is in `classes`,
(partially) write out the indexed sum in to an explicit sum of terms.
If `recursive` is True, write out each of the new sum's summands by
calling its :meth:`doit` method.
Args:
classes (None or list): see :meth:`.Expression.doit`
recursive (bool): see :meth:`.Expression.doit`
indices (list): List of :class:`IdxSym` indices for which the sum
should be expanded. If `indices` is a subset of the indices
over which the sum runs, it will be partially expanded. If not
given, expand the sum completely
max_terms (int): Number of terms after which to truncate the sum.
This is particularly useful for infinite sums. If not given,
expand all terms of the sum. Cannot be combined with `indices`
kwargs: keyword arguments for recursive calls to
:meth:`doit`. See :meth:`.Expression.doit`
|
[
"Write",
"out",
"the",
"indexed",
"sum",
"explicitly"
] |
python
|
train
|
graphql-python/graphene
|
graphene/pyutils/version.py
|
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/version.py#L40-L50
|
def get_complete_version(version=None):
"""Returns a tuple of the graphene version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from graphene import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version
|
[
"def",
"get_complete_version",
"(",
"version",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"from",
"graphene",
"import",
"VERSION",
"as",
"version",
"else",
":",
"assert",
"len",
"(",
"version",
")",
"==",
"5",
"assert",
"version",
"[",
"3",
"]",
"in",
"(",
"\"alpha\"",
",",
"\"beta\"",
",",
"\"rc\"",
",",
"\"final\"",
")",
"return",
"version"
] |
Returns a tuple of the graphene version. If version argument is non-empty,
then checks for correctness of the tuple provided.
|
[
"Returns",
"a",
"tuple",
"of",
"the",
"graphene",
"version",
".",
"If",
"version",
"argument",
"is",
"non",
"-",
"empty",
"then",
"checks",
"for",
"correctness",
"of",
"the",
"tuple",
"provided",
"."
] |
python
|
train
|
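The tuple contract those assertions enforce, sketched with a made-up version value:

version = (2, 1, 0, 'final', 0)   # hypothetical (major, minor, micro, releaselevel, serial)
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
print(version)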
jlesquembre/jlle
|
jlle/releaser/vcs.py
|
https://github.com/jlesquembre/jlle/blob/3645d8f203708355853ef911f4b887ae4d794826/jlle/releaser/vcs.py#L121-L138
|
def history_file(self, location=None):
"""Return history file location.
"""
if location:
# Hardcoded location passed from the config file.
if os.path.exists(location):
return location
else:
logger.warn("The specified history file %s doesn't exist",
location)
filenames = []
for base in ['CHANGES', 'HISTORY', 'CHANGELOG']:
filenames.append(base)
for extension in ['rst', 'txt', 'markdown']:
filenames.append('.'.join([base, extension]))
history = self.filefind(filenames)
if history:
return history
|
[
"def",
"history_file",
"(",
"self",
",",
"location",
"=",
"None",
")",
":",
"if",
"location",
":",
"# Hardcoded location passed from the config file.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"location",
")",
":",
"return",
"location",
"else",
":",
"logger",
".",
"warn",
"(",
"\"The specified history file %s doesn't exist\"",
",",
"location",
")",
"filenames",
"=",
"[",
"]",
"for",
"base",
"in",
"[",
"'CHANGES'",
",",
"'HISTORY'",
",",
"'CHANGELOG'",
"]",
":",
"filenames",
".",
"append",
"(",
"base",
")",
"for",
"extension",
"in",
"[",
"'rst'",
",",
"'txt'",
",",
"'markdown'",
"]",
":",
"filenames",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"[",
"base",
",",
"extension",
"]",
")",
")",
"history",
"=",
"self",
".",
"filefind",
"(",
"filenames",
")",
"if",
"history",
":",
"return",
"history"
] |
Return history file location.
|
[
"Return",
"history",
"file",
"location",
"."
] |
python
|
train
|
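The candidate list the loops build, reproduced stand-alone; bare base names precede their extension variants, so CHANGES would win over CHANGES.rst if filefind honors list order (an assumption about filefind, which is not shown here):

filenames = []
for base in ['CHANGES', 'HISTORY', 'CHANGELOG']:
    filenames.append(base)
    for extension in ['rst', 'txt', 'markdown']:
        filenames.append('.'.join([base, extension]))
print(filenames[:4])   # ['CHANGES', 'CHANGES.rst', 'CHANGES.txt', 'CHANGES.markdown']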
Nukesor/pueue
|
pueue/daemon/daemon.py
|
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L319-L344
|
def send_status(self, payload):
"""Send the daemon status and the current queue for displaying."""
answer = {}
data = []
# Get daemon status
if self.paused:
answer['status'] = 'paused'
else:
answer['status'] = 'running'
# Add the current queue, or a message that the queue is empty
if len(self.queue) > 0:
data = deepcopy(self.queue.queue)
# Remove stderr and stdout output for transfer
# Some outputs are way too big for the socket buffer
# and this is not needed by the client
for key, item in data.items():
if 'stderr' in item:
del item['stderr']
if 'stdout' in item:
del item['stdout']
else:
data = 'Queue is empty'
answer['data'] = data
return answer
|
[
"def",
"send_status",
"(",
"self",
",",
"payload",
")",
":",
"answer",
"=",
"{",
"}",
"data",
"=",
"[",
"]",
"# Get daemon status",
"if",
"self",
".",
"paused",
":",
"answer",
"[",
"'status'",
"]",
"=",
"'paused'",
"else",
":",
"answer",
"[",
"'status'",
"]",
"=",
"'running'",
"# Add current queue or a message, that queue is empty",
"if",
"len",
"(",
"self",
".",
"queue",
")",
">",
"0",
":",
"data",
"=",
"deepcopy",
"(",
"self",
".",
"queue",
".",
"queue",
")",
"# Remove stderr and stdout output for transfer",
"# Some outputs are way to big for the socket buffer",
"# and this is not needed by the client",
"for",
"key",
",",
"item",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"'stderr'",
"in",
"item",
":",
"del",
"item",
"[",
"'stderr'",
"]",
"if",
"'stdout'",
"in",
"item",
":",
"del",
"item",
"[",
"'stdout'",
"]",
"else",
":",
"data",
"=",
"'Queue is empty'",
"answer",
"[",
"'data'",
"]",
"=",
"data",
"return",
"answer"
] |
Send the daemon status and the current queue for displaying.
|
[
"Send",
"the",
"daemon",
"status",
"and",
"the",
"current",
"queue",
"for",
"displaying",
"."
] |
python
|
train
|
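A sketch of the copy-then-strip step with a toy queue (contents invented); dict.pop with a default collapses each if/del pair above into one call:

from copy import deepcopy

queue = {0: {'command': 'ls', 'stdout': 'x' * 10**6, 'stderr': ''}}
data = deepcopy(queue)     # the daemon's own copy stays intact
for key, item in data.items():
    item.pop('stdout', None)
    item.pop('stderr', None)
print(sorted(queue[0]))    # ['command', 'stderr', 'stdout']
print(sorted(data[0]))     # ['command']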
pvlib/pvlib-python
|
pvlib/pvsystem.py
|
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L1825-L1908
|
def sapm_celltemp(poa_global, wind_speed, temp_air,
model='open_rack_cell_glassback'):
'''
Estimate cell and module temperatures per the Sandia PV Array
Performance Model (SAPM, SAND2004-3535), from the incident
irradiance, wind speed, ambient temperature, and SAPM module
parameters.
Parameters
----------
poa_global : float or Series
Total incident irradiance in W/m^2.
wind_speed : float or Series
Wind speed in m/s at a height of 10 meters.
temp_air : float or Series
Ambient dry bulb temperature in degrees C.
model : string, list, or dict, default 'open_rack_cell_glassback'
Model to be used.
If string, can be:
* 'open_rack_cell_glassback' (default)
* 'roof_mount_cell_glassback'
* 'open_rack_cell_polymerback'
* 'insulated_back_polymerback'
* 'open_rack_polymer_thinfilm_steel'
* '22x_concentrator_tracker'
If dict, supply the following parameters
(if list, in the following order):
* a : float
SAPM module parameter for establishing the upper
limit for module temperature at low wind speeds and
high solar irradiance.
* b : float
SAPM module parameter for establishing the rate at
which the module temperature drops as wind speed increases
(see SAPM eqn. 11).
* deltaT : float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the
reference irradiance, E0.
Returns
--------
DataFrame with columns 'temp_cell' and 'temp_module'.
Values in degrees C.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
See Also
--------
sapm
'''
temp_models = TEMP_MODEL_PARAMS['sapm']
if isinstance(model, str):
model = temp_models[model.lower()]
elif isinstance(model, (dict, pd.Series)):
model = [model['a'], model['b'], model['deltaT']]
a = model[0]
b = model[1]
deltaT = model[2]
E0 = 1000. # Reference irradiance
temp_module = pd.Series(poa_global * np.exp(a + b * wind_speed) + temp_air)
temp_cell = temp_module + (poa_global / E0) * (deltaT)
return pd.DataFrame({'temp_cell': temp_cell, 'temp_module': temp_module})
|
[
"def",
"sapm_celltemp",
"(",
"poa_global",
",",
"wind_speed",
",",
"temp_air",
",",
"model",
"=",
"'open_rack_cell_glassback'",
")",
":",
"temp_models",
"=",
"TEMP_MODEL_PARAMS",
"[",
"'sapm'",
"]",
"if",
"isinstance",
"(",
"model",
",",
"str",
")",
":",
"model",
"=",
"temp_models",
"[",
"model",
".",
"lower",
"(",
")",
"]",
"elif",
"isinstance",
"(",
"model",
",",
"(",
"dict",
",",
"pd",
".",
"Series",
")",
")",
":",
"model",
"=",
"[",
"model",
"[",
"'a'",
"]",
",",
"model",
"[",
"'b'",
"]",
",",
"model",
"[",
"'deltaT'",
"]",
"]",
"a",
"=",
"model",
"[",
"0",
"]",
"b",
"=",
"model",
"[",
"1",
"]",
"deltaT",
"=",
"model",
"[",
"2",
"]",
"E0",
"=",
"1000.",
"# Reference irradiance",
"temp_module",
"=",
"pd",
".",
"Series",
"(",
"poa_global",
"*",
"np",
".",
"exp",
"(",
"a",
"+",
"b",
"*",
"wind_speed",
")",
"+",
"temp_air",
")",
"temp_cell",
"=",
"temp_module",
"+",
"(",
"poa_global",
"/",
"E0",
")",
"*",
"(",
"deltaT",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'temp_cell'",
":",
"temp_cell",
",",
"'temp_module'",
":",
"temp_module",
"}",
")"
] |
Estimate cell and module temperatures per the Sandia PV Array
Performance Model (SAPM, SAND2004-3535), from the incident
irradiance, wind speed, ambient temperature, and SAPM module
parameters.
Parameters
----------
poa_global : float or Series
Total incident irradiance in W/m^2.
wind_speed : float or Series
Wind speed in m/s at a height of 10 meters.
temp_air : float or Series
Ambient dry bulb temperature in degrees C.
model : string, list, or dict, default 'open_rack_cell_glassback'
Model to be used.
If string, can be:
* 'open_rack_cell_glassback' (default)
* 'roof_mount_cell_glassback'
* 'open_rack_cell_polymerback'
* 'insulated_back_polymerback'
* 'open_rack_polymer_thinfilm_steel'
* '22x_concentrator_tracker'
If dict, supply the following parameters
(if list, in the following order):
* a : float
SAPM module parameter for establishing the upper
limit for module temperature at low wind speeds and
high solar irradiance.
* b : float
SAPM module parameter for establishing the rate at
which the module temperature drops as wind speed increases
(see SAPM eqn. 11).
* deltaT : float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the
reference irradiance, E0.
Returns
--------
DataFrame with columns 'temp_cell' and 'temp_module'.
Values in degrees C.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
See Also
--------
sapm
|
[
"Estimate",
"cell",
"and",
"module",
"temperatures",
"per",
"the",
"Sandia",
"PV",
"Array",
"Performance",
"Model",
"(",
"SAPM",
"SAND2004",
"-",
"3535",
")",
"from",
"the",
"incident",
"irradiance",
"wind",
"speed",
"ambient",
"temperature",
"and",
"SAPM",
"module",
"parameters",
"."
] |
python
|
train
|
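The arithmetic with plain floats, using the published SAPM open_rack_cell_glassback constants (a, b, deltaT quoted from SAND2004-3535; treat the exact values as illustrative):

import numpy as np

a, b, deltaT = -3.47, -0.0594, 3
poa_global, wind_speed, temp_air, E0 = 800., 5., 20., 1000.

temp_module = poa_global * np.exp(a + b * wind_speed) + temp_air
temp_cell = temp_module + (poa_global / E0) * deltaT
print(round(temp_module, 1), round(temp_cell, 1))   # roughly 38.5 40.9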
theonion/django-bulbs
|
bulbs/utils/vault.py
|
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/utils/vault.py#L15-L27
|
def read(path):
"""Read a secret from Vault REST endpoint"""
url = '{}/{}/{}'.format(settings.VAULT_BASE_URL.rstrip('/'),
settings.VAULT_BASE_SECRET_PATH.strip('/'),
path.lstrip('/'))
headers = {'X-Vault-Token': settings.VAULT_ACCESS_TOKEN}
resp = requests.get(url, headers=headers)
if resp.ok:
return resp.json()['data']
else:
log.error('Failed VAULT GET request: %s %s', resp.status_code, resp.text)
raise Exception('Failed Vault GET request: {} {}'.format(resp.status_code, resp.text))
|
[
"def",
"read",
"(",
"path",
")",
":",
"url",
"=",
"'{}/{}/{}'",
".",
"format",
"(",
"settings",
".",
"VAULT_BASE_URL",
".",
"rstrip",
"(",
"'/'",
")",
",",
"settings",
".",
"VAULT_BASE_SECRET_PATH",
".",
"strip",
"(",
"'/'",
")",
",",
"path",
".",
"lstrip",
"(",
"'/'",
")",
")",
"headers",
"=",
"{",
"'X-Vault-Token'",
":",
"settings",
".",
"VAULT_ACCESS_TOKEN",
"}",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"resp",
".",
"ok",
":",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"'data'",
"]",
"else",
":",
"log",
".",
"error",
"(",
"'Failed VAULT GET request: %s %s'",
",",
"resp",
".",
"status_code",
",",
"resp",
".",
"text",
")",
"raise",
"Exception",
"(",
"'Failed Vault GET request: {} {}'",
".",
"format",
"(",
"resp",
".",
"status_code",
",",
"resp",
".",
"text",
")",
")"
] |
Read a secret from Vault REST endpoint
|
[
"Read",
"a",
"secret",
"from",
"Vault",
"REST",
"endpoint"
] |
python
|
train
|
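The rstrip/strip/lstrip calls are what keep the joined URL free of doubled slashes; a sketch with hypothetical settings values:

VAULT_BASE_URL = 'https://vault.example.com/v1/'   # hypothetical
VAULT_BASE_SECRET_PATH = '/secret/apps/'           # hypothetical
path = '/database/creds'

url = '{}/{}/{}'.format(VAULT_BASE_URL.rstrip('/'),
                        VAULT_BASE_SECRET_PATH.strip('/'),
                        path.lstrip('/'))
print(url)   # https://vault.example.com/v1/secret/apps/database/creds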
pyblish/pyblish-qml
|
pyblish_qml/ipc/formatting.py
|
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/ipc/formatting.py#L153-L185
|
def format_instance(instance):
"""Serialise `instance`
For children to be visualised and modified,
they must provide an appropriate implementation
of __str__.
Data that isn't JSON compatible cannot be
visualised nor modified.
Attributes:
name (str): Name of instance
niceName (str, optional): Nice name of instance
family (str): Name of compatible family
data (dict, optional): Associated data
publish (bool): Whether or not instance should be published
Returns:
Dictionary of JSON-compatible instance
"""
instance = {
"name": instance.name,
"id": instance.id,
"data": format_data(instance.data),
"children": list(),
}
if os.getenv("PYBLISH_SAFE"):
schema.validate(instance, "instance")
return instance
|
[
"def",
"format_instance",
"(",
"instance",
")",
":",
"instance",
"=",
"{",
"\"name\"",
":",
"instance",
".",
"name",
",",
"\"id\"",
":",
"instance",
".",
"id",
",",
"\"data\"",
":",
"format_data",
"(",
"instance",
".",
"data",
")",
",",
"\"children\"",
":",
"list",
"(",
")",
",",
"}",
"if",
"os",
".",
"getenv",
"(",
"\"PYBLISH_SAFE\"",
")",
":",
"schema",
".",
"validate",
"(",
"instance",
",",
"\"instance\"",
")",
"return",
"instance"
] |
Serialise `instance`
For children to be visualised and modified,
they must provide an appropriate implementation
of __str__.
Data that isn't JSON compatible cannot be
visualised nor modified.
Attributes:
name (str): Name of instance
niceName (str, optional): Nice name of instance
family (str): Name of compatible family
data (dict, optional): Associated data
publish (bool): Whether or not instance should be published
Returns:
Dictionary of JSON-compatible instance
|
[
"Serialise",
"instance"
] |
python
|
train
|
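For readers unfamiliar with the serialised shape, here is a standalone sketch of what format_instance() produces; the stand-in instance class is hypothetical, and dict() stands in for format_data(), which additionally makes values JSON-safe.
# Hypothetical stand-in for a pyblish Instance; not the real class.
class FakeInstance:
    name = "shot010"
    id = "instance-1"
    data = {"family": "render", "publish": True}

inst = FakeInstance()
serialised = {
    "name": inst.name,
    "id": inst.id,
    "data": dict(inst.data),  # format_data() would JSON-sanitise here
    "children": [],
}
assert serialised["children"] == []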
saltstack/salt
|
salt/states/grafana4_org.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana4_org.py#L220-L251
|
def absent(name, profile='grafana'):
'''
Ensure that an org is absent.
name
Name of the org to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
org = __salt__['grafana4.get_org'](name, profile)
if not org:
ret['result'] = True
ret['comment'] = 'Org {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Org {0} will be deleted'.format(name)
return ret
__salt__['grafana4.delete_org'](org['id'], profile=profile)
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'Org {0} was deleted'.format(name)
return ret
|
[
"def",
"absent",
"(",
"name",
",",
"profile",
"=",
"'grafana'",
")",
":",
"if",
"isinstance",
"(",
"profile",
",",
"string_types",
")",
":",
"profile",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"profile",
")",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"None",
",",
"'changes'",
":",
"{",
"}",
"}",
"org",
"=",
"__salt__",
"[",
"'grafana4.get_org'",
"]",
"(",
"name",
",",
"profile",
")",
"if",
"not",
"org",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Org {0} already absent'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Org {0} will be deleted'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"__salt__",
"[",
"'grafana4.delete_org'",
"]",
"(",
"org",
"[",
"'id'",
"]",
",",
"profile",
"=",
"profile",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Absent'",
"ret",
"[",
"'comment'",
"]",
"=",
"'Org {0} was deleted'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] |
Ensure that an org is absent.
name
Name of the org to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
|
[
"Ensure",
"that",
"a",
"org",
"is",
"present",
"."
] |
python
|
train
|
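A sketch of the state-return convention the absent() function follows; the org name and comment values are illustrative.
# Illustrative lifecycle of the ret dict in absent(); 'myorg' is made up.
ret = {'name': 'myorg', 'result': None, 'comment': None, 'changes': {}}
# test mode: only the comment is filled in, result stays None
ret['comment'] = 'Org {0} will be deleted'.format(ret['name'])
# real run: the org is deleted and the change is recorded
ret.update({'result': True,
            'changes': {ret['name']: 'Absent'},
            'comment': 'Org {0} was deleted'.format(ret['name'])})
assert ret['changes'] == {'myorg': 'Absent'}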
libChEBI/libChEBIpy
|
libchebipy/_parsers.py
|
https://github.com/libChEBI/libChEBIpy/blob/89f223a91f518619d5e3910070d283adcac1626e/libchebipy/_parsers.py#L83-L88
|
def get_mass(chebi_id):
'''Returns mass'''
if len(__MASSES) == 0:
__parse_chemical_data()
return __MASSES[chebi_id] if chebi_id in __MASSES else float('NaN')
|
[
"def",
"get_mass",
"(",
"chebi_id",
")",
":",
"if",
"len",
"(",
"__MASSES",
")",
"==",
"0",
":",
"__parse_chemical_data",
"(",
")",
"return",
"__MASSES",
"[",
"chebi_id",
"]",
"if",
"chebi_id",
"in",
"__MASSES",
"else",
"float",
"(",
"'NaN'",
")"
] |
Returns mass
|
[
"Returns",
"mass"
] |
python
|
train
|
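Since get_mass() falls back to float('NaN') for unknown IDs, callers should test with math.isnan rather than equality; a short sketch:
import math

# What get_mass() returns for an ID missing from __MASSES (assumed input).
mass = float('NaN')
assert math.isnan(mass)
assert mass != mass  # NaN never compares equal, so == checks silently fail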
sbarham/dsrt
|
dsrt/data/SampleSet.py
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/data/SampleSet.py#L48-L63
|
def log(self, priority, msg):
"""
Just a wrapper, for convenience.
NB1: priority may be set to one of:
- CRITICAL [50]
- ERROR [40]
- WARNING [30]
- INFO [20]
- DEBUG [10]
- NOTSET [0]
Anything else defaults to [20]
NB2: the levelmap is a defaultdict stored in Config; it maps priority
strings onto integers
"""
# self.logger.log(self.config.levelmap[priority], msg)
self.logger.log(logging.CRITICAL, msg)
|
[
"def",
"log",
"(",
"self",
",",
"priority",
",",
"msg",
")",
":",
"# self.logger.log(self.config.levelmap[priority], msg)",
"self",
".",
"logger",
".",
"log",
"(",
"logging",
".",
"CRITICAL",
",",
"msg",
")"
] |
Just a wrapper, for convenience.
NB1: priority may be set to one of:
- CRITICAL [50]
- ERROR [40]
- WARNING [30]
- INFO [20]
- DEBUG [10]
- NOTSET [0]
Anything else defaults to [20]
NB2: the levelmap is a defaultdict stored in Config; it maps priority
strings onto integers
|
[
"Just",
"a",
"wrapper",
"for",
"convenience",
".",
"NB1",
":",
"priority",
"may",
"be",
"set",
"to",
"one",
"of",
":",
"-",
"CRITICAL",
"[",
"50",
"]",
"-",
"ERROR",
"[",
"40",
"]",
"-",
"WARNING",
"[",
"30",
"]",
"-",
"INFO",
"[",
"20",
"]",
"-",
"DEBUG",
"[",
"10",
"]",
"-",
"NOTSET",
"[",
"0",
"]",
"Anything",
"else",
"defaults",
"to",
"[",
"20",
"]",
"NB2",
":",
"the",
"levelmap",
"is",
"a",
"defaultdict",
"stored",
"in",
"Config",
";",
"it",
"maps",
"priority",
"strings",
"onto",
"integers"
] |
python
|
train
|
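A sketch of the levelmap described in NB2 above: a defaultdict from priority names to logging levels, defaulting to INFO (20). The exact contents of dsrt's Config are assumed here.
import logging
from collections import defaultdict

# Assumed shape of config.levelmap; anything unknown defaults to 20 (INFO).
levelmap = defaultdict(lambda: logging.INFO,
                       CRITICAL=50, ERROR=40, WARNING=30,
                       INFO=20, DEBUG=10, NOTSET=0)
assert levelmap['DEBUG'] == 10
assert levelmap['bogus'] == 20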
jkokorian/pyqt2waybinding
|
pyqt2waybinding/__init__.py
|
https://github.com/jkokorian/pyqt2waybinding/blob/fb1fb84f55608cfbf99c6486650100ba81743117/pyqt2waybinding/__init__.py#L168-L188
|
def _updateEndpoints(self,*args,**kwargs):
"""
Updates all endpoints except the one from which this slot was called.
Note: this method is probably not completely threadsafe. A lock may be needed when setting self.ignoreEvents.
"""
sender = self.sender()
if not self.ignoreEvents:
self.ignoreEvents = True
for binding in self.bindings.values():
if binding.instanceId == id(sender):
continue
if args:
binding.setter(*args,**kwargs)
else:
binding.setter(self.bindings[id(sender)].getter())
self.ignoreEvents = False
|
[
"def",
"_updateEndpoints",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"sender",
"=",
"self",
".",
"sender",
"(",
")",
"if",
"not",
"self",
".",
"ignoreEvents",
":",
"self",
".",
"ignoreEvents",
"=",
"True",
"for",
"binding",
"in",
"self",
".",
"bindings",
".",
"values",
"(",
")",
":",
"if",
"binding",
".",
"instanceId",
"==",
"id",
"(",
"sender",
")",
":",
"continue",
"if",
"args",
":",
"binding",
".",
"setter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"binding",
".",
"setter",
"(",
"self",
".",
"bindings",
"[",
"id",
"(",
"sender",
")",
"]",
".",
"getter",
"(",
")",
")",
"self",
".",
"ignoreEvents",
"=",
"False"
] |
Updates all endpoints except the one from which this slot was called.
Note: this method is probably not completely threadsafe. A lock may be needed when setting self.ignoreEvents.
|
[
"Updates",
"all",
"endpoints",
"except",
"the",
"one",
"from",
"which",
"this",
"slot",
"was",
"called",
"."
] |
python
|
train
|
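The ignoreEvents flag above is a re-entrancy guard; a Qt-free sketch of the same pattern (all names below are illustrative):
class Hub:
    """Fan a value out to endpoints while suppressing echo updates."""
    def __init__(self):
        self.ignoreEvents = False
        self.setters = []

    def update(self, value):
        if self.ignoreEvents:
            return  # an endpoint echoed the change back; ignore it
        self.ignoreEvents = True
        for setter in self.setters:
            setter(value)
        self.ignoreEvents = False

hub = Hub()
seen = []
hub.setters.append(seen.append)
hub.setters.append(lambda v: hub.update(v))  # endpoint that echoes
hub.update(42)
assert seen == [42]  # the echo was swallowed by the guard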
jobovy/galpy
|
galpy/potential/SpiralArmsPotential.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SpiralArmsPotential.py#L248-L335
|
def _R2deriv(self, R, z, phi=0, t=0):
"""
NAME:
_R2deriv
PURPOSE:
Evaluate the second (cylindrical) radial derivative of the potential.
(d^2 potential / d R^2)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the second radial derivative
HISTORY:
2017-05-31 Jack Hong (UBC)
"""
Rs = self._Rs
He = self._H * np.exp(-(R-self._r_ref)/self._Rs)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
dKs_dR = self._dK_dR(R)
dBs_dR = self._dB_dR(R)
dDs_dR = self._dD_dR(R)
R_sina = R * self._sin_alpha
HNn_R_sina = self._HNn / R_sina
HNn_R_sina_2 = HNn_R_sina**2
x = R * (0.3 * HNn_R_sina + 1) * self._sin_alpha
d2Ks_dR2 = 2 * self._N * self._ns / R**3 / self._sin_alpha
d2Bs_dR2 = HNn_R_sina / R**2 * (2.4 * HNn_R_sina + 2)
d2Ds_dR2 = self._sin_alpha / R / x * (self._HNn* (0.18 * self._HNn * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x**2
+ 2 / R_sina
- 0.6 * HNn_R_sina * (1 + 0.6 * HNn_R_sina) / x
- 0.6 * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x
+ 1.8 * self._HNn / R_sina**2))
g = self._gamma(R, phi - self._omega * t)
dg_dR = self._dgamma_dR(R)
d2g_dR2 = self._N / R**2 / self._tan_alpha
sin_ng = np.sin(self._ns * g)
cos_ng = np.cos(self._ns * g)
zKB = z * Ks / Bs
sechzKB = 1 / np.cosh(zKB)
sechzKB_Bs = sechzKB**Bs
log_sechzKB = np.log(sechzKB)
tanhzKB = np.tanh(zKB)
ztanhzKB = z * tanhzKB
return -He / Rs * (np.sum(self._Cs * sechzKB_Bs / Ds
* ((self._ns * dg_dR / Ks * sin_ng
+ cos_ng * (ztanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR / Ks * log_sechzKB
+ dKs_dR / Ks**2
+ dDs_dR / Ds / Ks))
- (Rs * (1 / Ks * ((ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR)
+ log_sechzKB * dBs_dR)
- dDs_dR / Ds) * (self._ns * dg_dR * sin_ng
+ cos_ng * (ztanhzKB * Ks * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR * log_sechzKB
+ dKs_dR / Ks
+ dDs_dR / Ds))
+ (self._ns * (sin_ng * (d2g_dR2 / Ks - dg_dR / Ks**2 * dKs_dR)
+ dg_dR**2 / Ks * cos_ng * self._ns)
+ z * (-sin_ng * self._ns * dg_dR * tanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
+ cos_ng * (z * (dKs_dR/Bs - dBs_dR/Bs**2 * Ks) * (1-tanhzKB**2) * (dKs_dR/Ks - dBs_dR/Bs)
+ tanhzKB * (d2Ks_dR2/Ks-(dKs_dR/Ks)**2 - d2Bs_dR2/Bs + (dBs_dR/Bs)**2)))
+ (cos_ng * (dBs_dR/Ks * ztanhzKB * (dKs_dR/Bs - dBs_dR/Bs**2*Ks)
-(d2Bs_dR2/Ks-dBs_dR*dKs_dR/Ks**2) * log_sechzKB)
+ dBs_dR/Ks * log_sechzKB * sin_ng * self._ns * dg_dR)
+ ((cos_ng * (d2Ks_dR2 / Ks**2 - 2 * dKs_dR**2 / Ks**3)
- dKs_dR / Ks**2 * sin_ng * self._ns * dg_dR)
+ (cos_ng * (d2Ds_dR2 / Ds / Ks
- (dDs_dR/Ds)**2 / Ks
- dDs_dR / Ds / Ks**2 * dKs_dR)
- sin_ng * self._ns * dg_dR * dDs_dR / Ds / Ks))))
- 1 / Ks * (cos_ng / Rs
+ (cos_ng * ((dDs_dR * Ks + Ds * dKs_dR) / (Ds * Ks)
- (ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR)
+ log_sechzKB * dBs_dR))
+ sin_ng * self._ns * dg_dR))))))
|
[
"def",
"_R2deriv",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0",
",",
"t",
"=",
"0",
")",
":",
"Rs",
"=",
"self",
".",
"_Rs",
"He",
"=",
"self",
".",
"_H",
"*",
"np",
".",
"exp",
"(",
"-",
"(",
"R",
"-",
"self",
".",
"_r_ref",
")",
"/",
"self",
".",
"_Rs",
")",
"Ks",
"=",
"self",
".",
"_K",
"(",
"R",
")",
"Bs",
"=",
"self",
".",
"_B",
"(",
"R",
")",
"Ds",
"=",
"self",
".",
"_D",
"(",
"R",
")",
"dKs_dR",
"=",
"self",
".",
"_dK_dR",
"(",
"R",
")",
"dBs_dR",
"=",
"self",
".",
"_dB_dR",
"(",
"R",
")",
"dDs_dR",
"=",
"self",
".",
"_dD_dR",
"(",
"R",
")",
"R_sina",
"=",
"R",
"*",
"self",
".",
"_sin_alpha",
"HNn_R_sina",
"=",
"self",
".",
"_HNn",
"/",
"R_sina",
"HNn_R_sina_2",
"=",
"HNn_R_sina",
"**",
"2",
"x",
"=",
"R",
"*",
"(",
"0.3",
"*",
"HNn_R_sina",
"+",
"1",
")",
"*",
"self",
".",
"_sin_alpha",
"d2Ks_dR2",
"=",
"2",
"*",
"self",
".",
"_N",
"*",
"self",
".",
"_ns",
"/",
"R",
"**",
"3",
"/",
"self",
".",
"_sin_alpha",
"d2Bs_dR2",
"=",
"HNn_R_sina",
"/",
"R",
"**",
"2",
"*",
"(",
"2.4",
"*",
"HNn_R_sina",
"+",
"2",
")",
"d2Ds_dR2",
"=",
"self",
".",
"_sin_alpha",
"/",
"R",
"/",
"x",
"*",
"(",
"self",
".",
"_HNn",
"*",
"(",
"0.18",
"*",
"self",
".",
"_HNn",
"*",
"(",
"HNn_R_sina",
"+",
"0.3",
"*",
"HNn_R_sina_2",
"+",
"1",
")",
"/",
"x",
"**",
"2",
"+",
"2",
"/",
"R_sina",
"-",
"0.6",
"*",
"HNn_R_sina",
"*",
"(",
"1",
"+",
"0.6",
"*",
"HNn_R_sina",
")",
"/",
"x",
"-",
"0.6",
"*",
"(",
"HNn_R_sina",
"+",
"0.3",
"*",
"HNn_R_sina_2",
"+",
"1",
")",
"/",
"x",
"+",
"1.8",
"*",
"self",
".",
"_HNn",
"/",
"R_sina",
"**",
"2",
")",
")",
"g",
"=",
"self",
".",
"_gamma",
"(",
"R",
",",
"phi",
"-",
"self",
".",
"_omega",
"*",
"t",
")",
"dg_dR",
"=",
"self",
".",
"_dgamma_dR",
"(",
"R",
")",
"d2g_dR2",
"=",
"self",
".",
"_N",
"/",
"R",
"**",
"2",
"/",
"self",
".",
"_tan_alpha",
"sin_ng",
"=",
"np",
".",
"sin",
"(",
"self",
".",
"_ns",
"*",
"g",
")",
"cos_ng",
"=",
"np",
".",
"cos",
"(",
"self",
".",
"_ns",
"*",
"g",
")",
"zKB",
"=",
"z",
"*",
"Ks",
"/",
"Bs",
"sechzKB",
"=",
"1",
"/",
"np",
".",
"cosh",
"(",
"zKB",
")",
"sechzKB_Bs",
"=",
"sechzKB",
"**",
"Bs",
"log_sechzKB",
"=",
"np",
".",
"log",
"(",
"sechzKB",
")",
"tanhzKB",
"=",
"np",
".",
"tanh",
"(",
"zKB",
")",
"ztanhzKB",
"=",
"z",
"*",
"tanhzKB",
"return",
"-",
"He",
"/",
"Rs",
"*",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"_Cs",
"*",
"sechzKB_Bs",
"/",
"Ds",
"*",
"(",
"(",
"self",
".",
"_ns",
"*",
"dg_dR",
"/",
"Ks",
"*",
"sin_ng",
"+",
"cos_ng",
"*",
"(",
"ztanhzKB",
"*",
"(",
"dKs_dR",
"/",
"Ks",
"-",
"dBs_dR",
"/",
"Bs",
")",
"-",
"dBs_dR",
"/",
"Ks",
"*",
"log_sechzKB",
"+",
"dKs_dR",
"/",
"Ks",
"**",
"2",
"+",
"dDs_dR",
"/",
"Ds",
"/",
"Ks",
")",
")",
"-",
"(",
"Rs",
"*",
"(",
"1",
"/",
"Ks",
"*",
"(",
"(",
"ztanhzKB",
"*",
"(",
"dBs_dR",
"/",
"Bs",
"*",
"Ks",
"-",
"dKs_dR",
")",
"+",
"log_sechzKB",
"*",
"dBs_dR",
")",
"-",
"dDs_dR",
"/",
"Ds",
")",
"*",
"(",
"self",
".",
"_ns",
"*",
"dg_dR",
"*",
"sin_ng",
"+",
"cos_ng",
"*",
"(",
"ztanhzKB",
"*",
"Ks",
"*",
"(",
"dKs_dR",
"/",
"Ks",
"-",
"dBs_dR",
"/",
"Bs",
")",
"-",
"dBs_dR",
"*",
"log_sechzKB",
"+",
"dKs_dR",
"/",
"Ks",
"+",
"dDs_dR",
"/",
"Ds",
")",
")",
"+",
"(",
"self",
".",
"_ns",
"*",
"(",
"sin_ng",
"*",
"(",
"d2g_dR2",
"/",
"Ks",
"-",
"dg_dR",
"/",
"Ks",
"**",
"2",
"*",
"dKs_dR",
")",
"+",
"dg_dR",
"**",
"2",
"/",
"Ks",
"*",
"cos_ng",
"*",
"self",
".",
"_ns",
")",
"+",
"z",
"*",
"(",
"-",
"sin_ng",
"*",
"self",
".",
"_ns",
"*",
"dg_dR",
"*",
"tanhzKB",
"*",
"(",
"dKs_dR",
"/",
"Ks",
"-",
"dBs_dR",
"/",
"Bs",
")",
"+",
"cos_ng",
"*",
"(",
"z",
"*",
"(",
"dKs_dR",
"/",
"Bs",
"-",
"dBs_dR",
"/",
"Bs",
"**",
"2",
"*",
"Ks",
")",
"*",
"(",
"1",
"-",
"tanhzKB",
"**",
"2",
")",
"*",
"(",
"dKs_dR",
"/",
"Ks",
"-",
"dBs_dR",
"/",
"Bs",
")",
"+",
"tanhzKB",
"*",
"(",
"d2Ks_dR2",
"/",
"Ks",
"-",
"(",
"dKs_dR",
"/",
"Ks",
")",
"**",
"2",
"-",
"d2Bs_dR2",
"/",
"Bs",
"+",
"(",
"dBs_dR",
"/",
"Bs",
")",
"**",
"2",
")",
")",
")",
"+",
"(",
"cos_ng",
"*",
"(",
"dBs_dR",
"/",
"Ks",
"*",
"ztanhzKB",
"*",
"(",
"dKs_dR",
"/",
"Bs",
"-",
"dBs_dR",
"/",
"Bs",
"**",
"2",
"*",
"Ks",
")",
"-",
"(",
"d2Bs_dR2",
"/",
"Ks",
"-",
"dBs_dR",
"*",
"dKs_dR",
"/",
"Ks",
"**",
"2",
")",
"*",
"log_sechzKB",
")",
"+",
"dBs_dR",
"/",
"Ks",
"*",
"log_sechzKB",
"*",
"sin_ng",
"*",
"self",
".",
"_ns",
"*",
"dg_dR",
")",
"+",
"(",
"(",
"cos_ng",
"*",
"(",
"d2Ks_dR2",
"/",
"Ks",
"**",
"2",
"-",
"2",
"*",
"dKs_dR",
"**",
"2",
"/",
"Ks",
"**",
"3",
")",
"-",
"dKs_dR",
"/",
"Ks",
"**",
"2",
"*",
"sin_ng",
"*",
"self",
".",
"_ns",
"*",
"dg_dR",
")",
"+",
"(",
"cos_ng",
"*",
"(",
"d2Ds_dR2",
"/",
"Ds",
"/",
"Ks",
"-",
"(",
"dDs_dR",
"/",
"Ds",
")",
"**",
"2",
"/",
"Ks",
"-",
"dDs_dR",
"/",
"Ds",
"/",
"Ks",
"**",
"2",
"*",
"dKs_dR",
")",
"-",
"sin_ng",
"*",
"self",
".",
"_ns",
"*",
"dg_dR",
"*",
"dDs_dR",
"/",
"Ds",
"/",
"Ks",
")",
")",
")",
")",
"-",
"1",
"/",
"Ks",
"*",
"(",
"cos_ng",
"/",
"Rs",
"+",
"(",
"cos_ng",
"*",
"(",
"(",
"dDs_dR",
"*",
"Ks",
"+",
"Ds",
"*",
"dKs_dR",
")",
"/",
"(",
"Ds",
"*",
"Ks",
")",
"-",
"(",
"ztanhzKB",
"*",
"(",
"dBs_dR",
"/",
"Bs",
"*",
"Ks",
"-",
"dKs_dR",
")",
"+",
"log_sechzKB",
"*",
"dBs_dR",
")",
")",
"+",
"sin_ng",
"*",
"self",
".",
"_ns",
"*",
"dg_dR",
")",
")",
")",
")",
")",
")"
] |
NAME:
_R2deriv
PURPOSE:
Evaluate the second (cylindrical) radial derivative of the potential.
(d^2 potential / d R^2)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the second radial derivative
HISTORY:
2017-05-31 Jack Hong (UBC)
|
[
"NAME",
":",
"_R2deriv",
"PURPOSE",
":",
"Evaluate",
"the",
"second",
"(",
"cylindrical",
")",
"radial",
"derivative",
"of",
"the",
"potential",
".",
"(",
"d^2",
"potential",
"/",
"d",
"R^2",
")",
"INPUT",
":",
":",
"param",
"R",
":",
"galactocentric",
"cylindrical",
"radius",
"(",
"must",
"be",
"scalar",
"not",
"array",
")",
":",
"param",
"z",
":",
"vertical",
"height",
"(",
"must",
"be",
"scalar",
"not",
"array",
")",
":",
"param",
"phi",
":",
"azimuth",
"(",
"must",
"be",
"scalar",
"not",
"array",
")",
":",
"param",
"t",
":",
"time",
"(",
"must",
"be",
"scalar",
"not",
"array",
")",
"OUTPUT",
":",
":",
"return",
":",
"the",
"second",
"radial",
"derivative",
"HISTORY",
":",
"2017",
"-",
"05",
"-",
"31",
"Jack",
"Hong",
"(",
"UBC",
")"
] |
python
|
train
|
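An expression this long is easiest to sanity-check numerically; a hedged sketch comparing a second derivative against a central finite difference, using a toy quadratic potential rather than the spiral-arm one:
# Toy stand-in potential; _R2deriv itself could be checked the same way.
def potential(R):
    return 0.5 * R ** 2

def d2_dR2(f, R, h=1e-4):
    # central second-order finite difference
    return (f(R + h) - 2.0 * f(R) + f(R - h)) / h ** 2

assert abs(d2_dR2(potential, 1.3) - 1.0) < 1e-5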
pyQode/pyqode.core
|
pyqode/core/widgets/tabs.py
|
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/tabs.py#L89-L101
|
def close_others(self):
"""
Closes every editor tab except the current one.
"""
current_widget = self.currentWidget()
self._try_close_dirty_tabs(exept=current_widget)
i = 0
while self.count() > 1:
widget = self.widget(i)
if widget != current_widget:
self.removeTab(i)
else:
i = 1
|
[
"def",
"close_others",
"(",
"self",
")",
":",
"current_widget",
"=",
"self",
".",
"currentWidget",
"(",
")",
"self",
".",
"_try_close_dirty_tabs",
"(",
"exept",
"=",
"current_widget",
")",
"i",
"=",
"0",
"while",
"self",
".",
"count",
"(",
")",
">",
"1",
":",
"widget",
"=",
"self",
".",
"widget",
"(",
"i",
")",
"if",
"widget",
"!=",
"current_widget",
":",
"self",
".",
"removeTab",
"(",
"i",
")",
"else",
":",
"i",
"=",
"1"
] |
Closes every editor tab except the current one.
|
[
"Closes",
"every",
"editors",
"tabs",
"except",
"the",
"current",
"one",
"."
] |
python
|
train
|
GoogleCloudPlatform/appengine-gcs-client
|
python/src/cloudstorage/common.py
|
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/common.py#L272-L287
|
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
|
[
"def",
"_validate_path",
"(",
"path",
")",
":",
"if",
"not",
"path",
":",
"raise",
"ValueError",
"(",
"'Path is empty'",
")",
"if",
"not",
"isinstance",
"(",
"path",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"'Path should be a string but is %s (%s).'",
"%",
"(",
"path",
".",
"__class__",
",",
"path",
")",
")"
] |
Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
|
[
"Basic",
"validation",
"of",
"Google",
"Storage",
"paths",
"."
] |
python
|
train
|
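The function above targets Python 2 (basestring); a Python 3 sketch of the same checks, with str as the analogue:
def validate_path(path):
    # Same two checks as _validate_path, written for Python 3.
    if not path:
        raise ValueError('Path is empty')
    if not isinstance(path, str):
        raise TypeError('Path should be a string but is %s (%s).'
                        % (path.__class__, path))

validate_path('/bucket/filename')  # passes silently
try:
    validate_path(123)
except TypeError:
    pass  # non-strings are rejected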
inasafe/inasafe
|
safe/gui/tools/wizard/step_fc90_analysis.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc90_analysis.py#L293-L300
|
def setup_gui_analysis_done(self):
"""Helper method to setup gui if analysis is done."""
self.progress_bar.hide()
self.lblAnalysisStatus.setText(tr('Analysis done.'))
self.pbnReportWeb.show()
self.pbnReportPDF.show()
# self.pbnReportComposer.show() # Hide until it works again.
self.pbnReportPDF.clicked.connect(self.print_map)
|
[
"def",
"setup_gui_analysis_done",
"(",
"self",
")",
":",
"self",
".",
"progress_bar",
".",
"hide",
"(",
")",
"self",
".",
"lblAnalysisStatus",
".",
"setText",
"(",
"tr",
"(",
"'Analysis done.'",
")",
")",
"self",
".",
"pbnReportWeb",
".",
"show",
"(",
")",
"self",
".",
"pbnReportPDF",
".",
"show",
"(",
")",
"# self.pbnReportComposer.show() # Hide until it works again.",
"self",
".",
"pbnReportPDF",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"print_map",
")"
] |
Helper method to setup gui if analysis is done.
|
[
"Helper",
"method",
"to",
"setup",
"gui",
"if",
"analysis",
"is",
"done",
"."
] |
python
|
train
|
sci-bots/pygtkhelpers
|
pygtkhelpers/ui/objectlist/view.py
|
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/objectlist/view.py#L507-L518
|
def remove(self, item):
"""Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
"""
if item not in self:
raise ValueError('objectlist.remove(item) failed, item not in list')
item_id = int(self._view_path_for(item))
giter = self._iter_for(item)
del self[giter]
self.emit('item-removed', item, item_id)
|
[
"def",
"remove",
"(",
"self",
",",
"item",
")",
":",
"if",
"item",
"not",
"in",
"self",
":",
"raise",
"ValueError",
"(",
"'objectlist.remove(item) failed, item not in list'",
")",
"item_id",
"=",
"int",
"(",
"self",
".",
"_view_path_for",
"(",
"item",
")",
")",
"giter",
"=",
"self",
".",
"_iter_for",
"(",
"item",
")",
"del",
"self",
"[",
"giter",
"]",
"self",
".",
"emit",
"(",
"'item-removed'",
",",
"item",
",",
"item_id",
")"
] |
Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
|
[
"Remove",
"an",
"item",
"from",
"the",
"list"
] |
python
|
train
|
jupyter-widgets/ipywidgets
|
ipywidgets/widgets/interaction.py
|
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/interaction.py#L60-L80
|
def interactive_output(f, controls):
"""Connect widget controls to a function.
This function does not generate a user interface for the widgets (unlike `interact`).
This enables customisation of the widget user interface layout.
The user interface layout must be defined and displayed manually.
"""
out = Output()
def observer(change):
kwargs = {k:v.value for k,v in controls.items()}
show_inline_matplotlib_plots()
with out:
clear_output(wait=True)
f(**kwargs)
show_inline_matplotlib_plots()
for k,w in controls.items():
w.observe(observer, 'value')
show_inline_matplotlib_plots()
observer(None)
return out
|
[
"def",
"interactive_output",
"(",
"f",
",",
"controls",
")",
":",
"out",
"=",
"Output",
"(",
")",
"def",
"observer",
"(",
"change",
")",
":",
"kwargs",
"=",
"{",
"k",
":",
"v",
".",
"value",
"for",
"k",
",",
"v",
"in",
"controls",
".",
"items",
"(",
")",
"}",
"show_inline_matplotlib_plots",
"(",
")",
"with",
"out",
":",
"clear_output",
"(",
"wait",
"=",
"True",
")",
"f",
"(",
"*",
"*",
"kwargs",
")",
"show_inline_matplotlib_plots",
"(",
")",
"for",
"k",
",",
"w",
"in",
"controls",
".",
"items",
"(",
")",
":",
"w",
".",
"observe",
"(",
"observer",
",",
"'value'",
")",
"show_inline_matplotlib_plots",
"(",
")",
"observer",
"(",
"None",
")",
"return",
"out"
] |
Connect widget controls to a function.
This function does not generate a user interface for the widgets (unlike `interact`).
This enables customisation of the widget user interface layout.
The user interface layout must be defined and displayed manually.
|
[
"Connect",
"widget",
"controls",
"to",
"a",
"function",
"."
] |
python
|
train
|
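A typical notebook usage sketch, following the docstring's requirement that the layout be built and displayed manually (widget names are illustrative):
import ipywidgets as widgets
from IPython.display import display

a = widgets.IntSlider(description='a')
b = widgets.IntSlider(description='b')

def show_sum(a, b):
    print(a + b)

out = widgets.interactive_output(show_sum, {'a': a, 'b': b})
display(widgets.HBox([a, b]), out)  # manual layout, then the output area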
cs01/pygdbmi
|
pygdbmi/gdbmiparser.py
|
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L182-L193
|
def _get_notify_msg_and_payload(result, stream):
"""Get notify message and payload dict"""
token = stream.advance_past_chars(["=", "*"])
token = int(token) if token != "" else None
logger.debug("%s", fmt_green("parsing message"))
message = stream.advance_past_chars([","])
logger.debug("parsed message")
logger.debug("%s", fmt_green(message))
payload = _parse_dict(stream)
return token, message.strip(), payload
|
[
"def",
"_get_notify_msg_and_payload",
"(",
"result",
",",
"stream",
")",
":",
"token",
"=",
"stream",
".",
"advance_past_chars",
"(",
"[",
"\"=\"",
",",
"\"*\"",
"]",
")",
"token",
"=",
"int",
"(",
"token",
")",
"if",
"token",
"!=",
"\"\"",
"else",
"None",
"logger",
".",
"debug",
"(",
"\"%s\"",
",",
"fmt_green",
"(",
"\"parsing message\"",
")",
")",
"message",
"=",
"stream",
".",
"advance_past_chars",
"(",
"[",
"\",\"",
"]",
")",
"logger",
".",
"debug",
"(",
"\"parsed message\"",
")",
"logger",
".",
"debug",
"(",
"\"%s\"",
",",
"fmt_green",
"(",
"message",
")",
")",
"payload",
"=",
"_parse_dict",
"(",
"stream",
")",
"return",
"token",
",",
"message",
".",
"strip",
"(",
")",
",",
"payload"
] |
Get notify message and payload dict
|
[
"Get",
"notify",
"message",
"and",
"payload",
"dict"
] |
python
|
valid
|
python-diamond/Diamond
|
src/collectors/numa/numa.py
|
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/numa/numa.py#L21-L30
|
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NumaCollector, self).get_default_config()
config.update({
'path': 'numa',
'bin': self.find_binary('numactl'),
})
return config
|
[
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"NumaCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'numa'",
",",
"'bin'",
":",
"self",
".",
"find_binary",
"(",
"'numactl'",
")",
",",
"}",
")",
"return",
"config"
] |
Returns the default collector settings
|
[
"Returns",
"the",
"default",
"collector",
"settings"
] |
python
|
train
|
Hrabal/TemPy
|
tempy/tempy.py
|
https://github.com/Hrabal/TemPy/blob/7d229b73e2ce3ccbb8254deae05c1f758f626ed6/tempy/tempy.py#L340-L343
|
def append(self, _, child, name=None):
"""Adds childs to this tag, after the current existing childs."""
self._insert(child, name=name)
return self
|
[
"def",
"append",
"(",
"self",
",",
"_",
",",
"child",
",",
"name",
"=",
"None",
")",
":",
"self",
".",
"_insert",
"(",
"child",
",",
"name",
"=",
"name",
")",
"return",
"self"
] |
Adds children to this tag, after the current existing children.
|
[
"Adds",
"childs",
"to",
"this",
"tag",
"after",
"the",
"current",
"existing",
"childs",
"."
] |
python
|
train
|
pymacaron/pymacaron-core
|
pymacaron_core/swagger/spec.py
|
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L95-L105
|
def model_to_json(self, object, cleanup=True):
"""Take a model instance and return it as a json struct"""
model_name = type(object).__name__
if model_name not in self.swagger_dict['definitions']:
raise ValidationError("Swagger spec has no definition for model %s" % model_name)
model_def = self.swagger_dict['definitions'][model_name]
log.debug("Marshalling %s into json" % model_name)
m = marshal_model(self.spec, model_def, object)
if cleanup:
self.cleanup_model(m)
return m
|
[
"def",
"model_to_json",
"(",
"self",
",",
"object",
",",
"cleanup",
"=",
"True",
")",
":",
"model_name",
"=",
"type",
"(",
"object",
")",
".",
"__name__",
"if",
"model_name",
"not",
"in",
"self",
".",
"swagger_dict",
"[",
"'definitions'",
"]",
":",
"raise",
"ValidationError",
"(",
"\"Swagger spec has no definition for model %s\"",
"%",
"model_name",
")",
"model_def",
"=",
"self",
".",
"swagger_dict",
"[",
"'definitions'",
"]",
"[",
"model_name",
"]",
"log",
".",
"debug",
"(",
"\"Marshalling %s into json\"",
"%",
"model_name",
")",
"m",
"=",
"marshal_model",
"(",
"self",
".",
"spec",
",",
"model_def",
",",
"object",
")",
"if",
"cleanup",
":",
"self",
".",
"cleanup_model",
"(",
"m",
")",
"return",
"m"
] |
Take a model instance and return it as a json struct
|
[
"Take",
"a",
"model",
"instance",
"and",
"return",
"it",
"as",
"a",
"json",
"struct"
] |
python
|
train
|
xav-b/pyconsul
|
pyconsul/http.py
|
https://github.com/xav-b/pyconsul/blob/06ce3b921d01010c19643424486bea4b22196076/pyconsul/http.py#L27-L32
|
def get(self, key, **kwargs):
'''
Fetch value at the given key
kwargs can hold `recurse`, `wait` and `index` params
'''
return self._get('/'.join([self._endpoint, key]), payload=kwargs)
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"_endpoint",
",",
"key",
"]",
")",
",",
"payload",
"=",
"kwargs",
")"
] |
Fetch value at the given key
kwargs can hold `recurse`, `wait` and `index` params
|
[
"Fetch",
"value",
"at",
"the",
"given",
"key",
"kwargs",
"can",
"hold",
"recurse",
"wait",
"and",
"index",
"params"
] |
python
|
train
|
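A sketch of the request that get() builds; the endpoint and key are assumptions for illustration.
# get('web/config', recurse=True) would hit <endpoint>/web/config with
# params {'recurse': True}; only the URL joining is reproduced here.
endpoint = 'http://localhost:8500/v1/kv'  # hypothetical Consul address
key = 'web/config'
url = '/'.join([endpoint, key])
assert url == 'http://localhost:8500/v1/kv/web/config'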
allenai/allennlp
|
allennlp/semparse/domain_languages/wikitables_language.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L477-L488
|
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return 0.0 # type: ignore
most_frequent_value = most_frequent_list[0]
if not isinstance(most_frequent_value, Number):
raise ExecutionError(f"Invalid valus for mode_number: {most_frequent_value}")
return most_frequent_value
|
[
"def",
"mode_number",
"(",
"self",
",",
"rows",
":",
"List",
"[",
"Row",
"]",
",",
"column",
":",
"NumberColumn",
")",
"->",
"Number",
":",
"most_frequent_list",
"=",
"self",
".",
"_get_most_frequent_values",
"(",
"rows",
",",
"column",
")",
"if",
"not",
"most_frequent_list",
":",
"return",
"0.0",
"# type: ignore",
"most_frequent_value",
"=",
"most_frequent_list",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"most_frequent_value",
",",
"Number",
")",
":",
"raise",
"ExecutionError",
"(",
"f\"Invalid valus for mode_number: {most_frequent_value}\"",
")",
"return",
"most_frequent_value"
] |
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
|
[
"Takes",
"a",
"list",
"of",
"rows",
"and",
"a",
"column",
"and",
"returns",
"the",
"most",
"frequent",
"value",
"under",
"that",
"column",
"in",
"those",
"rows",
"."
] |
python
|
train
|
inspirehep/refextract
|
refextract/references/kbs.py
|
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/kbs.py#L161-L174
|
def create_institute_numeration_group_regexp_pattern(patterns):
"""Using a list of regexp patterns for recognising numeration patterns
for institute preprint references, ordered by length - longest to
shortest - create a grouped 'OR' of these patterns, ready to be
used in a bigger regexp.
@param patterns: (list) of strings. All of the numeration regexp
patterns for recognising an institute's preprint reference styles.
@return: (string) a grouped 'OR' regexp pattern of the numeration
patterns. E.g.:
(?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
"""
patterns_list = [institute_num_pattern_to_regex(p[1]) for p in patterns]
grouped_numeration_pattern = u"(?P<numn>%s)" % u'|'.join(patterns_list)
return grouped_numeration_pattern
|
[
"def",
"create_institute_numeration_group_regexp_pattern",
"(",
"patterns",
")",
":",
"patterns_list",
"=",
"[",
"institute_num_pattern_to_regex",
"(",
"p",
"[",
"1",
"]",
")",
"for",
"p",
"in",
"patterns",
"]",
"grouped_numeration_pattern",
"=",
"u\"(?P<numn>%s)\"",
"%",
"u'|'",
".",
"join",
"(",
"patterns_list",
")",
"return",
"grouped_numeration_pattern"
] |
Using a list of regexp patterns for recognising numeration patterns
for institute preprint references, ordered by length - longest to
shortest - create a grouped 'OR' of these patterns, ready to be
used in a bigger regexp.
@param patterns: (list) of strings. All of the numeration regexp
patterns for recognising an institute's preprint reference styles.
@return: (string) a grouped 'OR' regexp pattern of the numeration
patterns. E.g.:
(?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
|
[
"Using",
"a",
"list",
"of",
"regexp",
"patterns",
"for",
"recognising",
"numeration",
"patterns",
"for",
"institute",
"preprint",
"references",
"ordered",
"by",
"length",
"-",
"longest",
"to",
"shortest",
"-",
"create",
"a",
"grouped",
"OR",
"or",
"of",
"these",
"patterns",
"ready",
"to",
"be",
"used",
"in",
"a",
"bigger",
"regexp",
"."
] |
python
|
train
|
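A sketch of the grouped-OR construction; to_regex() below is a deliberately simplified stand-in for institute_num_pattern_to_regex(), whose real translation rules are more involved.
def to_regex(pattern):
    # simplified stand-in: only maps spaces to \s
    return pattern.replace(' ', r'\s')

patterns = [('inst-a', r'[12]\d{3} \d\d\d'),  # longest first, as required
            ('inst-b', r'\d\d \d\d\d')]
patterns_list = [to_regex(p[1]) for p in patterns]
grouped = u"(?P<numn>%s)" % u'|'.join(patterns_list)
assert grouped == r'(?P<numn>[12]\d{3}\s\d\d\d|\d\d\s\d\d\d)'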
gabstopper/smc-python
|
smc/core/route.py
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/route.py#L504-L529
|
def add_static_route(self, gateway, destination, network=None):
"""
Add a static route to this route table. Destination can be any element
type supported in the routing table such as a Group of network members.
Since a static route gateway needs to be on the same network as the
interface, provide a value for `network` if an interface has multiple
addresses on different networks.
::
>>> engine = Engine('ve-1')
>>> itf = engine.routing.get(0)
>>> itf.add_static_route(
gateway=Router('tmprouter'),
destination=[Group('routegroup')])
:param Element gateway: gateway for this route (Router, Host)
:param Element destination: destination network/s for this route.
:type destination: list(Host, Router, ..)
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the route table was updated
:rtype: bool
"""
routing_node_gateway = RoutingNodeGateway(gateway,
destinations=destination)
return self._add_gateway_node('router', routing_node_gateway, network)
|
[
"def",
"add_static_route",
"(",
"self",
",",
"gateway",
",",
"destination",
",",
"network",
"=",
"None",
")",
":",
"routing_node_gateway",
"=",
"RoutingNodeGateway",
"(",
"gateway",
",",
"destinations",
"=",
"destination",
")",
"return",
"self",
".",
"_add_gateway_node",
"(",
"'router'",
",",
"routing_node_gateway",
",",
"network",
")"
] |
Add a static route to this route table. Destination can be any element
type supported in the routing table such as a Group of network members.
Since a static route gateway needs to be on the same network as the
interface, provide a value for `network` if an interface has multiple
addresses on different networks.
::
>>> engine = Engine('ve-1')
>>> itf = engine.routing.get(0)
>>> itf.add_static_route(
gateway=Router('tmprouter'),
destination=[Group('routegroup')])
:param Element gateway: gateway for this route (Router, Host)
:param Element destination: destination network/s for this route.
:type destination: list(Host, Router, ..)
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the route table was updated
:rtype: bool
|
[
"Add",
"a",
"static",
"route",
"to",
"this",
"route",
"table",
".",
"Destination",
"can",
"be",
"any",
"element",
"type",
"supported",
"in",
"the",
"routing",
"table",
"such",
"as",
"a",
"Group",
"of",
"network",
"members",
".",
"Since",
"a",
"static",
"route",
"gateway",
"needs",
"to",
"be",
"on",
"the",
"same",
"network",
"as",
"the",
"interface",
"provide",
"a",
"value",
"for",
"network",
"if",
"an",
"interface",
"has",
"multiple",
"addresses",
"on",
"different",
"networks",
".",
"::"
] |
python
|
train
|
viniciuschiele/flask-apscheduler
|
flask_apscheduler/scheduler.py
|
https://github.com/viniciuschiele/flask-apscheduler/blob/cc52c39e1948c4e8de5da0d01db45f1779f61997/flask_apscheduler/scheduler.py#L162-L173
|
def delete_job(self, id, jobstore=None):
"""
DEPRECATED, use remove_job instead.
Remove a job, preventing it from being run any more.
:param str id: the identifier of the job
:param str jobstore: alias of the job store that contains the job
"""
warnings.warn('delete_job has been deprecated, use remove_job instead.', DeprecationWarning)
self.remove_job(id, jobstore)
|
[
"def",
"delete_job",
"(",
"self",
",",
"id",
",",
"jobstore",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"'delete_job has been deprecated, use remove_job instead.'",
",",
"DeprecationWarning",
")",
"self",
".",
"remove_job",
"(",
"id",
",",
"jobstore",
")"
] |
DEPRECATED, use remove_job instead.
Remove a job, preventing it from being run any more.
:param str id: the identifier of the job
:param str jobstore: alias of the job store that contains the job
|
[
"DEPRECATED",
"use",
"remove_job",
"instead",
"."
] |
python
|
train
|
chriso/timeseries
|
timeseries/time_series.py
|
https://github.com/chriso/timeseries/blob/8b81e6cfd955a7cf75a421dfdb71b3f9e53be64d/timeseries/time_series.py#L62-L66
|
def trend_coefficients(self, order=LINEAR):
'''Calculate trend coefficients for the specified order.'''
if not len(self.points):
raise ArithmeticError('Cannot calculate the trend of an empty series')
return LazyImport.numpy().polyfit(self.timestamps, self.values, order)
|
[
"def",
"trend_coefficients",
"(",
"self",
",",
"order",
"=",
"LINEAR",
")",
":",
"if",
"not",
"len",
"(",
"self",
".",
"points",
")",
":",
"raise",
"ArithmeticError",
"(",
"'Cannot calculate the trend of an empty series'",
")",
"return",
"LazyImport",
".",
"numpy",
"(",
")",
".",
"polyfit",
"(",
"self",
".",
"timestamps",
",",
"self",
".",
"values",
",",
"order",
")"
] |
Calculate trend coefficients for the specified order.
|
[
"Calculate",
"trend",
"coefficients",
"for",
"the",
"specified",
"order",
"."
] |
python
|
train
|
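Under the hood this is numpy.polyfit; a short check with a synthetic linear series (LINEAR is assumed to correspond to order 1):
import numpy as np

timestamps = np.array([0.0, 1.0, 2.0, 3.0])
values = 2.0 * timestamps + 1.0            # perfect line: slope 2, intercept 1
slope, intercept = np.polyfit(timestamps, values, 1)
assert abs(slope - 2.0) < 1e-9 and abs(intercept - 1.0) < 1e-9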
pyrogram/pyrogram
|
pyrogram/client/methods/chats/export_chat_invite_link.py
|
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/chats/export_chat_invite_link.py#L26-L58
|
def export_chat_invite_link(
self,
chat_id: Union[int, str]
) -> str:
"""Use this method to generate a new invite link for a chat; any previously generated link is revoked.
You must be an administrator in the chat for this to work and have the appropriate admin rights.
Args:
chat_id (``int`` | ``str``):
Unique identifier for the target chat or username of the target channel/supergroup
(in the format @username).
Returns:
On success, the exported invite link as string is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChat):
return self.send(
functions.messages.ExportChatInvite(
peer=peer.chat_id
)
).link
elif isinstance(peer, types.InputPeerChannel):
return self.send(
functions.channels.ExportInvite(
channel=peer
)
).link
|
[
"def",
"export_chat_invite_link",
"(",
"self",
",",
"chat_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
")",
"->",
"str",
":",
"peer",
"=",
"self",
".",
"resolve_peer",
"(",
"chat_id",
")",
"if",
"isinstance",
"(",
"peer",
",",
"types",
".",
"InputPeerChat",
")",
":",
"return",
"self",
".",
"send",
"(",
"functions",
".",
"messages",
".",
"ExportChatInvite",
"(",
"peer",
"=",
"peer",
".",
"chat_id",
")",
")",
".",
"link",
"elif",
"isinstance",
"(",
"peer",
",",
"types",
".",
"InputPeerChannel",
")",
":",
"return",
"self",
".",
"send",
"(",
"functions",
".",
"channels",
".",
"ExportInvite",
"(",
"channel",
"=",
"peer",
")",
")",
".",
"link"
] |
Use this method to generate a new invite link for a chat; any previously generated link is revoked.
You must be an administrator in the chat for this to work and have the appropriate admin rights.
Args:
chat_id (``int`` | ``str``):
Unique identifier for the target chat or username of the target channel/supergroup
(in the format @username).
Returns:
On success, the exported invite link as string is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
|
[
"Use",
"this",
"method",
"to",
"generate",
"a",
"new",
"invite",
"link",
"for",
"a",
"chat",
";",
"any",
"previously",
"generated",
"link",
"is",
"revoked",
"."
] |
python
|
train
|
waqasbhatti/astrobase
|
astrobase/periodbase/__init__.py
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/__init__.py#L509-L589
|
def make_combined_periodogram(pflist, outfile, addmethods=False):
'''This just puts all of the period-finders on a single periodogram.
This will renormalize all of the periodograms so their values lie between 0
and 1, with values lying closer to 1 being more significant. Periodograms
that give the same best periods will have their peaks line up together.
Parameters
----------
pflist : list of dict
This is a list of result dicts from any of the period-finders in
periodbase. To use your own period-finders' results here, make sure the
result dict is of the form and has at least the keys below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
outfile : str
This is the output file to write the output to. NOTE: EPS/PS won't work
because we use alpha transparency to better distinguish between the
various periodograms.
addmethods : bool
If this is True, will add all of the normalized periodograms together,
then renormalize them to between 0 and 1. In this way, if all of the
period-finders agree on something, it'll stand out easily. FIXME:
implement this kwarg.
Returns
-------
str
The name of the generated plot file.
'''
import matplotlib.pyplot as plt
for pf in pflist:
if pf['method'] == 'pdm':
plt.plot(pf['periods'],
np.max(pf['lspvals'])/pf['lspvals'] - 1.0,
label='%s P=%.5f' % (pf['method'], pf['bestperiod']),
alpha=0.5)
else:
plt.plot(pf['periods'],
pf['lspvals']/np.max(pf['lspvals']),
label='%s P=%.5f' % (pf['method'], pf['bestperiod']),
alpha=0.5)
plt.xlabel('period [days]')
plt.ylabel('normalized periodogram power')
plt.xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(outfile)
plt.close('all')
return outfile
|
[
"def",
"make_combined_periodogram",
"(",
"pflist",
",",
"outfile",
",",
"addmethods",
"=",
"False",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"for",
"pf",
"in",
"pflist",
":",
"if",
"pf",
"[",
"'method'",
"]",
"==",
"'pdm'",
":",
"plt",
".",
"plot",
"(",
"pf",
"[",
"'periods'",
"]",
",",
"np",
".",
"max",
"(",
"pf",
"[",
"'lspvals'",
"]",
")",
"/",
"pf",
"[",
"'lspvals'",
"]",
"-",
"1.0",
",",
"label",
"=",
"'%s P=%.5f'",
"%",
"(",
"pf",
"[",
"'method'",
"]",
",",
"pf",
"[",
"'bestperiod'",
"]",
")",
",",
"alpha",
"=",
"0.5",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"pf",
"[",
"'periods'",
"]",
",",
"pf",
"[",
"'lspvals'",
"]",
"/",
"np",
".",
"max",
"(",
"pf",
"[",
"'lspvals'",
"]",
")",
",",
"label",
"=",
"'%s P=%.5f'",
"%",
"(",
"pf",
"[",
"'method'",
"]",
",",
"pf",
"[",
"'bestperiod'",
"]",
")",
",",
"alpha",
"=",
"0.5",
")",
"plt",
".",
"xlabel",
"(",
"'period [days]'",
")",
"plt",
".",
"ylabel",
"(",
"'normalized periodogram power'",
")",
"plt",
".",
"xscale",
"(",
"'log'",
")",
"plt",
".",
"legend",
"(",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"savefig",
"(",
"outfile",
")",
"plt",
".",
"close",
"(",
"'all'",
")",
"return",
"outfile"
] |
This just puts all of the period-finders on a single periodogram.
This will renormalize all of the periodograms so their values lie between 0
and 1, with values lying closer to 1 being more significant. Periodograms
that give the same best periods will have their peaks line up together.
Parameters
----------
pflist : list of dict
This is a list of result dicts from any of the period-finders in
periodbase. To use your own period-finders' results here, make sure the
result dict is of the form and has at least the keys below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
outfile : str
This is the output file to write the output to. NOTE: EPS/PS won't work
because we use alpha transparency to better distinguish between the
various periodograms.
addmethods : bool
If this is True, will add all of the normalized periodograms together,
then renormalize them to between 0 and 1. In this way, if all of the
period-finders agree on something, it'll stand out easily. FIXME:
implement this kwarg.
Returns
-------
str
The name of the generated plot file.
|
[
"This",
"just",
"puts",
"all",
"of",
"the",
"period",
"-",
"finders",
"on",
"a",
"single",
"periodogram",
"."
] |
python
|
valid
|
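A sketch of one result dict in the shape documented above; the periodogram values are synthetic, not real period-finder output.
import numpy as np

periods = np.linspace(0.5, 10.0, 200)
lspvals = np.exp(-(periods - 3.0) ** 2)     # fake peak near P = 3 days
pf = {
    'periods': periods,
    'lspvals': lspvals,
    'bestperiod': float(periods[np.argmax(lspvals)]),
    'method': 'gls',                        # assumed three-letter code
    'nbestperiods': [3.0],
    'nbestlspvals': [float(lspvals.max())],
    'kwargs': {},
}
# make_combined_periodogram([pf], 'combined.png') would then normalise,
# plot, and save this periodogram.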
estnltk/estnltk
|
estnltk/disambiguator.py
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L491-L545
|
def __find_hidden_analyses(self, docs):
""" Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei
tule arvestada lemmade järelühestamisel:
*) kesksõnade nud, dud, tud mitmesused;
*) muutumatute sõnade sõnaliigi mitmesus;
*) oleviku 'olema' mitmesus ('nad on' vs 'ta on');
*) asesõnade ainsuse-mitmuse mitmesus;
*) arv- ja asesõnade vaheline mitmesus;
Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega,
iga võti kujul (doc_index, word_index); """
hidden = dict()
nudTudLopud = re.compile('^.*[ntd]ud$')
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
if ANALYSIS in word and len(word[ANALYSIS]) > 1:
#
# 1) If most analyses are nud/tud/dud analyses, hide the ambiguity:
# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //
nudTud = [ nudTudLopud.match(a[ROOT]) != None or \
nudTudLopud.match(a[ENDING]) != None \
for a in word[ANALYSIS] ]
if nudTud.count( True ) > 1:
hidden[(d, w)] = 1
#
# 2) If the analyses share a lemma and have no form feature, hide the ambiguities:
# Nt kui+0 //_D_ // kui+0 //_J_ //
# nagu+0 //_D_ // nagu+0 //_J_ //
lemmas = set([ a[ROOT] for a in word[ANALYSIS] ])
forms = set([ a[FORM] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '':
hidden[(d, w)] = 1
#
# 3) If the 'olema' analyses share a lemma and an ending, hide the ambiguities:
# E.g. 'nad on' vs 'ta on' get the same olema analysis, which stays ambiguous;
endings = set([ a[ENDING] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \
and (list(endings))[0] == '0':
hidden[(d, w)] = 1
#
# 4) If pronouns share a lemma and an ending, hide the singular/plural ambiguity:
# Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n //
# kes+0 //_P_ sg n // kes+0 //_P_ pl n //
postags = set([ a[POSTAG] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \
len(endings) == 1:
hidden[(d, w)] = 1
#
# 5) If lemma and ending match, hide the ambiguity between numerals and pronouns:
# Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, //
# üks+l //_N_ sg ad, // üks+l //_P_ sg ad, //
if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \
'N' in postags) and len(endings) == 1:
hidden[(d, w)] = 1
return hidden
|
[
"def",
"__find_hidden_analyses",
"(",
"self",
",",
"docs",
")",
":",
"hidden",
"=",
"dict",
"(",
")",
"nudTudLopud",
"=",
"re",
".",
"compile",
"(",
"'^.*[ntd]ud$'",
")",
"for",
"d",
"in",
"range",
"(",
"len",
"(",
"docs",
")",
")",
":",
"for",
"w",
"in",
"range",
"(",
"len",
"(",
"docs",
"[",
"d",
"]",
"[",
"WORDS",
"]",
")",
")",
":",
"word",
"=",
"docs",
"[",
"d",
"]",
"[",
"WORDS",
"]",
"[",
"w",
"]",
"if",
"ANALYSIS",
"in",
"word",
"and",
"len",
"(",
"word",
"[",
"ANALYSIS",
"]",
")",
">",
"1",
":",
"#",
"# 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus:",
"# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //",
"nudTud",
"=",
"[",
"nudTudLopud",
".",
"match",
"(",
"a",
"[",
"ROOT",
"]",
")",
"!=",
"None",
"or",
"nudTudLopud",
".",
"match",
"(",
"a",
"[",
"ENDING",
"]",
")",
"!=",
"None",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
"if",
"nudTud",
".",
"count",
"(",
"True",
")",
">",
"1",
":",
"hidden",
"[",
"(",
"d",
",",
"w",
")",
"]",
"=",
"1",
"#",
"# 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära:",
"# Nt kui+0 //_D_ // kui+0 //_J_ //",
"# nagu+0 //_D_ // nagu+0 //_J_ //",
"lemmas",
"=",
"set",
"(",
"[",
"a",
"[",
"ROOT",
"]",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
")",
"forms",
"=",
"set",
"(",
"[",
"a",
"[",
"FORM",
"]",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
")",
"if",
"len",
"(",
"lemmas",
")",
"==",
"1",
"and",
"len",
"(",
"forms",
")",
"==",
"1",
"and",
"(",
"list",
"(",
"forms",
")",
")",
"[",
"0",
"]",
"==",
"''",
":",
"hidden",
"[",
"(",
"d",
",",
"w",
")",
"]",
"=",
"1",
"#",
"# 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused:",
"# Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks;",
"endings",
"=",
"set",
"(",
"[",
"a",
"[",
"ENDING",
"]",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
")",
"if",
"len",
"(",
"lemmas",
")",
"==",
"1",
"and",
"(",
"list",
"(",
"lemmas",
")",
")",
"[",
"0",
"]",
"==",
"'ole'",
"and",
"len",
"(",
"endings",
")",
"==",
"1",
"and",
"(",
"list",
"(",
"endings",
")",
")",
"[",
"0",
"]",
"==",
"'0'",
":",
"hidden",
"[",
"(",
"d",
",",
"w",
")",
"]",
"=",
"1",
"#",
"# 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus:",
"# Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n //",
"# kes+0 //_P_ sg n // kes+0 //_P_ pl n //",
"postags",
"=",
"set",
"(",
"[",
"a",
"[",
"POSTAG",
"]",
"for",
"a",
"in",
"word",
"[",
"ANALYSIS",
"]",
"]",
")",
"if",
"len",
"(",
"lemmas",
")",
"==",
"1",
"and",
"len",
"(",
"postags",
")",
"==",
"1",
"and",
"'P'",
"in",
"postags",
"and",
"len",
"(",
"endings",
")",
"==",
"1",
":",
"hidden",
"[",
"(",
"d",
",",
"w",
")",
"]",
"=",
"1",
"#",
"# 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus:",
"# Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, //",
"# üks+l //_N_ sg ad, // üks+l //_P_ sg ad, //",
"if",
"len",
"(",
"lemmas",
")",
"==",
"1",
"and",
"'P'",
"in",
"postags",
"and",
"(",
"'O'",
"in",
"postags",
"or",
"'N'",
"in",
"postags",
")",
"and",
"len",
"(",
"endings",
")",
"==",
"1",
":",
"hidden",
"[",
"(",
"d",
",",
"w",
")",
"]",
"=",
"1",
"return",
"hidden"
] |
Remembers which analyses are so-called hidden, that is, which must not be
considered during lemma post-disambiguation:
*) ambiguities of the participles nud, dud, tud;
*) part-of-speech ambiguity of uninflected words;
*) ambiguity of present-tense 'olema' ('nad on' vs 'ta on');
*) singular-plural ambiguity of pronouns;
*) ambiguity between numerals and pronouns;
Returns a dictionary with the locations of the words that contained hidden
analyses, each key of the form (doc_index, word_index);
|
[
"Jätab",
"meelde",
"millised",
"analüüsid",
"on",
"nn",
"peidetud",
"ehk",
"siis",
"mida",
"ei",
"tule",
"arvestada",
"lemmade",
"järelühestamisel",
":",
"*",
")",
"kesksõnade",
"nud",
"dud",
"tud",
"mitmesused",
";",
"*",
")",
"muutumatute",
"sõnade",
"sõnaliigi",
"mitmesus",
";",
"*",
")",
"oleviku",
"olema",
"mitmesus",
"(",
"nad",
"on",
"vs",
"ta",
"on",
")",
";",
"*",
")",
"asesõnade",
"ainsuse",
"-",
"mitmuse",
"mitmesus",
";",
"*",
")",
"arv",
"-",
"ja",
"asesõnade",
"vaheline",
"mitmesus",
";",
"Tagastab",
"sõnastiku",
"peidetud",
"analüüse",
"sisaldanud",
"sõnade",
"asukohtadega",
"iga",
"võti",
"kujul",
"(",
"doc_index",
"word_index",
")",
";"
] |
python
|
train
|
klen/muffin-admin
|
muffin_admin/peewee.py
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L212-L221
|
def apply(self, query, data):
"""Filter a query."""
field = self.model_field or query.model_class._meta.fields.get(self.name)
if not field or self.name not in data:
return query
value = self.value(data)
if value is self.default:
return query
value = field.db_value(value)
return self.filter_query(query, field, value)
|
[
"def",
"apply",
"(",
"self",
",",
"query",
",",
"data",
")",
":",
"field",
"=",
"self",
".",
"model_field",
"or",
"query",
".",
"model_class",
".",
"_meta",
".",
"fields",
".",
"get",
"(",
"self",
".",
"name",
")",
"if",
"not",
"field",
"or",
"self",
".",
"name",
"not",
"in",
"data",
":",
"return",
"query",
"value",
"=",
"self",
".",
"value",
"(",
"data",
")",
"if",
"value",
"is",
"self",
".",
"default",
":",
"return",
"query",
"value",
"=",
"field",
".",
"db_value",
"(",
"value",
")",
"return",
"self",
".",
"filter_query",
"(",
"query",
",",
"field",
",",
"value",
")"
] |
Filter a query.
|
[
"Filter",
"a",
"query",
"."
] |
python
|
train
|
thautwarm/Redy
|
Redy/Magic/Classic.py
|
https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/Magic/Classic.py#L77-L102
|
def execute(func: types.FunctionType):
"""
>>> from Redy.Magic.Classic import execute
>>> x = 1
>>> @execute
>>> def f(x = x) -> int:
>>> return x + 1
>>> assert f is 2
"""
spec = getfullargspec(func)
default = spec.defaults
arg_cursor = 0
def get_item(name):
nonlocal arg_cursor
ctx = func.__globals__
value = ctx.get(name, _undef)
if value is _undef:
try:
value = default[arg_cursor]
arg_cursor += 1
except (TypeError, IndexError):
raise ValueError(f"Current context has no variable `{name}`")
return value
return func(*(get_item(arg_name) for arg_name in spec.args))
|
[
"def",
"execute",
"(",
"func",
":",
"types",
".",
"FunctionType",
")",
":",
"spec",
"=",
"getfullargspec",
"(",
"func",
")",
"default",
"=",
"spec",
".",
"defaults",
"arg_cursor",
"=",
"0",
"def",
"get_item",
"(",
"name",
")",
":",
"nonlocal",
"arg_cursor",
"ctx",
"=",
"func",
".",
"__globals__",
"value",
"=",
"ctx",
".",
"get",
"(",
"name",
",",
"_undef",
")",
"if",
"value",
"is",
"_undef",
":",
"try",
":",
"value",
"=",
"default",
"[",
"arg_cursor",
"]",
"arg_cursor",
"+=",
"1",
"except",
"(",
"TypeError",
",",
"IndexError",
")",
":",
"raise",
"ValueError",
"(",
"f\"Current context has no variable `{name}`\"",
")",
"return",
"value",
"return",
"func",
"(",
"*",
"(",
"get_item",
"(",
"arg_name",
")",
"for",
"arg_name",
"in",
"spec",
".",
"args",
")",
")"
] |
>>> from Redy.Magic.Classic import execute
>>> x = 1
>>> @execute
>>> def f(x = x) -> int:
>>> return x + 1
>>> assert f is 2
|
[
">>>",
"from",
"Redy",
".",
"Magic",
".",
"Classic",
"import",
"execute",
">>>",
"x",
"=",
"1",
">>>"
] |
python
|
train
|
luckydonald/pytgbot
|
code_generation/output/pytgbot/api_types/receivable/stickers.py
|
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/stickers.py#L81-L96
|
def to_array(self):
"""
Serializes this StickerSet to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(StickerSet, self).to_array()
array['name'] = u(self.name) # py2: type unicode, py3: type str
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['contains_masks'] = bool(self.contains_masks) # type bool
array['stickers'] = self._as_array(self.stickers) # type list of Sticker
return array
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"StickerSet",
",",
"self",
")",
".",
"to_array",
"(",
")",
"array",
"[",
"'name'",
"]",
"=",
"u",
"(",
"self",
".",
"name",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'title'",
"]",
"=",
"u",
"(",
"self",
".",
"title",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'contains_masks'",
"]",
"=",
"bool",
"(",
"self",
".",
"contains_masks",
")",
"# type bool",
"array",
"[",
"'stickers'",
"]",
"=",
"self",
".",
"_as_array",
"(",
"self",
".",
"stickers",
")",
"# type list of Sticker",
"return",
"array"
] |
Serializes this StickerSet to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
|
[
"Serializes",
"this",
"StickerSet",
"to",
"a",
"dictionary",
"."
] |
python
|
train
|
pydata/xarray
|
xarray/core/dataset.py
|
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L3129-L3200
|
def reduce(self, func, dim=None, keep_attrs=None, numeric_only=False,
allow_lazy=False, **kwargs):
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if dim is ALL_DIMS:
dim = None
if isinstance(dim, str):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
missing_dimensions = [d for d in dims if d not in self.dims]
if missing_dimensions:
raise ValueError('Dataset does not contain the dimensions: %s'
% missing_dimensions)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
variables = OrderedDict()
for name, var in self._variables.items():
reduce_dims = [d for d in var.dims if d in dims]
if name in self.coords:
if not reduce_dims:
variables[name] = var
else:
if (not numeric_only or
np.issubdtype(var.dtype, np.number) or
(var.dtype == np.bool_)):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
reduce_dims, = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.reduce(func, dim=reduce_dims,
keep_attrs=keep_attrs,
allow_lazy=allow_lazy,
**kwargs)
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
|
[
"def",
"reduce",
"(",
"self",
",",
"func",
",",
"dim",
"=",
"None",
",",
"keep_attrs",
"=",
"None",
",",
"numeric_only",
"=",
"False",
",",
"allow_lazy",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"dim",
"is",
"ALL_DIMS",
":",
"dim",
"=",
"None",
"if",
"isinstance",
"(",
"dim",
",",
"str",
")",
":",
"dims",
"=",
"set",
"(",
"[",
"dim",
"]",
")",
"elif",
"dim",
"is",
"None",
":",
"dims",
"=",
"set",
"(",
"self",
".",
"dims",
")",
"else",
":",
"dims",
"=",
"set",
"(",
"dim",
")",
"missing_dimensions",
"=",
"[",
"d",
"for",
"d",
"in",
"dims",
"if",
"d",
"not",
"in",
"self",
".",
"dims",
"]",
"if",
"missing_dimensions",
":",
"raise",
"ValueError",
"(",
"'Dataset does not contain the dimensions: %s'",
"%",
"missing_dimensions",
")",
"if",
"keep_attrs",
"is",
"None",
":",
"keep_attrs",
"=",
"_get_keep_attrs",
"(",
"default",
"=",
"False",
")",
"variables",
"=",
"OrderedDict",
"(",
")",
"for",
"name",
",",
"var",
"in",
"self",
".",
"_variables",
".",
"items",
"(",
")",
":",
"reduce_dims",
"=",
"[",
"d",
"for",
"d",
"in",
"var",
".",
"dims",
"if",
"d",
"in",
"dims",
"]",
"if",
"name",
"in",
"self",
".",
"coords",
":",
"if",
"not",
"reduce_dims",
":",
"variables",
"[",
"name",
"]",
"=",
"var",
"else",
":",
"if",
"(",
"not",
"numeric_only",
"or",
"np",
".",
"issubdtype",
"(",
"var",
".",
"dtype",
",",
"np",
".",
"number",
")",
"or",
"(",
"var",
".",
"dtype",
"==",
"np",
".",
"bool_",
")",
")",
":",
"if",
"len",
"(",
"reduce_dims",
")",
"==",
"1",
":",
"# unpack dimensions for the benefit of functions",
"# like np.argmin which can't handle tuple arguments",
"reduce_dims",
",",
"=",
"reduce_dims",
"elif",
"len",
"(",
"reduce_dims",
")",
"==",
"var",
".",
"ndim",
":",
"# prefer to aggregate over axis=None rather than",
"# axis=(0, 1) if they will be equivalent, because",
"# the former is often more efficient",
"reduce_dims",
"=",
"None",
"variables",
"[",
"name",
"]",
"=",
"var",
".",
"reduce",
"(",
"func",
",",
"dim",
"=",
"reduce_dims",
",",
"keep_attrs",
"=",
"keep_attrs",
",",
"allow_lazy",
"=",
"allow_lazy",
",",
"*",
"*",
"kwargs",
")",
"coord_names",
"=",
"set",
"(",
"k",
"for",
"k",
"in",
"self",
".",
"coords",
"if",
"k",
"in",
"variables",
")",
"attrs",
"=",
"self",
".",
"attrs",
"if",
"keep_attrs",
"else",
"None",
"return",
"self",
".",
"_replace_vars_and_dims",
"(",
"variables",
",",
"coord_names",
",",
"attrs",
"=",
"attrs",
")"
] |
Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
|
[
"Reduce",
"this",
"dataset",
"by",
"applying",
"func",
"along",
"some",
"dimension",
"(",
"s",
")",
"."
] |
python
|
train
|
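A quick usage sketch of `Dataset.reduce`, assuming a recent xarray where the signature above holds: reducing over one named dimension drops it from the result.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"temp": (("time", "x"), np.arange(6.0).reshape(3, 2))},
    coords={"time": [0, 1, 2], "x": [10, 20]},
)
reduced = ds.reduce(np.mean, dim="time")  # 'time' is removed from the result
assert reduced["temp"].dims == ("x",)
assert list(reduced["temp"].values) == [2.0, 3.0]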
qacafe/cdrouter.py
|
cdrouter/highlights.py
|
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/highlights.py#L85-L94
|
def create(self, id, seq, resource): # pylint: disable=invalid-name,redefined-builtin
"""Create a highlight.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:param resource: :class:`highlights.Highlight <highlights.Highlight>` object
:return: :class:`highlights.Highlight <highlights.Highlight>` object
:rtype: highlights.Highlight
"""
return self.create_or_edit(id, seq, resource)
|
[
"def",
"create",
"(",
"self",
",",
"id",
",",
"seq",
",",
"resource",
")",
":",
"# pylint: disable=invalid-name,redefined-builtin",
"return",
"self",
".",
"create_or_edit",
"(",
"id",
",",
"seq",
",",
"resource",
")"
] |
Create a highlight.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:param resource: :class:`highlights.Highlight <highlights.Highlight>` object
:return: :class:`highlights.Highlight <highlights.Highlight>` object
:rtype: highlights.Highlight
|
[
"Create",
"a",
"highlight",
"."
] |
python
|
train
|
grundic/yagocd
|
yagocd/client.py
|
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/client.py#L221-L229
|
def info(self):
"""
    Property for accessing :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager
"""
if self._info_manager is None:
self._info_manager = InfoManager(session=self._session)
return self._info_manager
|
[
"def",
"info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info_manager",
"is",
"None",
":",
"self",
".",
"_info_manager",
"=",
"InfoManager",
"(",
"session",
"=",
"self",
".",
"_session",
")",
"return",
"self",
".",
"_info_manager"
] |
Property for accessing :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager
|
[
"Property",
"for",
"accessing",
":",
"class",
":",
"InfoManager",
"instance",
"which",
"is",
"used",
"to",
"general",
"server",
"info",
"."
] |
python
|
train
|
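The `info` property above is a lazy-initialization cache: the manager is built on first access and reused afterwards. A minimal sketch, with a stand-in object in place of `InfoManager(session=...)`:

class Client:
    def __init__(self, session):
        self._session = session
        self._info_manager = None

    @property
    def info(self):
        # build once, on first access; reuse afterwards
        if self._info_manager is None:
            self._info_manager = object()  # stand-in for InfoManager(session=self._session)
        return self._info_manager

c = Client(session=None)
assert c.info is c.info  # the cached instance is returned every time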
fabioz/PyDev.Debugger
|
third_party/pep8/autopep8.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/autopep8.py#L1897-L1921
|
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
|
[
"def",
"_split_after_delimiter",
"(",
"self",
",",
"item",
",",
"indent_amt",
")",
":",
"self",
".",
"_delete_whitespace",
"(",
")",
"if",
"self",
".",
"fits_on_current_line",
"(",
"item",
".",
"size",
")",
":",
"return",
"last_space",
"=",
"None",
"for",
"item",
"in",
"reversed",
"(",
"self",
".",
"_lines",
")",
":",
"if",
"(",
"last_space",
"and",
"(",
"not",
"isinstance",
"(",
"item",
",",
"Atom",
")",
"or",
"not",
"item",
".",
"is_colon",
")",
")",
":",
"break",
"else",
":",
"last_space",
"=",
"None",
"if",
"isinstance",
"(",
"item",
",",
"self",
".",
"_Space",
")",
":",
"last_space",
"=",
"item",
"if",
"isinstance",
"(",
"item",
",",
"(",
"self",
".",
"_LineBreak",
",",
"self",
".",
"_Indent",
")",
")",
":",
"return",
"if",
"not",
"last_space",
":",
"return",
"self",
".",
"add_line_break_at",
"(",
"self",
".",
"_lines",
".",
"index",
"(",
"last_space",
")",
",",
"indent_amt",
")"
] |
Split the line only after a delimiter.
|
[
"Split",
"the",
"line",
"only",
"after",
"a",
"delimiter",
"."
] |
python
|
train
|
quantopian/zipline
|
zipline/lib/labelarray.py
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L38-L43
|
def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return (
left is right
or ((left.shape == right.shape) and (left == right).all())
)
|
[
"def",
"compare_arrays",
"(",
"left",
",",
"right",
")",
":",
"return",
"(",
"left",
"is",
"right",
"or",
"(",
"(",
"left",
".",
"shape",
"==",
"right",
".",
"shape",
")",
"and",
"(",
"left",
"==",
"right",
")",
".",
"all",
"(",
")",
")",
")"
] |
Eq check with a short-circuit for identical objects.
|
[
"Eq",
"check",
"with",
"a",
"short",
"-",
"circuit",
"for",
"identical",
"objects",
"."
] |
python
|
train
|
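The short-circuit in `compare_arrays` skips the O(n) elementwise pass when both names reference the same array, and the shape check avoids broadcasting surprises. For example:

import numpy as np

def compare_arrays(left, right):
    return (
        left is right  # identity: skips the elementwise scan entirely
        or ((left.shape == right.shape) and (left == right).all())
    )

a = np.arange(4)
assert compare_arrays(a, a)          # identity short-circuit
assert compare_arrays(a, a.copy())   # elementwise equality
assert not compare_arrays(a, a[:2])  # shape mismatch fails fast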
reingart/pyafipws
|
wsctg.py
|
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsctg.py#L640-L654
|
def ConsultarConstanciaCTGPDF(self, numero_ctg=None,
archivo="constancia.pdf"):
"Operación Consultar Constancia de CTG en PDF"
ret = self.client.consultarConstanciaCTGPDF(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
ctg=numero_ctg,
))['response']
self.__analizar_errores(ret)
datos = base64.b64decode(ret.get('archivo', ""))
f = open(archivo, "wb")
f.write(datos)
f.close()
return True
|
[
"def",
"ConsultarConstanciaCTGPDF",
"(",
"self",
",",
"numero_ctg",
"=",
"None",
",",
"archivo",
"=",
"\"constancia.pdf\"",
")",
":",
"ret",
"=",
"self",
".",
"client",
".",
"consultarConstanciaCTGPDF",
"(",
"request",
"=",
"dict",
"(",
"auth",
"=",
"{",
"'token'",
":",
"self",
".",
"Token",
",",
"'sign'",
":",
"self",
".",
"Sign",
",",
"'cuitRepresentado'",
":",
"self",
".",
"Cuit",
",",
"}",
",",
"ctg",
"=",
"numero_ctg",
",",
")",
")",
"[",
"'response'",
"]",
"self",
".",
"__analizar_errores",
"(",
"ret",
")",
"datos",
"=",
"base64",
".",
"b64decode",
"(",
"ret",
".",
"get",
"(",
"'archivo'",
",",
"\"\"",
")",
")",
"f",
"=",
"open",
"(",
"archivo",
",",
"\"wb\"",
")",
"f",
".",
"write",
"(",
"datos",
")",
"f",
".",
"close",
"(",
")",
"return",
"True"
] |
Operation to query the CTG Certificate in PDF
|
[
"Operación",
"Consultar",
"Constancia",
"de",
"CTG",
"en",
"PDF"
] |
python
|
train
|
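The tail of `ConsultarConstanciaCTGPDF` is a generic pattern: base64-decode a payload and write it to disk in binary mode. A sketch of just that step, using a context manager instead of the original's manual `close()`; the function name and default path mirror the record, and the payload is invented.

import base64

def write_pdf(payload_b64, archivo="constancia.pdf"):
    datos = base64.b64decode(payload_b64 or "")
    with open(archivo, "wb") as f:  # 'wb': the decoded payload is raw bytes
        f.write(datos)
    return True

write_pdf(base64.b64encode(b"%PDF-1.4 fake"), "constancia.pdf")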
hvac/hvac
|
hvac/v1/__init__.py
|
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/v1/__init__.py#L675-L703
|
def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs):
"""POST /auth/<mount point>/users/<username>
:param username:
:type username:
:param password:
:type password:
:param policies:
:type policies:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
# Users can have more than 1 policy. It is easier for the user to pass in the
# policies as a list so if they do, we need to convert to a , delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {
'password': password,
'policies': policies
}
params.update(kwargs)
return self._adapter.post('/v1/auth/{}/users/{}'.format(mount_point, username), json=params)
|
[
"def",
"create_userpass",
"(",
"self",
",",
"username",
",",
"password",
",",
"policies",
",",
"mount_point",
"=",
"'userpass'",
",",
"*",
"*",
"kwargs",
")",
":",
"# Users can have more than 1 policy. It is easier for the user to pass in the",
"# policies as a list so if they do, we need to convert to a , delimited string.",
"if",
"isinstance",
"(",
"policies",
",",
"(",
"list",
",",
"set",
",",
"tuple",
")",
")",
":",
"policies",
"=",
"','",
".",
"join",
"(",
"policies",
")",
"params",
"=",
"{",
"'password'",
":",
"password",
",",
"'policies'",
":",
"policies",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"'/v1/auth/{}/users/{}'",
".",
"format",
"(",
"mount_point",
",",
"username",
")",
",",
"json",
"=",
"params",
")"
] |
POST /auth/<mount point>/users/<username>
:param username:
:type username:
:param password:
:type password:
:param policies:
:type policies:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype:
|
[
"POST",
"/",
"auth",
"/",
"<mount",
"point",
">",
"/",
"users",
"/",
"<username",
">"
] |
python
|
train
|
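The interesting detail in `create_userpass` is the input normalization: Vault expects a comma-delimited string, but callers naturally pass a list. Isolated, the pattern looks like this:

def normalize_policies(policies):
    # accept list/set/tuple, or pass a ready-made string through
    if isinstance(policies, (list, set, tuple)):
        return ",".join(policies)
    return policies

assert normalize_policies(["root", "dev"]) == "root,dev"
assert normalize_policies("root,dev") == "root,dev"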
ibm-watson-iot/iot-python
|
samples/customMessageFormat/myCustomCodec.py
|
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/samples/customMessageFormat/myCustomCodec.py#L45-L61
|
def decode(message):
'''
    The decoder understands the comma-separated format produced by the encoder and
allocates the two values to the correct keys:
data['hello'] = 'world'
data['x'] = 10
'''
(hello, x) = message.payload.split(",")
data = {}
data['hello'] = hello
data['x'] = x
timestamp = datetime.now(pytz.timezone('UTC'))
return Message(data, timestamp)
|
[
"def",
"decode",
"(",
"message",
")",
":",
"(",
"hello",
",",
"x",
")",
"=",
"message",
".",
"payload",
".",
"split",
"(",
"\",\"",
")",
"data",
"=",
"{",
"}",
"data",
"[",
"'hello'",
"]",
"=",
"hello",
"data",
"[",
"'x'",
"]",
"=",
"x",
"timestamp",
"=",
"datetime",
".",
"now",
"(",
"pytz",
".",
"timezone",
"(",
"'UTC'",
")",
")",
"return",
"Message",
"(",
"data",
",",
"timestamp",
")"
] |
The decoder understands the comma-separated format produced by the encoder and
allocates the two values to the correct keys:
data['hello'] = 'world'
data['x'] = 10
|
[
"The",
"decoder",
"understands",
"the",
"comma",
"-",
"seperated",
"format",
"produced",
"by",
"the",
"encoder",
"and",
"allocates",
"the",
"two",
"values",
"to",
"the",
"correct",
"keys",
":",
"data",
"[",
"hello",
"]",
"=",
"world",
"data",
"[",
"x",
"]",
"=",
"10"
] |
python
|
test
|
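A round trip through the custom codec above; the `Message` class and the raw payload here are stand-ins, since the real decoder receives an MQTT message object and reads `message.payload`.

from datetime import datetime
import pytz

class Message:
    def __init__(self, data, timestamp):
        self.data, self.timestamp = data, timestamp

def decode(payload):
    hello, x = payload.split(",")
    return Message({"hello": hello, "x": x},
                   datetime.now(pytz.timezone("UTC")))

msg = decode("world,10")
assert msg.data == {"hello": "world", "x": "10"}  # values stay strings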
crytic/slither
|
slither/core/declarations/contract.py
|
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/contract.py#L488-L494
|
def all_state_variables_written(self):
'''
list(StateVariable): List all of the state variables written
'''
all_state_variables_written = [f.all_state_variables_written() for f in self.functions + self.modifiers]
all_state_variables_written = [item for sublist in all_state_variables_written for item in sublist]
return list(set(all_state_variables_written))
|
[
"def",
"all_state_variables_written",
"(",
"self",
")",
":",
"all_state_variables_written",
"=",
"[",
"f",
".",
"all_state_variables_written",
"(",
")",
"for",
"f",
"in",
"self",
".",
"functions",
"+",
"self",
".",
"modifiers",
"]",
"all_state_variables_written",
"=",
"[",
"item",
"for",
"sublist",
"in",
"all_state_variables_written",
"for",
"item",
"in",
"sublist",
"]",
"return",
"list",
"(",
"set",
"(",
"all_state_variables_written",
")",
")"
] |
list(StateVariable): List all of the state variables written
|
[
"list",
"(",
"StateVariable",
")",
":",
"List",
"all",
"of",
"the",
"state",
"variables",
"written"
] |
python
|
train
|
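`all_state_variables_written` is the classic flatten-then-dedupe idiom: collect per-function lists, flatten with a nested comprehension, and dedupe through a set. In isolation:

per_function = [[1, 2], [2, 3], []]
flat = [item for sublist in per_function for item in sublist]
unique = list(set(flat))  # order is not preserved, matching the original
assert sorted(unique) == [1, 2, 3]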
andialbrecht/sqlparse
|
sqlparse/formatter.py
|
https://github.com/andialbrecht/sqlparse/blob/913b56e34edc7e3025feea4744dbd762774805c3/sqlparse/formatter.py#L15-L130
|
def validate_options(options):
"""Validates options."""
kwcase = options.get('keyword_case')
if kwcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for keyword_case: '
'{0!r}'.format(kwcase))
idcase = options.get('identifier_case')
if idcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for identifier_case: '
'{0!r}'.format(idcase))
ofrmt = options.get('output_format')
if ofrmt not in [None, 'sql', 'python', 'php']:
raise SQLParseError('Unknown output format: '
'{0!r}'.format(ofrmt))
strip_comments = options.get('strip_comments', False)
if strip_comments not in [True, False]:
raise SQLParseError('Invalid value for strip_comments: '
'{0!r}'.format(strip_comments))
space_around_operators = options.get('use_space_around_operators', False)
if space_around_operators not in [True, False]:
raise SQLParseError('Invalid value for use_space_around_operators: '
'{0!r}'.format(space_around_operators))
strip_ws = options.get('strip_whitespace', False)
if strip_ws not in [True, False]:
raise SQLParseError('Invalid value for strip_whitespace: '
'{0!r}'.format(strip_ws))
truncate_strings = options.get('truncate_strings')
if truncate_strings is not None:
try:
truncate_strings = int(truncate_strings)
except (ValueError, TypeError):
raise SQLParseError('Invalid value for truncate_strings: '
'{0!r}'.format(truncate_strings))
if truncate_strings <= 1:
raise SQLParseError('Invalid value for truncate_strings: '
'{0!r}'.format(truncate_strings))
options['truncate_strings'] = truncate_strings
options['truncate_char'] = options.get('truncate_char', '[...]')
indent_columns = options.get('indent_columns', False)
if indent_columns not in [True, False]:
raise SQLParseError('Invalid value for indent_columns: '
'{0!r}'.format(indent_columns))
elif indent_columns:
options['reindent'] = True # enforce reindent
options['indent_columns'] = indent_columns
reindent = options.get('reindent', False)
if reindent not in [True, False]:
raise SQLParseError('Invalid value for reindent: '
'{0!r}'.format(reindent))
elif reindent:
options['strip_whitespace'] = True
reindent_aligned = options.get('reindent_aligned', False)
if reindent_aligned not in [True, False]:
raise SQLParseError('Invalid value for reindent_aligned: '
                            '{0!r}'.format(reindent_aligned))
elif reindent_aligned:
options['strip_whitespace'] = True
indent_after_first = options.get('indent_after_first', False)
if indent_after_first not in [True, False]:
raise SQLParseError('Invalid value for indent_after_first: '
'{0!r}'.format(indent_after_first))
options['indent_after_first'] = indent_after_first
indent_tabs = options.get('indent_tabs', False)
if indent_tabs not in [True, False]:
raise SQLParseError('Invalid value for indent_tabs: '
'{0!r}'.format(indent_tabs))
elif indent_tabs:
options['indent_char'] = '\t'
else:
options['indent_char'] = ' '
indent_width = options.get('indent_width', 2)
try:
indent_width = int(indent_width)
except (TypeError, ValueError):
raise SQLParseError('indent_width requires an integer')
if indent_width < 1:
raise SQLParseError('indent_width requires a positive integer')
options['indent_width'] = indent_width
wrap_after = options.get('wrap_after', 0)
try:
wrap_after = int(wrap_after)
except (TypeError, ValueError):
raise SQLParseError('wrap_after requires an integer')
if wrap_after < 0:
raise SQLParseError('wrap_after requires a positive integer')
options['wrap_after'] = wrap_after
comma_first = options.get('comma_first', False)
if comma_first not in [True, False]:
raise SQLParseError('comma_first requires a boolean value')
options['comma_first'] = comma_first
right_margin = options.get('right_margin')
if right_margin is not None:
try:
right_margin = int(right_margin)
except (TypeError, ValueError):
raise SQLParseError('right_margin requires an integer')
if right_margin < 10:
raise SQLParseError('right_margin requires an integer > 10')
options['right_margin'] = right_margin
return options
|
[
"def",
"validate_options",
"(",
"options",
")",
":",
"kwcase",
"=",
"options",
".",
"get",
"(",
"'keyword_case'",
")",
"if",
"kwcase",
"not",
"in",
"[",
"None",
",",
"'upper'",
",",
"'lower'",
",",
"'capitalize'",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for keyword_case: '",
"'{0!r}'",
".",
"format",
"(",
"kwcase",
")",
")",
"idcase",
"=",
"options",
".",
"get",
"(",
"'identifier_case'",
")",
"if",
"idcase",
"not",
"in",
"[",
"None",
",",
"'upper'",
",",
"'lower'",
",",
"'capitalize'",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for identifier_case: '",
"'{0!r}'",
".",
"format",
"(",
"idcase",
")",
")",
"ofrmt",
"=",
"options",
".",
"get",
"(",
"'output_format'",
")",
"if",
"ofrmt",
"not",
"in",
"[",
"None",
",",
"'sql'",
",",
"'python'",
",",
"'php'",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Unknown output format: '",
"'{0!r}'",
".",
"format",
"(",
"ofrmt",
")",
")",
"strip_comments",
"=",
"options",
".",
"get",
"(",
"'strip_comments'",
",",
"False",
")",
"if",
"strip_comments",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for strip_comments: '",
"'{0!r}'",
".",
"format",
"(",
"strip_comments",
")",
")",
"space_around_operators",
"=",
"options",
".",
"get",
"(",
"'use_space_around_operators'",
",",
"False",
")",
"if",
"space_around_operators",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for use_space_around_operators: '",
"'{0!r}'",
".",
"format",
"(",
"space_around_operators",
")",
")",
"strip_ws",
"=",
"options",
".",
"get",
"(",
"'strip_whitespace'",
",",
"False",
")",
"if",
"strip_ws",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for strip_whitespace: '",
"'{0!r}'",
".",
"format",
"(",
"strip_ws",
")",
")",
"truncate_strings",
"=",
"options",
".",
"get",
"(",
"'truncate_strings'",
")",
"if",
"truncate_strings",
"is",
"not",
"None",
":",
"try",
":",
"truncate_strings",
"=",
"int",
"(",
"truncate_strings",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for truncate_strings: '",
"'{0!r}'",
".",
"format",
"(",
"truncate_strings",
")",
")",
"if",
"truncate_strings",
"<=",
"1",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for truncate_strings: '",
"'{0!r}'",
".",
"format",
"(",
"truncate_strings",
")",
")",
"options",
"[",
"'truncate_strings'",
"]",
"=",
"truncate_strings",
"options",
"[",
"'truncate_char'",
"]",
"=",
"options",
".",
"get",
"(",
"'truncate_char'",
",",
"'[...]'",
")",
"indent_columns",
"=",
"options",
".",
"get",
"(",
"'indent_columns'",
",",
"False",
")",
"if",
"indent_columns",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for indent_columns: '",
"'{0!r}'",
".",
"format",
"(",
"indent_columns",
")",
")",
"elif",
"indent_columns",
":",
"options",
"[",
"'reindent'",
"]",
"=",
"True",
"# enforce reindent",
"options",
"[",
"'indent_columns'",
"]",
"=",
"indent_columns",
"reindent",
"=",
"options",
".",
"get",
"(",
"'reindent'",
",",
"False",
")",
"if",
"reindent",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for reindent: '",
"'{0!r}'",
".",
"format",
"(",
"reindent",
")",
")",
"elif",
"reindent",
":",
"options",
"[",
"'strip_whitespace'",
"]",
"=",
"True",
"reindent_aligned",
"=",
"options",
".",
"get",
"(",
"'reindent_aligned'",
",",
"False",
")",
"if",
"reindent_aligned",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for reindent_aligned: '",
"'{0!r}'",
".",
"format",
"(",
"reindent",
")",
")",
"elif",
"reindent_aligned",
":",
"options",
"[",
"'strip_whitespace'",
"]",
"=",
"True",
"indent_after_first",
"=",
"options",
".",
"get",
"(",
"'indent_after_first'",
",",
"False",
")",
"if",
"indent_after_first",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for indent_after_first: '",
"'{0!r}'",
".",
"format",
"(",
"indent_after_first",
")",
")",
"options",
"[",
"'indent_after_first'",
"]",
"=",
"indent_after_first",
"indent_tabs",
"=",
"options",
".",
"get",
"(",
"'indent_tabs'",
",",
"False",
")",
"if",
"indent_tabs",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'Invalid value for indent_tabs: '",
"'{0!r}'",
".",
"format",
"(",
"indent_tabs",
")",
")",
"elif",
"indent_tabs",
":",
"options",
"[",
"'indent_char'",
"]",
"=",
"'\\t'",
"else",
":",
"options",
"[",
"'indent_char'",
"]",
"=",
"' '",
"indent_width",
"=",
"options",
".",
"get",
"(",
"'indent_width'",
",",
"2",
")",
"try",
":",
"indent_width",
"=",
"int",
"(",
"indent_width",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"SQLParseError",
"(",
"'indent_width requires an integer'",
")",
"if",
"indent_width",
"<",
"1",
":",
"raise",
"SQLParseError",
"(",
"'indent_width requires a positive integer'",
")",
"options",
"[",
"'indent_width'",
"]",
"=",
"indent_width",
"wrap_after",
"=",
"options",
".",
"get",
"(",
"'wrap_after'",
",",
"0",
")",
"try",
":",
"wrap_after",
"=",
"int",
"(",
"wrap_after",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"SQLParseError",
"(",
"'wrap_after requires an integer'",
")",
"if",
"wrap_after",
"<",
"0",
":",
"raise",
"SQLParseError",
"(",
"'wrap_after requires a positive integer'",
")",
"options",
"[",
"'wrap_after'",
"]",
"=",
"wrap_after",
"comma_first",
"=",
"options",
".",
"get",
"(",
"'comma_first'",
",",
"False",
")",
"if",
"comma_first",
"not",
"in",
"[",
"True",
",",
"False",
"]",
":",
"raise",
"SQLParseError",
"(",
"'comma_first requires a boolean value'",
")",
"options",
"[",
"'comma_first'",
"]",
"=",
"comma_first",
"right_margin",
"=",
"options",
".",
"get",
"(",
"'right_margin'",
")",
"if",
"right_margin",
"is",
"not",
"None",
":",
"try",
":",
"right_margin",
"=",
"int",
"(",
"right_margin",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"SQLParseError",
"(",
"'right_margin requires an integer'",
")",
"if",
"right_margin",
"<",
"10",
":",
"raise",
"SQLParseError",
"(",
"'right_margin requires an integer > 10'",
")",
"options",
"[",
"'right_margin'",
"]",
"=",
"right_margin",
"return",
"options"
] |
Validates options.
|
[
"Validates",
"options",
"."
] |
python
|
train
|
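Each block of `validate_options` follows the same coerce, range-check, write-back shape. Here is the `indent_width` block extracted as a self-contained function to show the pattern:

class SQLParseError(Exception):
    pass

def validate_indent_width(options):
    width = options.get('indent_width', 2)
    try:
        width = int(width)  # coerce first ...
    except (TypeError, ValueError):
        raise SQLParseError('indent_width requires an integer')
    if width < 1:  # ... then range-check ...
        raise SQLParseError('indent_width requires a positive integer')
    options['indent_width'] = width  # ... then write the normalized value back
    return options

assert validate_indent_width({})['indent_width'] == 2
assert validate_indent_width({'indent_width': '4'})['indent_width'] == 4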
GearPlug/payu-python
|
payu/recurring.py
|
https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/recurring.py#L386-L397
|
def get_additional_charge_by_identifier(self, recurring_billing_id):
"""
Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
"""
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._get(self.url + fmt, headers=self.get_headers())
|
[
"def",
"get_additional_charge_by_identifier",
"(",
"self",
",",
"recurring_billing_id",
")",
":",
"fmt",
"=",
"'recurringBillItems/{}'",
".",
"format",
"(",
"recurring_billing_id",
")",
"return",
"self",
".",
"client",
".",
"_get",
"(",
"self",
".",
"url",
"+",
"fmt",
",",
"headers",
"=",
"self",
".",
"get_headers",
"(",
")",
")"
] |
Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
|
[
"Query",
"extra",
"charge",
"information",
"of",
"an",
"invoice",
"from",
"its",
"identifier",
"."
] |
python
|
train
|
Microsoft/nni
|
src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py
|
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py#L88-L116
|
def json2parameter(in_x, parameter, name=ROOT):
"""
Change json to parameters.
"""
out_y = copy.deepcopy(in_x)
if isinstance(in_x, dict):
if TYPE in in_x.keys():
_type = in_x[TYPE]
name = name + '-' + _type
if _type == 'choice':
_index = parameter[name]
out_y = {
INDEX: _index,
VALUE: json2parameter(in_x[VALUE][_index], parameter, name=name+'[%d]' % _index)
}
else:
out_y = parameter[name]
else:
out_y = dict()
for key in in_x.keys():
out_y[key] = json2parameter(
in_x[key], parameter, name + '[%s]' % str(key))
elif isinstance(in_x, list):
out_y = list()
for i, x_i in enumerate(in_x):
out_y.append(json2parameter(x_i, parameter, name + '[%d]' % i))
else:
        logger.info('in_x is not a dict or a list in json2space function %s', str(in_x))
return out_y
|
[
"def",
"json2parameter",
"(",
"in_x",
",",
"parameter",
",",
"name",
"=",
"ROOT",
")",
":",
"out_y",
"=",
"copy",
".",
"deepcopy",
"(",
"in_x",
")",
"if",
"isinstance",
"(",
"in_x",
",",
"dict",
")",
":",
"if",
"TYPE",
"in",
"in_x",
".",
"keys",
"(",
")",
":",
"_type",
"=",
"in_x",
"[",
"TYPE",
"]",
"name",
"=",
"name",
"+",
"'-'",
"+",
"_type",
"if",
"_type",
"==",
"'choice'",
":",
"_index",
"=",
"parameter",
"[",
"name",
"]",
"out_y",
"=",
"{",
"INDEX",
":",
"_index",
",",
"VALUE",
":",
"json2parameter",
"(",
"in_x",
"[",
"VALUE",
"]",
"[",
"_index",
"]",
",",
"parameter",
",",
"name",
"=",
"name",
"+",
"'[%d]'",
"%",
"_index",
")",
"}",
"else",
":",
"out_y",
"=",
"parameter",
"[",
"name",
"]",
"else",
":",
"out_y",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"in_x",
".",
"keys",
"(",
")",
":",
"out_y",
"[",
"key",
"]",
"=",
"json2parameter",
"(",
"in_x",
"[",
"key",
"]",
",",
"parameter",
",",
"name",
"+",
"'[%s]'",
"%",
"str",
"(",
"key",
")",
")",
"elif",
"isinstance",
"(",
"in_x",
",",
"list",
")",
":",
"out_y",
"=",
"list",
"(",
")",
"for",
"i",
",",
"x_i",
"in",
"enumerate",
"(",
"in_x",
")",
":",
"out_y",
".",
"append",
"(",
"json2parameter",
"(",
"x_i",
",",
"parameter",
",",
"name",
"+",
"'[%d]'",
"%",
"i",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'in_x is not a dict or a list in json2space fuinction %s'",
",",
"str",
"(",
"in_x",
")",
")",
"return",
"out_y"
] |
Change json to parameters.
|
[
"Change",
"json",
"to",
"parameters",
"."
] |
python
|
train
|
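To see `json2parameter` in action, here is a condensed re-statement with the module constants filled in. The values 'root', '_type', '_value', '_index' follow nni's conventions but are not shown in the record, so treat them as assumptions.

import copy

ROOT, TYPE, VALUE, INDEX = 'root', '_type', '_value', '_index'  # assumed constants

def json2parameter(in_x, parameter, name=ROOT):
    out_y = copy.deepcopy(in_x)
    if isinstance(in_x, dict):
        if TYPE in in_x:
            name = name + '-' + in_x[TYPE]
            if in_x[TYPE] == 'choice':
                i = parameter[name]
                out_y = {INDEX: i,
                         VALUE: json2parameter(in_x[VALUE][i], parameter,
                                               name + '[%d]' % i)}
            else:
                out_y = parameter[name]
        else:
            out_y = {k: json2parameter(v, parameter, name + '[%s]' % k)
                     for k, v in in_x.items()}
    elif isinstance(in_x, list):
        out_y = [json2parameter(v, parameter, name + '[%d]' % i)
                 for i, v in enumerate(in_x)]
    return out_y

space = {"lr": {TYPE: "choice", VALUE: [0.01, 0.1]}}
assert json2parameter(space, {"root[lr]-choice": 1}) == \
    {"lr": {INDEX: 1, VALUE: 0.1}}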
reingart/gui2py
|
gui/controls/listview.py
|
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L397-L402
|
def clear(self):
"Remove all items and reset internal structures"
dict.clear(self)
self._key = 0
if hasattr(self._list_view, "wx_obj"):
self._list_view.wx_obj.DeleteAllItems()
|
[
"def",
"clear",
"(",
"self",
")",
":",
"dict",
".",
"clear",
"(",
"self",
")",
"self",
".",
"_key",
"=",
"0",
"if",
"hasattr",
"(",
"self",
".",
"_list_view",
",",
"\"wx_obj\"",
")",
":",
"self",
".",
"_list_view",
".",
"wx_obj",
".",
"DeleteAllItems",
"(",
")"
] |
Remove all items and reset internal structures
|
[
"Remove",
"all",
"items",
"and",
"reset",
"internal",
"structures"
] |
python
|
test
|
SuperCowPowers/workbench
|
workbench/clients/client_helper.py
|
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/clients/client_helper.py#L7-L24
|
def grab_server_args():
"""Grab server info from configuration file"""
workbench_conf = ConfigParser.ConfigParser()
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')
workbench_conf.read(config_path)
server = workbench_conf.get('workbench', 'server_uri')
port = workbench_conf.get('workbench', 'server_port')
# Collect args from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')
parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')
args, commands = parser.parse_known_args()
server = str(args.server)
port = str(args.port)
return {'server':server, 'port':port, 'commands': commands}
|
[
"def",
"grab_server_args",
"(",
")",
":",
"workbench_conf",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"'config.ini'",
")",
"workbench_conf",
".",
"read",
"(",
"config_path",
")",
"server",
"=",
"workbench_conf",
".",
"get",
"(",
"'workbench'",
",",
"'server_uri'",
")",
"port",
"=",
"workbench_conf",
".",
"get",
"(",
"'workbench'",
",",
"'server_port'",
")",
"# Collect args from the command line",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--server'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"server",
",",
"help",
"=",
"'location of workbench server'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--port'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"port",
",",
"help",
"=",
"'port used by workbench server'",
")",
"args",
",",
"commands",
"=",
"parser",
".",
"parse_known_args",
"(",
")",
"server",
"=",
"str",
"(",
"args",
".",
"server",
")",
"port",
"=",
"str",
"(",
"args",
".",
"port",
")",
"return",
"{",
"'server'",
":",
"server",
",",
"'port'",
":",
"port",
",",
"'commands'",
":",
"commands",
"}"
] |
Grab server info from configuration file
|
[
"Grab",
"server",
"info",
"from",
"configuration",
"file"
] |
python
|
train
|
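`grab_server_args` layers configuration sources: values read from `config.ini` become argparse defaults, so command-line flags override the file. The layering in isolation, with invented defaults:

import argparse

def parse_with_defaults(server_default, port_default, argv=None):
    parser = argparse.ArgumentParser()
    # config-file values arrive as defaults, so explicit flags win
    parser.add_argument('-s', '--server', type=str, default=server_default)
    parser.add_argument('-p', '--port', type=int, default=port_default)
    args, commands = parser.parse_known_args(argv)
    return {'server': str(args.server), 'port': str(args.port),
            'commands': commands}

assert parse_with_defaults('localhost', 4242, [])['port'] == '4242'
assert parse_with_defaults('localhost', 4242, ['-p', '8080'])['port'] == '8080'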
apache/incubator-heron
|
heron/statemgrs/src/python/filestatemanager.py
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/statemgrs/src/python/filestatemanager.py#L273-L285
|
def get_scheduler_location(self, topologyName, callback=None):
"""
Get scheduler location
"""
if callback:
self.scheduler_location_watchers[topologyName].append(callback)
else:
scheduler_location_path = self.get_scheduler_location_path(topologyName)
with open(scheduler_location_path) as f:
data = f.read()
scheduler_location = SchedulerLocation()
scheduler_location.ParseFromString(data)
return scheduler_location
|
[
"def",
"get_scheduler_location",
"(",
"self",
",",
"topologyName",
",",
"callback",
"=",
"None",
")",
":",
"if",
"callback",
":",
"self",
".",
"scheduler_location_watchers",
"[",
"topologyName",
"]",
".",
"append",
"(",
"callback",
")",
"else",
":",
"scheduler_location_path",
"=",
"self",
".",
"get_scheduler_location_path",
"(",
"topologyName",
")",
"with",
"open",
"(",
"scheduler_location_path",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"scheduler_location",
"=",
"SchedulerLocation",
"(",
")",
"scheduler_location",
".",
"ParseFromString",
"(",
"data",
")",
"return",
"scheduler_location"
] |
Get scheduler location
|
[
"Get",
"scheduler",
"location"
] |
python
|
valid
|
reingart/pyafipws
|
wsctg.py
|
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsctg.py#L387-L410
|
def ConfirmarDefinitivo(self, numero_carta_de_porte, numero_ctg,
establecimiento=None, codigo_cosecha=None, peso_neto_carga=None,
**kwargs):
"Confirma arribo definitivo CTG"
ret = self.client.confirmarDefinitivo(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosConfirmarDefinitivo=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
establecimiento=establecimiento,
codigoCosecha=codigo_cosecha,
pesoNeto=peso_neto_carga,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('datosResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['ctg'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoTransaccion = str(datos.get('codigoOperacion', ""))
self.Observaciones = ""
return self.CodigoTransaccion
|
[
"def",
"ConfirmarDefinitivo",
"(",
"self",
",",
"numero_carta_de_porte",
",",
"numero_ctg",
",",
"establecimiento",
"=",
"None",
",",
"codigo_cosecha",
"=",
"None",
",",
"peso_neto_carga",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"self",
".",
"client",
".",
"confirmarDefinitivo",
"(",
"request",
"=",
"dict",
"(",
"auth",
"=",
"{",
"'token'",
":",
"self",
".",
"Token",
",",
"'sign'",
":",
"self",
".",
"Sign",
",",
"'cuitRepresentado'",
":",
"self",
".",
"Cuit",
",",
"}",
",",
"datosConfirmarDefinitivo",
"=",
"dict",
"(",
"cartaPorte",
"=",
"numero_carta_de_porte",
",",
"ctg",
"=",
"numero_ctg",
",",
"establecimiento",
"=",
"establecimiento",
",",
"codigoCosecha",
"=",
"codigo_cosecha",
",",
"pesoNeto",
"=",
"peso_neto_carga",
",",
")",
")",
")",
"[",
"'response'",
"]",
"self",
".",
"__analizar_errores",
"(",
"ret",
")",
"datos",
"=",
"ret",
".",
"get",
"(",
"'datosResponse'",
")",
"if",
"datos",
":",
"self",
".",
"CartaPorte",
"=",
"str",
"(",
"datos",
"[",
"'cartaPorte'",
"]",
")",
"self",
".",
"NumeroCTG",
"=",
"str",
"(",
"datos",
"[",
"'ctg'",
"]",
")",
"self",
".",
"FechaHora",
"=",
"str",
"(",
"datos",
"[",
"'fechaHora'",
"]",
")",
"self",
".",
"CodigoTransaccion",
"=",
"str",
"(",
"datos",
".",
"get",
"(",
"'codigoOperacion'",
",",
"\"\"",
")",
")",
"self",
".",
"Observaciones",
"=",
"\"\"",
"return",
"self",
".",
"CodigoTransaccion"
] |
Confirms definitive CTG arrival
|
[
"Confirma",
"arribo",
"definitivo",
"CTG"
] |
python
|
train
|
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/color/color_space.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/color_space.py#L78-L106
|
def _hsv_to_rgb(hsvs):
"""Convert Nx3 or Nx4 hsv to rgb"""
hsvs, n_dim = _check_color_dim(hsvs)
# In principle, we *might* be able to vectorize this, but might as well
# wait until a compelling use case appears
rgbs = list()
for hsv in hsvs:
c = hsv[1] * hsv[2]
m = hsv[2] - c
hp = hsv[0] / 60
x = c * (1 - abs(hp % 2 - 1))
if 0 <= hp < 1:
r, g, b = c, x, 0
elif hp < 2:
r, g, b = x, c, 0
elif hp < 3:
r, g, b = 0, c, x
elif hp < 4:
r, g, b = 0, x, c
elif hp < 5:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
rgb = [r + m, g + m, b + m]
rgbs.append(rgb)
rgbs = np.array(rgbs, dtype=np.float32)
if n_dim == 4:
        rgbs = np.concatenate((rgbs, hsvs[:, 3:]), axis=1)  # keep alpha as an (N, 1) column for axis=1 concat
return rgbs
|
[
"def",
"_hsv_to_rgb",
"(",
"hsvs",
")",
":",
"hsvs",
",",
"n_dim",
"=",
"_check_color_dim",
"(",
"hsvs",
")",
"# In principle, we *might* be able to vectorize this, but might as well",
"# wait until a compelling use case appears",
"rgbs",
"=",
"list",
"(",
")",
"for",
"hsv",
"in",
"hsvs",
":",
"c",
"=",
"hsv",
"[",
"1",
"]",
"*",
"hsv",
"[",
"2",
"]",
"m",
"=",
"hsv",
"[",
"2",
"]",
"-",
"c",
"hp",
"=",
"hsv",
"[",
"0",
"]",
"/",
"60",
"x",
"=",
"c",
"*",
"(",
"1",
"-",
"abs",
"(",
"hp",
"%",
"2",
"-",
"1",
")",
")",
"if",
"0",
"<=",
"hp",
"<",
"1",
":",
"r",
",",
"g",
",",
"b",
"=",
"c",
",",
"x",
",",
"0",
"elif",
"hp",
"<",
"2",
":",
"r",
",",
"g",
",",
"b",
"=",
"x",
",",
"c",
",",
"0",
"elif",
"hp",
"<",
"3",
":",
"r",
",",
"g",
",",
"b",
"=",
"0",
",",
"c",
",",
"x",
"elif",
"hp",
"<",
"4",
":",
"r",
",",
"g",
",",
"b",
"=",
"0",
",",
"x",
",",
"c",
"elif",
"hp",
"<",
"5",
":",
"r",
",",
"g",
",",
"b",
"=",
"x",
",",
"0",
",",
"c",
"else",
":",
"r",
",",
"g",
",",
"b",
"=",
"c",
",",
"0",
",",
"x",
"rgb",
"=",
"[",
"r",
"+",
"m",
",",
"g",
"+",
"m",
",",
"b",
"+",
"m",
"]",
"rgbs",
".",
"append",
"(",
"rgb",
")",
"rgbs",
"=",
"np",
".",
"array",
"(",
"rgbs",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"if",
"n_dim",
"==",
"4",
":",
"rgbs",
"=",
"np",
".",
"concatenate",
"(",
"(",
"rgbs",
",",
"hsvs",
"[",
":",
",",
"3",
"]",
")",
",",
"axis",
"=",
"1",
")",
"return",
"rgbs"
] |
Convert Nx3 or Nx4 hsv to rgb
|
[
"Convert",
"Nx3",
"or",
"Nx4",
"hsv",
"to",
"rgb"
] |
python
|
train
|
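The piecewise hue logic in `_hsv_to_rgb` can be sanity-checked on the primaries. Below is a scalar version of the same formula for illustration, not the vectorized helper itself:

def hsv_to_rgb_one(h, s, v):
    c = s * v                      # chroma
    m = v - c
    hp = h / 60.0                  # hue sector in [0, 6)
    x = c * (1 - abs(hp % 2 - 1))
    if 0 <= hp < 1:
        r, g, b = c, x, 0
    elif hp < 2:
        r, g, b = x, c, 0
    elif hp < 3:
        r, g, b = 0, c, x
    elif hp < 4:
        r, g, b = 0, x, c
    elif hp < 5:
        r, g, b = x, 0, c
    else:
        r, g, b = c, 0, x
    return (r + m, g + m, b + m)

assert hsv_to_rgb_one(0, 1, 1) == (1, 0, 0)     # pure red
assert hsv_to_rgb_one(120, 1, 1) == (0, 1, 0)   # pure green
assert hsv_to_rgb_one(240, 1, 1) == (0, 0, 1)   # pure blue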
ewels/MultiQC
|
multiqc/modules/picard/ValidateSamFile.py
|
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/picard/ValidateSamFile.py#L318-L352
|
def _generate_detailed_table(data):
"""
    Generates and returns the HTML table that overviews the details found.
"""
headers = _get_general_stats_headers()
# Only add headers for errors/warnings we have found
for problems in data.values():
for problem in problems:
if problem not in headers and problem in WARNING_DESCRIPTIONS:
headers['WARNING_count']['hidden'] = False
headers[problem] = {
'description': WARNING_DESCRIPTIONS[problem],
'namespace': 'WARNING',
'scale': headers['WARNING_count']['scale'],
'format': '{:.0f}',
'shared_key': 'warnings',
'hidden': True, # Hide by default; to unclutter things.
}
if problem not in headers and problem in ERROR_DESCRIPTIONS:
headers['ERROR_count']['hidden'] = False
headers[problem] = {
'description': ERROR_DESCRIPTIONS[problem],
'namespace': 'ERROR',
'scale': headers['ERROR_count']['scale'],
'format': '{:.0f}',
'shared_key': 'errors',
'hidden': True, # Hide by default; to unclutter things.
}
table_config = {
'table_title': 'Picard: SAM/BAM File Validation',
}
return table.plot(data=data, headers=headers, pconfig=table_config)
|
[
"def",
"_generate_detailed_table",
"(",
"data",
")",
":",
"headers",
"=",
"_get_general_stats_headers",
"(",
")",
"# Only add headers for errors/warnings we have found",
"for",
"problems",
"in",
"data",
".",
"values",
"(",
")",
":",
"for",
"problem",
"in",
"problems",
":",
"if",
"problem",
"not",
"in",
"headers",
"and",
"problem",
"in",
"WARNING_DESCRIPTIONS",
":",
"headers",
"[",
"'WARNING_count'",
"]",
"[",
"'hidden'",
"]",
"=",
"False",
"headers",
"[",
"problem",
"]",
"=",
"{",
"'description'",
":",
"WARNING_DESCRIPTIONS",
"[",
"problem",
"]",
",",
"'namespace'",
":",
"'WARNING'",
",",
"'scale'",
":",
"headers",
"[",
"'WARNING_count'",
"]",
"[",
"'scale'",
"]",
",",
"'format'",
":",
"'{:.0f}'",
",",
"'shared_key'",
":",
"'warnings'",
",",
"'hidden'",
":",
"True",
",",
"# Hide by default; to unclutter things.",
"}",
"if",
"problem",
"not",
"in",
"headers",
"and",
"problem",
"in",
"ERROR_DESCRIPTIONS",
":",
"headers",
"[",
"'ERROR_count'",
"]",
"[",
"'hidden'",
"]",
"=",
"False",
"headers",
"[",
"problem",
"]",
"=",
"{",
"'description'",
":",
"ERROR_DESCRIPTIONS",
"[",
"problem",
"]",
",",
"'namespace'",
":",
"'ERROR'",
",",
"'scale'",
":",
"headers",
"[",
"'ERROR_count'",
"]",
"[",
"'scale'",
"]",
",",
"'format'",
":",
"'{:.0f}'",
",",
"'shared_key'",
":",
"'errors'",
",",
"'hidden'",
":",
"True",
",",
"# Hide by default; to unclutter things.",
"}",
"table_config",
"=",
"{",
"'table_title'",
":",
"'Picard: SAM/BAM File Validation'",
",",
"}",
"return",
"table",
".",
"plot",
"(",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"pconfig",
"=",
"table_config",
")"
] |
Generates and returns the HTML table that overviews the details found.
|
[
"Generates",
"and",
"retuns",
"the",
"HTML",
"table",
"that",
"overviews",
"the",
"details",
"found",
"."
] |
python
|
train
|
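The header-building loop in `_generate_detailed_table` only materializes columns for problems actually observed, and un-hides the aggregate count column when it does. Stripped to its core, with the descriptions and sample data invented:

WARNING_DESCRIPTIONS = {'RECORD_MISSING_READ_GROUP': 'A read lacks a read group'}

headers = {'WARNING_count': {'hidden': True, 'scale': 'Oranges'}}
data = {'sample1': {'RECORD_MISSING_READ_GROUP': 3}}

for problems in data.values():
    for problem in problems:
        if problem not in headers and problem in WARNING_DESCRIPTIONS:
            headers['WARNING_count']['hidden'] = False  # surface the aggregate column
            headers[problem] = {
                'description': WARNING_DESCRIPTIONS[problem],
                'namespace': 'WARNING',
                'scale': headers['WARNING_count']['scale'],
                'hidden': True,  # individual columns stay hidden by default
            }

assert 'RECORD_MISSING_READ_GROUP' in headers
assert headers['WARNING_count']['hidden'] is False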
ReFirmLabs/binwalk
|
src/binwalk/core/magic.py
|
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/magic.py#L548-L771
|
def _analyze(self, signature, offset):
'''
        Analyzes self.data for the specified signature data at the specified offset.
@signature - The signature to apply to the data.
@offset - The offset in self.data to apply the signature to.
Returns a dictionary of tags parsed from the data.
'''
description = []
max_line_level = 0
previous_line_end = 0
tags = {'id': signature.id, 'offset':
offset, 'invalid': False, 'once': False}
# Apply each line of the signature to self.data, starting at the
# specified offset
for n in range(0, len(signature.lines)):
line = signature.lines[n]
# Ignore indentation levels above the current max indent level
if line.level <= max_line_level:
# If the relative offset of this signature line is just an
# integer value, use it
if isinstance(line.offset, int):
line_offset = line.offset
# Else, evaluate the complex expression
else:
# Format the previous_line_end value into a string. Add the '+' sign to explicitly
# state that this value is to be added to any subsequent values in the expression
# (e.g., '&0' becomes '4+0').
ple = '%d+' % previous_line_end
                    # Allow users to use either the '&0' (libmagic) or '&+0' (explicit addition) syntaxes;
# replace both with the ple text.
line_offset_text = line.offset.replace('&+', ple).replace('&', ple)
# Evaluate the expression
line_offset = self._do_math(offset, line_offset_text)
# Sanity check
if not isinstance(line_offset, int):
raise ParserException("Failed to convert offset '%s' to a number: '%s'" % (line.offset, line.text))
# The start of the data needed by this line is at offset + line_offset.
# The end of the data will be line.size bytes later.
start = offset + line_offset
end = start + line.size
# If the line has a packed format string, unpack it
if line.pkfmt:
try:
dvalue = struct.unpack(line.pkfmt, binwalk.core.compat.str2bytes(self.data[start:end]))[0]
# Not enough bytes left in self.data for the specified
# format size
except struct.error as e:
dvalue = 0
# Else, this is a string
else:
# Wildcard strings have line.value == None
if line.value is None:
# Check to see if this is a string whose size is known and has been specified on a previous
# signature line.
if binwalk.core.compat.has_key(tags, 'strlen') and binwalk.core.compat.has_key(line.tags, 'string'):
dvalue = self.data[start:(start + tags['strlen'])]
# Else, just terminate the string at the first newline,
# carriage return, or NULL byte
else:
dvalue = self.data[start:end].split('\x00')[0].split('\r')[0].split('\n')[0]
# Non-wildcard strings have a known length, specified in
# the signature line
else:
dvalue = self.data[start:end]
# Some integer values have special operations that need to be performed on them
# before comparison (e.g., "belong&0x0000FFFF"). Complex math expressions are
# supported here as well.
# if isinstance(dvalue, int) and line.operator:
if line.operator:
try:
# If the operator value of this signature line is just
# an integer value, use it
if isinstance(line.opvalue, int) or isinstance(line.opvalue, long):
opval = line.opvalue
# Else, evaluate the complex expression
else:
opval = self._do_math(offset, line.opvalue)
# Perform the specified operation
if line.operator == '&':
dvalue &= opval
elif line.operator == '|':
dvalue |= opval
elif line.operator == '*':
dvalue *= opval
elif line.operator == '+':
dvalue += opval
elif line.operator == '-':
dvalue -= opval
elif line.operator == '/':
dvalue /= opval
elif line.operator == '~':
dvalue = ~opval
elif line.operator == '^':
dvalue ^= opval
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise ParserException("Operation '" +
str(dvalue) +
" " +
str(line.operator) +
"= " +
str(line.opvalue) +
"' failed: " + str(e))
# Does the data (dvalue) match the specified comparison?
if ((line.value is None) or
(line.regex and line.value.match(dvalue)) or
(line.condition == '=' and dvalue == line.value) or
(line.condition == '>' and dvalue > line.value) or
(line.condition == '<' and dvalue < line.value) or
(line.condition == '!' and dvalue != line.value) or
(line.condition == '~' and (dvalue == ~line.value)) or
(line.condition == '^' and (dvalue ^ line.value)) or
(line.condition == '&' and (dvalue & line.value)) or
(line.condition == '|' and (dvalue | line.value))):
# Up until this point, date fields are treated as integer values,
# but we want to display them as nicely formatted strings.
if line.type == 'date':
try:
ts = datetime.datetime.utcfromtimestamp(dvalue)
dvalue = ts.strftime("%Y-%m-%d %H:%M:%S")
except KeyboardInterrupt as e:
raise e
except Exception:
dvalue = "invalid timestamp"
# Generate the tuple for the format string
dvalue_tuple = ()
for x in self.fmtstr.finditer(line.format):
dvalue_tuple += (dvalue,)
# Format the description string
desc = line.format % dvalue_tuple
# If there was any description string, append it to the
# list of description string parts
if desc:
description.append(desc)
# Process tag keywords specified in the signature line. These have already been parsed out of the
# original format string so that they can be processed
# separately from the printed description string.
for (tag_name, tag_value) in binwalk.core.compat.iterator(line.tags):
# If the tag value is a string, try to format it
if isinstance(tag_value, str):
# Generate the tuple for the format string
dvalue_tuple = ()
for x in self.fmtstr.finditer(tag_value):
dvalue_tuple += (dvalue,)
# Format the tag string
tags[tag_name] = tag_value % dvalue_tuple
# Else, just use the raw tag value
else:
tags[tag_name] = tag_value
# Some tag values are intended to be integer values, so
# try to convert them as such
try:
tags[tag_name] = int(tags[tag_name], 0)
except KeyboardInterrupt as e:
raise e
except Exception as e:
pass
                # Abort processing as soon as this signature is marked invalid, unless invalid results
# were explicitly requested. This means that the sooner invalid checks are made in a
# given signature, the faster the scan can filter out false
# positives.
if not self.show_invalid and tags['invalid']:
break
# Look ahead to the next line in the signature; if its indent level is greater than
# that of the current line, then track the end of data for the current line. This is
# so that subsequent lines can use the '>>&0' offset syntax to specify relative offsets
# from previous lines.
try:
next_line = signature.lines[n + 1]
if next_line.level > line.level:
if line.type == 'string':
previous_line_end = line_offset + len(dvalue)
else:
previous_line_end = line_offset + line.size
except IndexError as e:
pass
# If this line satisfied its comparison, +1 the max
# indentation level
max_line_level = line.level + 1
else:
# No match on the first line, abort
if line.level == 0:
break
else:
# If this line did not satisfy its comparison, then higher
# indentation levels will not be accepted.
max_line_level = line.level
# Join the formatted description strings and remove backspace
        # characters (plus the preceding character as well)
tags['description'] = self.bspace.sub('', " ".join(description))
# This should never happen
if not tags['description']:
tags['display'] = False
tags['invalid'] = True
# If the formatted string contains non-printable characters, consider
# it invalid
if self.printable.match(tags['description']).group() != tags['description']:
tags['invalid'] = True
return tags
|
[
"def",
"_analyze",
"(",
"self",
",",
"signature",
",",
"offset",
")",
":",
"description",
"=",
"[",
"]",
"max_line_level",
"=",
"0",
"previous_line_end",
"=",
"0",
"tags",
"=",
"{",
"'id'",
":",
"signature",
".",
"id",
",",
"'offset'",
":",
"offset",
",",
"'invalid'",
":",
"False",
",",
"'once'",
":",
"False",
"}",
"# Apply each line of the signature to self.data, starting at the",
"# specified offset",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"signature",
".",
"lines",
")",
")",
":",
"line",
"=",
"signature",
".",
"lines",
"[",
"n",
"]",
"# Ignore indentation levels above the current max indent level",
"if",
"line",
".",
"level",
"<=",
"max_line_level",
":",
"# If the relative offset of this signature line is just an",
"# integer value, use it",
"if",
"isinstance",
"(",
"line",
".",
"offset",
",",
"int",
")",
":",
"line_offset",
"=",
"line",
".",
"offset",
"# Else, evaluate the complex expression",
"else",
":",
"# Format the previous_line_end value into a string. Add the '+' sign to explicitly",
"# state that this value is to be added to any subsequent values in the expression",
"# (e.g., '&0' becomes '4+0').",
"ple",
"=",
"'%d+'",
"%",
"previous_line_end",
"# Allow users to use either the '&0' (libmagic) or '&+0' (explcit addition) sytaxes;",
"# replace both with the ple text.",
"line_offset_text",
"=",
"line",
".",
"offset",
".",
"replace",
"(",
"'&+'",
",",
"ple",
")",
".",
"replace",
"(",
"'&'",
",",
"ple",
")",
"# Evaluate the expression",
"line_offset",
"=",
"self",
".",
"_do_math",
"(",
"offset",
",",
"line_offset_text",
")",
"# Sanity check",
"if",
"not",
"isinstance",
"(",
"line_offset",
",",
"int",
")",
":",
"raise",
"ParserException",
"(",
"\"Failed to convert offset '%s' to a number: '%s'\"",
"%",
"(",
"line",
".",
"offset",
",",
"line",
".",
"text",
")",
")",
"# The start of the data needed by this line is at offset + line_offset.",
"# The end of the data will be line.size bytes later.",
"start",
"=",
"offset",
"+",
"line_offset",
"end",
"=",
"start",
"+",
"line",
".",
"size",
"# If the line has a packed format string, unpack it",
"if",
"line",
".",
"pkfmt",
":",
"try",
":",
"dvalue",
"=",
"struct",
".",
"unpack",
"(",
"line",
".",
"pkfmt",
",",
"binwalk",
".",
"core",
".",
"compat",
".",
"str2bytes",
"(",
"self",
".",
"data",
"[",
"start",
":",
"end",
"]",
")",
")",
"[",
"0",
"]",
"# Not enough bytes left in self.data for the specified",
"# format size",
"except",
"struct",
".",
"error",
"as",
"e",
":",
"dvalue",
"=",
"0",
"# Else, this is a string",
"else",
":",
"# Wildcard strings have line.value == None",
"if",
"line",
".",
"value",
"is",
"None",
":",
"# Check to see if this is a string whose size is known and has been specified on a previous",
"# signature line.",
"if",
"binwalk",
".",
"core",
".",
"compat",
".",
"has_key",
"(",
"tags",
",",
"'strlen'",
")",
"and",
"binwalk",
".",
"core",
".",
"compat",
".",
"has_key",
"(",
"line",
".",
"tags",
",",
"'string'",
")",
":",
"dvalue",
"=",
"self",
".",
"data",
"[",
"start",
":",
"(",
"start",
"+",
"tags",
"[",
"'strlen'",
"]",
")",
"]",
"# Else, just terminate the string at the first newline,",
"# carriage return, or NULL byte",
"else",
":",
"dvalue",
"=",
"self",
".",
"data",
"[",
"start",
":",
"end",
"]",
".",
"split",
"(",
"'\\x00'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'\\r'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"# Non-wildcard strings have a known length, specified in",
"# the signature line",
"else",
":",
"dvalue",
"=",
"self",
".",
"data",
"[",
"start",
":",
"end",
"]",
"# Some integer values have special operations that need to be performed on them",
"# before comparison (e.g., \"belong&0x0000FFFF\"). Complex math expressions are",
"# supported here as well.",
"# if isinstance(dvalue, int) and line.operator:",
"if",
"line",
".",
"operator",
":",
"try",
":",
"# If the operator value of this signature line is just",
"# an integer value, use it",
"if",
"isinstance",
"(",
"line",
".",
"opvalue",
",",
"int",
")",
"or",
"isinstance",
"(",
"line",
".",
"opvalue",
",",
"long",
")",
":",
"opval",
"=",
"line",
".",
"opvalue",
"# Else, evaluate the complex expression",
"else",
":",
"opval",
"=",
"self",
".",
"_do_math",
"(",
"offset",
",",
"line",
".",
"opvalue",
")",
"# Perform the specified operation",
"if",
"line",
".",
"operator",
"==",
"'&'",
":",
"dvalue",
"&=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'|'",
":",
"dvalue",
"|=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'*'",
":",
"dvalue",
"*=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'+'",
":",
"dvalue",
"+=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'-'",
":",
"dvalue",
"-=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'/'",
":",
"dvalue",
"/=",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'~'",
":",
"dvalue",
"=",
"~",
"opval",
"elif",
"line",
".",
"operator",
"==",
"'^'",
":",
"dvalue",
"^=",
"opval",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ParserException",
"(",
"\"Operation '\"",
"+",
"str",
"(",
"dvalue",
")",
"+",
"\" \"",
"+",
"str",
"(",
"line",
".",
"operator",
")",
"+",
"\"= \"",
"+",
"str",
"(",
"line",
".",
"opvalue",
")",
"+",
"\"' failed: \"",
"+",
"str",
"(",
"e",
")",
")",
"# Does the data (dvalue) match the specified comparison?",
"if",
"(",
"(",
"line",
".",
"value",
"is",
"None",
")",
"or",
"(",
"line",
".",
"regex",
"and",
"line",
".",
"value",
".",
"match",
"(",
"dvalue",
")",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'='",
"and",
"dvalue",
"==",
"line",
".",
"value",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'>'",
"and",
"dvalue",
">",
"line",
".",
"value",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'<'",
"and",
"dvalue",
"<",
"line",
".",
"value",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'!'",
"and",
"dvalue",
"!=",
"line",
".",
"value",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'~'",
"and",
"(",
"dvalue",
"==",
"~",
"line",
".",
"value",
")",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'^'",
"and",
"(",
"dvalue",
"^",
"line",
".",
"value",
")",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'&'",
"and",
"(",
"dvalue",
"&",
"line",
".",
"value",
")",
")",
"or",
"(",
"line",
".",
"condition",
"==",
"'|'",
"and",
"(",
"dvalue",
"|",
"line",
".",
"value",
")",
")",
")",
":",
"# Up until this point, date fields are treated as integer values,",
"# but we want to display them as nicely formatted strings.",
"if",
"line",
".",
"type",
"==",
"'date'",
":",
"try",
":",
"ts",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"dvalue",
")",
"dvalue",
"=",
"ts",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
":",
"dvalue",
"=",
"\"invalid timestamp\"",
"# Generate the tuple for the format string",
"dvalue_tuple",
"=",
"(",
")",
"for",
"x",
"in",
"self",
".",
"fmtstr",
".",
"finditer",
"(",
"line",
".",
"format",
")",
":",
"dvalue_tuple",
"+=",
"(",
"dvalue",
",",
")",
"# Format the description string",
"desc",
"=",
"line",
".",
"format",
"%",
"dvalue_tuple",
"# If there was any description string, append it to the",
"# list of description string parts",
"if",
"desc",
":",
"description",
".",
"append",
"(",
"desc",
")",
"# Process tag keywords specified in the signature line. These have already been parsed out of the",
"# original format string so that they can be processed",
"# separately from the printed description string.",
"for",
"(",
"tag_name",
",",
"tag_value",
")",
"in",
"binwalk",
".",
"core",
".",
"compat",
".",
"iterator",
"(",
"line",
".",
"tags",
")",
":",
"# If the tag value is a string, try to format it",
"if",
"isinstance",
"(",
"tag_value",
",",
"str",
")",
":",
"# Generate the tuple for the format string",
"dvalue_tuple",
"=",
"(",
")",
"for",
"x",
"in",
"self",
".",
"fmtstr",
".",
"finditer",
"(",
"tag_value",
")",
":",
"dvalue_tuple",
"+=",
"(",
"dvalue",
",",
")",
"# Format the tag string",
"tags",
"[",
"tag_name",
"]",
"=",
"tag_value",
"%",
"dvalue_tuple",
"# Else, just use the raw tag value",
"else",
":",
"tags",
"[",
"tag_name",
"]",
"=",
"tag_value",
"# Some tag values are intended to be integer values, so",
"# try to convert them as such",
"try",
":",
"tags",
"[",
"tag_name",
"]",
"=",
"int",
"(",
"tags",
"[",
"tag_name",
"]",
",",
"0",
")",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"pass",
"# Abort processing soon as this signature is marked invalid, unless invalid results",
"# were explicitly requested. This means that the sooner invalid checks are made in a",
"# given signature, the faster the scan can filter out false",
"# positives.",
"if",
"not",
"self",
".",
"show_invalid",
"and",
"tags",
"[",
"'invalid'",
"]",
":",
"break",
"# Look ahead to the next line in the signature; if its indent level is greater than",
"# that of the current line, then track the end of data for the current line. This is",
"# so that subsequent lines can use the '>>&0' offset syntax to specify relative offsets",
"# from previous lines.",
"try",
":",
"next_line",
"=",
"signature",
".",
"lines",
"[",
"n",
"+",
"1",
"]",
"if",
"next_line",
".",
"level",
">",
"line",
".",
"level",
":",
"if",
"line",
".",
"type",
"==",
"'string'",
":",
"previous_line_end",
"=",
"line_offset",
"+",
"len",
"(",
"dvalue",
")",
"else",
":",
"previous_line_end",
"=",
"line_offset",
"+",
"line",
".",
"size",
"except",
"IndexError",
"as",
"e",
":",
"pass",
"# If this line satisfied its comparison, +1 the max",
"# indentation level",
"max_line_level",
"=",
"line",
".",
"level",
"+",
"1",
"else",
":",
"# No match on the first line, abort",
"if",
"line",
".",
"level",
"==",
"0",
":",
"break",
"else",
":",
"# If this line did not satisfy its comparison, then higher",
"# indentation levels will not be accepted.",
"max_line_level",
"=",
"line",
".",
"level",
"# Join the formatted description strings and remove backspace",
"# characters (plus the preceeding character as well)",
"tags",
"[",
"'description'",
"]",
"=",
"self",
".",
"bspace",
".",
"sub",
"(",
"''",
",",
"\" \"",
".",
"join",
"(",
"description",
")",
")",
"# This should never happen",
"if",
"not",
"tags",
"[",
"'description'",
"]",
":",
"tags",
"[",
"'display'",
"]",
"=",
"False",
"tags",
"[",
"'invalid'",
"]",
"=",
"True",
"# If the formatted string contains non-printable characters, consider",
"# it invalid",
"if",
"self",
".",
"printable",
".",
"match",
"(",
"tags",
"[",
"'description'",
"]",
")",
".",
"group",
"(",
")",
"!=",
"tags",
"[",
"'description'",
"]",
":",
"tags",
"[",
"'invalid'",
"]",
"=",
"True",
"return",
"tags"
] |
Analyzes self.data for the specified signature data at the specified offset.
@signature - The signature to apply to the data.
@offset - The offset in self.data to apply the signature to.
Returns a dictionary of tags parsed from the data.
|
[
"Analyzes",
"self",
".",
"data",
"for",
"the",
"specified",
"signature",
"data",
"at",
"the",
"specified",
"offset",
"."
] |
python
|
train
|
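The token stream above is the heart of the signature test: an optional arithmetic/bitwise operator is first applied to the extracted data value, and the result is then checked against the line's condition. A minimal standalone sketch of that two-step flow, assuming hypothetical apply_op/matches helpers (the real parser inlines this logic):

import operator

# Mirrors the operator table in the parser above.
OPS = {'&': operator.and_, '|': operator.or_, '*': operator.mul,
       '+': operator.add, '-': operator.sub, '/': operator.floordiv,
       '^': operator.xor}

def apply_op(dvalue, op, opval):
    if op == '~':
        return ~opval          # '~' replaces dvalue with the complement of the operand
    return OPS[op](dvalue, opval)

def matches(dvalue, condition, value):
    if value is None:          # no expected value means "always match"
        return True
    return {'=': dvalue == value, '>': dvalue > value, '<': dvalue < value,
            '!': dvalue != value, '~': dvalue == ~value,
            '^': bool(dvalue ^ value), '&': bool(dvalue & value),
            '|': bool(dvalue | value)}[condition]

# e.g. the "belong&0x0000FFFF" style mask mentioned in the parser's comments:
print(matches(apply_op(0xDEADBEEF, '&', 0x0000FFFF), '=', 0xBEEF))  # True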
log2timeline/plaso
|
plaso/cli/time_slices.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/time_slices.py#L43-L49
|
def start_timestamp(self):
"""int: slice start timestamp or None."""
if self.event_timestamp:
return self.event_timestamp - (
self.duration * self._MICRO_SECONDS_PER_MINUTE)
return None
|
[
"def",
"start_timestamp",
"(",
"self",
")",
":",
"if",
"self",
".",
"event_timestamp",
":",
"return",
"self",
".",
"event_timestamp",
"-",
"(",
"self",
".",
"duration",
"*",
"self",
".",
"_MICRO_SECONDS_PER_MINUTE",
")",
"return",
"None"
] |
int: slice start timestamp or None.
|
[
"int",
":",
"slice",
"start",
"timestamp",
"or",
"None",
"."
] |
python
|
train
|
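The arithmetic here is straightforward: duration is expressed in minutes while timestamps are in microseconds, so the slice start is the event timestamp minus duration * 60,000,000. A quick standalone check:

_MICRO_SECONDS_PER_MINUTE = 60 * 1000000

def start_timestamp(event_timestamp, duration):
    # Mirrors the property above: None when no event timestamp is set.
    if event_timestamp:
        return event_timestamp - (duration * _MICRO_SECONDS_PER_MINUTE)
    return None

# A 5-minute slice ending at 1,600,000,000,000,000 microseconds:
print(start_timestamp(1600000000000000, 5))  # 1599999700000000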
pjuren/pyokit
|
src/pyokit/io/maf.py
|
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/maf.py#L195-L271
|
def maf_iterator(fn, index_friendly=False,
yield_class=MultipleSequenceAlignment, yield_kw_args={},
verbose=False):
"""
Iterate over a MAF format file and yield <yield_class> objects for each block.
MAF files are arranged in blocks. Each block is a multiple alignment. Within
a block, the first character of a line indicates what kind of line it is:
a -- key-value pair meta data for block; one per block, should be first line
s -- a sequence line; see __build_sequence() for details.
i -- always come after s lines, and contain information about the context of
the sequence; see __annotate_sequence_with_context() for details.
e -- indicates that there is no aligning sequence for a species, but that
there are blocks before and after this one that do align. See
__build_unknown_sequence() for details.
q -- quality information about an aligned base in a species. See
__annotate_sequence_with_quality()
:param yield_class: yield objects returned by this function/constructor;
must accept key-word args of 'sequences' and
'meta_data' which are list and dictionary respectively.
Default is MultipleSequenceAlignment
:param yield_kw_args: extra keyword args to pass to 'yield_class'
"""
try:
fh = open(fn)
except (TypeError):
fh = fn
if index_friendly:
fh = iter(fh.readline, '')
sequences = []
meta_data = {}
for line in fh:
line = line.strip()
if line == "" or line[0] == "#":
continue
parts = line.split()
line_type = parts[0].strip()
if line_type == A_LINE:
if sequences != []:
meta_data[SEQ_ORDER_KEY] = [s.name for s in sequences]
kw_args = merge_dictionaries({"sequences": sequences,
"meta_data": meta_data}, yield_kw_args)
yield yield_class(**kw_args)
sequences = []
meta_data = {}
for i in range(1, len(parts)):
assert(parts[i].count("=") == 1)
piv = parts[i].find("=")
meta_data[parts[i][:piv]] = parts[i][piv + 1:]
elif line_type == S_LINE:
sequences.append(__build_sequence(parts))
elif line_type == I_LINE:
if len(sequences) < 1:
raise MAFError("found information line with no preceeding sequence " +
"in block")
__annotate_sequence_with_context(sequences[-1], parts)
elif line_type == E_LINE:
sequences.append(__build_unknown_sequence(parts))
elif line_type == Q_LINE:
if len(sequences) < 1:
raise MAFError("found quality line with no preceeding sequence in " +
"block")
__annotate_sequence_with_quality(sequences[-1], parts)
else:
raise MAFError("Unknown type of MAF line: " + line)
# don't forget to yield the final block
if sequences != []:
meta_data[SEQ_ORDER_KEY] = [s.name for s in sequences]
kw_args = merge_dictionaries({"sequences": sequences,
"meta_data": meta_data},
yield_kw_args)
yield yield_class(**kw_args)
|
[
"def",
"maf_iterator",
"(",
"fn",
",",
"index_friendly",
"=",
"False",
",",
"yield_class",
"=",
"MultipleSequenceAlignment",
",",
"yield_kw_args",
"=",
"{",
"}",
",",
"verbose",
"=",
"False",
")",
":",
"try",
":",
"fh",
"=",
"open",
"(",
"fn",
")",
"except",
"(",
"TypeError",
")",
":",
"fh",
"=",
"fn",
"if",
"index_friendly",
":",
"fh",
"=",
"iter",
"(",
"fh",
".",
"readline",
",",
"''",
")",
"sequences",
"=",
"[",
"]",
"meta_data",
"=",
"{",
"}",
"for",
"line",
"in",
"fh",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"\"\"",
"or",
"line",
"[",
"0",
"]",
"==",
"\"#\"",
":",
"continue",
"parts",
"=",
"line",
".",
"split",
"(",
")",
"line_type",
"=",
"parts",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"line_type",
"==",
"A_LINE",
":",
"if",
"sequences",
"!=",
"[",
"]",
":",
"meta_data",
"[",
"SEQ_ORDER_KEY",
"]",
"=",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"sequences",
"]",
"kw_args",
"=",
"merge_dictionaries",
"(",
"{",
"\"sequences\"",
":",
"sequences",
",",
"\"meta_data\"",
":",
"meta_data",
"}",
",",
"yield_kw_args",
")",
"yield",
"yield_class",
"(",
"*",
"*",
"kw_args",
")",
"sequences",
"=",
"[",
"]",
"meta_data",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"parts",
")",
")",
":",
"assert",
"(",
"parts",
"[",
"i",
"]",
".",
"count",
"(",
"\"=\"",
")",
"==",
"1",
")",
"piv",
"=",
"parts",
"[",
"i",
"]",
".",
"find",
"(",
"\"=\"",
")",
"meta_data",
"[",
"parts",
"[",
"i",
"]",
"[",
":",
"piv",
"]",
"]",
"=",
"parts",
"[",
"i",
"]",
"[",
"piv",
"+",
"1",
":",
"]",
"elif",
"line_type",
"==",
"S_LINE",
":",
"sequences",
".",
"append",
"(",
"__build_sequence",
"(",
"parts",
")",
")",
"elif",
"line_type",
"==",
"I_LINE",
":",
"if",
"len",
"(",
"sequences",
")",
"<",
"1",
":",
"raise",
"MAFError",
"(",
"\"found information line with no preceeding sequence \"",
"+",
"\"in block\"",
")",
"__annotate_sequence_with_context",
"(",
"sequences",
"[",
"-",
"1",
"]",
",",
"parts",
")",
"elif",
"line_type",
"==",
"E_LINE",
":",
"sequences",
".",
"append",
"(",
"__build_unknown_sequence",
"(",
"parts",
")",
")",
"elif",
"line_type",
"==",
"Q_LINE",
":",
"if",
"len",
"(",
"sequences",
")",
"<",
"1",
":",
"raise",
"MAFError",
"(",
"\"found quality line with no preceeding sequence in \"",
"+",
"\"block\"",
")",
"__annotate_sequence_with_quality",
"(",
"sequences",
"[",
"-",
"1",
"]",
",",
"parts",
")",
"else",
":",
"raise",
"MAFError",
"(",
"\"Unknown type of MAF line: \"",
"+",
"line",
")",
"# don't forget to yield the final block",
"if",
"sequences",
"!=",
"[",
"]",
":",
"meta_data",
"[",
"SEQ_ORDER_KEY",
"]",
"=",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"sequences",
"]",
"kw_args",
"=",
"merge_dictionaries",
"(",
"{",
"\"sequences\"",
":",
"sequences",
",",
"\"meta_data\"",
":",
"meta_data",
"}",
",",
"yield_kw_args",
")",
"yield",
"yield_class",
"(",
"*",
"*",
"kw_args",
")"
] |
Iterate over a MAF format file and yield <yield_class> objects for each block.
MAF files are arranged in blocks. Each block is a multiple alignment. Within
a block, the first character of a line indicates what kind of line it is:
a -- key-value pair meta data for block; one per block, should be first line
s -- a sequence line; see __build_sequence() for details.
i -- always come after s lines, and contain information about the context of
the sequence; see __annotate_sequence_with_context() for details.
e -- indicates that there is no aligning sequence for a species, but that
there are blocks before and after this one that do align. See
__build_unknown_sequence() for details.
q -- quality information about an aligned base in a species. See
__annotate_sequence_with_quality()
:param yield_class: yield objects returned by this function/constructor;
must accept key-word args of 'sequences' and
'meta_data' which are list and dictionary respectively.
Default is MultipleSequenceAlignment
:param yield_kw_args: extra keyword args to pass to 'yield_class'
|
[
"Iterate",
"of",
"MAF",
"format",
"file",
"and",
"yield",
"<yield_class",
">",
"objects",
"for",
"each",
"block",
"."
] |
python
|
train
|
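The 'a'-line handling above is the only non-trivial parsing: every field after the leading 'a' must be a single key=value pair, split on the first '='. A standalone sketch of just that step (parse_a_line is our name, not pyokit's):

def parse_a_line(line):
    parts = line.split()
    assert parts[0] == 'a'
    meta = {}
    for field in parts[1:]:
        assert field.count('=') == 1
        key, _, value = field.partition('=')  # split on the first '='
        meta[key] = value
    return meta

print(parse_a_line("a score=23262.0 pass=1"))
# {'score': '23262.0', 'pass': '1'}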
MycroftAI/padatious
|
padatious/intent_container.py
|
https://github.com/MycroftAI/padatious/blob/794a2530d6079bdd06e193edd0d30b2cc793e631/padatious/intent_container.py#L105-L118
|
def load_entity(self, name, file_name, reload_cache=False):
"""
Loads an entity, optionally checking the cache first
Args:
name (str): The associated name of the entity
file_name (str): The location of the entity file
reload_cache (bool): Whether to refresh all of cache
"""
Entity.verify_name(name)
self.entities.load(Entity.wrap_name(name), file_name, reload_cache)
with open(file_name) as f:
self.padaos.add_entity(name, f.read().split('\n'))
self.must_train = True
|
[
"def",
"load_entity",
"(",
"self",
",",
"name",
",",
"file_name",
",",
"reload_cache",
"=",
"False",
")",
":",
"Entity",
".",
"verify_name",
"(",
"name",
")",
"self",
".",
"entities",
".",
"load",
"(",
"Entity",
".",
"wrap_name",
"(",
"name",
")",
",",
"file_name",
",",
"reload_cache",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"self",
".",
"padaos",
".",
"add_entity",
"(",
"name",
",",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"self",
".",
"must_train",
"=",
"True"
] |
Loads an entity, optionally checking the cache first
Args:
name (str): The associated name of the entity
file_name (str): The location of the entity file
reload_cache (bool): Whether to refresh all of cache
|
[
"Loads",
"an",
"entity",
"optionally",
"checking",
"the",
"cache",
"first"
] |
python
|
valid
|
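One parsing detail worth knowing about the read().split('\n') call above: it keeps a trailing empty entry when the file ends with a newline, which splitlines() would drop. A two-line demonstration:

text = "pizza\npasta\n"        # a typical entity file body
print(text.split('\n'))        # ['pizza', 'pasta', ''] -- trailing empty entry
print(text.splitlines())       # ['pizza', 'pasta']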
google/grr
|
grr/server/grr_response_server/notification.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/notification.py#L16-L33
|
def _HostPrefix(client_id):
"""Build a host prefix for a notification message based on a client id."""
if not client_id:
return ""
hostname = None
if data_store.RelationalDBEnabled():
client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
if client_snapshot:
hostname = client_snapshot.knowledge_base.fqdn
else:
client_fd = aff4.FACTORY.Open(client_id, mode="rw")
hostname = client_fd.Get(client_fd.Schema.FQDN) or ""
if hostname:
return "%s: " % hostname
else:
return ""
|
[
"def",
"_HostPrefix",
"(",
"client_id",
")",
":",
"if",
"not",
"client_id",
":",
"return",
"\"\"",
"hostname",
"=",
"None",
"if",
"data_store",
".",
"RelationalDBEnabled",
"(",
")",
":",
"client_snapshot",
"=",
"data_store",
".",
"REL_DB",
".",
"ReadClientSnapshot",
"(",
"client_id",
")",
"if",
"client_snapshot",
":",
"hostname",
"=",
"client_snapshot",
".",
"knowledge_base",
".",
"fqdn",
"else",
":",
"client_fd",
"=",
"aff4",
".",
"FACTORY",
".",
"Open",
"(",
"client_id",
",",
"mode",
"=",
"\"rw\"",
")",
"hostname",
"=",
"client_fd",
".",
"Get",
"(",
"client_fd",
".",
"Schema",
".",
"FQDN",
")",
"or",
"\"\"",
"if",
"hostname",
":",
"return",
"\"%s: \"",
"%",
"hostname",
"else",
":",
"return",
"\"\""
] |
Build a host prefix for a notification message based on a client id.
|
[
"Build",
"a",
"host",
"prefix",
"for",
"a",
"notification",
"message",
"based",
"on",
"a",
"client",
"id",
"."
] |
python
|
train
|
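Whichever datastore path runs, the formatting rule at the end reduces to: empty string unless a hostname was found. A minimal sketch of that final step:

def host_prefix(hostname):
    return "%s: " % hostname if hostname else ""

print(repr(host_prefix("web-01.example.com")))  # 'web-01.example.com: '
print(repr(host_prefix(None)))                  # ''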
mathandy/svgpathtools
|
svgpathtools/misctools.py
|
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/misctools.py#L37-L39
|
def isclose(a, b, rtol=1e-5, atol=1e-8):
"""This is essentially np.isclose, but slightly faster."""
return abs(a - b) < (atol + rtol * abs(b))
|
[
"def",
"isclose",
"(",
"a",
",",
"b",
",",
"rtol",
"=",
"1e-5",
",",
"atol",
"=",
"1e-8",
")",
":",
"return",
"abs",
"(",
"a",
"-",
"b",
")",
"<",
"(",
"atol",
"+",
"rtol",
"*",
"abs",
"(",
"b",
")",
")"
] |
This is essentially np.isclose, but slightly faster.
|
[
"This",
"is",
"essentially",
"np",
".",
"isclose",
"but",
"slightly",
"faster",
"."
] |
python
|
train
|
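Note that, like numpy's np.isclose, the tolerance formula is asymmetric: rtol scales |b| only, so isclose(a, b) and isclose(b, a) can disagree, and atol alone governs comparisons against zero:

def isclose(a, b, rtol=1e-5, atol=1e-8):
    return abs(a - b) < (atol + rtol * abs(b))

print(isclose(1.0, 1.000001))  # True: 1e-6 < 1e-8 + 1e-5 * 1.000001
print(isclose(1e-7, 0.0))      # False: only atol (1e-8) applies when b == 0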
rigetti/grove
|
grove/tomography/operator_utils.py
|
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/operator_utils.py#L70-L94
|
def make_diagonal_povm(pi_basis, confusion_rate_matrix):
"""
Create a DiagonalPOVM from a ``pi_basis`` and the ``confusion_rate_matrix`` associated with a
readout.
See also the grove documentation.
:param OperatorBasis pi_basis: An operator basis of rank-1 projection operators.
:param numpy.ndarray confusion_rate_matrix: The matrix of detection probabilities conditional
on a prepared qubit state.
:return: The POVM corresponding to confusion_rate_matrix.
:rtype: DiagonalPOVM
"""
confusion_rate_matrix = np.asarray(confusion_rate_matrix)
if not np.allclose(confusion_rate_matrix.sum(axis=0), np.ones(confusion_rate_matrix.shape[1])):
raise CRMUnnormalizedError("Unnormalized confusion matrix:\n{}".format(
confusion_rate_matrix))
if not (confusion_rate_matrix >= 0).all() or not (confusion_rate_matrix <= 1).all():
raise CRMValueError("Confusion matrix must have values in [0, 1]:"
"\n{}".format(confusion_rate_matrix))
ops = [sum((pi_j * pjk for (pi_j, pjk) in izip(pi_basis.ops, pjs)), 0)
for pjs in confusion_rate_matrix]
return DiagonalPOVM(pi_basis=pi_basis, confusion_rate_matrix=confusion_rate_matrix, ops=ops)
|
[
"def",
"make_diagonal_povm",
"(",
"pi_basis",
",",
"confusion_rate_matrix",
")",
":",
"confusion_rate_matrix",
"=",
"np",
".",
"asarray",
"(",
"confusion_rate_matrix",
")",
"if",
"not",
"np",
".",
"allclose",
"(",
"confusion_rate_matrix",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
",",
"np",
".",
"ones",
"(",
"confusion_rate_matrix",
".",
"shape",
"[",
"1",
"]",
")",
")",
":",
"raise",
"CRMUnnormalizedError",
"(",
"\"Unnormalized confusion matrix:\\n{}\"",
".",
"format",
"(",
"confusion_rate_matrix",
")",
")",
"if",
"not",
"(",
"confusion_rate_matrix",
">=",
"0",
")",
".",
"all",
"(",
")",
"or",
"not",
"(",
"confusion_rate_matrix",
"<=",
"1",
")",
".",
"all",
"(",
")",
":",
"raise",
"CRMValueError",
"(",
"\"Confusion matrix must have values in [0, 1]:\"",
"\"\\n{}\"",
".",
"format",
"(",
"confusion_rate_matrix",
")",
")",
"ops",
"=",
"[",
"sum",
"(",
"(",
"pi_j",
"*",
"pjk",
"for",
"(",
"pi_j",
",",
"pjk",
")",
"in",
"izip",
"(",
"pi_basis",
".",
"ops",
",",
"pjs",
")",
")",
",",
"0",
")",
"for",
"pjs",
"in",
"confusion_rate_matrix",
"]",
"return",
"DiagonalPOVM",
"(",
"pi_basis",
"=",
"pi_basis",
",",
"confusion_rate_matrix",
"=",
"confusion_rate_matrix",
",",
"ops",
"=",
"ops",
")"
] |
Create a DiagonalPOVM from a ``pi_basis`` and the ``confusion_rate_matrix`` associated with a
readout.
See also the grove documentation.
:param OperatorBasis pi_basis: An operator basis of rank-1 projection operators.
:param numpy.ndarray confusion_rate_matrix: The matrix of detection probabilities conditional
on a prepared qubit state.
:return: The POVM corresponding to confusion_rate_matrix.
:rtype: DiagonalPOVM
|
[
"Create",
"a",
"DiagonalPOVM",
"from",
"a",
"pi_basis",
"and",
"the",
"confusion_rate_matrix",
"associated",
"with",
"a",
"readout",
"."
] |
python
|
train
|
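The two validation steps amount to checking that the confusion rate matrix is column-stochastic with entries in [0, 1]: column j lists the probabilities of every measured outcome given prepared state j, so each column must sum to 1. A quick check on a sample 2x2 matrix:

import numpy as np

crm = np.array([[0.95, 0.10],    # P(measure 0 | prepared 0), P(measure 0 | prepared 1)
                [0.05, 0.90]])   # P(measure 1 | prepared 0), P(measure 1 | prepared 1)

print(np.allclose(crm.sum(axis=0), np.ones(crm.shape[1])))  # True: columns sum to 1
print(bool(((crm >= 0) & (crm <= 1)).all()))                # True: valid probabilities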
fronzbot/blinkpy
|
blinkpy/api.py
|
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L56-L64
|
def request_syncmodule(blink, network):
"""
Request sync module info.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/syncmodules".format(blink.urls.base_url, network)
return http_get(blink, url)
|
[
"def",
"request_syncmodule",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/syncmodules\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] |
Request sync module info.
:param blink: Blink instance.
:param network: Sync module network id.
|
[
"Request",
"sync",
"module",
"info",
"."
] |
python
|
train
|
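The function is a thin wrapper: it only interpolates the network id into the REST path and delegates to http_get. A sketch of the URL it produces (the base URL below is a made-up stand-in for blink.urls.base_url):

base_url = "https://rest-prod.example.com/api"   # hypothetical stand-in
network = 12345
url = "{}/network/{}/syncmodules".format(base_url, network)
print(url)  # https://rest-prod.example.com/api/network/12345/syncmodules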
KelSolaar/Foundations
|
foundations/strings.py
|
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/strings.py#L175-L192
|
def get_words(data):
"""
Extracts the words from given string.
Usage::
>>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
[u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']
:param data: Data to extract words from.
:type data: unicode
:return: Words.
:rtype: list
"""
words = re.findall(r"\w+", data)
LOGGER.debug("> Words: '{0}'".format(", ".join(words)))
return words
|
[
"def",
"get_words",
"(",
"data",
")",
":",
"words",
"=",
"re",
".",
"findall",
"(",
"r\"\\w+\"",
",",
"data",
")",
"LOGGER",
".",
"debug",
"(",
"\"> Words: '{0}'\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"words",
")",
")",
")",
"return",
"words"
] |
Extracts the words from given string.
Usage::
>>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
[u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']
:param data: Data to extract words from.
:type data: unicode
:return: Words.
:rtype: list
|
[
"Extracts",
"the",
"words",
"from",
"given",
"string",
"."
] |
python
|
train
|
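The \w+ pattern matches runs of letters, digits, and underscores, which is why an alphanumeric token like 'Z6PO' in the docstring example survives intact while punctuation is dropped:

import re

print(re.findall(r"\w+", "Users are: John Doe, Jane Doe, Z6PO."))
# ['Users', 'are', 'John', 'Doe', 'Jane', 'Doe', 'Z6PO']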
DLR-RM/RAFCON
|
source/rafcon/core/execution/execution_engine.py
|
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L451-L474
|
def _modify_run_to_states(self, state):
"""
This is a special case. Inside a hierarchy state a step_over is triggered and affects the last child.
In this case the self.run_to_states has to be modified in order to contain the parent of the hierarchy state.
Otherwise the execution won't respect the step_over any more and run until the end of the state machine.
The same holds for a step_out.
The reason for this is that handle_execution_mode() can not be called between
the last state of a hierarchy state and the termination of the hierarchy state itself.
"""
if self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OVER or \
self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OUT:
for state_path in copy.deepcopy(self.run_to_states):
if state_path == state.get_path():
logger.verbose("Modifying run_to_states; triggered by state %s!", state.name)
self.run_to_states.remove(state_path)
from rafcon.core.states.state import State
if isinstance(state.parent, State):
from rafcon.core.states.library_state import LibraryState
if isinstance(state.parent, LibraryState):
parent_path = state.parent.parent.get_path()
else:
parent_path = state.parent.get_path()
self.run_to_states.append(parent_path)
break
|
[
"def",
"_modify_run_to_states",
"(",
"self",
",",
"state",
")",
":",
"if",
"self",
".",
"_status",
".",
"execution_mode",
"is",
"StateMachineExecutionStatus",
".",
"FORWARD_OVER",
"or",
"self",
".",
"_status",
".",
"execution_mode",
"is",
"StateMachineExecutionStatus",
".",
"FORWARD_OUT",
":",
"for",
"state_path",
"in",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"run_to_states",
")",
":",
"if",
"state_path",
"==",
"state",
".",
"get_path",
"(",
")",
":",
"logger",
".",
"verbose",
"(",
"\"Modifying run_to_states; triggered by state %s!\"",
",",
"state",
".",
"name",
")",
"self",
".",
"run_to_states",
".",
"remove",
"(",
"state_path",
")",
"from",
"rafcon",
".",
"core",
".",
"states",
".",
"state",
"import",
"State",
"if",
"isinstance",
"(",
"state",
".",
"parent",
",",
"State",
")",
":",
"from",
"rafcon",
".",
"core",
".",
"states",
".",
"library_state",
"import",
"LibraryState",
"if",
"isinstance",
"(",
"state",
".",
"parent",
",",
"LibraryState",
")",
":",
"parent_path",
"=",
"state",
".",
"parent",
".",
"parent",
".",
"get_path",
"(",
")",
"else",
":",
"parent_path",
"=",
"state",
".",
"parent",
".",
"get_path",
"(",
")",
"self",
".",
"run_to_states",
".",
"append",
"(",
"parent_path",
")",
"break"
] |
This is a special case. Inside a hierarchy state a step_over is triggered and affects the last child.
In this case the self.run_to_states has to be modified in order to contain the parent of the hierarchy state.
Otherwise the execution won't respect the step_over any more and run until the end of the state machine.
The same holds for a step_out.
The reason for this is that handle_execution_mode() can not be called between
the last state of a hierarchy state and the termination of the hierarchy state itself.
|
[
"This",
"is",
"a",
"special",
"case",
".",
"Inside",
"a",
"hierarchy",
"state",
"a",
"step_over",
"is",
"triggered",
"and",
"affects",
"the",
"last",
"child",
".",
"In",
"this",
"case",
"the",
"self",
".",
"run_to_states",
"has",
"to",
"be",
"modified",
"in",
"order",
"to",
"contain",
"the",
"parent",
"of",
"the",
"hierarchy",
"state",
".",
"Otherwise",
"the",
"execution",
"won",
"t",
"respect",
"the",
"step_over",
"any",
"more",
"and",
"run",
"until",
"the",
"end",
"of",
"the",
"state",
"machine",
".",
"The",
"same",
"holds",
"for",
"a",
"step_out",
".",
"The",
"reason",
"for",
"this",
"is",
"that",
"handle_execution_mode",
"()",
"can",
"not",
"be",
"called",
"between",
"the",
"last",
"state",
"of",
"a",
"hierarchy",
"state",
"and",
"the",
"termination",
"of",
"the",
"hierarchy",
"state",
"itself",
"."
] |
python
|
train
|
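Stripped of the RAFCON imports, the bookkeeping is a path swap: when the finishing child's path is on the run-to list, it is replaced by its (grand)parent's path so the step-over/step-out stops at the right level. A standalone sketch with plain string paths (swap_child_for_parent is our name, not RAFCON's):

def swap_child_for_parent(run_to_states, state_path, parent_path):
    # Mirrors the remove/append pair above, without the library types.
    if state_path in run_to_states:
        run_to_states.remove(state_path)
        run_to_states.append(parent_path)
    return run_to_states

print(swap_child_for_parent(['/root/h1/child2'], '/root/h1/child2', '/root/h1'))
# ['/root/h1']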
saltstack/salt
|
salt/modules/bigip.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bigip.py#L472-L549
|
def modify_node(hostname, username, password, name,
connection_limit=None,
description=None,
dynamic_ratio=None,
logging=None,
monitor=None,
rate_limit=None,
ratio=None,
session=None,
state=None,
trans_label=None):
'''
A function to connect to a bigip device and modify an existing node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[user-down | user-up ]
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
'''
params = {
'connection-limit': connection_limit,
'description': description,
'dynamic-ratio': dynamic_ratio,
'logging': logging,
'monitor': monitor,
'rate-limit': rate_limit,
'ratio': ratio,
'session': session,
'state': state,
}
#build session
bigip_session = _build_session(username, password, trans_label)
#build payload
payload = _loop_payload(params)
payload['name'] = name
#put to REST
try:
response = bigip_session.put(
BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node/{name}'.format(name=name),
data=salt.utils.json.dumps(payload)
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
|
[
"def",
"modify_node",
"(",
"hostname",
",",
"username",
",",
"password",
",",
"name",
",",
"connection_limit",
"=",
"None",
",",
"description",
"=",
"None",
",",
"dynamic_ratio",
"=",
"None",
",",
"logging",
"=",
"None",
",",
"monitor",
"=",
"None",
",",
"rate_limit",
"=",
"None",
",",
"ratio",
"=",
"None",
",",
"session",
"=",
"None",
",",
"state",
"=",
"None",
",",
"trans_label",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'connection-limit'",
":",
"connection_limit",
",",
"'description'",
":",
"description",
",",
"'dynamic-ratio'",
":",
"dynamic_ratio",
",",
"'logging'",
":",
"logging",
",",
"'monitor'",
":",
"monitor",
",",
"'rate-limit'",
":",
"rate_limit",
",",
"'ratio'",
":",
"ratio",
",",
"'session'",
":",
"session",
",",
"'state'",
":",
"state",
",",
"}",
"#build session",
"bigip_session",
"=",
"_build_session",
"(",
"username",
",",
"password",
",",
"trans_label",
")",
"#build payload",
"payload",
"=",
"_loop_payload",
"(",
"params",
")",
"payload",
"[",
"'name'",
"]",
"=",
"name",
"#put to REST",
"try",
":",
"response",
"=",
"bigip_session",
".",
"put",
"(",
"BIG_IP_URL_BASE",
".",
"format",
"(",
"host",
"=",
"hostname",
")",
"+",
"'/ltm/node/{name}'",
".",
"format",
"(",
"name",
"=",
"name",
")",
",",
"data",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"e",
":",
"return",
"_load_connection_error",
"(",
"hostname",
",",
"e",
")",
"return",
"_load_response",
"(",
"response",
")"
] |
A function to connect to a bigip device and modify an existing node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[user-down | user-up ]
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
|
[
"A",
"function",
"to",
"connect",
"to",
"a",
"bigip",
"device",
"and",
"modify",
"an",
"existing",
"node",
"."
] |
python
|
train
|
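The helper _loop_payload is not shown here, but given how params is built, it plausibly drops entries whose value is None so that unset keyword arguments never reach the REST API. A sketch of that assumed filtering step:

params = {'connection-limit': None, 'ratio': 2, 'logging': 'enabled'}

# Hypothetical reconstruction of the filtering _loop_payload likely performs:
payload = {k: v for k, v in params.items() if v is not None}
payload['name'] = '10.1.1.2'
print(payload)  # {'ratio': 2, 'logging': 'enabled', 'name': '10.1.1.2'}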
ggravlingen/pytradfri
|
pytradfri/api/aiocoap_api.py
|
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/aiocoap_api.py#L152-L161
|
async def request(self, api_commands):
"""Make a request."""
if not isinstance(api_commands, list):
result = await self._execute(api_commands)
return result
commands = (self._execute(api_command) for api_command in api_commands)
command_results = await asyncio.gather(*commands, loop=self._loop)
return command_results
|
[
"async",
"def",
"request",
"(",
"self",
",",
"api_commands",
")",
":",
"if",
"not",
"isinstance",
"(",
"api_commands",
",",
"list",
")",
":",
"result",
"=",
"await",
"self",
".",
"_execute",
"(",
"api_commands",
")",
"return",
"result",
"commands",
"=",
"(",
"self",
".",
"_execute",
"(",
"api_command",
")",
"for",
"api_command",
"in",
"api_commands",
")",
"command_results",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"commands",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"return",
"command_results"
] |
Make a request.
|
[
"Make",
"a",
"request",
"."
] |
python
|
train
|
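The single-versus-list dispatch is the whole trick: one command is awaited directly, while a list fans out concurrently through asyncio.gather. A self-contained sketch (without the deprecated loop= argument):

import asyncio

async def execute(cmd):            # stand-in for self._execute
    return cmd * 2

async def request(api_commands):
    if not isinstance(api_commands, list):
        return await execute(api_commands)
    return await asyncio.gather(*(execute(c) for c in api_commands))

print(asyncio.run(request(3)))       # 6
print(asyncio.run(request([1, 2])))  # [2, 4]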
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/addons/guerilla/guerillamgmt.py
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L150-L165
|
def create_seq(self, ):
"""Create a sequence and store it in the self.sequence
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
desc = self.desc_pte.toPlainText()
try:
seq = djadapter.models.Sequence(name=name, project=self._project, description=desc)
seq.save()
self.sequence = seq
self.accept()
except:
log.exception("Could not create new sequence")
|
[
"def",
"create_seq",
"(",
"self",
",",
")",
":",
"name",
"=",
"self",
".",
"name_le",
".",
"text",
"(",
")",
"desc",
"=",
"self",
".",
"desc_pte",
".",
"toPlainText",
"(",
")",
"try",
":",
"seq",
"=",
"djadapter",
".",
"models",
".",
"Sequence",
"(",
"name",
"=",
"name",
",",
"project",
"=",
"self",
".",
"_project",
",",
"description",
"=",
"desc",
")",
"seq",
".",
"save",
"(",
")",
"self",
".",
"sequence",
"=",
"seq",
"self",
".",
"accept",
"(",
")",
"except",
":",
"log",
".",
"exception",
"(",
"\"Could not create new sequence\"",
")"
] |
Create a sequence and store it in the self.sequence
:returns: None
:rtype: None
:raises: None
|
[
"Create",
"a",
"sequence",
"and",
"store",
"it",
"in",
"the",
"self",
".",
"sequence"
] |
python
|
train
|
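One design note on the error handling above: the bare except clause also swallows KeyboardInterrupt and SystemExit; 'except Exception:' would be the narrower idiom if only database or validation failures are meant to be logged.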
datadesk/django-greeking
|
greeking/templatetags/greeking_tags.py
|
https://github.com/datadesk/django-greeking/blob/72509c94952279503bbe8d5a710c1fd344da0670/greeking/templatetags/greeking_tags.py#L111-L147
|
def placeholdit(
width,
height,
background_color="cccccc",
text_color="969696",
text=None,
random_background_color=False
):
"""
Creates a placeholder image using placehold.it
Usage format:
{% placeholdit [width] [height] [background_color] [text_color] [text] %}
Example usage:
Default image at 250 square
{% placeholdit 250 %}
100 wide and 200 high
{% placeholdit 100 200 %}
Custom background and text colors
{% placeholdit 100 200 background_color='fff' text_color='000' %}
Custom text
{% placeholdit 100 200 text='Hello LA' %}
"""
url = get_placeholdit_url(
width,
height,
background_color=background_color,
text_color=text_color,
text=text,
)
return format_html('<img src="{}"/>', url)
|
[
"def",
"placeholdit",
"(",
"width",
",",
"height",
",",
"background_color",
"=",
"\"cccccc\"",
",",
"text_color",
"=",
"\"969696\"",
",",
"text",
"=",
"None",
",",
"random_background_color",
"=",
"False",
")",
":",
"url",
"=",
"get_placeholdit_url",
"(",
"width",
",",
"height",
",",
"background_color",
"=",
"background_color",
",",
"text_color",
"=",
"text_color",
",",
"text",
"=",
"text",
",",
")",
"return",
"format_html",
"(",
"'<img src=\"{}\"/>'",
",",
"url",
")"
] |
Creates a placeholder image using placehold.it
Usage format:
{% placeholdit [width] [height] [background_color] [text_color] [text] %}
Example usage:
Default image at 250 square
{% placeholdit 250 %}
100 wide and 200 high
{% placeholdit 100 200 %}
Custom background and text colors
{% placeholdit 100 200 background_color='fff' text_color='000' %}
Custom text
{% placeholdit 100 200 text='Hello LA' %}
|
[
"Creates",
"a",
"placeholder",
"image",
"using",
"placehold",
".",
"it"
] |
python
|
train
|
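get_placeholdit_url is defined elsewhere in the module; as an illustration only, a URL in the placehold.it style could be assembled like this (the exact path format is an assumption, not the library's verified output):

def get_placeholdit_url(width, height, background_color="cccccc",
                        text_color="969696", text=None):
    # Hypothetical reconstruction for illustration.
    url = "http://placehold.it/{}x{}/{}/{}".format(
        width, height, background_color, text_color)
    if text:
        url += "?text={}".format(text)
    return url

print(get_placeholdit_url(100, 200, text="Hello LA"))
# http://placehold.it/100x200/cccccc/969696?text=Hello LA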
Gorialis/jishaku
|
jishaku/cog.py
|
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L480-L502
|
async def jsk_source(self, ctx: commands.Context, *, command_name: str):
"""
Displays the source code for a command.
"""
command = self.bot.get_command(command_name)
if not command:
return await ctx.send(f"Couldn't find command `{command_name}`.")
try:
source_lines, _ = inspect.getsourcelines(command.callback)
except (TypeError, OSError):
return await ctx.send(f"Was unable to retrieve the source for `{command}` for some reason.")
# getsourcelines for some reason returns WITH line endings
source_lines = ''.join(source_lines).split('\n')
paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
for line in source_lines:
paginator.add_line(line)
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx)
|
[
"async",
"def",
"jsk_source",
"(",
"self",
",",
"ctx",
":",
"commands",
".",
"Context",
",",
"*",
",",
"command_name",
":",
"str",
")",
":",
"command",
"=",
"self",
".",
"bot",
".",
"get_command",
"(",
"command_name",
")",
"if",
"not",
"command",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"f\"Couldn't find command `{command_name}`.\"",
")",
"try",
":",
"source_lines",
",",
"_",
"=",
"inspect",
".",
"getsourcelines",
"(",
"command",
".",
"callback",
")",
"except",
"(",
"TypeError",
",",
"OSError",
")",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"f\"Was unable to retrieve the source for `{command}` for some reason.\"",
")",
"# getsourcelines for some reason returns WITH line endings",
"source_lines",
"=",
"''",
".",
"join",
"(",
"source_lines",
")",
".",
"split",
"(",
"'\\n'",
")",
"paginator",
"=",
"WrappedPaginator",
"(",
"prefix",
"=",
"'```py'",
",",
"suffix",
"=",
"'```'",
",",
"max_size",
"=",
"1985",
")",
"for",
"line",
"in",
"source_lines",
":",
"paginator",
".",
"add_line",
"(",
"line",
")",
"interface",
"=",
"PaginatorInterface",
"(",
"ctx",
".",
"bot",
",",
"paginator",
",",
"owner",
"=",
"ctx",
".",
"author",
")",
"await",
"interface",
".",
"send_to",
"(",
"ctx",
")"
] |
Displays the source code for a command.
|
[
"Displays",
"the",
"source",
"code",
"for",
"a",
"command",
"."
] |
python
|
train
|
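The join-then-split dance exists because inspect.getsourcelines returns lines with their trailing newlines, while the paginator wants clean lines. A short demonstration:

import inspect

def demo():
    return 42

source_lines, _ = inspect.getsourcelines(demo)
print(repr(source_lines[0]))                  # 'def demo():\n'
print(''.join(source_lines).split('\n')[:2])  # ['def demo():', '    return 42']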
marrabld/planarradpy
|
libplanarradpy/planrad.py
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L486-L494
|
def build_b(self, scattering_fraction=0.01833):
"""Calculates the total scattering from back-scattering
:param scattering_fraction: the fraction of back-scattering to total scattering default = 0.01833
b = ( bb[sea water] + bb[p] ) /0.01833
"""
lg.info('Building b with scattering fraction of :: ' + str(scattering_fraction))
self.b = (self.b_b + self.b_water / 2.0) / scattering_fraction
|
[
"def",
"build_b",
"(",
"self",
",",
"scattering_fraction",
"=",
"0.01833",
")",
":",
"lg",
".",
"info",
"(",
"'Building b with scattering fraction of :: '",
"+",
"str",
"(",
"scattering_fraction",
")",
")",
"self",
".",
"b",
"=",
"(",
"self",
".",
"b_b",
"+",
"self",
".",
"b_water",
"/",
"2.0",
")",
"/",
"scattering_fraction"
] |
Calculates the total scattering from back-scattering
:param scattering_fraction: the fraction of back-scattering to total scattering default = 0.01833
b = ( bb[sea water] + bb[p] ) /0.01833
|
[
"Calculates",
"the",
"total",
"scattering",
"from",
"back",
"-",
"scattering"
] |
python
|
test
|
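A worked instance of the formula b = (bb_particulate + bb_water / 2) / scattering_fraction, with made-up but plausible coefficient values:

b_b = 0.01        # hypothetical particulate back-scattering coefficient
b_water = 0.002   # hypothetical pure-water scattering (halved: backward hemisphere only)
scattering_fraction = 0.01833

b = (b_b + b_water / 2.0) / scattering_fraction
print(round(b, 4))  # 0.6001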