code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return R*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz) | def function[_Rforce, parameter[self, R, z, phi, t]]:
constant[
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
]
variable[Rz] assign[=] binary_operation[binary_operation[name[R] ** constant[2.0]] + binary_operation[name[z] ** constant[2.0]]]
variable[sqrtRz] assign[=] call[name[numpy].sqrt, parameter[name[Rz]]]
return[binary_operation[name[R] * binary_operation[binary_operation[binary_operation[constant[1.0] / name[Rz]] / binary_operation[name[self].a + name[sqrtRz]]] - binary_operation[binary_operation[call[name[numpy].log, parameter[binary_operation[constant[1.0] + binary_operation[name[sqrtRz] / name[self].a]]]] / name[sqrtRz]] / name[Rz]]]]] | keyword[def] identifier[_Rforce] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
identifier[Rz] = identifier[R] ** literal[int] + identifier[z] ** literal[int]
identifier[sqrtRz] = identifier[numpy] . identifier[sqrt] ( identifier[Rz] )
keyword[return] identifier[R] *( literal[int] / identifier[Rz] /( identifier[self] . identifier[a] + identifier[sqrtRz] )- identifier[numpy] . identifier[log] ( literal[int] + identifier[sqrtRz] / identifier[self] . identifier[a] )/ identifier[sqrtRz] / identifier[Rz] ) | def _Rforce(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz = R ** 2.0 + z ** 2.0
sqrtRz = numpy.sqrt(Rz)
return R * (1.0 / Rz / (self.a + sqrtRz) - numpy.log(1.0 + sqrtRz / self.a) / sqrtRz / Rz) |
def _set_default_vertex_attributes(self) -> None:
"""Assign default values on attributes to all vertices."""
self.graph.vs["l2fc"] = 0
self.graph.vs["padj"] = 0.5
self.graph.vs["symbol"] = self.graph.vs["name"]
self.graph.vs["diff_expressed"] = False
self.graph.vs["up_regulated"] = False
self.graph.vs["down_regulated"] = False | def function[_set_default_vertex_attributes, parameter[self]]:
constant[Assign default values on attributes to all vertices.]
call[name[self].graph.vs][constant[l2fc]] assign[=] constant[0]
call[name[self].graph.vs][constant[padj]] assign[=] constant[0.5]
call[name[self].graph.vs][constant[symbol]] assign[=] call[name[self].graph.vs][constant[name]]
call[name[self].graph.vs][constant[diff_expressed]] assign[=] constant[False]
call[name[self].graph.vs][constant[up_regulated]] assign[=] constant[False]
call[name[self].graph.vs][constant[down_regulated]] assign[=] constant[False] | keyword[def] identifier[_set_default_vertex_attributes] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= literal[int]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= literal[int]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= keyword[False]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= keyword[False]
identifier[self] . identifier[graph] . identifier[vs] [ literal[string] ]= keyword[False] | def _set_default_vertex_attributes(self) -> None:
"""Assign default values on attributes to all vertices."""
self.graph.vs['l2fc'] = 0
self.graph.vs['padj'] = 0.5
self.graph.vs['symbol'] = self.graph.vs['name']
self.graph.vs['diff_expressed'] = False
self.graph.vs['up_regulated'] = False
self.graph.vs['down_regulated'] = False |
def convert_msg(self, msg):
"""
Takes one POEntry object and converts it (adds a dummy translation to it)
msg is an instance of polib.POEntry
"""
source = msg.msgid
if not source:
# don't translate empty string
return
plural = msg.msgid_plural
if plural:
# translate singular and plural
foreign_single = self.convert(source)
foreign_plural = self.convert(plural)
plural = {
'0': self.final_newline(source, foreign_single),
'1': self.final_newline(plural, foreign_plural),
}
msg.msgstr_plural = plural
else:
foreign = self.convert(source)
msg.msgstr = self.final_newline(source, foreign) | def function[convert_msg, parameter[self, msg]]:
constant[
Takes one POEntry object and converts it (adds a dummy translation to it)
msg is an instance of polib.POEntry
]
variable[source] assign[=] name[msg].msgid
if <ast.UnaryOp object at 0x7da20c795cc0> begin[:]
return[None]
variable[plural] assign[=] name[msg].msgid_plural
if name[plural] begin[:]
variable[foreign_single] assign[=] call[name[self].convert, parameter[name[source]]]
variable[foreign_plural] assign[=] call[name[self].convert, parameter[name[plural]]]
variable[plural] assign[=] dictionary[[<ast.Constant object at 0x7da20c7945e0>, <ast.Constant object at 0x7da20c794370>], [<ast.Call object at 0x7da20c795ab0>, <ast.Call object at 0x7da20c795c60>]]
name[msg].msgstr_plural assign[=] name[plural] | keyword[def] identifier[convert_msg] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[source] = identifier[msg] . identifier[msgid]
keyword[if] keyword[not] identifier[source] :
keyword[return]
identifier[plural] = identifier[msg] . identifier[msgid_plural]
keyword[if] identifier[plural] :
identifier[foreign_single] = identifier[self] . identifier[convert] ( identifier[source] )
identifier[foreign_plural] = identifier[self] . identifier[convert] ( identifier[plural] )
identifier[plural] ={
literal[string] : identifier[self] . identifier[final_newline] ( identifier[source] , identifier[foreign_single] ),
literal[string] : identifier[self] . identifier[final_newline] ( identifier[plural] , identifier[foreign_plural] ),
}
identifier[msg] . identifier[msgstr_plural] = identifier[plural]
keyword[else] :
identifier[foreign] = identifier[self] . identifier[convert] ( identifier[source] )
identifier[msg] . identifier[msgstr] = identifier[self] . identifier[final_newline] ( identifier[source] , identifier[foreign] ) | def convert_msg(self, msg):
"""
Takes one POEntry object and converts it (adds a dummy translation to it)
msg is an instance of polib.POEntry
"""
source = msg.msgid
if not source:
# don't translate empty string
return # depends on [control=['if'], data=[]]
plural = msg.msgid_plural
if plural:
# translate singular and plural
foreign_single = self.convert(source)
foreign_plural = self.convert(plural)
plural = {'0': self.final_newline(source, foreign_single), '1': self.final_newline(plural, foreign_plural)}
msg.msgstr_plural = plural # depends on [control=['if'], data=[]]
else:
foreign = self.convert(source)
msg.msgstr = self.final_newline(source, foreign) |
def _get_value(self):
"""
Return two delegating variables. Each variable should contain
a value attribute with the real value.
"""
x, y = self._point.x, self._point.y
self._px, self._py = self._item_point.canvas.get_matrix_i2i(self._item_point,
self._item_target).transform_point(x, y)
return self._px, self._py | def function[_get_value, parameter[self]]:
constant[
Return two delegating variables. Each variable should contain
a value attribute with the real value.
]
<ast.Tuple object at 0x7da1b1aa4910> assign[=] tuple[[<ast.Attribute object at 0x7da1b1aa7c70>, <ast.Attribute object at 0x7da1b1aa6cb0>]]
<ast.Tuple object at 0x7da1b1aa4790> assign[=] call[call[name[self]._item_point.canvas.get_matrix_i2i, parameter[name[self]._item_point, name[self]._item_target]].transform_point, parameter[name[x], name[y]]]
return[tuple[[<ast.Attribute object at 0x7da1b1aa4be0>, <ast.Attribute object at 0x7da1b1aa7ac0>]]] | keyword[def] identifier[_get_value] ( identifier[self] ):
literal[string]
identifier[x] , identifier[y] = identifier[self] . identifier[_point] . identifier[x] , identifier[self] . identifier[_point] . identifier[y]
identifier[self] . identifier[_px] , identifier[self] . identifier[_py] = identifier[self] . identifier[_item_point] . identifier[canvas] . identifier[get_matrix_i2i] ( identifier[self] . identifier[_item_point] ,
identifier[self] . identifier[_item_target] ). identifier[transform_point] ( identifier[x] , identifier[y] )
keyword[return] identifier[self] . identifier[_px] , identifier[self] . identifier[_py] | def _get_value(self):
"""
Return two delegating variables. Each variable should contain
a value attribute with the real value.
"""
(x, y) = (self._point.x, self._point.y)
(self._px, self._py) = self._item_point.canvas.get_matrix_i2i(self._item_point, self._item_target).transform_point(x, y)
return (self._px, self._py) |
def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = "*****"
return """
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
""" % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | def function[res_to_str, parameter[res]]:
constant[
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
]
if compare[constant[Authorization] in name[res].request.headers] begin[:]
call[name[res].request.headers][constant[Authorization]] assign[=] constant[*****]
return[binary_operation[constant[
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c9900a0>, <ast.Call object at 0x7da20c993850>, <ast.BoolOp object at 0x7da20c993760>, <ast.Attribute object at 0x7da20c9902e0>, <ast.Attribute object at 0x7da20c992800>, <ast.Attribute object at 0x7da20c9900d0>, <ast.Attribute object at 0x7da20c991cf0>]]]] | keyword[def] identifier[res_to_str] ( identifier[res] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[res] . identifier[request] . identifier[headers] :
identifier[res] . identifier[request] . identifier[headers] [ literal[string] ]= literal[string]
keyword[return] literal[string] %( identifier[res] . identifier[url] ,
identifier[str] ( identifier[res] . identifier[request] . identifier[headers] ),
identifier[OLD_REQ] keyword[and] identifier[res] . identifier[request] . identifier[data] keyword[or] identifier[res] . identifier[request] . identifier[body] ,
identifier[res] . identifier[headers] ,
identifier[res] . identifier[status_code] ,
identifier[res] . identifier[reason] ,
identifier[res] . identifier[text] ) | def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = '*****' # depends on [control=['if'], data=[]]
return '\n####################################\nurl = %s\nheaders = %s\n-------- data sent -----------------\n%s\n------------------------------------\n@@@@@ response @@@@@@@@@@@@@@@@\nheaders = %s\ncode = %d\nreason = %s\n--------- data received ------------\n%s\n------------------------------------\n####################################\n' % (res.url, str(res.request.headers), OLD_REQ and res.request.data or res.request.body, res.headers, res.status_code, res.reason, res.text) |
def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out | def function[_ppf, parameter[self, q, left, right, cache]]:
constant[
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
]
variable[left] assign[=] call[name[evaluation].get_inverse_cache, parameter[name[left], name[cache]]]
variable[right] assign[=] call[name[evaluation].get_inverse_cache, parameter[name[right], name[cache]]]
if call[name[isinstance], parameter[name[left], name[Dist]]] begin[:]
if call[name[isinstance], parameter[name[right], name[Dist]]] begin[:]
<ast.Raise object at 0x7da18eb560b0>
variable[right] assign[=] binary_operation[name[right] + call[name[numpy].zeros, parameter[name[q].shape]]]
variable[q] assign[=] call[name[numpy].where, parameter[compare[name[right] less[<] constant[0]], binary_operation[constant[1] - name[q]], name[q]]]
variable[out] assign[=] binary_operation[call[name[evaluation].evaluate_inverse, parameter[name[left], name[q]]] ** name[right]]
return[name[out]] | keyword[def] identifier[_ppf] ( identifier[self] , identifier[q] , identifier[left] , identifier[right] , identifier[cache] ):
literal[string]
identifier[left] = identifier[evaluation] . identifier[get_inverse_cache] ( identifier[left] , identifier[cache] )
identifier[right] = identifier[evaluation] . identifier[get_inverse_cache] ( identifier[right] , identifier[cache] )
keyword[if] identifier[isinstance] ( identifier[left] , identifier[Dist] ):
keyword[if] identifier[isinstance] ( identifier[right] , identifier[Dist] ):
keyword[raise] identifier[StochasticallyDependentError] (
literal[string] . identifier[format] ( identifier[left] , identifier[right] ))
keyword[elif] keyword[not] identifier[isinstance] ( identifier[right] , identifier[Dist] ):
keyword[return] identifier[left] ** identifier[right]
keyword[else] :
identifier[out] = identifier[evaluation] . identifier[evaluate_inverse] ( identifier[right] , identifier[q] , identifier[cache] = identifier[cache] )
identifier[out] = identifier[numpy] . identifier[where] ( identifier[left] < literal[int] , literal[int] - identifier[out] , identifier[out] )
identifier[out] = identifier[left] ** identifier[out]
keyword[return] identifier[out]
identifier[right] = identifier[right] + identifier[numpy] . identifier[zeros] ( identifier[q] . identifier[shape] )
identifier[q] = identifier[numpy] . identifier[where] ( identifier[right] < literal[int] , literal[int] - identifier[q] , identifier[q] )
identifier[out] = identifier[evaluation] . identifier[evaluate_inverse] ( identifier[left] , identifier[q] , identifier[cache] = identifier[cache] )** identifier[right]
keyword[return] identifier[out] | def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError('under-defined distribution {} or {}'.format(left, right)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not isinstance(right, Dist):
return left ** right # depends on [control=['if'], data=[]]
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1 - out, out)
out = left ** out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1 - q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache) ** right
return out |
def get_plugins(directory):
"""
returns the list of plugins from the specified directory
:param directory: directory that contains the plugins. Files starting with _ will be ignored.
"""
# not just plugin_*.py
plugins = []
files = glob.glob(directory + "/*.py")
for p in files:
p = p.replace(directory + "/", "").replace(".py", "")
if not p.startswith('_'):
plugins.append(p)
# log.info("Loading Plugins from {0}".format(dir))
# log.info(" {0}".format(str(plugins)))
return plugins | def function[get_plugins, parameter[directory]]:
constant[
returns the list of plugins from the specified directory
:param directory: directory that contains the plugins. Files starting with _ will be ignored.
]
variable[plugins] assign[=] list[[]]
variable[files] assign[=] call[name[glob].glob, parameter[binary_operation[name[directory] + constant[/*.py]]]]
for taget[name[p]] in starred[name[files]] begin[:]
variable[p] assign[=] call[call[name[p].replace, parameter[binary_operation[name[directory] + constant[/]], constant[]]].replace, parameter[constant[.py], constant[]]]
if <ast.UnaryOp object at 0x7da204565210> begin[:]
call[name[plugins].append, parameter[name[p]]]
return[name[plugins]] | keyword[def] identifier[get_plugins] ( identifier[directory] ):
literal[string]
identifier[plugins] =[]
identifier[files] = identifier[glob] . identifier[glob] ( identifier[directory] + literal[string] )
keyword[for] identifier[p] keyword[in] identifier[files] :
identifier[p] = identifier[p] . identifier[replace] ( identifier[directory] + literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[p] . identifier[startswith] ( literal[string] ):
identifier[plugins] . identifier[append] ( identifier[p] )
keyword[return] identifier[plugins] | def get_plugins(directory):
"""
returns the list of plugins from the specified directory
:param directory: directory that contains the plugins. Files starting with _ will be ignored.
"""
# not just plugin_*.py
plugins = []
files = glob.glob(directory + '/*.py')
for p in files:
p = p.replace(directory + '/', '').replace('.py', '')
if not p.startswith('_'):
plugins.append(p) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
# log.info("Loading Plugins from {0}".format(dir))
# log.info(" {0}".format(str(plugins)))
return plugins |
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget) | def function[prop_budget, parameter[self, budget]]:
constant[
Set limit on the number of propagations.
]
if name[self].glucose begin[:]
call[name[pysolvers].glucose3_pbudget, parameter[name[self].glucose, name[budget]]] | keyword[def] identifier[prop_budget] ( identifier[self] , identifier[budget] ):
literal[string]
keyword[if] identifier[self] . identifier[glucose] :
identifier[pysolvers] . identifier[glucose3_pbudget] ( identifier[self] . identifier[glucose] , identifier[budget] ) | def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget) # depends on [control=['if'], data=[]] |
def _ThCond(rho, T, fase=None, drho=None):
"""Equation for the thermal conductivity
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
fase: dict, optional for calculate critical enhancement
phase properties
drho: float, optional for calculate critical enhancement
[∂ρ/∂P]T at reference state,
Returns
-------
k : float
Thermal conductivity, [W/mK]
Examples
--------
>>> _ThCond(998, 298.15)
0.6077128675880629
>>> _ThCond(0, 873.15)
0.07910346589648833
References
----------
IAPWS, Release on the IAPWS Formulation 2011 for the Thermal Conductivity
of Ordinary Water Substance, http://www.iapws.org/relguide/ThCond.html
"""
d = rho/rhoc
Tr = T/Tc
# Eq 16
no = [2.443221e-3, 1.323095e-2, 6.770357e-3, -3.454586e-3, 4.096266e-4]
k0 = Tr**0.5/sum([n/Tr**i for i, n in enumerate(no)])
# Eq 17
I = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4,
4, 4, 4, 4, 4]
J = [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0,
1, 2, 3, 4, 5]
nij = [1.60397357, -0.646013523, 0.111443906, 0.102997357, -0.0504123634,
0.00609859258, 2.33771842, -2.78843778, 1.53616167, -0.463045512,
0.0832827019, -0.00719201245, 2.19650529, -4.54580785, 3.55777244,
-1.40944978, 0.275418278, -0.0205938816, -1.21051378, 1.60812989,
-0.621178141, 0.0716373224, -2.7203370, 4.57586331, -3.18369245,
1.1168348, -0.19268305, 0.012913842]
k1 = exp(d*sum([(1/Tr-1)**i*n*(d-1)**j for i, j, n in zip(I, J, nij)]))
# Critical enhancement
if fase:
R = 0.46151805
if not drho:
# Industrial formulation
# Eq 25
if d <= 0.310559006:
ai = [6.53786807199516, -5.61149954923348, 3.39624167361325,
-2.27492629730878, 10.2631854662709, 1.97815050331519]
elif d <= 0.776397516:
ai = [6.52717759281799, -6.30816983387575, 8.08379285492595,
-9.82240510197603, 12.1358413791395, -5.54349664571295]
elif d <= 1.242236025:
ai = [5.35500529896124, -3.96415689925446, 8.91990208918795,
-12.0338729505790, 9.19494865194302, -2.16866274479712]
elif d <= 1.863354037:
ai = [1.55225959906681, 0.464621290821181, 8.93237374861479,
-11.0321960061126, 6.16780999933360, -0.965458722086812]
else:
ai = [1.11999926419994, 0.595748562571649, 9.88952565078920,
-10.3255051147040, 4.66861294457414, -0.503243546373828]
drho = 1/sum([a*d**i for i, a in enumerate(ai)])*rhoc/Pc
DeltaX = d*(Pc/rhoc*fase.drhodP_T-Pc/rhoc*drho*1.5/Tr)
if DeltaX < 0:
DeltaX = 0
X = 0.13*(DeltaX/0.06)**(0.63/1.239) # Eq 22
y = X/0.4 # Eq 20
# Eq 19
if y < 1.2e-7:
Z = 0
else:
Z = 2/pi/y*(((1-1/fase.cp_cv)*atan(y)+y/fase.cp_cv)-(
1-exp(-1/(1/y+y**2/3/d**2))))
# Eq 18
k2 = 177.8514*d*fase.cp/R*Tr/fase.mu*1e-6*Z
else:
# No critical enhancement
k2 = 0
# Eq 10
k = k0*k1+k2
return 1e-3*k | def function[_ThCond, parameter[rho, T, fase, drho]]:
constant[Equation for the thermal conductivity
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
fase: dict, optional for calculate critical enhancement
phase properties
drho: float, optional for calculate critical enhancement
[∂ρ/∂P]T at reference state,
Returns
-------
k : float
Thermal conductivity, [W/mK]
Examples
--------
>>> _ThCond(998, 298.15)
0.6077128675880629
>>> _ThCond(0, 873.15)
0.07910346589648833
References
----------
IAPWS, Release on the IAPWS Formulation 2011 for the Thermal Conductivity
of Ordinary Water Substance, http://www.iapws.org/relguide/ThCond.html
]
variable[d] assign[=] binary_operation[name[rho] / name[rhoc]]
variable[Tr] assign[=] binary_operation[name[T] / name[Tc]]
variable[no] assign[=] list[[<ast.Constant object at 0x7da207f9a2c0>, <ast.Constant object at 0x7da207f9aef0>, <ast.Constant object at 0x7da207f98790>, <ast.UnaryOp object at 0x7da207f9b430>, <ast.Constant object at 0x7da207f9b7c0>]]
variable[k0] assign[=] binary_operation[binary_operation[name[Tr] ** constant[0.5]] / call[name[sum], parameter[<ast.ListComp object at 0x7da207f9b0a0>]]]
variable[I] assign[=] list[[<ast.Constant object at 0x7da207f9a5c0>, <ast.Constant object at 0x7da207f99cc0>, <ast.Constant object at 0x7da207f9b190>, <ast.Constant object at 0x7da207f98910>, <ast.Constant object at 0x7da207f9aec0>, <ast.Constant object at 0x7da207f9ab90>, <ast.Constant object at 0x7da207f991e0>, <ast.Constant object at 0x7da207f99060>, <ast.Constant object at 0x7da207f99c30>, <ast.Constant object at 0x7da207f9bc10>, <ast.Constant object at 0x7da207f9bfd0>, <ast.Constant object at 0x7da207f98400>, <ast.Constant object at 0x7da207f98040>, <ast.Constant object at 0x7da207f99ba0>, <ast.Constant object at 0x7da207f9a410>, <ast.Constant object at 0x7da207f9b2e0>, <ast.Constant object at 0x7da207f9bac0>, <ast.Constant object at 0x7da207f9a530>, <ast.Constant object at 0x7da207f98310>, <ast.Constant object at 0x7da207f9a6b0>, <ast.Constant object at 0x7da207f9abc0>, <ast.Constant object at 0x7da207f9b700>, <ast.Constant object at 0x7da207f99210>, <ast.Constant object at 0x7da207f987c0>, <ast.Constant object at 0x7da207f992a0>, <ast.Constant object at 0x7da207f9b730>, <ast.Constant object at 0x7da207f981f0>, <ast.Constant object at 0x7da207f9ba00>]]
variable[J] assign[=] list[[<ast.Constant object at 0x7da207f9b8b0>, <ast.Constant object at 0x7da207f98100>, <ast.Constant object at 0x7da207f9ab00>, <ast.Constant object at 0x7da207f9a440>, <ast.Constant object at 0x7da207f987f0>, <ast.Constant object at 0x7da207f9a560>, <ast.Constant object at 0x7da207f9a920>, <ast.Constant object at 0x7da207f9bd90>, <ast.Constant object at 0x7da207f9a650>, <ast.Constant object at 0x7da207f98550>, <ast.Constant object at 0x7da207f9a9b0>, <ast.Constant object at 0x7da207f9b880>, <ast.Constant object at 0x7da207f997e0>, <ast.Constant object at 0x7da207f995a0>, <ast.Constant object at 0x7da207f9a710>, <ast.Constant object at 0x7da207f9a110>, <ast.Constant object at 0x7da207f992d0>, <ast.Constant object at 0x7da207f9ae60>, <ast.Constant object at 0x7da207f99e10>, <ast.Constant object at 0x7da207f9b1f0>, <ast.Constant object at 0x7da207f9be50>, <ast.Constant object at 0x7da207f9b4c0>, <ast.Constant object at 0x7da207f9af50>, <ast.Constant object at 0x7da207f98bb0>, <ast.Constant object at 0x7da207f9a0b0>, <ast.Constant object at 0x7da207f9bb50>, <ast.Constant object at 0x7da207f98370>, <ast.Constant object at 0x7da207f98280>]]
variable[nij] assign[=] list[[<ast.Constant object at 0x7da207f99de0>, <ast.UnaryOp object at 0x7da207f99390>, <ast.Constant object at 0x7da207f99150>, <ast.Constant object at 0x7da207f9b550>, <ast.UnaryOp object at 0x7da207f98af0>, <ast.Constant object at 0x7da207f9af20>, <ast.Constant object at 0x7da207f98a90>, <ast.UnaryOp object at 0x7da207f993f0>, <ast.Constant object at 0x7da207f997b0>, <ast.UnaryOp object at 0x7da207f9bf10>, <ast.Constant object at 0x7da207f9af80>, <ast.UnaryOp object at 0x7da207f9ae30>, <ast.Constant object at 0x7da207f994e0>, <ast.UnaryOp object at 0x7da207f98220>, <ast.Constant object at 0x7da207f9b910>, <ast.UnaryOp object at 0x7da18bcca650>, <ast.Constant object at 0x7da18bcca6e0>, <ast.UnaryOp object at 0x7da18bcca530>, <ast.UnaryOp object at 0x7da18bcc9480>, <ast.Constant object at 0x7da18bcc9ab0>, <ast.UnaryOp object at 0x7da18bcc9f00>, <ast.Constant object at 0x7da18bcc9bd0>, <ast.UnaryOp object at 0x7da18bcca4d0>, <ast.Constant object at 0x7da18bcc9fc0>, <ast.UnaryOp object at 0x7da2054a4e50>, <ast.Constant object at 0x7da2054a5090>, <ast.UnaryOp object at 0x7da2054a6710>, <ast.Constant object at 0x7da2054a6ec0>]]
variable[k1] assign[=] call[name[exp], parameter[binary_operation[name[d] * call[name[sum], parameter[<ast.ListComp object at 0x7da2054a5ed0>]]]]]
if name[fase] begin[:]
variable[R] assign[=] constant[0.46151805]
if <ast.UnaryOp object at 0x7da2054a7cd0> begin[:]
if compare[name[d] less_or_equal[<=] constant[0.310559006]] begin[:]
variable[ai] assign[=] list[[<ast.Constant object at 0x7da2054a51b0>, <ast.UnaryOp object at 0x7da2054a77c0>, <ast.Constant object at 0x7da2054a7010>, <ast.UnaryOp object at 0x7da2054a6ad0>, <ast.Constant object at 0x7da2054a5270>, <ast.Constant object at 0x7da2054a5000>]]
variable[drho] assign[=] binary_operation[binary_operation[binary_operation[constant[1] / call[name[sum], parameter[<ast.ListComp object at 0x7da2054a7280>]]] * name[rhoc]] / name[Pc]]
variable[DeltaX] assign[=] binary_operation[name[d] * binary_operation[binary_operation[binary_operation[name[Pc] / name[rhoc]] * name[fase].drhodP_T] - binary_operation[binary_operation[binary_operation[binary_operation[name[Pc] / name[rhoc]] * name[drho]] * constant[1.5]] / name[Tr]]]]
if compare[name[DeltaX] less[<] constant[0]] begin[:]
variable[DeltaX] assign[=] constant[0]
variable[X] assign[=] binary_operation[constant[0.13] * binary_operation[binary_operation[name[DeltaX] / constant[0.06]] ** binary_operation[constant[0.63] / constant[1.239]]]]
variable[y] assign[=] binary_operation[name[X] / constant[0.4]]
if compare[name[y] less[<] constant[1.2e-07]] begin[:]
variable[Z] assign[=] constant[0]
variable[k2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[177.8514] * name[d]] * name[fase].cp] / name[R]] * name[Tr]] / name[fase].mu] * constant[1e-06]] * name[Z]]
variable[k] assign[=] binary_operation[binary_operation[name[k0] * name[k1]] + name[k2]]
return[binary_operation[constant[0.001] * name[k]]] | keyword[def] identifier[_ThCond] ( identifier[rho] , identifier[T] , identifier[fase] = keyword[None] , identifier[drho] = keyword[None] ):
literal[string]
identifier[d] = identifier[rho] / identifier[rhoc]
identifier[Tr] = identifier[T] / identifier[Tc]
identifier[no] =[ literal[int] , literal[int] , literal[int] ,- literal[int] , literal[int] ]
identifier[k0] = identifier[Tr] ** literal[int] / identifier[sum] ([ identifier[n] / identifier[Tr] ** identifier[i] keyword[for] identifier[i] , identifier[n] keyword[in] identifier[enumerate] ( identifier[no] )])
identifier[I] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[J] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[nij] =[ literal[int] ,- literal[int] , literal[int] , literal[int] ,- literal[int] ,
literal[int] , literal[int] ,- literal[int] , literal[int] ,- literal[int] ,
literal[int] ,- literal[int] , literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] , literal[int] ,- literal[int] ,
literal[int] ,- literal[int] , literal[int] ]
identifier[k1] = identifier[exp] ( identifier[d] * identifier[sum] ([( literal[int] / identifier[Tr] - literal[int] )** identifier[i] * identifier[n] *( identifier[d] - literal[int] )** identifier[j] keyword[for] identifier[i] , identifier[j] , identifier[n] keyword[in] identifier[zip] ( identifier[I] , identifier[J] , identifier[nij] )]))
keyword[if] identifier[fase] :
identifier[R] = literal[int]
keyword[if] keyword[not] identifier[drho] :
keyword[if] identifier[d] <= literal[int] :
identifier[ai] =[ literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] , literal[int] ]
keyword[elif] identifier[d] <= literal[int] :
identifier[ai] =[ literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ]
keyword[elif] identifier[d] <= literal[int] :
identifier[ai] =[ literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ]
keyword[elif] identifier[d] <= literal[int] :
identifier[ai] =[ literal[int] , literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ]
keyword[else] :
identifier[ai] =[ literal[int] , literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ]
identifier[drho] = literal[int] / identifier[sum] ([ identifier[a] * identifier[d] ** identifier[i] keyword[for] identifier[i] , identifier[a] keyword[in] identifier[enumerate] ( identifier[ai] )])* identifier[rhoc] / identifier[Pc]
identifier[DeltaX] = identifier[d] *( identifier[Pc] / identifier[rhoc] * identifier[fase] . identifier[drhodP_T] - identifier[Pc] / identifier[rhoc] * identifier[drho] * literal[int] / identifier[Tr] )
keyword[if] identifier[DeltaX] < literal[int] :
identifier[DeltaX] = literal[int]
identifier[X] = literal[int] *( identifier[DeltaX] / literal[int] )**( literal[int] / literal[int] )
identifier[y] = identifier[X] / literal[int]
keyword[if] identifier[y] < literal[int] :
identifier[Z] = literal[int]
keyword[else] :
identifier[Z] = literal[int] / identifier[pi] / identifier[y] *((( literal[int] - literal[int] / identifier[fase] . identifier[cp_cv] )* identifier[atan] ( identifier[y] )+ identifier[y] / identifier[fase] . identifier[cp_cv] )-(
literal[int] - identifier[exp] (- literal[int] /( literal[int] / identifier[y] + identifier[y] ** literal[int] / literal[int] / identifier[d] ** literal[int] ))))
identifier[k2] = literal[int] * identifier[d] * identifier[fase] . identifier[cp] / identifier[R] * identifier[Tr] / identifier[fase] . identifier[mu] * literal[int] * identifier[Z]
keyword[else] :
identifier[k2] = literal[int]
identifier[k] = identifier[k0] * identifier[k1] + identifier[k2]
keyword[return] literal[int] * identifier[k] | def _ThCond(rho, T, fase=None, drho=None):
"""Equation for the thermal conductivity
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
fase: dict, optional for calculate critical enhancement
phase properties
drho: float, optional for calculate critical enhancement
[∂ρ/∂P]T at reference state,
Returns
-------
k : float
Thermal conductivity, [W/mK]
Examples
--------
>>> _ThCond(998, 298.15)
0.6077128675880629
>>> _ThCond(0, 873.15)
0.07910346589648833
References
----------
IAPWS, Release on the IAPWS Formulation 2011 for the Thermal Conductivity
of Ordinary Water Substance, http://www.iapws.org/relguide/ThCond.html
"""
d = rho / rhoc
Tr = T / Tc
# Eq 16
no = [0.002443221, 0.01323095, 0.006770357, -0.003454586, 0.0004096266]
k0 = Tr ** 0.5 / sum([n / Tr ** i for (i, n) in enumerate(no)])
# Eq 17
I = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4]
J = [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5]
nij = [1.60397357, -0.646013523, 0.111443906, 0.102997357, -0.0504123634, 0.00609859258, 2.33771842, -2.78843778, 1.53616167, -0.463045512, 0.0832827019, -0.00719201245, 2.19650529, -4.54580785, 3.55777244, -1.40944978, 0.275418278, -0.0205938816, -1.21051378, 1.60812989, -0.621178141, 0.0716373224, -2.720337, 4.57586331, -3.18369245, 1.1168348, -0.19268305, 0.012913842]
k1 = exp(d * sum([(1 / Tr - 1) ** i * n * (d - 1) ** j for (i, j, n) in zip(I, J, nij)]))
# Critical enhancement
if fase:
R = 0.46151805
if not drho:
# Industrial formulation
# Eq 25
if d <= 0.310559006:
ai = [6.53786807199516, -5.61149954923348, 3.39624167361325, -2.27492629730878, 10.2631854662709, 1.97815050331519] # depends on [control=['if'], data=[]]
elif d <= 0.776397516:
ai = [6.52717759281799, -6.30816983387575, 8.08379285492595, -9.82240510197603, 12.1358413791395, -5.54349664571295] # depends on [control=['if'], data=[]]
elif d <= 1.242236025:
ai = [5.35500529896124, -3.96415689925446, 8.91990208918795, -12.033872950579, 9.19494865194302, -2.16866274479712] # depends on [control=['if'], data=[]]
elif d <= 1.863354037:
ai = [1.55225959906681, 0.464621290821181, 8.93237374861479, -11.0321960061126, 6.1678099993336, -0.965458722086812] # depends on [control=['if'], data=[]]
else:
ai = [1.11999926419994, 0.595748562571649, 9.8895256507892, -10.325505114704, 4.66861294457414, -0.503243546373828]
drho = 1 / sum([a * d ** i for (i, a) in enumerate(ai)]) * rhoc / Pc # depends on [control=['if'], data=[]]
DeltaX = d * (Pc / rhoc * fase.drhodP_T - Pc / rhoc * drho * 1.5 / Tr)
if DeltaX < 0:
DeltaX = 0 # depends on [control=['if'], data=['DeltaX']]
X = 0.13 * (DeltaX / 0.06) ** (0.63 / 1.239) # Eq 22
y = X / 0.4 # Eq 20
# Eq 19
if y < 1.2e-07:
Z = 0 # depends on [control=['if'], data=[]]
else:
Z = 2 / pi / y * ((1 - 1 / fase.cp_cv) * atan(y) + y / fase.cp_cv - (1 - exp(-1 / (1 / y + y ** 2 / 3 / d ** 2))))
# Eq 18
k2 = 177.8514 * d * fase.cp / R * Tr / fase.mu * 1e-06 * Z # depends on [control=['if'], data=[]]
else:
# No critical enhancement
k2 = 0
# Eq 10
k = k0 * k1 + k2
return 0.001 * k |
def to_dict(self) -> Dict[str, Any]:
"""
Creates a dictionary-based description of this exception, ready to be
serialised as JSON or YAML.
"""
jsn = {
'kind': self.__class__.__name__,
'message': self.message
} # type: Dict[str, Any]
data = self.data
if data:
jsn['data'] = data
jsn = {'error': jsn}
return jsn | def function[to_dict, parameter[self]]:
constant[
Creates a dictionary-based description of this exception, ready to be
serialised as JSON or YAML.
]
variable[jsn] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c8a3e0>, <ast.Constant object at 0x7da1b0c88bb0>], [<ast.Attribute object at 0x7da1b0c893f0>, <ast.Attribute object at 0x7da1b0c8a800>]]
variable[data] assign[=] name[self].data
if name[data] begin[:]
call[name[jsn]][constant[data]] assign[=] name[data]
variable[jsn] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c89d50>], [<ast.Name object at 0x7da1b0c8ada0>]]
return[name[jsn]] | keyword[def] identifier[to_dict] ( identifier[self] )-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[jsn] ={
literal[string] : identifier[self] . identifier[__class__] . identifier[__name__] ,
literal[string] : identifier[self] . identifier[message]
}
identifier[data] = identifier[self] . identifier[data]
keyword[if] identifier[data] :
identifier[jsn] [ literal[string] ]= identifier[data]
identifier[jsn] ={ literal[string] : identifier[jsn] }
keyword[return] identifier[jsn] | def to_dict(self) -> Dict[str, Any]:
"""
Creates a dictionary-based description of this exception, ready to be
serialised as JSON or YAML.
"""
jsn = {'kind': self.__class__.__name__, 'message': self.message} # type: Dict[str, Any]
data = self.data
if data:
jsn['data'] = data # depends on [control=['if'], data=[]]
jsn = {'error': jsn}
return jsn |
def build_node_data_bag():
"""Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles
"""
nodes = lib.get_nodes()
node_data_bag_path = os.path.join('data_bags', 'node')
# In case there are leftovers
remove_local_node_data_bag()
os.makedirs(node_data_bag_path)
all_recipes = lib.get_recipes()
all_roles = lib.get_roles()
for node in nodes:
# Dots are not allowed (only alphanumeric), substitute by underscores
node['id'] = node['name'].replace('.', '_')
# Build extended role list
node['role'] = lib.get_roles_in_node(node)
node['roles'] = node['role'][:]
for role in node['role']:
node['roles'].extend(lib.get_roles_in_role(role))
node['roles'] = list(set(node['roles']))
# Build extended recipe list
node['recipes'] = lib.get_recipes_in_node(node)
# Add recipes found inside each roles in the extended role list
for role in node['roles']:
node['recipes'].extend(lib.get_recipes_in_role(role))
node['recipes'] = list(set(node['recipes']))
# Add node attributes
_add_merged_attributes(node, all_recipes, all_roles)
_add_automatic_attributes(node)
# Save node data bag item
with open(os.path.join(
'data_bags', 'node', node['id'] + '.json'), 'w') as f:
f.write(json.dumps(node)) | def function[build_node_data_bag, parameter[]]:
constant[Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles
]
variable[nodes] assign[=] call[name[lib].get_nodes, parameter[]]
variable[node_data_bag_path] assign[=] call[name[os].path.join, parameter[constant[data_bags], constant[node]]]
call[name[remove_local_node_data_bag], parameter[]]
call[name[os].makedirs, parameter[name[node_data_bag_path]]]
variable[all_recipes] assign[=] call[name[lib].get_recipes, parameter[]]
variable[all_roles] assign[=] call[name[lib].get_roles, parameter[]]
for taget[name[node]] in starred[name[nodes]] begin[:]
call[name[node]][constant[id]] assign[=] call[call[name[node]][constant[name]].replace, parameter[constant[.], constant[_]]]
call[name[node]][constant[role]] assign[=] call[name[lib].get_roles_in_node, parameter[name[node]]]
call[name[node]][constant[roles]] assign[=] call[call[name[node]][constant[role]]][<ast.Slice object at 0x7da18fe91450>]
for taget[name[role]] in starred[call[name[node]][constant[role]]] begin[:]
call[call[name[node]][constant[roles]].extend, parameter[call[name[lib].get_roles_in_role, parameter[name[role]]]]]
call[name[node]][constant[roles]] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[node]][constant[roles]]]]]]
call[name[node]][constant[recipes]] assign[=] call[name[lib].get_recipes_in_node, parameter[name[node]]]
for taget[name[role]] in starred[call[name[node]][constant[roles]]] begin[:]
call[call[name[node]][constant[recipes]].extend, parameter[call[name[lib].get_recipes_in_role, parameter[name[role]]]]]
call[name[node]][constant[recipes]] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[node]][constant[recipes]]]]]]
call[name[_add_merged_attributes], parameter[name[node], name[all_recipes], name[all_roles]]]
call[name[_add_automatic_attributes], parameter[name[node]]]
with call[name[open], parameter[call[name[os].path.join, parameter[constant[data_bags], constant[node], binary_operation[call[name[node]][constant[id]] + constant[.json]]]], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[json].dumps, parameter[name[node]]]]] | keyword[def] identifier[build_node_data_bag] ():
literal[string]
identifier[nodes] = identifier[lib] . identifier[get_nodes] ()
identifier[node_data_bag_path] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] )
identifier[remove_local_node_data_bag] ()
identifier[os] . identifier[makedirs] ( identifier[node_data_bag_path] )
identifier[all_recipes] = identifier[lib] . identifier[get_recipes] ()
identifier[all_roles] = identifier[lib] . identifier[get_roles] ()
keyword[for] identifier[node] keyword[in] identifier[nodes] :
identifier[node] [ literal[string] ]= identifier[node] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[node] [ literal[string] ]= identifier[lib] . identifier[get_roles_in_node] ( identifier[node] )
identifier[node] [ literal[string] ]= identifier[node] [ literal[string] ][:]
keyword[for] identifier[role] keyword[in] identifier[node] [ literal[string] ]:
identifier[node] [ literal[string] ]. identifier[extend] ( identifier[lib] . identifier[get_roles_in_role] ( identifier[role] ))
identifier[node] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[node] [ literal[string] ]))
identifier[node] [ literal[string] ]= identifier[lib] . identifier[get_recipes_in_node] ( identifier[node] )
keyword[for] identifier[role] keyword[in] identifier[node] [ literal[string] ]:
identifier[node] [ literal[string] ]. identifier[extend] ( identifier[lib] . identifier[get_recipes_in_role] ( identifier[role] ))
identifier[node] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[node] [ literal[string] ]))
identifier[_add_merged_attributes] ( identifier[node] , identifier[all_recipes] , identifier[all_roles] )
identifier[_add_automatic_attributes] ( identifier[node] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] (
literal[string] , literal[string] , identifier[node] [ literal[string] ]+ literal[string] ), literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[node] )) | def build_node_data_bag():
"""Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles
"""
nodes = lib.get_nodes()
node_data_bag_path = os.path.join('data_bags', 'node')
# In case there are leftovers
remove_local_node_data_bag()
os.makedirs(node_data_bag_path)
all_recipes = lib.get_recipes()
all_roles = lib.get_roles()
for node in nodes:
# Dots are not allowed (only alphanumeric), substitute by underscores
node['id'] = node['name'].replace('.', '_')
# Build extended role list
node['role'] = lib.get_roles_in_node(node)
node['roles'] = node['role'][:]
for role in node['role']:
node['roles'].extend(lib.get_roles_in_role(role)) # depends on [control=['for'], data=['role']]
node['roles'] = list(set(node['roles']))
# Build extended recipe list
node['recipes'] = lib.get_recipes_in_node(node)
# Add recipes found inside each roles in the extended role list
for role in node['roles']:
node['recipes'].extend(lib.get_recipes_in_role(role)) # depends on [control=['for'], data=['role']]
node['recipes'] = list(set(node['recipes']))
# Add node attributes
_add_merged_attributes(node, all_recipes, all_roles)
_add_automatic_attributes(node)
# Save node data bag item
with open(os.path.join('data_bags', 'node', node['id'] + '.json'), 'w') as f:
f.write(json.dumps(node)) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['node']] |
def transactions(self, *phids):
"""Retrieve tasks transactions.
:param phids: list of tasks identifiers
"""
params = {
self.PIDS: phids
}
response = self._call(self.MANIPHEST_TRANSACTIONS, params)
return response | def function[transactions, parameter[self]]:
constant[Retrieve tasks transactions.
:param phids: list of tasks identifiers
]
variable[params] assign[=] dictionary[[<ast.Attribute object at 0x7da1b020d120>], [<ast.Name object at 0x7da1b020d510>]]
variable[response] assign[=] call[name[self]._call, parameter[name[self].MANIPHEST_TRANSACTIONS, name[params]]]
return[name[response]] | keyword[def] identifier[transactions] ( identifier[self] ,* identifier[phids] ):
literal[string]
identifier[params] ={
identifier[self] . identifier[PIDS] : identifier[phids]
}
identifier[response] = identifier[self] . identifier[_call] ( identifier[self] . identifier[MANIPHEST_TRANSACTIONS] , identifier[params] )
keyword[return] identifier[response] | def transactions(self, *phids):
"""Retrieve tasks transactions.
:param phids: list of tasks identifiers
"""
params = {self.PIDS: phids}
response = self._call(self.MANIPHEST_TRANSACTIONS, params)
return response |
def do_reduce(func_name, *sequence):
"""
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
Functions may be registered with ``native_tags``
or can be ``builtins`` or from the ``operator`` module
Syntax::
{% reduce [function] [sequence] %}
{% reduce [function] [item1 item2 ...] %}
For example::
{% reduce add 1 2 3 4 5 %}
calculates::
((((1+2)+3)+4)+5) = 15
"""
if len(sequence)==1:
sequence = sequence[0]
return reduce(get_func(func_name), sequence) | def function[do_reduce, parameter[func_name]]:
constant[
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
Functions may be registered with ``native_tags``
or can be ``builtins`` or from the ``operator`` module
Syntax::
{% reduce [function] [sequence] %}
{% reduce [function] [item1 item2 ...] %}
For example::
{% reduce add 1 2 3 4 5 %}
calculates::
((((1+2)+3)+4)+5) = 15
]
if compare[call[name[len], parameter[name[sequence]]] equal[==] constant[1]] begin[:]
variable[sequence] assign[=] call[name[sequence]][constant[0]]
return[call[name[reduce], parameter[call[name[get_func], parameter[name[func_name]]], name[sequence]]]] | keyword[def] identifier[do_reduce] ( identifier[func_name] ,* identifier[sequence] ):
literal[string]
keyword[if] identifier[len] ( identifier[sequence] )== literal[int] :
identifier[sequence] = identifier[sequence] [ literal[int] ]
keyword[return] identifier[reduce] ( identifier[get_func] ( identifier[func_name] ), identifier[sequence] ) | def do_reduce(func_name, *sequence):
"""
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
Functions may be registered with ``native_tags``
or can be ``builtins`` or from the ``operator`` module
Syntax::
{% reduce [function] [sequence] %}
{% reduce [function] [item1 item2 ...] %}
For example::
{% reduce add 1 2 3 4 5 %}
calculates::
((((1+2)+3)+4)+5) = 15
"""
if len(sequence) == 1:
sequence = sequence[0] # depends on [control=['if'], data=[]]
return reduce(get_func(func_name), sequence) |
def _determine_notification_info(notification_arn,
notification_arn_from_pillar,
notification_types,
notification_types_from_pillar):
'''
helper method for present. ensure that notification_configs are set
'''
pillar_arn_list = copy.deepcopy(
__salt__['config.option'](notification_arn_from_pillar, {})
)
pillar_arn = None
if pillar_arn_list:
pillar_arn = pillar_arn_list[0]
pillar_notification_types = copy.deepcopy(
__salt__['config.option'](notification_types_from_pillar, {})
)
arn = notification_arn if notification_arn else pillar_arn
types = notification_types if notification_types else pillar_notification_types
return (arn, types) | def function[_determine_notification_info, parameter[notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar]]:
constant[
helper method for present. ensure that notification_configs are set
]
variable[pillar_arn_list] assign[=] call[name[copy].deepcopy, parameter[call[call[name[__salt__]][constant[config.option]], parameter[name[notification_arn_from_pillar], dictionary[[], []]]]]]
variable[pillar_arn] assign[=] constant[None]
if name[pillar_arn_list] begin[:]
variable[pillar_arn] assign[=] call[name[pillar_arn_list]][constant[0]]
variable[pillar_notification_types] assign[=] call[name[copy].deepcopy, parameter[call[call[name[__salt__]][constant[config.option]], parameter[name[notification_types_from_pillar], dictionary[[], []]]]]]
variable[arn] assign[=] <ast.IfExp object at 0x7da1b21856f0>
variable[types] assign[=] <ast.IfExp object at 0x7da1b2186410>
return[tuple[[<ast.Name object at 0x7da1b2186a40>, <ast.Name object at 0x7da1b2185420>]]] | keyword[def] identifier[_determine_notification_info] ( identifier[notification_arn] ,
identifier[notification_arn_from_pillar] ,
identifier[notification_types] ,
identifier[notification_types_from_pillar] ):
literal[string]
identifier[pillar_arn_list] = identifier[copy] . identifier[deepcopy] (
identifier[__salt__] [ literal[string] ]( identifier[notification_arn_from_pillar] ,{})
)
identifier[pillar_arn] = keyword[None]
keyword[if] identifier[pillar_arn_list] :
identifier[pillar_arn] = identifier[pillar_arn_list] [ literal[int] ]
identifier[pillar_notification_types] = identifier[copy] . identifier[deepcopy] (
identifier[__salt__] [ literal[string] ]( identifier[notification_types_from_pillar] ,{})
)
identifier[arn] = identifier[notification_arn] keyword[if] identifier[notification_arn] keyword[else] identifier[pillar_arn]
identifier[types] = identifier[notification_types] keyword[if] identifier[notification_types] keyword[else] identifier[pillar_notification_types]
keyword[return] ( identifier[arn] , identifier[types] ) | def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar):
"""
helper method for present. ensure that notification_configs are set
"""
pillar_arn_list = copy.deepcopy(__salt__['config.option'](notification_arn_from_pillar, {}))
pillar_arn = None
if pillar_arn_list:
pillar_arn = pillar_arn_list[0] # depends on [control=['if'], data=[]]
pillar_notification_types = copy.deepcopy(__salt__['config.option'](notification_types_from_pillar, {}))
arn = notification_arn if notification_arn else pillar_arn
types = notification_types if notification_types else pillar_notification_types
return (arn, types) |
def _connected(service):
'''
Verify if a connman service is connected
'''
state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State')
return state == 'online' or state == 'ready' | def function[_connected, parameter[service]]:
constant[
Verify if a connman service is connected
]
variable[state] assign[=] call[call[name[pyconnman].ConnService, parameter[call[name[os].path.join, parameter[name[SERVICE_PATH], name[service]]]]].get_property, parameter[constant[State]]]
return[<ast.BoolOp object at 0x7da2045660e0>] | keyword[def] identifier[_connected] ( identifier[service] ):
literal[string]
identifier[state] = identifier[pyconnman] . identifier[ConnService] ( identifier[os] . identifier[path] . identifier[join] ( identifier[SERVICE_PATH] , identifier[service] )). identifier[get_property] ( literal[string] )
keyword[return] identifier[state] == literal[string] keyword[or] identifier[state] == literal[string] | def _connected(service):
"""
Verify if a connman service is connected
"""
state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State')
return state == 'online' or state == 'ready' |
def local_2d_self_attention_spatial_blocks(query_antecedent,
kv_channels,
heads,
memory_h_dim=None,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention", values=[query_antecedent]):
h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
"memory_" + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
"memory_" + w_dim.name)
memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq],
mtf.Shape([
batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
kv_channels
]))
k = mtf.einsum([memory_antecedent, wk],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
memory_h_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo],
mtf.Shape(
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels])) | def function[local_2d_self_attention_spatial_blocks, parameter[query_antecedent, kv_channels, heads, memory_h_dim, memory_w_dim, mask_right, master_dtype, slice_dtype, name]]:
constant[Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
<ast.Tuple object at 0x7da18ede6c80> assign[=] call[name[query_antecedent].shape.dims][<ast.Slice object at 0x7da18ede49d0>]
<ast.Tuple object at 0x7da18ede51e0> assign[=] call[name[query_antecedent].shape.dims][<ast.Slice object at 0x7da18ede56f0>]
<ast.Tuple object at 0x7da18ede5d20> assign[=] call[name[multihead_attention_vars], parameter[name[query_antecedent].mesh, name[heads], name[io_channels], name[kv_channels], name[master_dtype], name[slice_dtype], name[query_antecedent].dtype]]
variable[memory_antecedent] assign[=] call[name[mtf].rename_dimension, parameter[name[query_antecedent], name[h_dim].name, binary_operation[constant[memory_] + name[h_dim].name]]]
variable[memory_antecedent] assign[=] call[name[mtf].rename_dimension, parameter[name[memory_antecedent], name[w_dim].name, binary_operation[constant[memory_] + name[w_dim].name]]]
<ast.Tuple object at 0x7da204567eb0> assign[=] call[name[memory_antecedent].shape.dims][<ast.Slice object at 0x7da204564ca0>]
variable[q] assign[=] call[name[mtf].einsum, parameter[list[[<ast.Name object at 0x7da204566140>, <ast.Name object at 0x7da204567d00>]], call[name[mtf].Shape, parameter[list[[<ast.Name object at 0x7da204565a50>, <ast.Name object at 0x7da204564190>, <ast.Name object at 0x7da204565f00>, <ast.Name object at 0x7da2045648b0>, <ast.Name object at 0x7da2045657e0>, <ast.Name object at 0x7da204565600>, <ast.Name object at 0x7da204566890>]]]]]]
variable[k] assign[=] call[name[mtf].einsum, parameter[list[[<ast.Name object at 0x7da204564e20>, <ast.Name object at 0x7da2045662f0>]], call[name[mtf].Shape, parameter[list[[<ast.Name object at 0x7da204567130>, <ast.Name object at 0x7da204567c10>, <ast.Name object at 0x7da2045671c0>, <ast.Name object at 0x7da204564340>, <ast.Name object at 0x7da204566080>, <ast.Name object at 0x7da204566860>, <ast.Name object at 0x7da204564640>]]]]]]
variable[v] assign[=] call[name[mtf].einsum, parameter[list[[<ast.Name object at 0x7da204566e90>, <ast.Name object at 0x7da2045672e0>]], call[name[mtf].Shape, parameter[list[[<ast.Name object at 0x7da18dc98d00>, <ast.Name object at 0x7da18dc99420>, <ast.Name object at 0x7da18dc98fa0>, <ast.Name object at 0x7da18dc9a3b0>, <ast.Name object at 0x7da18dc9bd90>, <ast.Name object at 0x7da18dc989a0>, <ast.Name object at 0x7da18dc98b20>]]]]]]
<ast.Tuple object at 0x7da18dc9b6d0> assign[=] call[name[local_2d_halo_exchange], parameter[name[k], name[v], name[num_h_blocks], name[memory_h_dim], name[num_w_blocks], name[memory_w_dim], name[mask_right]]]
variable[mask] assign[=] constant[None]
if name[mask_right] begin[:]
variable[mask] assign[=] call[name[attention_bias_local_2d_block], parameter[name[query_antecedent].mesh, name[h_dim], name[w_dim], name[memory_h_dim], name[memory_w_dim]]]
variable[output] assign[=] call[name[dot_product_attention], parameter[name[q], name[k], name[v]]]
return[call[name[mtf].einsum, parameter[list[[<ast.Name object at 0x7da18dc9baf0>, <ast.Name object at 0x7da18dc98640>]], call[name[mtf].Shape, parameter[list[[<ast.Name object at 0x7da18dc9bc70>, <ast.Name object at 0x7da18dc998a0>, <ast.Name object at 0x7da18dc990f0>, <ast.Name object at 0x7da18dc98790>, <ast.Name object at 0x7da18dc9aa70>, <ast.Name object at 0x7da18dc98be0>]]]]]]] | keyword[def] identifier[local_2d_self_attention_spatial_blocks] ( identifier[query_antecedent] ,
identifier[kv_channels] ,
identifier[heads] ,
identifier[memory_h_dim] = keyword[None] ,
identifier[memory_w_dim] = keyword[None] ,
identifier[mask_right] = keyword[False] ,
identifier[master_dtype] = identifier[tf] . identifier[float32] ,
identifier[slice_dtype] = identifier[tf] . identifier[float32] ,
identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] (
identifier[name] , identifier[default_name] = literal[string] , identifier[values] =[ identifier[query_antecedent] ]):
identifier[h_dim] , identifier[w_dim] , identifier[io_channels] = identifier[query_antecedent] . identifier[shape] . identifier[dims] [- literal[int] :]
identifier[batch] , identifier[num_h_blocks] , identifier[num_w_blocks] = identifier[query_antecedent] . identifier[shape] . identifier[dims] [: literal[int] ]
identifier[wq] , identifier[wk] , identifier[wv] , identifier[wo] = identifier[multihead_attention_vars] (
identifier[query_antecedent] . identifier[mesh] , identifier[heads] , identifier[io_channels] , identifier[kv_channels] ,
identifier[master_dtype] , identifier[slice_dtype] , identifier[query_antecedent] . identifier[dtype] )
identifier[memory_antecedent] = identifier[mtf] . identifier[rename_dimension] ( identifier[query_antecedent] , identifier[h_dim] . identifier[name] ,
literal[string] + identifier[h_dim] . identifier[name] )
identifier[memory_antecedent] = identifier[mtf] . identifier[rename_dimension] ( identifier[memory_antecedent] , identifier[w_dim] . identifier[name] ,
literal[string] + identifier[w_dim] . identifier[name] )
identifier[memory_h_dim] , identifier[memory_w_dim] = identifier[memory_antecedent] . identifier[shape] . identifier[dims] [- literal[int] :- literal[int] ]
identifier[q] = identifier[mtf] . identifier[einsum] ([ identifier[query_antecedent] , identifier[wq] ],
identifier[mtf] . identifier[Shape] ([
identifier[batch] , identifier[heads] , identifier[num_h_blocks] , identifier[num_w_blocks] , identifier[h_dim] , identifier[w_dim] ,
identifier[kv_channels]
]))
identifier[k] = identifier[mtf] . identifier[einsum] ([ identifier[memory_antecedent] , identifier[wk] ],
identifier[mtf] . identifier[Shape] ([ identifier[batch] , identifier[heads] , identifier[num_h_blocks] , identifier[num_w_blocks] ,
identifier[memory_h_dim] , identifier[memory_w_dim] , identifier[kv_channels] ]))
identifier[v] = identifier[mtf] . identifier[einsum] ([ identifier[memory_antecedent] , identifier[wv] ],
identifier[mtf] . identifier[Shape] ([ identifier[batch] , identifier[heads] , identifier[num_h_blocks] , identifier[num_w_blocks] ,
identifier[memory_h_dim] , identifier[memory_w_dim] , identifier[kv_channels] ]))
identifier[k] , identifier[v] = identifier[local_2d_halo_exchange] ( identifier[k] , identifier[v] , identifier[num_h_blocks] , identifier[memory_h_dim] ,
identifier[num_w_blocks] , identifier[memory_w_dim] , identifier[mask_right] )
identifier[mask] = keyword[None]
keyword[if] identifier[mask_right] :
identifier[mask] = identifier[attention_bias_local_2d_block] ( identifier[query_antecedent] . identifier[mesh] , identifier[h_dim] , identifier[w_dim] ,
identifier[memory_h_dim] , identifier[memory_w_dim] )
identifier[output] = identifier[dot_product_attention] ( identifier[q] , identifier[k] , identifier[v] , identifier[mask] = identifier[mask] )
keyword[return] identifier[mtf] . identifier[einsum] (
[ identifier[output] , identifier[wo] ],
identifier[mtf] . identifier[Shape] (
[ identifier[batch] , identifier[num_h_blocks] , identifier[num_w_blocks] , identifier[h_dim] , identifier[w_dim] , identifier[io_channels] ])) | def local_2d_self_attention_spatial_blocks(query_antecedent, kv_channels, heads, memory_h_dim=None, memory_w_dim=None, mask_right=False, master_dtype=tf.float32, slice_dtype=tf.float32, name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(name, default_name='multihead_attention', values=[query_antecedent]):
(h_dim, w_dim, io_channels) = query_antecedent.shape.dims[-3:]
(batch, num_h_blocks, num_w_blocks) = query_antecedent.shape.dims[:3]
(wq, wk, wv, wo) = multihead_attention_vars(query_antecedent.mesh, heads, io_channels, kv_channels, master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name, 'memory_' + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name, 'memory_' + w_dim.name)
(memory_h_dim, memory_w_dim) = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq], mtf.Shape([batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]))
k = mtf.einsum([memory_antecedent, wk], mtf.Shape([batch, heads, num_h_blocks, num_w_blocks, memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv], mtf.Shape([batch, heads, num_h_blocks, num_w_blocks, memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
(k, v) = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim, num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim, memory_h_dim, memory_w_dim) # depends on [control=['if'], data=[]]
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum([output, wo], mtf.Shape([batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels])) # depends on [control=['with'], data=[]] |
def update_aliases(self):
""" Get aliases information from room state
Returns:
boolean: True if the aliases changed, False if not
"""
changed = False
try:
response = self.client.api.get_room_state(self.room_id)
except MatrixRequestError:
return False
for chunk in response:
content = chunk.get('content')
if content:
if 'aliases' in content:
aliases = content['aliases']
if aliases != self.aliases:
self.aliases = aliases
changed = True
if chunk.get('type') == 'm.room.canonical_alias':
canonical_alias = content['alias']
if self.canonical_alias != canonical_alias:
self.canonical_alias = canonical_alias
changed = True
if changed and self.aliases and not self.canonical_alias:
self.canonical_alias = self.aliases[0]
return changed | def function[update_aliases, parameter[self]]:
constant[ Get aliases information from room state
Returns:
boolean: True if the aliases changed, False if not
]
variable[changed] assign[=] constant[False]
<ast.Try object at 0x7da1b1709270>
for taget[name[chunk]] in starred[name[response]] begin[:]
variable[content] assign[=] call[name[chunk].get, parameter[constant[content]]]
if name[content] begin[:]
if compare[constant[aliases] in name[content]] begin[:]
variable[aliases] assign[=] call[name[content]][constant[aliases]]
if compare[name[aliases] not_equal[!=] name[self].aliases] begin[:]
name[self].aliases assign[=] name[aliases]
variable[changed] assign[=] constant[True]
if compare[call[name[chunk].get, parameter[constant[type]]] equal[==] constant[m.room.canonical_alias]] begin[:]
variable[canonical_alias] assign[=] call[name[content]][constant[alias]]
if compare[name[self].canonical_alias not_equal[!=] name[canonical_alias]] begin[:]
name[self].canonical_alias assign[=] name[canonical_alias]
variable[changed] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1950940> begin[:]
name[self].canonical_alias assign[=] call[name[self].aliases][constant[0]]
return[name[changed]] | keyword[def] identifier[update_aliases] ( identifier[self] ):
literal[string]
identifier[changed] = keyword[False]
keyword[try] :
identifier[response] = identifier[self] . identifier[client] . identifier[api] . identifier[get_room_state] ( identifier[self] . identifier[room_id] )
keyword[except] identifier[MatrixRequestError] :
keyword[return] keyword[False]
keyword[for] identifier[chunk] keyword[in] identifier[response] :
identifier[content] = identifier[chunk] . identifier[get] ( literal[string] )
keyword[if] identifier[content] :
keyword[if] literal[string] keyword[in] identifier[content] :
identifier[aliases] = identifier[content] [ literal[string] ]
keyword[if] identifier[aliases] != identifier[self] . identifier[aliases] :
identifier[self] . identifier[aliases] = identifier[aliases]
identifier[changed] = keyword[True]
keyword[if] identifier[chunk] . identifier[get] ( literal[string] )== literal[string] :
identifier[canonical_alias] = identifier[content] [ literal[string] ]
keyword[if] identifier[self] . identifier[canonical_alias] != identifier[canonical_alias] :
identifier[self] . identifier[canonical_alias] = identifier[canonical_alias]
identifier[changed] = keyword[True]
keyword[if] identifier[changed] keyword[and] identifier[self] . identifier[aliases] keyword[and] keyword[not] identifier[self] . identifier[canonical_alias] :
identifier[self] . identifier[canonical_alias] = identifier[self] . identifier[aliases] [ literal[int] ]
keyword[return] identifier[changed] | def update_aliases(self):
""" Get aliases information from room state
Returns:
boolean: True if the aliases changed, False if not
"""
changed = False
try:
response = self.client.api.get_room_state(self.room_id) # depends on [control=['try'], data=[]]
except MatrixRequestError:
return False # depends on [control=['except'], data=[]]
for chunk in response:
content = chunk.get('content')
if content:
if 'aliases' in content:
aliases = content['aliases']
if aliases != self.aliases:
self.aliases = aliases
changed = True # depends on [control=['if'], data=['aliases']] # depends on [control=['if'], data=['content']]
if chunk.get('type') == 'm.room.canonical_alias':
canonical_alias = content['alias']
if self.canonical_alias != canonical_alias:
self.canonical_alias = canonical_alias
changed = True # depends on [control=['if'], data=['canonical_alias']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']]
if changed and self.aliases and (not self.canonical_alias):
self.canonical_alias = self.aliases[0] # depends on [control=['if'], data=[]]
return changed |
def accept(self):
"""Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` is message was accepted, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageAccepted()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False | def function[accept, parameter[self]]:
constant[Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` is message was accepted, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
]
if call[name[self]._can_settle_message, parameter[]] begin[:]
name[self]._response assign[=] call[name[errors].MessageAccepted, parameter[]]
call[name[self]._settler, parameter[name[self]._response]]
name[self].state assign[=] name[constants].MessageState.ReceivedSettled
return[constant[True]]
return[constant[False]] | keyword[def] identifier[accept] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_can_settle_message] ():
identifier[self] . identifier[_response] = identifier[errors] . identifier[MessageAccepted] ()
identifier[self] . identifier[_settler] ( identifier[self] . identifier[_response] )
identifier[self] . identifier[state] = identifier[constants] . identifier[MessageState] . identifier[ReceivedSettled]
keyword[return] keyword[True]
keyword[return] keyword[False] | def accept(self):
"""Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` is message was accepted, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageAccepted()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True # depends on [control=['if'], data=[]]
return False |
def get_roles_for_permission(permission, brain_or_object):
"""Get a list of granted roles for the given permission on the object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Roles for the given Permission
:rtype: list
"""
obj = get_object(brain_or_object)
allowed = set(rolesForPermissionOn(permission, obj))
return sorted(allowed) | def function[get_roles_for_permission, parameter[permission, brain_or_object]]:
constant[Get a list of granted roles for the given permission on the object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Roles for the given Permission
:rtype: list
]
variable[obj] assign[=] call[name[get_object], parameter[name[brain_or_object]]]
variable[allowed] assign[=] call[name[set], parameter[call[name[rolesForPermissionOn], parameter[name[permission], name[obj]]]]]
return[call[name[sorted], parameter[name[allowed]]]] | keyword[def] identifier[get_roles_for_permission] ( identifier[permission] , identifier[brain_or_object] ):
literal[string]
identifier[obj] = identifier[get_object] ( identifier[brain_or_object] )
identifier[allowed] = identifier[set] ( identifier[rolesForPermissionOn] ( identifier[permission] , identifier[obj] ))
keyword[return] identifier[sorted] ( identifier[allowed] ) | def get_roles_for_permission(permission, brain_or_object):
"""Get a list of granted roles for the given permission on the object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Roles for the given Permission
:rtype: list
"""
obj = get_object(brain_or_object)
allowed = set(rolesForPermissionOn(permission, obj))
return sorted(allowed) |
def signal_terminate(on_terminate):
"""a common case program termination signal"""
for i in [signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGTERM]:
signal.signal(i, on_terminate) | def function[signal_terminate, parameter[on_terminate]]:
constant[a common case program termination signal]
for taget[name[i]] in starred[list[[<ast.Attribute object at 0x7da20c6e5f30>, <ast.Attribute object at 0x7da20c6e77c0>, <ast.Attribute object at 0x7da20c6e67a0>, <ast.Attribute object at 0x7da18f09fc10>, <ast.Attribute object at 0x7da18f09ebc0>]]] begin[:]
call[name[signal].signal, parameter[name[i], name[on_terminate]]] | keyword[def] identifier[signal_terminate] ( identifier[on_terminate] ):
literal[string]
keyword[for] identifier[i] keyword[in] [ identifier[signal] . identifier[SIGINT] , identifier[signal] . identifier[SIGQUIT] , identifier[signal] . identifier[SIGUSR1] , identifier[signal] . identifier[SIGUSR2] , identifier[signal] . identifier[SIGTERM] ]:
identifier[signal] . identifier[signal] ( identifier[i] , identifier[on_terminate] ) | def signal_terminate(on_terminate):
"""a common case program termination signal"""
for i in [signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGTERM]:
signal.signal(i, on_terminate) # depends on [control=['for'], data=['i']] |
def _parse_version_rule(loader, version, verspec):
"""
Parse a version rule. The first token is the name of the
application implementing that API version. The remaining tokens
are key="quoted value" pairs that specify parameters; these
parameters are ignored by AVersion, but may be used by the
application.
:param loader: An object with a get_app() method, which will be
used to load the actual applications.
:param version: The version name.
:param verspec: The version text, described above.
:returns: A dictionary of three keys: "app" is the application;
"name" is the version identification string; and
"params" is a dictionary of parameters.
"""
result = dict(name=version, params={})
for token in quoted_split(verspec, ' ', quotes='"\''):
if not token:
continue
# Convert the application
if 'app' not in result:
result['app'] = loader.get_app(token)
continue
# What remains is key="quoted value" pairs...
key, _eq, value = token.partition('=')
# Set the parameter key
_set_key('version.%s' % version, result['params'], key, value)
# Make sure we have an application
if 'app' not in result:
raise ImportError("Cannot load application for version %r" % version)
return result | def function[_parse_version_rule, parameter[loader, version, verspec]]:
constant[
Parse a version rule. The first token is the name of the
application implementing that API version. The remaining tokens
are key="quoted value" pairs that specify parameters; these
parameters are ignored by AVersion, but may be used by the
application.
:param loader: An object with a get_app() method, which will be
used to load the actual applications.
:param version: The version name.
:param verspec: The version text, described above.
:returns: A dictionary of three keys: "app" is the application;
"name" is the version identification string; and
"params" is a dictionary of parameters.
]
variable[result] assign[=] call[name[dict], parameter[]]
for taget[name[token]] in starred[call[name[quoted_split], parameter[name[verspec], constant[ ]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e5d20> begin[:]
continue
if compare[constant[app] <ast.NotIn object at 0x7da2590d7190> name[result]] begin[:]
call[name[result]][constant[app]] assign[=] call[name[loader].get_app, parameter[name[token]]]
continue
<ast.Tuple object at 0x7da20c6e61d0> assign[=] call[name[token].partition, parameter[constant[=]]]
call[name[_set_key], parameter[binary_operation[constant[version.%s] <ast.Mod object at 0x7da2590d6920> name[version]], call[name[result]][constant[params]], name[key], name[value]]]
if compare[constant[app] <ast.NotIn object at 0x7da2590d7190> name[result]] begin[:]
<ast.Raise object at 0x7da20c6e7850>
return[name[result]] | keyword[def] identifier[_parse_version_rule] ( identifier[loader] , identifier[version] , identifier[verspec] ):
literal[string]
identifier[result] = identifier[dict] ( identifier[name] = identifier[version] , identifier[params] ={})
keyword[for] identifier[token] keyword[in] identifier[quoted_split] ( identifier[verspec] , literal[string] , identifier[quotes] = literal[string] ):
keyword[if] keyword[not] identifier[token] :
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[result] :
identifier[result] [ literal[string] ]= identifier[loader] . identifier[get_app] ( identifier[token] )
keyword[continue]
identifier[key] , identifier[_eq] , identifier[value] = identifier[token] . identifier[partition] ( literal[string] )
identifier[_set_key] ( literal[string] % identifier[version] , identifier[result] [ literal[string] ], identifier[key] , identifier[value] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[result] :
keyword[raise] identifier[ImportError] ( literal[string] % identifier[version] )
keyword[return] identifier[result] | def _parse_version_rule(loader, version, verspec):
"""
Parse a version rule. The first token is the name of the
application implementing that API version. The remaining tokens
are key="quoted value" pairs that specify parameters; these
parameters are ignored by AVersion, but may be used by the
application.
:param loader: An object with a get_app() method, which will be
used to load the actual applications.
:param version: The version name.
:param verspec: The version text, described above.
:returns: A dictionary of three keys: "app" is the application;
"name" is the version identification string; and
"params" is a dictionary of parameters.
"""
result = dict(name=version, params={})
for token in quoted_split(verspec, ' ', quotes='"\''):
if not token:
continue # depends on [control=['if'], data=[]]
# Convert the application
if 'app' not in result:
result['app'] = loader.get_app(token)
continue # depends on [control=['if'], data=['result']]
# What remains is key="quoted value" pairs...
(key, _eq, value) = token.partition('=')
# Set the parameter key
_set_key('version.%s' % version, result['params'], key, value) # depends on [control=['for'], data=['token']]
# Make sure we have an application
if 'app' not in result:
raise ImportError('Cannot load application for version %r' % version) # depends on [control=['if'], data=[]]
return result |
async def grab(self, *, countries=None, limit=0):
"""Gather proxies from the providers without checking.
:param list countries: (optional) List of ISO country codes
where should be located proxies
:param int limit: (optional) The maximum number of proxies
:ref:`Example of usage <proxybroker-examples-grab>`.
"""
self._countries = countries
self._limit = limit
task = asyncio.ensure_future(self._grab(check=False))
self._all_tasks.append(task) | <ast.AsyncFunctionDef object at 0x7da1b1b7bdc0> | keyword[async] keyword[def] identifier[grab] ( identifier[self] ,*, identifier[countries] = keyword[None] , identifier[limit] = literal[int] ):
literal[string]
identifier[self] . identifier[_countries] = identifier[countries]
identifier[self] . identifier[_limit] = identifier[limit]
identifier[task] = identifier[asyncio] . identifier[ensure_future] ( identifier[self] . identifier[_grab] ( identifier[check] = keyword[False] ))
identifier[self] . identifier[_all_tasks] . identifier[append] ( identifier[task] ) | async def grab(self, *, countries=None, limit=0):
"""Gather proxies from the providers without checking.
:param list countries: (optional) List of ISO country codes
where should be located proxies
:param int limit: (optional) The maximum number of proxies
:ref:`Example of usage <proxybroker-examples-grab>`.
"""
self._countries = countries
self._limit = limit
task = asyncio.ensure_future(self._grab(check=False))
self._all_tasks.append(task) |
def customData( self, key, default = None ):
"""
Return the custom data that is stored on this node for the \
given key, returning the default parameter if none was found.
:param key <str>
:param default <variant>
:return <variant>
"""
return self._customData.get(nativestring(key), default) | def function[customData, parameter[self, key, default]]:
constant[
Return the custom data that is stored on this node for the given key, returning the default parameter if none was found.
:param key <str>
:param default <variant>
:return <variant>
]
return[call[name[self]._customData.get, parameter[call[name[nativestring], parameter[name[key]]], name[default]]]] | keyword[def] identifier[customData] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_customData] . identifier[get] ( identifier[nativestring] ( identifier[key] ), identifier[default] ) | def customData(self, key, default=None):
"""
Return the custom data that is stored on this node for the given key, returning the default parameter if none was found.
:param key <str>
:param default <variant>
:return <variant>
"""
return self._customData.get(nativestring(key), default) |
def session_id(self):
"""
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
"""
if self._session_id is None:
req = self.request("POST /4/sessions")
self._session_id = req.get("session_key") or req.get("session_id")
return CallableString(self._session_id) | def function[session_id, parameter[self]]:
constant[
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
]
if compare[name[self]._session_id is constant[None]] begin[:]
variable[req] assign[=] call[name[self].request, parameter[constant[POST /4/sessions]]]
name[self]._session_id assign[=] <ast.BoolOp object at 0x7da207f00b50>
return[call[name[CallableString], parameter[name[self]._session_id]]] | keyword[def] identifier[session_id] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_session_id] keyword[is] keyword[None] :
identifier[req] = identifier[self] . identifier[request] ( literal[string] )
identifier[self] . identifier[_session_id] = identifier[req] . identifier[get] ( literal[string] ) keyword[or] identifier[req] . identifier[get] ( literal[string] )
keyword[return] identifier[CallableString] ( identifier[self] . identifier[_session_id] ) | def session_id(self):
"""
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
"""
if self._session_id is None:
req = self.request('POST /4/sessions')
self._session_id = req.get('session_key') or req.get('session_id') # depends on [control=['if'], data=[]]
return CallableString(self._session_id) |
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in)) | def function[_generate_file_set, parameter[self, var, start_date, end_date, domain, intvl_in, dtype_in_vert, dtype_in_time, intvl_out]]:
constant[Returns the file_set for the given interval in.]
<ast.Try object at 0x7da1b0467f40> | keyword[def] identifier[_generate_file_set] ( identifier[self] , identifier[var] = keyword[None] , identifier[start_date] = keyword[None] , identifier[end_date] = keyword[None] ,
identifier[domain] = keyword[None] , identifier[intvl_in] = keyword[None] , identifier[dtype_in_vert] = keyword[None] ,
identifier[dtype_in_time] = keyword[None] , identifier[intvl_out] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[file_map] [ identifier[intvl_in] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string]
literal[string] . identifier[format] ( identifier[intvl_in] )) | def _generate_file_set(self, var=None, start_date=None, end_date=None, domain=None, intvl_in=None, dtype_in_vert=None, dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('File set does not exist for the specified intvl_in {0}'.format(intvl_in)) # depends on [control=['except'], data=[]] |
def make_triangle_mesh(points, size_u, size_v, **kwargs):
""" Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple
"""
def fix_numbering(vertex_list, triangle_list):
# Initialize variables
final_vertices = []
# Get all vertices inside the triangle list
tri_vertex_ids = []
for tri in triangle_list:
for td in tri.data:
if td not in tri_vertex_ids:
tri_vertex_ids.append(td)
# Find vertices used in triangles
seen_vertices = []
for vertex in vertex_list:
if vertex.id in tri_vertex_ids and vertex.id not in seen_vertices:
final_vertices.append(vertex)
seen_vertices.append(vertex.id)
# Fix vertex numbering (automatically fixes triangle vertex numbering)
vert_new_id = 0
for vertex in final_vertices:
vertex.id = vert_new_id
vert_new_id += 1
return final_vertices, triangle_list
# Vertex spacing for triangulation
vertex_spacing = kwargs.get('vertex_spacing', 1) # defines the size of the triangles
trim_curves = kwargs.get('trims', [])
# Tessellation algorithm
tsl_func = kwargs.get('tessellate_func')
if tsl_func is None:
tsl_func = surface_tessellate
tsl_args = kwargs.get('tessellate_args', dict())
# Numbering
vrt_idx = 0 # vertex index numbering start
tri_idx = 0 # triangle index numbering start
# Variable initialization
u_jump = (1.0 / float(size_u - 1)) * vertex_spacing # for computing vertex parametric u value
v_jump = (1.0 / float(size_v - 1)) * vertex_spacing # for computing vertex parametric v value
varr_size_u = int(round((float(size_u) / float(vertex_spacing)) + 10e-8)) # vertex array size on the u-direction
varr_size_v = int(round((float(size_v) / float(vertex_spacing)) + 10e-8)) # vertex array size on the v-direction
# Generate vertices directly from input points (preliminary evaluation)
vertices = [Vertex() for _ in range(varr_size_v * varr_size_u)]
u = 0.0
for i in range(0, size_u, vertex_spacing):
v = 0.0
for j in range(0, size_v, vertex_spacing):
idx = j + (i * size_v)
vertices[vrt_idx].id = vrt_idx
vertices[vrt_idx].data = points[idx]
vertices[vrt_idx].uv = [u, v]
vrt_idx += 1
v += v_jump
u += u_jump
#
# Organization of vertices in a quad element on the parametric space:
#
# v4 v3
# o-------o i
# | | |
# | | |
# | | |_ _ _
# o-------o j
# v1 v2
#
# Generate triangles and final vertices
triangles = []
for i in range(varr_size_u - 1):
for j in range(varr_size_v - 1):
# Find vertex indices for a quad element
vertex1 = vertices[j + (i * varr_size_v)]
vertex2 = vertices[j + ((i + 1) * varr_size_v)]
vertex3 = vertices[j + 1 + ((i + 1) * varr_size_v)]
vertex4 = vertices[j + 1 + (i * varr_size_v)]
# Call tessellation function
vlst, tlst = tsl_func(vertex1, vertex2, vertex3, vertex4, vrt_idx, tri_idx, trim_curves, tsl_args)
# Add tessellation results to the return lists
vertices += vlst
triangles += tlst
# Increment index values
vrt_idx += len(vlst)
tri_idx += len(tlst)
# Fix vertex and triangle numbering (ID values)
vertices, triangles = fix_numbering(vertices, triangles)
return vertices, triangles | def function[make_triangle_mesh, parameter[points, size_u, size_v]]:
constant[ Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple
]
def function[fix_numbering, parameter[vertex_list, triangle_list]]:
variable[final_vertices] assign[=] list[[]]
variable[tri_vertex_ids] assign[=] list[[]]
for taget[name[tri]] in starred[name[triangle_list]] begin[:]
for taget[name[td]] in starred[name[tri].data] begin[:]
if compare[name[td] <ast.NotIn object at 0x7da2590d7190> name[tri_vertex_ids]] begin[:]
call[name[tri_vertex_ids].append, parameter[name[td]]]
variable[seen_vertices] assign[=] list[[]]
for taget[name[vertex]] in starred[name[vertex_list]] begin[:]
if <ast.BoolOp object at 0x7da18bcc9780> begin[:]
call[name[final_vertices].append, parameter[name[vertex]]]
call[name[seen_vertices].append, parameter[name[vertex].id]]
variable[vert_new_id] assign[=] constant[0]
for taget[name[vertex]] in starred[name[final_vertices]] begin[:]
name[vertex].id assign[=] name[vert_new_id]
<ast.AugAssign object at 0x7da18bcc9a50>
return[tuple[[<ast.Name object at 0x7da18bcc8e50>, <ast.Name object at 0x7da18bccb3d0>]]]
variable[vertex_spacing] assign[=] call[name[kwargs].get, parameter[constant[vertex_spacing], constant[1]]]
variable[trim_curves] assign[=] call[name[kwargs].get, parameter[constant[trims], list[[]]]]
variable[tsl_func] assign[=] call[name[kwargs].get, parameter[constant[tessellate_func]]]
if compare[name[tsl_func] is constant[None]] begin[:]
variable[tsl_func] assign[=] name[surface_tessellate]
variable[tsl_args] assign[=] call[name[kwargs].get, parameter[constant[tessellate_args], call[name[dict], parameter[]]]]
variable[vrt_idx] assign[=] constant[0]
variable[tri_idx] assign[=] constant[0]
variable[u_jump] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[float], parameter[binary_operation[name[size_u] - constant[1]]]]] * name[vertex_spacing]]
variable[v_jump] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[float], parameter[binary_operation[name[size_v] - constant[1]]]]] * name[vertex_spacing]]
variable[varr_size_u] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[call[name[float], parameter[name[size_u]]] / call[name[float], parameter[name[vertex_spacing]]]] + constant[1e-07]]]]]]
variable[varr_size_v] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[call[name[float], parameter[name[size_v]]] / call[name[float], parameter[name[vertex_spacing]]]] + constant[1e-07]]]]]]
variable[vertices] assign[=] <ast.ListComp object at 0x7da18bccbdc0>
variable[u] assign[=] constant[0.0]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[size_u], name[vertex_spacing]]]] begin[:]
variable[v] assign[=] constant[0.0]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], name[size_v], name[vertex_spacing]]]] begin[:]
variable[idx] assign[=] binary_operation[name[j] + binary_operation[name[i] * name[size_v]]]
call[name[vertices]][name[vrt_idx]].id assign[=] name[vrt_idx]
call[name[vertices]][name[vrt_idx]].data assign[=] call[name[points]][name[idx]]
call[name[vertices]][name[vrt_idx]].uv assign[=] list[[<ast.Name object at 0x7da18bcc8af0>, <ast.Name object at 0x7da18bcc83a0>]]
<ast.AugAssign object at 0x7da18bccb7c0>
<ast.AugAssign object at 0x7da18bccb880>
<ast.AugAssign object at 0x7da18bcc98a0>
variable[triangles] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[varr_size_u] - constant[1]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[binary_operation[name[varr_size_v] - constant[1]]]]] begin[:]
variable[vertex1] assign[=] call[name[vertices]][binary_operation[name[j] + binary_operation[name[i] * name[varr_size_v]]]]
variable[vertex2] assign[=] call[name[vertices]][binary_operation[name[j] + binary_operation[binary_operation[name[i] + constant[1]] * name[varr_size_v]]]]
variable[vertex3] assign[=] call[name[vertices]][binary_operation[binary_operation[name[j] + constant[1]] + binary_operation[binary_operation[name[i] + constant[1]] * name[varr_size_v]]]]
variable[vertex4] assign[=] call[name[vertices]][binary_operation[binary_operation[name[j] + constant[1]] + binary_operation[name[i] * name[varr_size_v]]]]
<ast.Tuple object at 0x7da18bcc97e0> assign[=] call[name[tsl_func], parameter[name[vertex1], name[vertex2], name[vertex3], name[vertex4], name[vrt_idx], name[tri_idx], name[trim_curves], name[tsl_args]]]
<ast.AugAssign object at 0x7da18bcca8c0>
<ast.AugAssign object at 0x7da18bcca950>
<ast.AugAssign object at 0x7da18bccb5e0>
<ast.AugAssign object at 0x7da18bccbb20>
<ast.Tuple object at 0x7da18bcc80a0> assign[=] call[name[fix_numbering], parameter[name[vertices], name[triangles]]]
return[tuple[[<ast.Name object at 0x7da1b2346bf0>, <ast.Name object at 0x7da1b2345ff0>]]] | keyword[def] identifier[make_triangle_mesh] ( identifier[points] , identifier[size_u] , identifier[size_v] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[fix_numbering] ( identifier[vertex_list] , identifier[triangle_list] ):
identifier[final_vertices] =[]
identifier[tri_vertex_ids] =[]
keyword[for] identifier[tri] keyword[in] identifier[triangle_list] :
keyword[for] identifier[td] keyword[in] identifier[tri] . identifier[data] :
keyword[if] identifier[td] keyword[not] keyword[in] identifier[tri_vertex_ids] :
identifier[tri_vertex_ids] . identifier[append] ( identifier[td] )
identifier[seen_vertices] =[]
keyword[for] identifier[vertex] keyword[in] identifier[vertex_list] :
keyword[if] identifier[vertex] . identifier[id] keyword[in] identifier[tri_vertex_ids] keyword[and] identifier[vertex] . identifier[id] keyword[not] keyword[in] identifier[seen_vertices] :
identifier[final_vertices] . identifier[append] ( identifier[vertex] )
identifier[seen_vertices] . identifier[append] ( identifier[vertex] . identifier[id] )
identifier[vert_new_id] = literal[int]
keyword[for] identifier[vertex] keyword[in] identifier[final_vertices] :
identifier[vertex] . identifier[id] = identifier[vert_new_id]
identifier[vert_new_id] += literal[int]
keyword[return] identifier[final_vertices] , identifier[triangle_list]
identifier[vertex_spacing] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[trim_curves] = identifier[kwargs] . identifier[get] ( literal[string] ,[])
identifier[tsl_func] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[tsl_func] keyword[is] keyword[None] :
identifier[tsl_func] = identifier[surface_tessellate]
identifier[tsl_args] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[dict] ())
identifier[vrt_idx] = literal[int]
identifier[tri_idx] = literal[int]
identifier[u_jump] =( literal[int] / identifier[float] ( identifier[size_u] - literal[int] ))* identifier[vertex_spacing]
identifier[v_jump] =( literal[int] / identifier[float] ( identifier[size_v] - literal[int] ))* identifier[vertex_spacing]
identifier[varr_size_u] = identifier[int] ( identifier[round] (( identifier[float] ( identifier[size_u] )/ identifier[float] ( identifier[vertex_spacing] ))+ literal[int] ))
identifier[varr_size_v] = identifier[int] ( identifier[round] (( identifier[float] ( identifier[size_v] )/ identifier[float] ( identifier[vertex_spacing] ))+ literal[int] ))
identifier[vertices] =[ identifier[Vertex] () keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[varr_size_v] * identifier[varr_size_u] )]
identifier[u] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[size_u] , identifier[vertex_spacing] ):
identifier[v] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[size_v] , identifier[vertex_spacing] ):
identifier[idx] = identifier[j] +( identifier[i] * identifier[size_v] )
identifier[vertices] [ identifier[vrt_idx] ]. identifier[id] = identifier[vrt_idx]
identifier[vertices] [ identifier[vrt_idx] ]. identifier[data] = identifier[points] [ identifier[idx] ]
identifier[vertices] [ identifier[vrt_idx] ]. identifier[uv] =[ identifier[u] , identifier[v] ]
identifier[vrt_idx] += literal[int]
identifier[v] += identifier[v_jump]
identifier[u] += identifier[u_jump]
identifier[triangles] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[varr_size_u] - literal[int] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[varr_size_v] - literal[int] ):
identifier[vertex1] = identifier[vertices] [ identifier[j] +( identifier[i] * identifier[varr_size_v] )]
identifier[vertex2] = identifier[vertices] [ identifier[j] +(( identifier[i] + literal[int] )* identifier[varr_size_v] )]
identifier[vertex3] = identifier[vertices] [ identifier[j] + literal[int] +(( identifier[i] + literal[int] )* identifier[varr_size_v] )]
identifier[vertex4] = identifier[vertices] [ identifier[j] + literal[int] +( identifier[i] * identifier[varr_size_v] )]
identifier[vlst] , identifier[tlst] = identifier[tsl_func] ( identifier[vertex1] , identifier[vertex2] , identifier[vertex3] , identifier[vertex4] , identifier[vrt_idx] , identifier[tri_idx] , identifier[trim_curves] , identifier[tsl_args] )
identifier[vertices] += identifier[vlst]
identifier[triangles] += identifier[tlst]
identifier[vrt_idx] += identifier[len] ( identifier[vlst] )
identifier[tri_idx] += identifier[len] ( identifier[tlst] )
identifier[vertices] , identifier[triangles] = identifier[fix_numbering] ( identifier[vertices] , identifier[triangles] )
keyword[return] identifier[vertices] , identifier[triangles] | def make_triangle_mesh(points, size_u, size_v, **kwargs):
""" Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple
"""
def fix_numbering(vertex_list, triangle_list):
# Initialize variables
final_vertices = []
# Get all vertices inside the triangle list
tri_vertex_ids = []
for tri in triangle_list:
for td in tri.data:
if td not in tri_vertex_ids:
tri_vertex_ids.append(td) # depends on [control=['if'], data=['td', 'tri_vertex_ids']] # depends on [control=['for'], data=['td']] # depends on [control=['for'], data=['tri']]
# Find vertices used in triangles
seen_vertices = []
for vertex in vertex_list:
if vertex.id in tri_vertex_ids and vertex.id not in seen_vertices:
final_vertices.append(vertex)
seen_vertices.append(vertex.id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['vertex']]
# Fix vertex numbering (automatically fixes triangle vertex numbering)
vert_new_id = 0
for vertex in final_vertices:
vertex.id = vert_new_id
vert_new_id += 1 # depends on [control=['for'], data=['vertex']]
return (final_vertices, triangle_list)
# Vertex spacing for triangulation
vertex_spacing = kwargs.get('vertex_spacing', 1) # defines the size of the triangles
trim_curves = kwargs.get('trims', [])
# Tessellation algorithm
tsl_func = kwargs.get('tessellate_func')
if tsl_func is None:
tsl_func = surface_tessellate # depends on [control=['if'], data=['tsl_func']]
tsl_args = kwargs.get('tessellate_args', dict())
# Numbering
vrt_idx = 0 # vertex index numbering start
tri_idx = 0 # triangle index numbering start
# Variable initialization
u_jump = 1.0 / float(size_u - 1) * vertex_spacing # for computing vertex parametric u value
v_jump = 1.0 / float(size_v - 1) * vertex_spacing # for computing vertex parametric v value
varr_size_u = int(round(float(size_u) / float(vertex_spacing) + 1e-07)) # vertex array size on the u-direction
varr_size_v = int(round(float(size_v) / float(vertex_spacing) + 1e-07)) # vertex array size on the v-direction
# Generate vertices directly from input points (preliminary evaluation)
vertices = [Vertex() for _ in range(varr_size_v * varr_size_u)]
u = 0.0
for i in range(0, size_u, vertex_spacing):
v = 0.0
for j in range(0, size_v, vertex_spacing):
idx = j + i * size_v
vertices[vrt_idx].id = vrt_idx
vertices[vrt_idx].data = points[idx]
vertices[vrt_idx].uv = [u, v]
vrt_idx += 1
v += v_jump # depends on [control=['for'], data=['j']]
u += u_jump # depends on [control=['for'], data=['i']]
#
# Organization of vertices in a quad element on the parametric space:
#
# v4 v3
# o-------o i
# | | |
# | | |
# | | |_ _ _
# o-------o j
# v1 v2
#
# Generate triangles and final vertices
triangles = []
for i in range(varr_size_u - 1):
for j in range(varr_size_v - 1):
# Find vertex indices for a quad element
vertex1 = vertices[j + i * varr_size_v]
vertex2 = vertices[j + (i + 1) * varr_size_v]
vertex3 = vertices[j + 1 + (i + 1) * varr_size_v]
vertex4 = vertices[j + 1 + i * varr_size_v]
# Call tessellation function
(vlst, tlst) = tsl_func(vertex1, vertex2, vertex3, vertex4, vrt_idx, tri_idx, trim_curves, tsl_args)
# Add tessellation results to the return lists
vertices += vlst
triangles += tlst
# Increment index values
vrt_idx += len(vlst)
tri_idx += len(tlst) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
# Fix vertex and triangle numbering (ID values)
(vertices, triangles) = fix_numbering(vertices, triangles)
return (vertices, triangles) |
def connection_lost(self, exc):
"""Log when connection is closed, if needed call callback."""
if exc:
self.logger.error('disconnected due to error')
else:
self.logger.info('disconnected because of close/abort.')
if self.disconnect_callback:
asyncio.ensure_future(self.disconnect_callback(), loop=self.loop) | def function[connection_lost, parameter[self, exc]]:
constant[Log when connection is closed, if needed call callback.]
if name[exc] begin[:]
call[name[self].logger.error, parameter[constant[disconnected due to error]]]
if name[self].disconnect_callback begin[:]
call[name[asyncio].ensure_future, parameter[call[name[self].disconnect_callback, parameter[]]]] | keyword[def] identifier[connection_lost] ( identifier[self] , identifier[exc] ):
literal[string]
keyword[if] identifier[exc] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[self] . identifier[disconnect_callback] :
identifier[asyncio] . identifier[ensure_future] ( identifier[self] . identifier[disconnect_callback] (), identifier[loop] = identifier[self] . identifier[loop] ) | def connection_lost(self, exc):
"""Log when connection is closed, if needed call callback."""
if exc:
self.logger.error('disconnected due to error') # depends on [control=['if'], data=[]]
else:
self.logger.info('disconnected because of close/abort.')
if self.disconnect_callback:
asyncio.ensure_future(self.disconnect_callback(), loop=self.loop) # depends on [control=['if'], data=[]] |
def logs(self):
""" Return relevant who-did-what pairs from the bug history """
for record in self.history:
if (record["when"] >= self.options.since.date
and record["when"] < self.options.until.date):
for change in record["changes"]:
yield record["who"], change | def function[logs, parameter[self]]:
constant[ Return relevant who-did-what pairs from the bug history ]
for taget[name[record]] in starred[name[self].history] begin[:]
if <ast.BoolOp object at 0x7da1b20bded0> begin[:]
for taget[name[change]] in starred[call[name[record]][constant[changes]]] begin[:]
<ast.Yield object at 0x7da1b20be140> | keyword[def] identifier[logs] ( identifier[self] ):
literal[string]
keyword[for] identifier[record] keyword[in] identifier[self] . identifier[history] :
keyword[if] ( identifier[record] [ literal[string] ]>= identifier[self] . identifier[options] . identifier[since] . identifier[date]
keyword[and] identifier[record] [ literal[string] ]< identifier[self] . identifier[options] . identifier[until] . identifier[date] ):
keyword[for] identifier[change] keyword[in] identifier[record] [ literal[string] ]:
keyword[yield] identifier[record] [ literal[string] ], identifier[change] | def logs(self):
""" Return relevant who-did-what pairs from the bug history """
for record in self.history:
if record['when'] >= self.options.since.date and record['when'] < self.options.until.date:
for change in record['changes']:
yield (record['who'], change) # depends on [control=['for'], data=['change']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['record']] |
def synthesize(self, modules, use_string, x64, native):
"""Transform sources."""
# code_opts = CodeOpts(
# str.lower, None if use_string else hash_func,
# 'reloc_delta', '->',
# True)
# gen_opts = GenOpts('defs', transformed)
print(hash_func)
groups = group_by(modules, ends_with_punctuation)
sources = self.make_source(groups, self.database)
if sources:
return stylify_files(
{'defs.h': sources[0], 'init.c': sources[1]}
)
else:
return '' | def function[synthesize, parameter[self, modules, use_string, x64, native]]:
constant[Transform sources.]
call[name[print], parameter[name[hash_func]]]
variable[groups] assign[=] call[name[group_by], parameter[name[modules], name[ends_with_punctuation]]]
variable[sources] assign[=] call[name[self].make_source, parameter[name[groups], name[self].database]]
if name[sources] begin[:]
return[call[name[stylify_files], parameter[dictionary[[<ast.Constant object at 0x7da20e955c60>, <ast.Constant object at 0x7da20e9570a0>], [<ast.Subscript object at 0x7da1b23444f0>, <ast.Subscript object at 0x7da1b2346c50>]]]]] | keyword[def] identifier[synthesize] ( identifier[self] , identifier[modules] , identifier[use_string] , identifier[x64] , identifier[native] ):
literal[string]
identifier[print] ( identifier[hash_func] )
identifier[groups] = identifier[group_by] ( identifier[modules] , identifier[ends_with_punctuation] )
identifier[sources] = identifier[self] . identifier[make_source] ( identifier[groups] , identifier[self] . identifier[database] )
keyword[if] identifier[sources] :
keyword[return] identifier[stylify_files] (
{ literal[string] : identifier[sources] [ literal[int] ], literal[string] : identifier[sources] [ literal[int] ]}
)
keyword[else] :
keyword[return] literal[string] | def synthesize(self, modules, use_string, x64, native):
"""Transform sources."""
# code_opts = CodeOpts(
# str.lower, None if use_string else hash_func,
# 'reloc_delta', '->',
# True)
# gen_opts = GenOpts('defs', transformed)
print(hash_func)
groups = group_by(modules, ends_with_punctuation)
sources = self.make_source(groups, self.database)
if sources:
return stylify_files({'defs.h': sources[0], 'init.c': sources[1]}) # depends on [control=['if'], data=[]]
else:
return '' |
def _basename(station_code, fmt=None):
"""region, country, weather_station, station_code, data_format, url."""
info = _station_info(station_code)
if not fmt:
fmt = info['data_format']
basename = '%s.%s' % (info['url'].rsplit('/', 1)[1].rsplit('.', 1)[0],
DATA_EXTENTIONS[fmt])
return basename | def function[_basename, parameter[station_code, fmt]]:
constant[region, country, weather_station, station_code, data_format, url.]
variable[info] assign[=] call[name[_station_info], parameter[name[station_code]]]
if <ast.UnaryOp object at 0x7da1b0a720e0> begin[:]
variable[fmt] assign[=] call[name[info]][constant[data_format]]
variable[basename] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0a70190>, <ast.Subscript object at 0x7da1b0a72680>]]]
return[name[basename]] | keyword[def] identifier[_basename] ( identifier[station_code] , identifier[fmt] = keyword[None] ):
literal[string]
identifier[info] = identifier[_station_info] ( identifier[station_code] )
keyword[if] keyword[not] identifier[fmt] :
identifier[fmt] = identifier[info] [ literal[string] ]
identifier[basename] = literal[string] %( identifier[info] [ literal[string] ]. identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]. identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ],
identifier[DATA_EXTENTIONS] [ identifier[fmt] ])
keyword[return] identifier[basename] | def _basename(station_code, fmt=None):
"""region, country, weather_station, station_code, data_format, url."""
info = _station_info(station_code)
if not fmt:
fmt = info['data_format'] # depends on [control=['if'], data=[]]
basename = '%s.%s' % (info['url'].rsplit('/', 1)[1].rsplit('.', 1)[0], DATA_EXTENTIONS[fmt])
return basename |
def rm_keys_from_dict(d, keys):
"""
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
"""
# Loop for each key given
for key in keys:
# Is the key in the dictionary?
if key in d:
try:
d.pop(key, None)
except KeyError:
# Not concerned with an error. Keep going.
pass
return d | def function[rm_keys_from_dict, parameter[d, keys]]:
constant[
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
]
for taget[name[key]] in starred[name[keys]] begin[:]
if compare[name[key] in name[d]] begin[:]
<ast.Try object at 0x7da18f09fa30>
return[name[d]] | keyword[def] identifier[rm_keys_from_dict] ( identifier[d] , identifier[keys] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[key] keyword[in] identifier[d] :
keyword[try] :
identifier[d] . identifier[pop] ( identifier[key] , keyword[None] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] identifier[d] | def rm_keys_from_dict(d, keys):
"""
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
"""
# Loop for each key given
for key in keys:
# Is the key in the dictionary?
if key in d:
try:
d.pop(key, None) # depends on [control=['try'], data=[]]
except KeyError:
# Not concerned with an error. Keep going.
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['key', 'd']] # depends on [control=['for'], data=['key']]
return d |
def make_key(observer):
"""Construct a unique, hashable, immutable key for an observer."""
if hasattr(observer, "__self__"):
inst = observer.__self__
method_name = observer.__name__
key = (id(inst), method_name)
else:
key = id(observer)
return key | def function[make_key, parameter[observer]]:
constant[Construct a unique, hashable, immutable key for an observer.]
if call[name[hasattr], parameter[name[observer], constant[__self__]]] begin[:]
variable[inst] assign[=] name[observer].__self__
variable[method_name] assign[=] name[observer].__name__
variable[key] assign[=] tuple[[<ast.Call object at 0x7da18fe919f0>, <ast.Name object at 0x7da2044c3cd0>]]
return[name[key]] | keyword[def] identifier[make_key] ( identifier[observer] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[observer] , literal[string] ):
identifier[inst] = identifier[observer] . identifier[__self__]
identifier[method_name] = identifier[observer] . identifier[__name__]
identifier[key] =( identifier[id] ( identifier[inst] ), identifier[method_name] )
keyword[else] :
identifier[key] = identifier[id] ( identifier[observer] )
keyword[return] identifier[key] | def make_key(observer):
"""Construct a unique, hashable, immutable key for an observer."""
if hasattr(observer, '__self__'):
inst = observer.__self__
method_name = observer.__name__
key = (id(inst), method_name) # depends on [control=['if'], data=[]]
else:
key = id(observer)
return key |
def parse_python_paths(self, args):
"""
optparse doesn't manage stuff like this:
--pythonpath /my/modules/*
but it can manages
--pythonpath=/my/modules/*/
(but without handling globing)
This method handles correctly the one without "=" and manages globing
"""
paths = []
do_paths = False
for arg in args:
if arg == '--pythonpath':
# space separated
do_paths = True
continue
elif arg.startswith('--pythonpath='):
# '=' separated
do_paths = True
arg = arg[13:]
if do_paths:
if arg.startswith('-'):
# stop thinking it's a python path
do_paths = False
continue
# ok add the python path
if '*' in arg:
paths.extend(glob.glob(arg))
else:
paths.append(arg)
return paths | def function[parse_python_paths, parameter[self, args]]:
constant[
optparse doesn't manage stuff like this:
--pythonpath /my/modules/*
but it can manages
--pythonpath=/my/modules/*/
(but without handling globing)
This method handles correctly the one without "=" and manages globing
]
variable[paths] assign[=] list[[]]
variable[do_paths] assign[=] constant[False]
for taget[name[arg]] in starred[name[args]] begin[:]
if compare[name[arg] equal[==] constant[--pythonpath]] begin[:]
variable[do_paths] assign[=] constant[True]
continue
if name[do_paths] begin[:]
if call[name[arg].startswith, parameter[constant[-]]] begin[:]
variable[do_paths] assign[=] constant[False]
continue
if compare[constant[*] in name[arg]] begin[:]
call[name[paths].extend, parameter[call[name[glob].glob, parameter[name[arg]]]]]
return[name[paths]] | keyword[def] identifier[parse_python_paths] ( identifier[self] , identifier[args] ):
literal[string]
identifier[paths] =[]
identifier[do_paths] = keyword[False]
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[arg] == literal[string] :
identifier[do_paths] = keyword[True]
keyword[continue]
keyword[elif] identifier[arg] . identifier[startswith] ( literal[string] ):
identifier[do_paths] = keyword[True]
identifier[arg] = identifier[arg] [ literal[int] :]
keyword[if] identifier[do_paths] :
keyword[if] identifier[arg] . identifier[startswith] ( literal[string] ):
identifier[do_paths] = keyword[False]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[arg] :
identifier[paths] . identifier[extend] ( identifier[glob] . identifier[glob] ( identifier[arg] ))
keyword[else] :
identifier[paths] . identifier[append] ( identifier[arg] )
keyword[return] identifier[paths] | def parse_python_paths(self, args):
"""
optparse doesn't manage stuff like this:
--pythonpath /my/modules/*
but it can manages
--pythonpath=/my/modules/*/
(but without handling globing)
This method handles correctly the one without "=" and manages globing
"""
paths = []
do_paths = False
for arg in args:
if arg == '--pythonpath':
# space separated
do_paths = True
continue # depends on [control=['if'], data=[]]
elif arg.startswith('--pythonpath='):
# '=' separated
do_paths = True
arg = arg[13:] # depends on [control=['if'], data=[]]
if do_paths:
if arg.startswith('-'):
# stop thinking it's a python path
do_paths = False
continue # depends on [control=['if'], data=[]]
# ok add the python path
if '*' in arg:
paths.extend(glob.glob(arg)) # depends on [control=['if'], data=['arg']]
else:
paths.append(arg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']]
return paths |
def is_connected(self):
"""Indication of the connection state of all children"""
return all([r.is_connected() for r in dict.values(self.children)]) | def function[is_connected, parameter[self]]:
constant[Indication of the connection state of all children]
return[call[name[all], parameter[<ast.ListComp object at 0x7da1b054a830>]]] | keyword[def] identifier[is_connected] ( identifier[self] ):
literal[string]
keyword[return] identifier[all] ([ identifier[r] . identifier[is_connected] () keyword[for] identifier[r] keyword[in] identifier[dict] . identifier[values] ( identifier[self] . identifier[children] )]) | def is_connected(self):
"""Indication of the connection state of all children"""
return all([r.is_connected() for r in dict.values(self.children)]) |
def ensure_coordinator_ready(self):
"""Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue).
"""
with self._client._lock, self._lock:
while self.coordinator_unknown():
# Prior to 0.8.2 there was no group coordinator
# so we will just pick a node at random and treat
# it as the "coordinator"
if self.config['api_version'] < (0, 8, 2):
self.coordinator_id = self._client.least_loaded_node()
if self.coordinator_id is not None:
self._client.maybe_connect(self.coordinator_id)
continue
future = self.lookup_coordinator()
self._client.poll(future=future)
if future.failed():
if future.retriable():
if getattr(future.exception, 'invalid_metadata', False):
log.debug('Requesting metadata for group coordinator request: %s', future.exception)
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update)
else:
time.sleep(self.config['retry_backoff_ms'] / 1000)
else:
raise future.exception | def function[ensure_coordinator_ready, parameter[self]]:
constant[Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue).
]
with name[self]._client._lock begin[:]
while call[name[self].coordinator_unknown, parameter[]] begin[:]
if compare[call[name[self].config][constant[api_version]] less[<] tuple[[<ast.Constant object at 0x7da1b1f38640>, <ast.Constant object at 0x7da1b1f3a3e0>, <ast.Constant object at 0x7da1b1f3ad40>]]] begin[:]
name[self].coordinator_id assign[=] call[name[self]._client.least_loaded_node, parameter[]]
if compare[name[self].coordinator_id is_not constant[None]] begin[:]
call[name[self]._client.maybe_connect, parameter[name[self].coordinator_id]]
continue
variable[future] assign[=] call[name[self].lookup_coordinator, parameter[]]
call[name[self]._client.poll, parameter[]]
if call[name[future].failed, parameter[]] begin[:]
if call[name[future].retriable, parameter[]] begin[:]
if call[name[getattr], parameter[name[future].exception, constant[invalid_metadata], constant[False]]] begin[:]
call[name[log].debug, parameter[constant[Requesting metadata for group coordinator request: %s], name[future].exception]]
variable[metadata_update] assign[=] call[name[self]._client.cluster.request_update, parameter[]]
call[name[self]._client.poll, parameter[]] | keyword[def] identifier[ensure_coordinator_ready] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_client] . identifier[_lock] , identifier[self] . identifier[_lock] :
keyword[while] identifier[self] . identifier[coordinator_unknown] ():
keyword[if] identifier[self] . identifier[config] [ literal[string] ]<( literal[int] , literal[int] , literal[int] ):
identifier[self] . identifier[coordinator_id] = identifier[self] . identifier[_client] . identifier[least_loaded_node] ()
keyword[if] identifier[self] . identifier[coordinator_id] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_client] . identifier[maybe_connect] ( identifier[self] . identifier[coordinator_id] )
keyword[continue]
identifier[future] = identifier[self] . identifier[lookup_coordinator] ()
identifier[self] . identifier[_client] . identifier[poll] ( identifier[future] = identifier[future] )
keyword[if] identifier[future] . identifier[failed] ():
keyword[if] identifier[future] . identifier[retriable] ():
keyword[if] identifier[getattr] ( identifier[future] . identifier[exception] , literal[string] , keyword[False] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[future] . identifier[exception] )
identifier[metadata_update] = identifier[self] . identifier[_client] . identifier[cluster] . identifier[request_update] ()
identifier[self] . identifier[_client] . identifier[poll] ( identifier[future] = identifier[metadata_update] )
keyword[else] :
identifier[time] . identifier[sleep] ( identifier[self] . identifier[config] [ literal[string] ]/ literal[int] )
keyword[else] :
keyword[raise] identifier[future] . identifier[exception] | def ensure_coordinator_ready(self):
"""Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue).
"""
with self._client._lock, self._lock:
while self.coordinator_unknown():
# Prior to 0.8.2 there was no group coordinator
# so we will just pick a node at random and treat
# it as the "coordinator"
if self.config['api_version'] < (0, 8, 2):
self.coordinator_id = self._client.least_loaded_node()
if self.coordinator_id is not None:
self._client.maybe_connect(self.coordinator_id) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
future = self.lookup_coordinator()
self._client.poll(future=future)
if future.failed():
if future.retriable():
if getattr(future.exception, 'invalid_metadata', False):
log.debug('Requesting metadata for group coordinator request: %s', future.exception)
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update) # depends on [control=['if'], data=[]]
else:
time.sleep(self.config['retry_backoff_ms'] / 1000) # depends on [control=['if'], data=[]]
else:
raise future.exception # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['with'], data=[]] |
def list_available_beacons(self):
'''
List the available beacons
'''
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
for _beacon in self.beacons if '.beacon' in _beacon]
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': _beacons},
tag='/salt/minion/minion_beacons_list_available_complete')
return True | def function[list_available_beacons, parameter[self]]:
constant[
List the available beacons
]
variable[_beacons] assign[=] <ast.ListComp object at 0x7da1b21af4f0>
variable[evt] assign[=] call[name[salt].utils.event.get_event, parameter[constant[minion]]]
call[name[evt].fire_event, parameter[dictionary[[<ast.Constant object at 0x7da1b20106a0>, <ast.Constant object at 0x7da1b2011c90>], [<ast.Constant object at 0x7da1b2012710>, <ast.Name object at 0x7da1b2013f70>]]]]
return[constant[True]] | keyword[def] identifier[list_available_beacons] ( identifier[self] ):
literal[string]
identifier[_beacons] =[ literal[string] . identifier[format] ( identifier[_beacon] . identifier[replace] ( literal[string] , literal[string] ))
keyword[for] identifier[_beacon] keyword[in] identifier[self] . identifier[beacons] keyword[if] literal[string] keyword[in] identifier[_beacon] ]
identifier[evt] = identifier[salt] . identifier[utils] . identifier[event] . identifier[get_event] ( literal[string] , identifier[opts] = identifier[self] . identifier[opts] )
identifier[evt] . identifier[fire_event] ({ literal[string] : keyword[True] , literal[string] : identifier[_beacons] },
identifier[tag] = literal[string] )
keyword[return] keyword[True] | def list_available_beacons(self):
"""
List the available beacons
"""
_beacons = ['{0}'.format(_beacon.replace('.beacon', '')) for _beacon in self.beacons if '.beacon' in _beacon]
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': _beacons}, tag='/salt/minion/minion_beacons_list_available_complete')
return True |
def set_geometry(im, width_height):
"""Rescale the image to the new geometry.
"""
width, height = width_height
if not width and not height:
return im
im_width, im_height = im.size
# Geometry match the current size?
if (width is None) or (im_width == width):
if (height is None) or (im_height == height):
return im
ratio = float(im_width) / im_height
if width and height:
new_width = width
new_height = int(ceil(width / ratio))
if new_height < height:
new_height = height
new_width = int(ceil(height * ratio))
elif height:
new_width = int(ceil(height * ratio))
new_height = height
else:
new_width = width
new_height = int(ceil(width / ratio))
im.resize(new_width, new_height)
box = get_box(new_width, new_height, width, height)
im.crop(*box, reset_coords=True)
return im | def function[set_geometry, parameter[im, width_height]]:
constant[Rescale the image to the new geometry.
]
<ast.Tuple object at 0x7da1b085de10> assign[=] name[width_height]
if <ast.BoolOp object at 0x7da1b085f6d0> begin[:]
return[name[im]]
<ast.Tuple object at 0x7da1b085c970> assign[=] name[im].size
if <ast.BoolOp object at 0x7da1b085fa90> begin[:]
if <ast.BoolOp object at 0x7da1b085e8c0> begin[:]
return[name[im]]
variable[ratio] assign[=] binary_operation[call[name[float], parameter[name[im_width]]] / name[im_height]]
if <ast.BoolOp object at 0x7da1b085cbb0> begin[:]
variable[new_width] assign[=] name[width]
variable[new_height] assign[=] call[name[int], parameter[call[name[ceil], parameter[binary_operation[name[width] / name[ratio]]]]]]
if compare[name[new_height] less[<] name[height]] begin[:]
variable[new_height] assign[=] name[height]
variable[new_width] assign[=] call[name[int], parameter[call[name[ceil], parameter[binary_operation[name[height] * name[ratio]]]]]]
call[name[im].resize, parameter[name[new_width], name[new_height]]]
variable[box] assign[=] call[name[get_box], parameter[name[new_width], name[new_height], name[width], name[height]]]
call[name[im].crop, parameter[<ast.Starred object at 0x7da1b085c700>]]
return[name[im]] | keyword[def] identifier[set_geometry] ( identifier[im] , identifier[width_height] ):
literal[string]
identifier[width] , identifier[height] = identifier[width_height]
keyword[if] keyword[not] identifier[width] keyword[and] keyword[not] identifier[height] :
keyword[return] identifier[im]
identifier[im_width] , identifier[im_height] = identifier[im] . identifier[size]
keyword[if] ( identifier[width] keyword[is] keyword[None] ) keyword[or] ( identifier[im_width] == identifier[width] ):
keyword[if] ( identifier[height] keyword[is] keyword[None] ) keyword[or] ( identifier[im_height] == identifier[height] ):
keyword[return] identifier[im]
identifier[ratio] = identifier[float] ( identifier[im_width] )/ identifier[im_height]
keyword[if] identifier[width] keyword[and] identifier[height] :
identifier[new_width] = identifier[width]
identifier[new_height] = identifier[int] ( identifier[ceil] ( identifier[width] / identifier[ratio] ))
keyword[if] identifier[new_height] < identifier[height] :
identifier[new_height] = identifier[height]
identifier[new_width] = identifier[int] ( identifier[ceil] ( identifier[height] * identifier[ratio] ))
keyword[elif] identifier[height] :
identifier[new_width] = identifier[int] ( identifier[ceil] ( identifier[height] * identifier[ratio] ))
identifier[new_height] = identifier[height]
keyword[else] :
identifier[new_width] = identifier[width]
identifier[new_height] = identifier[int] ( identifier[ceil] ( identifier[width] / identifier[ratio] ))
identifier[im] . identifier[resize] ( identifier[new_width] , identifier[new_height] )
identifier[box] = identifier[get_box] ( identifier[new_width] , identifier[new_height] , identifier[width] , identifier[height] )
identifier[im] . identifier[crop] (* identifier[box] , identifier[reset_coords] = keyword[True] )
keyword[return] identifier[im] | def set_geometry(im, width_height):
"""Rescale the image to the new geometry.
"""
(width, height) = width_height
if not width and (not height):
return im # depends on [control=['if'], data=[]]
(im_width, im_height) = im.size
# Geometry match the current size?
if width is None or im_width == width:
if height is None or im_height == height:
return im # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
ratio = float(im_width) / im_height
if width and height:
new_width = width
new_height = int(ceil(width / ratio))
if new_height < height:
new_height = height
new_width = int(ceil(height * ratio)) # depends on [control=['if'], data=['new_height', 'height']] # depends on [control=['if'], data=[]]
elif height:
new_width = int(ceil(height * ratio))
new_height = height # depends on [control=['if'], data=[]]
else:
new_width = width
new_height = int(ceil(width / ratio))
im.resize(new_width, new_height)
box = get_box(new_width, new_height, width, height)
im.crop(*box, reset_coords=True)
return im |
def path(self, filename):
'''
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
:raises OperationNotSupported: when the backenddoesn't support direct file access
'''
if not self.backend.root:
raise OperationNotSupported(
'Direct file access is not supported by ' +
self.backend.__class__.__name__
)
return os.path.join(self.backend.root, filename) | def function[path, parameter[self, filename]]:
constant[
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
:raises OperationNotSupported: when the backenddoesn't support direct file access
]
if <ast.UnaryOp object at 0x7da1b0b52170> begin[:]
<ast.Raise object at 0x7da1b0b50790>
return[call[name[os].path.join, parameter[name[self].backend.root, name[filename]]]] | keyword[def] identifier[path] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[backend] . identifier[root] :
keyword[raise] identifier[OperationNotSupported] (
literal[string] +
identifier[self] . identifier[backend] . identifier[__class__] . identifier[__name__]
)
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[backend] . identifier[root] , identifier[filename] ) | def path(self, filename):
"""
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
:raises OperationNotSupported: when the backenddoesn't support direct file access
"""
if not self.backend.root:
raise OperationNotSupported('Direct file access is not supported by ' + self.backend.__class__.__name__) # depends on [control=['if'], data=[]]
return os.path.join(self.backend.root, filename) |
def dollarfy(x):
"""Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'."""
def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument
"""Replaces Math elements"""
if key == 'Math':
return Str('$' + value[1] + '$')
return None
return walk(x, _dollarfy, '', {}) | def function[dollarfy, parameter[x]]:
constant[Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'.]
def function[_dollarfy, parameter[key, value, fmt, meta]]:
constant[Replaces Math elements]
if compare[name[key] equal[==] constant[Math]] begin[:]
return[call[name[Str], parameter[binary_operation[binary_operation[constant[$] + call[name[value]][constant[1]]] + constant[$]]]]]
return[constant[None]]
return[call[name[walk], parameter[name[x], name[_dollarfy], constant[], dictionary[[], []]]]] | keyword[def] identifier[dollarfy] ( identifier[x] ):
literal[string]
keyword[def] identifier[_dollarfy] ( identifier[key] , identifier[value] , identifier[fmt] , identifier[meta] ):
literal[string]
keyword[if] identifier[key] == literal[string] :
keyword[return] identifier[Str] ( literal[string] + identifier[value] [ literal[int] ]+ literal[string] )
keyword[return] keyword[None]
keyword[return] identifier[walk] ( identifier[x] , identifier[_dollarfy] , literal[string] ,{}) | def dollarfy(x):
"""Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'."""
def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument
'Replaces Math elements'
if key == 'Math':
return Str('$' + value[1] + '$') # depends on [control=['if'], data=[]]
return None
return walk(x, _dollarfy, '', {}) |
def default_type_resolver(
value: Any, info: GraphQLResolveInfo, abstract_type: GraphQLAbstractType
) -> AwaitableOrValue[Optional[Union[GraphQLObjectType, str]]]:
"""Default type resolver function.
If a resolve_type function is not given, then a default resolve behavior is used
which attempts two strategies:
First, See if the provided value has a `__typename` field defined, if so, use that
value as name of the resolved type.
Otherwise, test each possible type for the abstract type by calling `is_type_of`
for the object being coerced, returning the first type that matches.
"""
# First, look for `__typename`.
type_name = (
value.get("__typename")
if isinstance(value, dict)
# need to de-mangle the attribute assumed to be "private" in Python
else getattr(value, f"_{value.__class__.__name__}__typename", None)
)
if isinstance(type_name, str):
return type_name
# Otherwise, test each possible type.
possible_types = info.schema.get_possible_types(abstract_type)
awaitable_is_type_of_results: List[Awaitable] = []
append_awaitable_results = awaitable_is_type_of_results.append
awaitable_types: List[GraphQLObjectType] = []
append_awaitable_types = awaitable_types.append
for type_ in possible_types:
if type_.is_type_of:
is_type_of_result = type_.is_type_of(value, info)
if isawaitable(is_type_of_result):
append_awaitable_results(cast(Awaitable, is_type_of_result))
append_awaitable_types(type_)
elif is_type_of_result:
return type_
if awaitable_is_type_of_results:
# noinspection PyShadowingNames
async def get_type():
is_type_of_results = await gather(*awaitable_is_type_of_results)
for is_type_of_result, type_ in zip(is_type_of_results, awaitable_types):
if is_type_of_result:
return type_
return get_type()
return None | def function[default_type_resolver, parameter[value, info, abstract_type]]:
constant[Default type resolver function.
If a resolve_type function is not given, then a default resolve behavior is used
which attempts two strategies:
First, See if the provided value has a `__typename` field defined, if so, use that
value as name of the resolved type.
Otherwise, test each possible type for the abstract type by calling `is_type_of`
for the object being coerced, returning the first type that matches.
]
variable[type_name] assign[=] <ast.IfExp object at 0x7da1b22aeb30>
if call[name[isinstance], parameter[name[type_name], name[str]]] begin[:]
return[name[type_name]]
variable[possible_types] assign[=] call[name[info].schema.get_possible_types, parameter[name[abstract_type]]]
<ast.AnnAssign object at 0x7da1b22ac460>
variable[append_awaitable_results] assign[=] name[awaitable_is_type_of_results].append
<ast.AnnAssign object at 0x7da1b22acf10>
variable[append_awaitable_types] assign[=] name[awaitable_types].append
for taget[name[type_]] in starred[name[possible_types]] begin[:]
if name[type_].is_type_of begin[:]
variable[is_type_of_result] assign[=] call[name[type_].is_type_of, parameter[name[value], name[info]]]
if call[name[isawaitable], parameter[name[is_type_of_result]]] begin[:]
call[name[append_awaitable_results], parameter[call[name[cast], parameter[name[Awaitable], name[is_type_of_result]]]]]
call[name[append_awaitable_types], parameter[name[type_]]]
if name[awaitable_is_type_of_results] begin[:]
<ast.AsyncFunctionDef object at 0x7da1b1d370a0>
return[call[name[get_type], parameter[]]]
return[constant[None]] | keyword[def] identifier[default_type_resolver] (
identifier[value] : identifier[Any] , identifier[info] : identifier[GraphQLResolveInfo] , identifier[abstract_type] : identifier[GraphQLAbstractType]
)-> identifier[AwaitableOrValue] [ identifier[Optional] [ identifier[Union] [ identifier[GraphQLObjectType] , identifier[str] ]]]:
literal[string]
identifier[type_name] =(
identifier[value] . identifier[get] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] )
keyword[else] identifier[getattr] ( identifier[value] , literal[string] , keyword[None] )
)
keyword[if] identifier[isinstance] ( identifier[type_name] , identifier[str] ):
keyword[return] identifier[type_name]
identifier[possible_types] = identifier[info] . identifier[schema] . identifier[get_possible_types] ( identifier[abstract_type] )
identifier[awaitable_is_type_of_results] : identifier[List] [ identifier[Awaitable] ]=[]
identifier[append_awaitable_results] = identifier[awaitable_is_type_of_results] . identifier[append]
identifier[awaitable_types] : identifier[List] [ identifier[GraphQLObjectType] ]=[]
identifier[append_awaitable_types] = identifier[awaitable_types] . identifier[append]
keyword[for] identifier[type_] keyword[in] identifier[possible_types] :
keyword[if] identifier[type_] . identifier[is_type_of] :
identifier[is_type_of_result] = identifier[type_] . identifier[is_type_of] ( identifier[value] , identifier[info] )
keyword[if] identifier[isawaitable] ( identifier[is_type_of_result] ):
identifier[append_awaitable_results] ( identifier[cast] ( identifier[Awaitable] , identifier[is_type_of_result] ))
identifier[append_awaitable_types] ( identifier[type_] )
keyword[elif] identifier[is_type_of_result] :
keyword[return] identifier[type_]
keyword[if] identifier[awaitable_is_type_of_results] :
keyword[async] keyword[def] identifier[get_type] ():
identifier[is_type_of_results] = keyword[await] identifier[gather] (* identifier[awaitable_is_type_of_results] )
keyword[for] identifier[is_type_of_result] , identifier[type_] keyword[in] identifier[zip] ( identifier[is_type_of_results] , identifier[awaitable_types] ):
keyword[if] identifier[is_type_of_result] :
keyword[return] identifier[type_]
keyword[return] identifier[get_type] ()
keyword[return] keyword[None] | def default_type_resolver(value: Any, info: GraphQLResolveInfo, abstract_type: GraphQLAbstractType) -> AwaitableOrValue[Optional[Union[GraphQLObjectType, str]]]:
"""Default type resolver function.
If a resolve_type function is not given, then a default resolve behavior is used
which attempts two strategies:
First, See if the provided value has a `__typename` field defined, if so, use that
value as name of the resolved type.
Otherwise, test each possible type for the abstract type by calling `is_type_of`
for the object being coerced, returning the first type that matches.
"""
# First, look for `__typename`.
# need to de-mangle the attribute assumed to be "private" in Python
type_name = value.get('__typename') if isinstance(value, dict) else getattr(value, f'_{value.__class__.__name__}__typename', None)
if isinstance(type_name, str):
return type_name # depends on [control=['if'], data=[]]
# Otherwise, test each possible type.
possible_types = info.schema.get_possible_types(abstract_type)
awaitable_is_type_of_results: List[Awaitable] = []
append_awaitable_results = awaitable_is_type_of_results.append
awaitable_types: List[GraphQLObjectType] = []
append_awaitable_types = awaitable_types.append
for type_ in possible_types:
if type_.is_type_of:
is_type_of_result = type_.is_type_of(value, info)
if isawaitable(is_type_of_result):
append_awaitable_results(cast(Awaitable, is_type_of_result))
append_awaitable_types(type_) # depends on [control=['if'], data=[]]
elif is_type_of_result:
return type_ # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['type_']]
if awaitable_is_type_of_results:
# noinspection PyShadowingNames
async def get_type():
is_type_of_results = await gather(*awaitable_is_type_of_results)
for (is_type_of_result, type_) in zip(is_type_of_results, awaitable_types):
if is_type_of_result:
return type_ # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return get_type() # depends on [control=['if'], data=[]]
return None |
def update_relations(context, namespace_separator=':'):
"""Update the context with the relation data."""
# Add any relation data prefixed with the relation type.
relation_type = charmhelpers.core.hookenv.relation_type()
relations = []
context['current_relation'] = {}
if relation_type is not None:
relation_data = charmhelpers.core.hookenv.relation_get()
context['current_relation'] = relation_data
# Deprecated: the following use of relation data as keys
# directly in the context will be removed.
relation_data = dict(
("{relation_type}{namespace_separator}{key}".format(
relation_type=relation_type,
key=key,
namespace_separator=namespace_separator), val)
for key, val in relation_data.items())
relation_data = dict_keys_without_hyphens(relation_data)
context.update(relation_data)
relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
relations = [dict_keys_without_hyphens(rel) for rel in relations]
context['relations_full'] = charmhelpers.core.hookenv.relations()
# the hookenv.relations() data structure is effectively unusable in
# templates and other contexts when trying to access relation data other
# than the current relation. So provide a more useful structure that works
# with any hook.
local_unit = charmhelpers.core.hookenv.local_unit()
relations = {}
for rname, rids in context['relations_full'].items():
relations[rname] = []
for rid, rdata in rids.items():
data = rdata.copy()
if local_unit in rdata:
data.pop(local_unit)
for unit_name, rel_data in data.items():
new_data = {'__relid__': rid, '__unit__': unit_name}
new_data.update(rel_data)
relations[rname].append(new_data)
context['relations'] = relations | def function[update_relations, parameter[context, namespace_separator]]:
constant[Update the context with the relation data.]
variable[relation_type] assign[=] call[name[charmhelpers].core.hookenv.relation_type, parameter[]]
variable[relations] assign[=] list[[]]
call[name[context]][constant[current_relation]] assign[=] dictionary[[], []]
if compare[name[relation_type] is_not constant[None]] begin[:]
variable[relation_data] assign[=] call[name[charmhelpers].core.hookenv.relation_get, parameter[]]
call[name[context]][constant[current_relation]] assign[=] name[relation_data]
variable[relation_data] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b121be80>]]
variable[relation_data] assign[=] call[name[dict_keys_without_hyphens], parameter[name[relation_data]]]
call[name[context].update, parameter[name[relation_data]]]
variable[relations] assign[=] call[name[charmhelpers].core.hookenv.relations_of_type, parameter[name[relation_type]]]
variable[relations] assign[=] <ast.ListComp object at 0x7da1b1218ee0>
call[name[context]][constant[relations_full]] assign[=] call[name[charmhelpers].core.hookenv.relations, parameter[]]
variable[local_unit] assign[=] call[name[charmhelpers].core.hookenv.local_unit, parameter[]]
variable[relations] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18f00cd00>, <ast.Name object at 0x7da18f00dde0>]]] in starred[call[call[name[context]][constant[relations_full]].items, parameter[]]] begin[:]
call[name[relations]][name[rname]] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f00f040>, <ast.Name object at 0x7da18f00c130>]]] in starred[call[name[rids].items, parameter[]]] begin[:]
variable[data] assign[=] call[name[rdata].copy, parameter[]]
if compare[name[local_unit] in name[rdata]] begin[:]
call[name[data].pop, parameter[name[local_unit]]]
for taget[tuple[[<ast.Name object at 0x7da18f00d6f0>, <ast.Name object at 0x7da18f00fc10>]]] in starred[call[name[data].items, parameter[]]] begin[:]
variable[new_data] assign[=] dictionary[[<ast.Constant object at 0x7da18f00f2b0>, <ast.Constant object at 0x7da18bc73e80>], [<ast.Name object at 0x7da18bc72a10>, <ast.Name object at 0x7da18bc73f70>]]
call[name[new_data].update, parameter[name[rel_data]]]
call[call[name[relations]][name[rname]].append, parameter[name[new_data]]]
call[name[context]][constant[relations]] assign[=] name[relations] | keyword[def] identifier[update_relations] ( identifier[context] , identifier[namespace_separator] = literal[string] ):
literal[string]
identifier[relation_type] = identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[relation_type] ()
identifier[relations] =[]
identifier[context] [ literal[string] ]={}
keyword[if] identifier[relation_type] keyword[is] keyword[not] keyword[None] :
identifier[relation_data] = identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[relation_get] ()
identifier[context] [ literal[string] ]= identifier[relation_data]
identifier[relation_data] = identifier[dict] (
( literal[string] . identifier[format] (
identifier[relation_type] = identifier[relation_type] ,
identifier[key] = identifier[key] ,
identifier[namespace_separator] = identifier[namespace_separator] ), identifier[val] )
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[relation_data] . identifier[items] ())
identifier[relation_data] = identifier[dict_keys_without_hyphens] ( identifier[relation_data] )
identifier[context] . identifier[update] ( identifier[relation_data] )
identifier[relations] = identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[relations_of_type] ( identifier[relation_type] )
identifier[relations] =[ identifier[dict_keys_without_hyphens] ( identifier[rel] ) keyword[for] identifier[rel] keyword[in] identifier[relations] ]
identifier[context] [ literal[string] ]= identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[relations] ()
identifier[local_unit] = identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[local_unit] ()
identifier[relations] ={}
keyword[for] identifier[rname] , identifier[rids] keyword[in] identifier[context] [ literal[string] ]. identifier[items] ():
identifier[relations] [ identifier[rname] ]=[]
keyword[for] identifier[rid] , identifier[rdata] keyword[in] identifier[rids] . identifier[items] ():
identifier[data] = identifier[rdata] . identifier[copy] ()
keyword[if] identifier[local_unit] keyword[in] identifier[rdata] :
identifier[data] . identifier[pop] ( identifier[local_unit] )
keyword[for] identifier[unit_name] , identifier[rel_data] keyword[in] identifier[data] . identifier[items] ():
identifier[new_data] ={ literal[string] : identifier[rid] , literal[string] : identifier[unit_name] }
identifier[new_data] . identifier[update] ( identifier[rel_data] )
identifier[relations] [ identifier[rname] ]. identifier[append] ( identifier[new_data] )
identifier[context] [ literal[string] ]= identifier[relations] | def update_relations(context, namespace_separator=':'):
"""Update the context with the relation data."""
# Add any relation data prefixed with the relation type.
relation_type = charmhelpers.core.hookenv.relation_type()
relations = []
context['current_relation'] = {}
if relation_type is not None:
relation_data = charmhelpers.core.hookenv.relation_get()
context['current_relation'] = relation_data
# Deprecated: the following use of relation data as keys
# directly in the context will be removed.
relation_data = dict((('{relation_type}{namespace_separator}{key}'.format(relation_type=relation_type, key=key, namespace_separator=namespace_separator), val) for (key, val) in relation_data.items()))
relation_data = dict_keys_without_hyphens(relation_data)
context.update(relation_data)
relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
relations = [dict_keys_without_hyphens(rel) for rel in relations] # depends on [control=['if'], data=['relation_type']]
context['relations_full'] = charmhelpers.core.hookenv.relations()
# the hookenv.relations() data structure is effectively unusable in
# templates and other contexts when trying to access relation data other
# than the current relation. So provide a more useful structure that works
# with any hook.
local_unit = charmhelpers.core.hookenv.local_unit()
relations = {}
for (rname, rids) in context['relations_full'].items():
relations[rname] = []
for (rid, rdata) in rids.items():
data = rdata.copy()
if local_unit in rdata:
data.pop(local_unit) # depends on [control=['if'], data=['local_unit']]
for (unit_name, rel_data) in data.items():
new_data = {'__relid__': rid, '__unit__': unit_name}
new_data.update(rel_data)
relations[rname].append(new_data) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
context['relations'] = relations |
def _advance_params(self):
"""
Explicitly generate new values for these parameters only
when appropriate.
"""
for p in ['x','y','direction']:
self.force_new_dynamic_value(p)
self.last_time = self.time_fn() | def function[_advance_params, parameter[self]]:
constant[
Explicitly generate new values for these parameters only
when appropriate.
]
for taget[name[p]] in starred[list[[<ast.Constant object at 0x7da1b2596bf0>, <ast.Constant object at 0x7da1b2596b90>, <ast.Constant object at 0x7da1b2596c80>]]] begin[:]
call[name[self].force_new_dynamic_value, parameter[name[p]]]
name[self].last_time assign[=] call[name[self].time_fn, parameter[]] | keyword[def] identifier[_advance_params] ( identifier[self] ):
literal[string]
keyword[for] identifier[p] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[self] . identifier[force_new_dynamic_value] ( identifier[p] )
identifier[self] . identifier[last_time] = identifier[self] . identifier[time_fn] () | def _advance_params(self):
"""
Explicitly generate new values for these parameters only
when appropriate.
"""
for p in ['x', 'y', 'direction']:
self.force_new_dynamic_value(p) # depends on [control=['for'], data=['p']]
self.last_time = self.time_fn() |
def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() | def function[after_epoch, parameter[self]]:
constant[
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
]
if <ast.UnaryOp object at 0x7da20c6e63e0> begin[:]
name[self]._total_batch_count assign[=] call[name[self]._current_batch_count.copy, parameter[]]
name[self]._total_batch_count_saved assign[=] constant[True]
call[name[self]._current_batch_count.clear, parameter[]]
name[self]._current_stream_start assign[=] constant[None]
name[self]._current_stream_name assign[=] constant[None]
call[name[erase_line], parameter[]] | keyword[def] identifier[after_epoch] ( identifier[self] ,** identifier[_] )-> keyword[None] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_total_batch_count_saved] :
identifier[self] . identifier[_total_batch_count] = identifier[self] . identifier[_current_batch_count] . identifier[copy] ()
identifier[self] . identifier[_total_batch_count_saved] = keyword[True]
identifier[self] . identifier[_current_batch_count] . identifier[clear] ()
identifier[self] . identifier[_current_stream_start] = keyword[None]
identifier[self] . identifier[_current_stream_name] = keyword[None]
identifier[erase_line] () | def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True # depends on [control=['if'], data=[]]
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() |
def adj_nodes_az(az_nodes):
"""Adjust details specific to Azure."""
for node in az_nodes:
node.cloud = "azure"
node.cloud_disp = "Azure"
node.private_ips = ip_to_str(node.private_ips)
node.public_ips = ip_to_str(node.public_ips)
node.zone = node.extra['location']
node.size = node.extra['properties']['hardwareProfile']['vmSize']
group_raw = node.id
unnsc, group_end = group_raw.split("resourceGroups/", 1)
group, unnsc = group_end.split("/", 1)
node.group = group
return az_nodes | def function[adj_nodes_az, parameter[az_nodes]]:
constant[Adjust details specific to Azure.]
for taget[name[node]] in starred[name[az_nodes]] begin[:]
name[node].cloud assign[=] constant[azure]
name[node].cloud_disp assign[=] constant[Azure]
name[node].private_ips assign[=] call[name[ip_to_str], parameter[name[node].private_ips]]
name[node].public_ips assign[=] call[name[ip_to_str], parameter[name[node].public_ips]]
name[node].zone assign[=] call[name[node].extra][constant[location]]
name[node].size assign[=] call[call[call[name[node].extra][constant[properties]]][constant[hardwareProfile]]][constant[vmSize]]
variable[group_raw] assign[=] name[node].id
<ast.Tuple object at 0x7da1b2651e70> assign[=] call[name[group_raw].split, parameter[constant[resourceGroups/], constant[1]]]
<ast.Tuple object at 0x7da1b253cf70> assign[=] call[name[group_end].split, parameter[constant[/], constant[1]]]
name[node].group assign[=] name[group]
return[name[az_nodes]] | keyword[def] identifier[adj_nodes_az] ( identifier[az_nodes] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[az_nodes] :
identifier[node] . identifier[cloud] = literal[string]
identifier[node] . identifier[cloud_disp] = literal[string]
identifier[node] . identifier[private_ips] = identifier[ip_to_str] ( identifier[node] . identifier[private_ips] )
identifier[node] . identifier[public_ips] = identifier[ip_to_str] ( identifier[node] . identifier[public_ips] )
identifier[node] . identifier[zone] = identifier[node] . identifier[extra] [ literal[string] ]
identifier[node] . identifier[size] = identifier[node] . identifier[extra] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[group_raw] = identifier[node] . identifier[id]
identifier[unnsc] , identifier[group_end] = identifier[group_raw] . identifier[split] ( literal[string] , literal[int] )
identifier[group] , identifier[unnsc] = identifier[group_end] . identifier[split] ( literal[string] , literal[int] )
identifier[node] . identifier[group] = identifier[group]
keyword[return] identifier[az_nodes] | def adj_nodes_az(az_nodes):
"""Adjust details specific to Azure."""
for node in az_nodes:
node.cloud = 'azure'
node.cloud_disp = 'Azure'
node.private_ips = ip_to_str(node.private_ips)
node.public_ips = ip_to_str(node.public_ips)
node.zone = node.extra['location']
node.size = node.extra['properties']['hardwareProfile']['vmSize']
group_raw = node.id
(unnsc, group_end) = group_raw.split('resourceGroups/', 1)
(group, unnsc) = group_end.split('/', 1)
node.group = group # depends on [control=['for'], data=['node']]
return az_nodes |
def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job | def function[load, parameter[self, source, mode, source_format, csv_options, ignore_unknown_values, max_bad_records]]:
constant[ Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
]
variable[job] assign[=] call[name[self].load_async, parameter[name[source]]]
if compare[name[job] is_not constant[None]] begin[:]
call[name[job].wait, parameter[]]
return[name[job]] | keyword[def] identifier[load] ( identifier[self] , identifier[source] , identifier[mode] = literal[string] , identifier[source_format] = literal[string] , identifier[csv_options] = keyword[None] ,
identifier[ignore_unknown_values] = keyword[False] , identifier[max_bad_records] = literal[int] ):
literal[string]
identifier[job] = identifier[self] . identifier[load_async] ( identifier[source] ,
identifier[mode] = identifier[mode] ,
identifier[source_format] = identifier[source_format] ,
identifier[csv_options] = identifier[csv_options] ,
identifier[ignore_unknown_values] = identifier[ignore_unknown_values] ,
identifier[max_bad_records] = identifier[max_bad_records] )
keyword[if] identifier[job] keyword[is] keyword[not] keyword[None] :
identifier[job] . identifier[wait] ()
keyword[return] identifier[job] | def load(self, source, mode='create', source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source, mode=mode, source_format=source_format, csv_options=csv_options, ignore_unknown_values=ignore_unknown_values, max_bad_records=max_bad_records)
if job is not None:
job.wait() # depends on [control=['if'], data=['job']]
return job |
def coalesce(self):
"""
Coalesces any adjacent ScienceSegments. Returns the number of
ScienceSegments in the coalesced list.
"""
# check for an empty list
if len(self) == 0:
return 0
# sort the list of science segments
self.__sci_segs.sort()
# coalesce the list, checking each segment for validity as we go
outlist = []
ostop = -1
for seg in self:
start = seg.start()
stop = seg.end()
id = seg.id()
if start > ostop:
# disconnected, so flush out the existing segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
outlist.append(x)
ostart = start
ostop = stop
elif stop > ostop:
# extend the current segment
ostop = stop
# flush out the final segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
outlist.append(x)
self.__sci_segs = outlist
return len(self) | def function[coalesce, parameter[self]]:
constant[
Coalesces any adjacent ScienceSegments. Returns the number of
ScienceSegments in the coalesced list.
]
if compare[call[name[len], parameter[name[self]]] equal[==] constant[0]] begin[:]
return[constant[0]]
call[name[self].__sci_segs.sort, parameter[]]
variable[outlist] assign[=] list[[]]
variable[ostop] assign[=] <ast.UnaryOp object at 0x7da18f810430>
for taget[name[seg]] in starred[name[self]] begin[:]
variable[start] assign[=] call[name[seg].start, parameter[]]
variable[stop] assign[=] call[name[seg].end, parameter[]]
variable[id] assign[=] call[name[seg].id, parameter[]]
if compare[name[start] greater[>] name[ostop]] begin[:]
if compare[name[ostop] greater_or_equal[>=] constant[0]] begin[:]
variable[x] assign[=] call[name[ScienceSegment], parameter[call[name[tuple], parameter[list[[<ast.Name object at 0x7da18f810f10>, <ast.Name object at 0x7da18f813b20>, <ast.Name object at 0x7da18f813a30>, <ast.BinOp object at 0x7da18f813520>]]]]]]
call[name[outlist].append, parameter[name[x]]]
variable[ostart] assign[=] name[start]
variable[ostop] assign[=] name[stop]
if compare[name[ostop] greater_or_equal[>=] constant[0]] begin[:]
variable[x] assign[=] call[name[ScienceSegment], parameter[call[name[tuple], parameter[list[[<ast.Name object at 0x7da18f811120>, <ast.Name object at 0x7da18f810310>, <ast.Name object at 0x7da18f8138b0>, <ast.BinOp object at 0x7da18f812e90>]]]]]]
call[name[outlist].append, parameter[name[x]]]
name[self].__sci_segs assign[=] name[outlist]
return[call[name[len], parameter[name[self]]]] | keyword[def] identifier[coalesce] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] )== literal[int] :
keyword[return] literal[int]
identifier[self] . identifier[__sci_segs] . identifier[sort] ()
identifier[outlist] =[]
identifier[ostop] =- literal[int]
keyword[for] identifier[seg] keyword[in] identifier[self] :
identifier[start] = identifier[seg] . identifier[start] ()
identifier[stop] = identifier[seg] . identifier[end] ()
identifier[id] = identifier[seg] . identifier[id] ()
keyword[if] identifier[start] > identifier[ostop] :
keyword[if] identifier[ostop] >= literal[int] :
identifier[x] = identifier[ScienceSegment] ( identifier[tuple] ([ identifier[id] , identifier[ostart] , identifier[ostop] , identifier[ostop] - identifier[ostart] ]))
identifier[outlist] . identifier[append] ( identifier[x] )
identifier[ostart] = identifier[start]
identifier[ostop] = identifier[stop]
keyword[elif] identifier[stop] > identifier[ostop] :
identifier[ostop] = identifier[stop]
keyword[if] identifier[ostop] >= literal[int] :
identifier[x] = identifier[ScienceSegment] ( identifier[tuple] ([ identifier[id] , identifier[ostart] , identifier[ostop] , identifier[ostop] - identifier[ostart] ]))
identifier[outlist] . identifier[append] ( identifier[x] )
identifier[self] . identifier[__sci_segs] = identifier[outlist]
keyword[return] identifier[len] ( identifier[self] ) | def coalesce(self):
"""
Coalesces any adjacent ScienceSegments. Returns the number of
ScienceSegments in the coalesced list.
"""
# check for an empty list
if len(self) == 0:
return 0 # depends on [control=['if'], data=[]]
# sort the list of science segments
self.__sci_segs.sort()
# coalesce the list, checking each segment for validity as we go
outlist = []
ostop = -1
for seg in self:
start = seg.start()
stop = seg.end()
id = seg.id()
if start > ostop:
# disconnected, so flush out the existing segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
outlist.append(x) # depends on [control=['if'], data=['ostop']]
ostart = start
ostop = stop # depends on [control=['if'], data=['start', 'ostop']]
elif stop > ostop:
# extend the current segment
ostop = stop # depends on [control=['if'], data=['stop', 'ostop']] # depends on [control=['for'], data=['seg']]
# flush out the final segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
outlist.append(x) # depends on [control=['if'], data=['ostop']]
self.__sci_segs = outlist
return len(self) |
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs)) | def function[delete, parameter[self]]:
constant[
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
]
return[call[name[self].session.delete, parameter[<ast.Starred object at 0x7da1b06fd6f0>]]] | keyword[def] identifier[delete] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[session] . identifier[delete] (* identifier[args] ,** identifier[self] . identifier[get_kwargs] (** identifier[kwargs] )) | def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs)) |
def verbose_option(fn):
""" Decorator to add a --verbose option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'verbose' if the command needs it. To get the current value you can do:
>>> from peltak.core import context
>>>
>>> pretend = context.get('verbose', False)
This value will be accessible from anywhere in the code.
"""
def set_verbose(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
context.set('verbose', value or 0)
return click.option(
'-v', '--verbose',
is_flag=True,
expose_value=False,
callback=set_verbose,
help="Be verbose. Can specify multiple times for more verbosity.",
)(fn) | def function[verbose_option, parameter[fn]]:
constant[ Decorator to add a --verbose option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'verbose' if the command needs it. To get the current value you can do:
>>> from peltak.core import context
>>>
>>> pretend = context.get('verbose', False)
This value will be accessible from anywhere in the code.
]
def function[set_verbose, parameter[ctx, param, value]]:
from relative_module[peltak.core] import module[context]
call[name[context].set, parameter[constant[verbose], <ast.BoolOp object at 0x7da1b10c70a0>]]
return[call[call[name[click].option, parameter[constant[-v], constant[--verbose]]], parameter[name[fn]]]] | keyword[def] identifier[verbose_option] ( identifier[fn] ):
literal[string]
keyword[def] identifier[set_verbose] ( identifier[ctx] , identifier[param] , identifier[value] ):
keyword[from] identifier[peltak] . identifier[core] keyword[import] identifier[context]
identifier[context] . identifier[set] ( literal[string] , identifier[value] keyword[or] literal[int] )
keyword[return] identifier[click] . identifier[option] (
literal[string] , literal[string] ,
identifier[is_flag] = keyword[True] ,
identifier[expose_value] = keyword[False] ,
identifier[callback] = identifier[set_verbose] ,
identifier[help] = literal[string] ,
)( identifier[fn] ) | def verbose_option(fn):
""" Decorator to add a --verbose option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'verbose' if the command needs it. To get the current value you can do:
>>> from peltak.core import context
>>>
>>> pretend = context.get('verbose', False)
This value will be accessible from anywhere in the code.
"""
def set_verbose(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
context.set('verbose', value or 0)
return click.option('-v', '--verbose', is_flag=True, expose_value=False, callback=set_verbose, help='Be verbose. Can specify multiple times for more verbosity.')(fn) |
def get(self, key, default=None):
"""Get a key."""
key = "{0}{1}".format(self.prefix, key)
data = self.redis.get(key)
# Redis returns None not an exception
if data is None:
data = default
else:
data = json.loads(data)
return data | def function[get, parameter[self, key, default]]:
constant[Get a key.]
variable[key] assign[=] call[constant[{0}{1}].format, parameter[name[self].prefix, name[key]]]
variable[data] assign[=] call[name[self].redis.get, parameter[name[key]]]
if compare[name[data] is constant[None]] begin[:]
variable[data] assign[=] name[default]
return[name[data]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
identifier[key] = literal[string] . identifier[format] ( identifier[self] . identifier[prefix] , identifier[key] )
identifier[data] = identifier[self] . identifier[redis] . identifier[get] ( identifier[key] )
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[data] = identifier[default]
keyword[else] :
identifier[data] = identifier[json] . identifier[loads] ( identifier[data] )
keyword[return] identifier[data] | def get(self, key, default=None):
"""Get a key."""
key = '{0}{1}'.format(self.prefix, key)
data = self.redis.get(key)
# Redis returns None not an exception
if data is None:
data = default # depends on [control=['if'], data=['data']]
else:
data = json.loads(data)
return data |
def parse_flags(headers):
"""Copied from https://github.com/girishramnani/gmail/blob/master/gmail/message.py"""
if len(headers) == 0:
return []
if sys.version_info[0] == 3:
headers = bytes(headers, "ascii")
return list(imaplib.ParseFlags(headers)) | def function[parse_flags, parameter[headers]]:
constant[Copied from https://github.com/girishramnani/gmail/blob/master/gmail/message.py]
if compare[call[name[len], parameter[name[headers]]] equal[==] constant[0]] begin[:]
return[list[[]]]
if compare[call[name[sys].version_info][constant[0]] equal[==] constant[3]] begin[:]
variable[headers] assign[=] call[name[bytes], parameter[name[headers], constant[ascii]]]
return[call[name[list], parameter[call[name[imaplib].ParseFlags, parameter[name[headers]]]]]] | keyword[def] identifier[parse_flags] ( identifier[headers] ):
literal[string]
keyword[if] identifier[len] ( identifier[headers] )== literal[int] :
keyword[return] []
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] :
identifier[headers] = identifier[bytes] ( identifier[headers] , literal[string] )
keyword[return] identifier[list] ( identifier[imaplib] . identifier[ParseFlags] ( identifier[headers] )) | def parse_flags(headers):
"""Copied from https://github.com/girishramnani/gmail/blob/master/gmail/message.py"""
if len(headers) == 0:
return [] # depends on [control=['if'], data=[]]
if sys.version_info[0] == 3:
headers = bytes(headers, 'ascii') # depends on [control=['if'], data=[]]
return list(imaplib.ParseFlags(headers)) |
def allow_filtering(self):
"""
Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key
"""
clone = copy.deepcopy(self)
clone._allow_filtering = True
return clone | def function[allow_filtering, parameter[self]]:
constant[
Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key
]
variable[clone] assign[=] call[name[copy].deepcopy, parameter[name[self]]]
name[clone]._allow_filtering assign[=] constant[True]
return[name[clone]] | keyword[def] identifier[allow_filtering] ( identifier[self] ):
literal[string]
identifier[clone] = identifier[copy] . identifier[deepcopy] ( identifier[self] )
identifier[clone] . identifier[_allow_filtering] = keyword[True]
keyword[return] identifier[clone] | def allow_filtering(self):
"""
Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key
"""
clone = copy.deepcopy(self)
clone._allow_filtering = True
return clone |
def updatefile(self, project_id, file_path, branch_name, content, commit_message):
"""
Updates an existing file in the repository
:param project_id: project id
:param file_path: Full path to new file. Ex. lib/class.rb
:param branch_name: The name of branch
:param content: File content
:param commit_message: Commit message
:return: true if success, false if not
"""
data = {
'file_path': file_path,
'branch_name': branch_name,
'content': content,
'commit_message': commit_message
}
request = requests.put(
'{0}/{1}/repository/files'.format(self.projects_url, project_id),
headers=self.headers, data=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
return request.status_code == 200 | def function[updatefile, parameter[self, project_id, file_path, branch_name, content, commit_message]]:
constant[
Updates an existing file in the repository
:param project_id: project id
:param file_path: Full path to new file. Ex. lib/class.rb
:param branch_name: The name of branch
:param content: File content
:param commit_message: Commit message
:return: true if success, false if not
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b28b6ce0>, <ast.Constant object at 0x7da1b28b5de0>, <ast.Constant object at 0x7da1b28b6d10>, <ast.Constant object at 0x7da1b28b48b0>], [<ast.Name object at 0x7da1b28b4970>, <ast.Name object at 0x7da1b28b6f50>, <ast.Name object at 0x7da1b28b7b50>, <ast.Name object at 0x7da1b28b6920>]]
variable[request] assign[=] call[name[requests].put, parameter[call[constant[{0}/{1}/repository/files].format, parameter[name[self].projects_url, name[project_id]]]]]
return[compare[name[request].status_code equal[==] constant[200]]] | keyword[def] identifier[updatefile] ( identifier[self] , identifier[project_id] , identifier[file_path] , identifier[branch_name] , identifier[content] , identifier[commit_message] ):
literal[string]
identifier[data] ={
literal[string] : identifier[file_path] ,
literal[string] : identifier[branch_name] ,
literal[string] : identifier[content] ,
literal[string] : identifier[commit_message]
}
identifier[request] = identifier[requests] . identifier[put] (
literal[string] . identifier[format] ( identifier[self] . identifier[projects_url] , identifier[project_id] ),
identifier[headers] = identifier[self] . identifier[headers] , identifier[data] = identifier[data] , identifier[verify] = identifier[self] . identifier[verify_ssl] , identifier[auth] = identifier[self] . identifier[auth] , identifier[timeout] = identifier[self] . identifier[timeout] )
keyword[return] identifier[request] . identifier[status_code] == literal[int] | def updatefile(self, project_id, file_path, branch_name, content, commit_message):
"""
Updates an existing file in the repository
:param project_id: project id
:param file_path: Full path to new file. Ex. lib/class.rb
:param branch_name: The name of branch
:param content: File content
:param commit_message: Commit message
:return: true if success, false if not
"""
data = {'file_path': file_path, 'branch_name': branch_name, 'content': content, 'commit_message': commit_message}
request = requests.put('{0}/{1}/repository/files'.format(self.projects_url, project_id), headers=self.headers, data=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
return request.status_code == 200 |
def set_last_value_to_one(probabilities):
"""!
@brief Update the last same probabilities to one.
@details All values of probability list equals to the last element are set to 1.
"""
# Start from the last elem
back_idx = - 1
# All values equal to the last elem should be set to 1
last_val = probabilities[back_idx]
# for all elements or if a elem not equal to the last elem
for _ in range(-1, -len(probabilities) - 1):
if probabilities[back_idx] == last_val:
probabilities[back_idx] = 1
else:
break | def function[set_last_value_to_one, parameter[probabilities]]:
constant[!
@brief Update the last same probabilities to one.
@details All values of probability list equals to the last element are set to 1.
]
variable[back_idx] assign[=] <ast.UnaryOp object at 0x7da1b01905b0>
variable[last_val] assign[=] call[name[probabilities]][name[back_idx]]
for taget[name[_]] in starred[call[name[range], parameter[<ast.UnaryOp object at 0x7da1b01919f0>, binary_operation[<ast.UnaryOp object at 0x7da1b0191720> - constant[1]]]]] begin[:]
if compare[call[name[probabilities]][name[back_idx]] equal[==] name[last_val]] begin[:]
call[name[probabilities]][name[back_idx]] assign[=] constant[1] | keyword[def] identifier[set_last_value_to_one] ( identifier[probabilities] ):
literal[string]
identifier[back_idx] =- literal[int]
identifier[last_val] = identifier[probabilities] [ identifier[back_idx] ]
keyword[for] identifier[_] keyword[in] identifier[range] (- literal[int] ,- identifier[len] ( identifier[probabilities] )- literal[int] ):
keyword[if] identifier[probabilities] [ identifier[back_idx] ]== identifier[last_val] :
identifier[probabilities] [ identifier[back_idx] ]= literal[int]
keyword[else] :
keyword[break] | def set_last_value_to_one(probabilities):
"""!
@brief Update the last same probabilities to one.
@details All values of probability list equals to the last element are set to 1.
"""
# Start from the last elem
back_idx = -1
# All values equal to the last elem should be set to 1
last_val = probabilities[back_idx]
# for all elements or if a elem not equal to the last elem
for _ in range(-1, -len(probabilities) - 1):
if probabilities[back_idx] == last_val:
probabilities[back_idx] = 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=[]] |
def to_distribution_values(self, values):
"""
Returns numpy array of natural logarithms of ``values``.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# avoid RuntimeWarning: divide by zero encountered in log
return numpy.log(values) | def function[to_distribution_values, parameter[self, values]]:
constant[
Returns numpy array of natural logarithms of ``values``.
]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].simplefilter, parameter[constant[ignore]]]
return[call[name[numpy].log, parameter[name[values]]]] | keyword[def] identifier[to_distribution_values] ( identifier[self] , identifier[values] ):
literal[string]
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[simplefilter] ( literal[string] )
keyword[return] identifier[numpy] . identifier[log] ( identifier[values] ) | def to_distribution_values(self, values):
"""
Returns numpy array of natural logarithms of ``values``.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# avoid RuntimeWarning: divide by zero encountered in log
return numpy.log(values) # depends on [control=['with'], data=[]] |
def image(alt_text, link_url, title=""):
"""Return an inline image.
Keyword arguments:
title -- Specify the title of the image, as seen when hovering over it.
>>> image("This is an image", "https://tinyurl.com/bright-green-tree")
''
>>> image("This is an image", "https://tinyurl.com/bright-green-tree", "tree")
' "tree"'
"""
image_string = ""
if title:
image_string += ' "' + esc_format(title) + '"'
return image_string | def function[image, parameter[alt_text, link_url, title]]:
constant[Return an inline image.
Keyword arguments:
title -- Specify the title of the image, as seen when hovering over it.
>>> image("This is an image", "https://tinyurl.com/bright-green-tree")
''
>>> image("This is an image", "https://tinyurl.com/bright-green-tree", "tree")
' "tree"'
]
variable[image_string] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[![] + call[name[esc_format], parameter[name[alt_text]]]] + constant[](]] + name[link_url]] + constant[)]]
if name[title] begin[:]
<ast.AugAssign object at 0x7da18eb560b0>
return[name[image_string]] | keyword[def] identifier[image] ( identifier[alt_text] , identifier[link_url] , identifier[title] = literal[string] ):
literal[string]
identifier[image_string] = literal[string] + identifier[esc_format] ( identifier[alt_text] )+ literal[string] + identifier[link_url] + literal[string]
keyword[if] identifier[title] :
identifier[image_string] += literal[string] + identifier[esc_format] ( identifier[title] )+ literal[string]
keyword[return] identifier[image_string] | def image(alt_text, link_url, title=''):
"""Return an inline image.
Keyword arguments:
title -- Specify the title of the image, as seen when hovering over it.
>>> image("This is an image", "https://tinyurl.com/bright-green-tree")
''
>>> image("This is an image", "https://tinyurl.com/bright-green-tree", "tree")
' "tree"'
"""
image_string = ''
if title:
image_string += ' "' + esc_format(title) + '"' # depends on [control=['if'], data=[]]
return image_string |
def is_cache(cache):
"""Returns `True` if ``cache`` is a readable cache file or object
Parameters
----------
cache : `str`, `file`, `list`
Object to detect as cache
Returns
-------
iscache : `bool`
`True` if the input object is a cache, or a file in LAL cache format,
otherwise `False`
"""
if isinstance(cache, string_types + FILE_LIKE):
try:
return bool(len(read_cache(cache)))
except (TypeError, ValueError, UnicodeDecodeError, ImportError):
# failed to parse cache
return False
if HAS_CACHE and isinstance(cache, Cache):
return True
if (isinstance(cache, (list, tuple)) and cache and
all(map(is_cache_entry, cache))):
return True
return False | def function[is_cache, parameter[cache]]:
constant[Returns `True` if ``cache`` is a readable cache file or object
Parameters
----------
cache : `str`, `file`, `list`
Object to detect as cache
Returns
-------
iscache : `bool`
`True` if the input object is a cache, or a file in LAL cache format,
otherwise `False`
]
if call[name[isinstance], parameter[name[cache], binary_operation[name[string_types] + name[FILE_LIKE]]]] begin[:]
<ast.Try object at 0x7da204620d00>
if <ast.BoolOp object at 0x7da204344b50> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da204347010> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_cache] ( identifier[cache] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[cache] , identifier[string_types] + identifier[FILE_LIKE] ):
keyword[try] :
keyword[return] identifier[bool] ( identifier[len] ( identifier[read_cache] ( identifier[cache] )))
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[UnicodeDecodeError] , identifier[ImportError] ):
keyword[return] keyword[False]
keyword[if] identifier[HAS_CACHE] keyword[and] identifier[isinstance] ( identifier[cache] , identifier[Cache] ):
keyword[return] keyword[True]
keyword[if] ( identifier[isinstance] ( identifier[cache] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[cache] keyword[and]
identifier[all] ( identifier[map] ( identifier[is_cache_entry] , identifier[cache] ))):
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_cache(cache):
"""Returns `True` if ``cache`` is a readable cache file or object
Parameters
----------
cache : `str`, `file`, `list`
Object to detect as cache
Returns
-------
iscache : `bool`
`True` if the input object is a cache, or a file in LAL cache format,
otherwise `False`
"""
if isinstance(cache, string_types + FILE_LIKE):
try:
return bool(len(read_cache(cache))) # depends on [control=['try'], data=[]]
except (TypeError, ValueError, UnicodeDecodeError, ImportError):
# failed to parse cache
return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if HAS_CACHE and isinstance(cache, Cache):
return True # depends on [control=['if'], data=[]]
if isinstance(cache, (list, tuple)) and cache and all(map(is_cache_entry, cache)):
return True # depends on [control=['if'], data=[]]
return False |
def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True,
key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) | def function[split_bgedge, parameter[self, bgedge, guidance, sorted_guidance, account_for_colors_multiplicity_in_guidance, key]]:
constant[ Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
]
call[name[self].__split_bgedge, parameter[]] | keyword[def] identifier[split_bgedge] ( identifier[self] , identifier[bgedge] , identifier[guidance] = keyword[None] , identifier[sorted_guidance] = keyword[False] ,
identifier[account_for_colors_multiplicity_in_guidance] = keyword[True] ,
identifier[key] = keyword[None] ):
literal[string]
identifier[self] . identifier[__split_bgedge] ( identifier[bgedge] = identifier[bgedge] , identifier[guidance] = identifier[guidance] , identifier[sorted_guidance] = identifier[sorted_guidance] ,
identifier[account_for_colors_multiplicity_in_guidance] = identifier[account_for_colors_multiplicity_in_guidance] ,
identifier[key] = identifier[key] ) | def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False, account_for_colors_multiplicity_in_guidance=True, key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance, account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance, key=key) |
def get_canonical_encoding_name(name):
# type: (str) -> str
"""
Given an encoding name, get the canonical name from a codec lookup.
:param str name: The name of the codec to lookup
:return: The canonical version of the codec name
:rtype: str
"""
import codecs
try:
codec = codecs.lookup(name)
except LookupError:
return name
else:
return codec.name | def function[get_canonical_encoding_name, parameter[name]]:
constant[
Given an encoding name, get the canonical name from a codec lookup.
:param str name: The name of the codec to lookup
:return: The canonical version of the codec name
:rtype: str
]
import module[codecs]
<ast.Try object at 0x7da18bccbc10> | keyword[def] identifier[get_canonical_encoding_name] ( identifier[name] ):
literal[string]
keyword[import] identifier[codecs]
keyword[try] :
identifier[codec] = identifier[codecs] . identifier[lookup] ( identifier[name] )
keyword[except] identifier[LookupError] :
keyword[return] identifier[name]
keyword[else] :
keyword[return] identifier[codec] . identifier[name] | def get_canonical_encoding_name(name):
# type: (str) -> str
'\n Given an encoding name, get the canonical name from a codec lookup.\n\n :param str name: The name of the codec to lookup\n :return: The canonical version of the codec name\n :rtype: str\n '
import codecs
try:
codec = codecs.lookup(name) # depends on [control=['try'], data=[]]
except LookupError:
return name # depends on [control=['except'], data=[]]
else:
return codec.name |
def start(self):
""" :meth:`WStoppableTask.start` implementation that creates new thread
"""
start_event = self.start_event()
stop_event = self.stop_event()
ready_event = self.ready_event()
def thread_target():
try:
start_event.set()
self.thread_started()
if ready_event is not None:
ready_event.set()
except Exception as e:
self.exception_event().set()
self.thread_exception(e)
if self.__thread is None:
if stop_event is not None:
stop_event.clear()
if ready_event is not None:
ready_event.clear()
self.exception_event().clear()
self.__thread = Thread(target=thread_target, name=self.thread_name())
self.__thread.start() | def function[start, parameter[self]]:
constant[ :meth:`WStoppableTask.start` implementation that creates new thread
]
variable[start_event] assign[=] call[name[self].start_event, parameter[]]
variable[stop_event] assign[=] call[name[self].stop_event, parameter[]]
variable[ready_event] assign[=] call[name[self].ready_event, parameter[]]
def function[thread_target, parameter[]]:
<ast.Try object at 0x7da20c6e4c40>
if compare[name[self].__thread is constant[None]] begin[:]
if compare[name[stop_event] is_not constant[None]] begin[:]
call[name[stop_event].clear, parameter[]]
if compare[name[ready_event] is_not constant[None]] begin[:]
call[name[ready_event].clear, parameter[]]
call[call[name[self].exception_event, parameter[]].clear, parameter[]]
name[self].__thread assign[=] call[name[Thread], parameter[]]
call[name[self].__thread.start, parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[start_event] = identifier[self] . identifier[start_event] ()
identifier[stop_event] = identifier[self] . identifier[stop_event] ()
identifier[ready_event] = identifier[self] . identifier[ready_event] ()
keyword[def] identifier[thread_target] ():
keyword[try] :
identifier[start_event] . identifier[set] ()
identifier[self] . identifier[thread_started] ()
keyword[if] identifier[ready_event] keyword[is] keyword[not] keyword[None] :
identifier[ready_event] . identifier[set] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[exception_event] (). identifier[set] ()
identifier[self] . identifier[thread_exception] ( identifier[e] )
keyword[if] identifier[self] . identifier[__thread] keyword[is] keyword[None] :
keyword[if] identifier[stop_event] keyword[is] keyword[not] keyword[None] :
identifier[stop_event] . identifier[clear] ()
keyword[if] identifier[ready_event] keyword[is] keyword[not] keyword[None] :
identifier[ready_event] . identifier[clear] ()
identifier[self] . identifier[exception_event] (). identifier[clear] ()
identifier[self] . identifier[__thread] = identifier[Thread] ( identifier[target] = identifier[thread_target] , identifier[name] = identifier[self] . identifier[thread_name] ())
identifier[self] . identifier[__thread] . identifier[start] () | def start(self):
""" :meth:`WStoppableTask.start` implementation that creates new thread
"""
start_event = self.start_event()
stop_event = self.stop_event()
ready_event = self.ready_event()
def thread_target():
try:
start_event.set()
self.thread_started()
if ready_event is not None:
ready_event.set() # depends on [control=['if'], data=['ready_event']] # depends on [control=['try'], data=[]]
except Exception as e:
self.exception_event().set()
self.thread_exception(e) # depends on [control=['except'], data=['e']]
if self.__thread is None:
if stop_event is not None:
stop_event.clear() # depends on [control=['if'], data=['stop_event']]
if ready_event is not None:
ready_event.clear() # depends on [control=['if'], data=['ready_event']]
self.exception_event().clear()
self.__thread = Thread(target=thread_target, name=self.thread_name())
self.__thread.start() # depends on [control=['if'], data=[]] |
def _p100(part, total, prec=1):
'''Return percentage as string.
'''
r = float(total)
if r:
r = part * 100.0 / r
return '%.*f%%' % (prec, r)
return 'n/a' | def function[_p100, parameter[part, total, prec]]:
constant[Return percentage as string.
]
variable[r] assign[=] call[name[float], parameter[name[total]]]
if name[r] begin[:]
variable[r] assign[=] binary_operation[binary_operation[name[part] * constant[100.0]] / name[r]]
return[binary_operation[constant[%.*f%%] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7cab60>, <ast.Name object at 0x7da20c7c88b0>]]]]
return[constant[n/a]] | keyword[def] identifier[_p100] ( identifier[part] , identifier[total] , identifier[prec] = literal[int] ):
literal[string]
identifier[r] = identifier[float] ( identifier[total] )
keyword[if] identifier[r] :
identifier[r] = identifier[part] * literal[int] / identifier[r]
keyword[return] literal[string] %( identifier[prec] , identifier[r] )
keyword[return] literal[string] | def _p100(part, total, prec=1):
"""Return percentage as string.
"""
r = float(total)
if r:
r = part * 100.0 / r
return '%.*f%%' % (prec, r) # depends on [control=['if'], data=[]]
return 'n/a' |
def binary_ks_curve(y_true, y_probas):
"""This function generates the points necessary to calculate the KS
Statistic curve.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_probas (array-like, shape (n_samples)): Probability predictions of
the positive class.
Returns:
thresholds (numpy.ndarray): An array containing the X-axis values for
plotting the KS Statistic plot.
pct1 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
pct2 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
ks_statistic (float): The KS Statistic, or the maximum vertical
distance between the two curves.
max_distance_at (float): The X-axis value at which the maximum vertical
distance between the two curves is seen.
classes (np.ndarray, shape (2)): An array containing the labels of the
two classes making up `y_true`.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The KS Statistic
is only relevant in binary classification.
"""
y_true, y_probas = np.asarray(y_true), np.asarray(y_probas)
lb = LabelEncoder()
encoded_labels = lb.fit_transform(y_true)
if len(lb.classes_) != 2:
raise ValueError('Cannot calculate KS statistic for data with '
'{} category/ies'.format(len(lb.classes_)))
idx = encoded_labels == 0
data1 = np.sort(y_probas[idx])
data2 = np.sort(y_probas[np.logical_not(idx)])
ctr1, ctr2 = 0, 0
thresholds, pct1, pct2 = [], [], []
while ctr1 < len(data1) or ctr2 < len(data2):
# Check if data1 has no more elements
if ctr1 >= len(data1):
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
# Check if data2 has no more elements
elif ctr2 >= len(data2):
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
else:
if data1[ctr1] > data2[ctr2]:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
elif data1[ctr1] < data2[ctr2]:
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
else:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
thresholds.append(current)
pct1.append(ctr1)
pct2.append(ctr2)
thresholds = np.asarray(thresholds)
pct1 = np.asarray(pct1) / float(len(data1))
pct2 = np.asarray(pct2) / float(len(data2))
if thresholds[0] != 0:
thresholds = np.insert(thresholds, 0, [0.0])
pct1 = np.insert(pct1, 0, [0.0])
pct2 = np.insert(pct2, 0, [0.0])
if thresholds[-1] != 1:
thresholds = np.append(thresholds, [1.0])
pct1 = np.append(pct1, [1.0])
pct2 = np.append(pct2, [1.0])
differences = pct1 - pct2
ks_statistic, max_distance_at = (np.max(differences),
thresholds[np.argmax(differences)])
return thresholds, pct1, pct2, ks_statistic, max_distance_at, lb.classes_ | def function[binary_ks_curve, parameter[y_true, y_probas]]:
constant[This function generates the points necessary to calculate the KS
Statistic curve.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_probas (array-like, shape (n_samples)): Probability predictions of
the positive class.
Returns:
thresholds (numpy.ndarray): An array containing the X-axis values for
plotting the KS Statistic plot.
pct1 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
pct2 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
ks_statistic (float): The KS Statistic, or the maximum vertical
distance between the two curves.
max_distance_at (float): The X-axis value at which the maximum vertical
distance between the two curves is seen.
classes (np.ndarray, shape (2)): An array containing the labels of the
two classes making up `y_true`.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The KS Statistic
is only relevant in binary classification.
]
<ast.Tuple object at 0x7da1b1602920> assign[=] tuple[[<ast.Call object at 0x7da1b16037f0>, <ast.Call object at 0x7da1b1603ca0>]]
variable[lb] assign[=] call[name[LabelEncoder], parameter[]]
variable[encoded_labels] assign[=] call[name[lb].fit_transform, parameter[name[y_true]]]
if compare[call[name[len], parameter[name[lb].classes_]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1600f70>
variable[idx] assign[=] compare[name[encoded_labels] equal[==] constant[0]]
variable[data1] assign[=] call[name[np].sort, parameter[call[name[y_probas]][name[idx]]]]
variable[data2] assign[=] call[name[np].sort, parameter[call[name[y_probas]][call[name[np].logical_not, parameter[name[idx]]]]]]
<ast.Tuple object at 0x7da1b1600970> assign[=] tuple[[<ast.Constant object at 0x7da1b16006a0>, <ast.Constant object at 0x7da1b1600310>]]
<ast.Tuple object at 0x7da1b1602da0> assign[=] tuple[[<ast.List object at 0x7da1b1600640>, <ast.List object at 0x7da1b1601360>, <ast.List object at 0x7da1b1602b60>]]
while <ast.BoolOp object at 0x7da1b16009d0> begin[:]
if compare[name[ctr1] greater_or_equal[>=] call[name[len], parameter[name[data1]]]] begin[:]
variable[current] assign[=] call[name[data2]][name[ctr2]]
while <ast.BoolOp object at 0x7da1b16005b0> begin[:]
<ast.AugAssign object at 0x7da1b1603cd0>
call[name[thresholds].append, parameter[name[current]]]
call[name[pct1].append, parameter[name[ctr1]]]
call[name[pct2].append, parameter[name[ctr2]]]
variable[thresholds] assign[=] call[name[np].asarray, parameter[name[thresholds]]]
variable[pct1] assign[=] binary_operation[call[name[np].asarray, parameter[name[pct1]]] / call[name[float], parameter[call[name[len], parameter[name[data1]]]]]]
variable[pct2] assign[=] binary_operation[call[name[np].asarray, parameter[name[pct2]]] / call[name[float], parameter[call[name[len], parameter[name[data2]]]]]]
if compare[call[name[thresholds]][constant[0]] not_equal[!=] constant[0]] begin[:]
variable[thresholds] assign[=] call[name[np].insert, parameter[name[thresholds], constant[0], list[[<ast.Constant object at 0x7da1b1647d30>]]]]
variable[pct1] assign[=] call[name[np].insert, parameter[name[pct1], constant[0], list[[<ast.Constant object at 0x7da1b16444c0>]]]]
variable[pct2] assign[=] call[name[np].insert, parameter[name[pct2], constant[0], list[[<ast.Constant object at 0x7da1b16458a0>]]]]
if compare[call[name[thresholds]][<ast.UnaryOp object at 0x7da1b1645450>] not_equal[!=] constant[1]] begin[:]
variable[thresholds] assign[=] call[name[np].append, parameter[name[thresholds], list[[<ast.Constant object at 0x7da1b1646290>]]]]
variable[pct1] assign[=] call[name[np].append, parameter[name[pct1], list[[<ast.Constant object at 0x7da1b1647ac0>]]]]
variable[pct2] assign[=] call[name[np].append, parameter[name[pct2], list[[<ast.Constant object at 0x7da1b1644a30>]]]]
variable[differences] assign[=] binary_operation[name[pct1] - name[pct2]]
<ast.Tuple object at 0x7da1b1644220> assign[=] tuple[[<ast.Call object at 0x7da1b1647340>, <ast.Subscript object at 0x7da1b16454b0>]]
return[tuple[[<ast.Name object at 0x7da1b1645480>, <ast.Name object at 0x7da1b1646fb0>, <ast.Name object at 0x7da1b1645660>, <ast.Name object at 0x7da1b1645540>, <ast.Name object at 0x7da1b16454e0>, <ast.Attribute object at 0x7da1b1645390>]]] | keyword[def] identifier[binary_ks_curve] ( identifier[y_true] , identifier[y_probas] ):
literal[string]
identifier[y_true] , identifier[y_probas] = identifier[np] . identifier[asarray] ( identifier[y_true] ), identifier[np] . identifier[asarray] ( identifier[y_probas] )
identifier[lb] = identifier[LabelEncoder] ()
identifier[encoded_labels] = identifier[lb] . identifier[fit_transform] ( identifier[y_true] )
keyword[if] identifier[len] ( identifier[lb] . identifier[classes_] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[lb] . identifier[classes_] )))
identifier[idx] = identifier[encoded_labels] == literal[int]
identifier[data1] = identifier[np] . identifier[sort] ( identifier[y_probas] [ identifier[idx] ])
identifier[data2] = identifier[np] . identifier[sort] ( identifier[y_probas] [ identifier[np] . identifier[logical_not] ( identifier[idx] )])
identifier[ctr1] , identifier[ctr2] = literal[int] , literal[int]
identifier[thresholds] , identifier[pct1] , identifier[pct2] =[],[],[]
keyword[while] identifier[ctr1] < identifier[len] ( identifier[data1] ) keyword[or] identifier[ctr2] < identifier[len] ( identifier[data2] ):
keyword[if] identifier[ctr1] >= identifier[len] ( identifier[data1] ):
identifier[current] = identifier[data2] [ identifier[ctr2] ]
keyword[while] identifier[ctr2] < identifier[len] ( identifier[data2] ) keyword[and] identifier[current] == identifier[data2] [ identifier[ctr2] ]:
identifier[ctr2] += literal[int]
keyword[elif] identifier[ctr2] >= identifier[len] ( identifier[data2] ):
identifier[current] = identifier[data1] [ identifier[ctr1] ]
keyword[while] identifier[ctr1] < identifier[len] ( identifier[data1] ) keyword[and] identifier[current] == identifier[data1] [ identifier[ctr1] ]:
identifier[ctr1] += literal[int]
keyword[else] :
keyword[if] identifier[data1] [ identifier[ctr1] ]> identifier[data2] [ identifier[ctr2] ]:
identifier[current] = identifier[data2] [ identifier[ctr2] ]
keyword[while] identifier[ctr2] < identifier[len] ( identifier[data2] ) keyword[and] identifier[current] == identifier[data2] [ identifier[ctr2] ]:
identifier[ctr2] += literal[int]
keyword[elif] identifier[data1] [ identifier[ctr1] ]< identifier[data2] [ identifier[ctr2] ]:
identifier[current] = identifier[data1] [ identifier[ctr1] ]
keyword[while] identifier[ctr1] < identifier[len] ( identifier[data1] ) keyword[and] identifier[current] == identifier[data1] [ identifier[ctr1] ]:
identifier[ctr1] += literal[int]
keyword[else] :
identifier[current] = identifier[data2] [ identifier[ctr2] ]
keyword[while] identifier[ctr2] < identifier[len] ( identifier[data2] ) keyword[and] identifier[current] == identifier[data2] [ identifier[ctr2] ]:
identifier[ctr2] += literal[int]
keyword[while] identifier[ctr1] < identifier[len] ( identifier[data1] ) keyword[and] identifier[current] == identifier[data1] [ identifier[ctr1] ]:
identifier[ctr1] += literal[int]
identifier[thresholds] . identifier[append] ( identifier[current] )
identifier[pct1] . identifier[append] ( identifier[ctr1] )
identifier[pct2] . identifier[append] ( identifier[ctr2] )
identifier[thresholds] = identifier[np] . identifier[asarray] ( identifier[thresholds] )
identifier[pct1] = identifier[np] . identifier[asarray] ( identifier[pct1] )/ identifier[float] ( identifier[len] ( identifier[data1] ))
identifier[pct2] = identifier[np] . identifier[asarray] ( identifier[pct2] )/ identifier[float] ( identifier[len] ( identifier[data2] ))
keyword[if] identifier[thresholds] [ literal[int] ]!= literal[int] :
identifier[thresholds] = identifier[np] . identifier[insert] ( identifier[thresholds] , literal[int] ,[ literal[int] ])
identifier[pct1] = identifier[np] . identifier[insert] ( identifier[pct1] , literal[int] ,[ literal[int] ])
identifier[pct2] = identifier[np] . identifier[insert] ( identifier[pct2] , literal[int] ,[ literal[int] ])
keyword[if] identifier[thresholds] [- literal[int] ]!= literal[int] :
identifier[thresholds] = identifier[np] . identifier[append] ( identifier[thresholds] ,[ literal[int] ])
identifier[pct1] = identifier[np] . identifier[append] ( identifier[pct1] ,[ literal[int] ])
identifier[pct2] = identifier[np] . identifier[append] ( identifier[pct2] ,[ literal[int] ])
identifier[differences] = identifier[pct1] - identifier[pct2]
identifier[ks_statistic] , identifier[max_distance_at] =( identifier[np] . identifier[max] ( identifier[differences] ),
identifier[thresholds] [ identifier[np] . identifier[argmax] ( identifier[differences] )])
keyword[return] identifier[thresholds] , identifier[pct1] , identifier[pct2] , identifier[ks_statistic] , identifier[max_distance_at] , identifier[lb] . identifier[classes_] | def binary_ks_curve(y_true, y_probas):
"""This function generates the points necessary to calculate the KS
Statistic curve.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_probas (array-like, shape (n_samples)): Probability predictions of
the positive class.
Returns:
thresholds (numpy.ndarray): An array containing the X-axis values for
plotting the KS Statistic plot.
pct1 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
pct2 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
ks_statistic (float): The KS Statistic, or the maximum vertical
distance between the two curves.
max_distance_at (float): The X-axis value at which the maximum vertical
distance between the two curves is seen.
classes (np.ndarray, shape (2)): An array containing the labels of the
two classes making up `y_true`.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The KS Statistic
is only relevant in binary classification.
"""
(y_true, y_probas) = (np.asarray(y_true), np.asarray(y_probas))
lb = LabelEncoder()
encoded_labels = lb.fit_transform(y_true)
if len(lb.classes_) != 2:
raise ValueError('Cannot calculate KS statistic for data with {} category/ies'.format(len(lb.classes_))) # depends on [control=['if'], data=[]]
idx = encoded_labels == 0
data1 = np.sort(y_probas[idx])
data2 = np.sort(y_probas[np.logical_not(idx)])
(ctr1, ctr2) = (0, 0)
(thresholds, pct1, pct2) = ([], [], [])
while ctr1 < len(data1) or ctr2 < len(data2):
# Check if data1 has no more elements
if ctr1 >= len(data1):
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
# Check if data2 has no more elements
elif ctr2 >= len(data2):
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif data1[ctr1] > data2[ctr2]:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif data1[ctr1] < data2[ctr2]:
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1 # depends on [control=['while'], data=[]]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1 # depends on [control=['while'], data=[]]
thresholds.append(current)
pct1.append(ctr1)
pct2.append(ctr2) # depends on [control=['while'], data=[]]
thresholds = np.asarray(thresholds)
pct1 = np.asarray(pct1) / float(len(data1))
pct2 = np.asarray(pct2) / float(len(data2))
if thresholds[0] != 0:
thresholds = np.insert(thresholds, 0, [0.0])
pct1 = np.insert(pct1, 0, [0.0])
pct2 = np.insert(pct2, 0, [0.0]) # depends on [control=['if'], data=[]]
if thresholds[-1] != 1:
thresholds = np.append(thresholds, [1.0])
pct1 = np.append(pct1, [1.0])
pct2 = np.append(pct2, [1.0]) # depends on [control=['if'], data=[]]
differences = pct1 - pct2
(ks_statistic, max_distance_at) = (np.max(differences), thresholds[np.argmax(differences)])
return (thresholds, pct1, pct2, ks_statistic, max_distance_at, lb.classes_) |
def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject,
parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger,
options: Dict[str, Dict[str, Any]]) -> T:
"""
First parse all children from the parsing plan, then calls _build_object_from_parsed_children
:param desired_type:
:param obj:
:param parsing_plan_for_children:
:param logger:
:param options:
:return:
"""
pass | def function[_parse_multifile, parameter[self, desired_type, obj, parsing_plan_for_children, logger, options]]:
constant[
First parse all children from the parsing plan, then calls _build_object_from_parsed_children
:param desired_type:
:param obj:
:param parsing_plan_for_children:
:param logger:
:param options:
:return:
]
pass | keyword[def] identifier[_parse_multifile] ( identifier[self] , identifier[desired_type] : identifier[Type] [ identifier[T] ], identifier[obj] : identifier[PersistedObject] ,
identifier[parsing_plan_for_children] : identifier[Dict] [ identifier[str] , identifier[ParsingPlan] ], identifier[logger] : identifier[Logger] ,
identifier[options] : identifier[Dict] [ identifier[str] , identifier[Dict] [ identifier[str] , identifier[Any] ]])-> identifier[T] :
literal[string]
keyword[pass] | def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject, parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
"""
First parse all children from the parsing plan, then calls _build_object_from_parsed_children
:param desired_type:
:param obj:
:param parsing_plan_for_children:
:param logger:
:param options:
:return:
"""
pass |
def ping(self):
""" Send a ping message. """
self.last_ping = time.time()
try:
self.send_message({MESSAGE_TYPE: TYPE_PING})
except NotConnected:
self._socket_client.logger.error("Chromecast is disconnected. " +
"Cannot ping until reconnected.") | def function[ping, parameter[self]]:
constant[ Send a ping message. ]
name[self].last_ping assign[=] call[name[time].time, parameter[]]
<ast.Try object at 0x7da20c7c8700> | keyword[def] identifier[ping] ( identifier[self] ):
literal[string]
identifier[self] . identifier[last_ping] = identifier[time] . identifier[time] ()
keyword[try] :
identifier[self] . identifier[send_message] ({ identifier[MESSAGE_TYPE] : identifier[TYPE_PING] })
keyword[except] identifier[NotConnected] :
identifier[self] . identifier[_socket_client] . identifier[logger] . identifier[error] ( literal[string] +
literal[string] ) | def ping(self):
""" Send a ping message. """
self.last_ping = time.time()
try:
self.send_message({MESSAGE_TYPE: TYPE_PING}) # depends on [control=['try'], data=[]]
except NotConnected:
self._socket_client.logger.error('Chromecast is disconnected. ' + 'Cannot ping until reconnected.') # depends on [control=['except'], data=[]] |
def get_allocated_fragments(self, id_or_uri, count=-1, start=0):
"""
Gets all fragments that have been allocated in range.
Args:
id_or_uri:
ID or URI of range.
count:
The number of resources to return. A count of -1 requests all items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceed the total number
of items.
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
Returns:
list: A list with the allocated fragements.
"""
uri = self._client.build_uri(id_or_uri) + "/allocated-fragments?start={0}&count={1}".format(start, count)
return self._client.get_collection(uri) | def function[get_allocated_fragments, parameter[self, id_or_uri, count, start]]:
constant[
Gets all fragments that have been allocated in range.
Args:
id_or_uri:
ID or URI of range.
count:
The number of resources to return. A count of -1 requests all items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceed the total number
of items.
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
Returns:
list: A list with the allocated fragements.
]
variable[uri] assign[=] binary_operation[call[name[self]._client.build_uri, parameter[name[id_or_uri]]] + call[constant[/allocated-fragments?start={0}&count={1}].format, parameter[name[start], name[count]]]]
return[call[name[self]._client.get_collection, parameter[name[uri]]]] | keyword[def] identifier[get_allocated_fragments] ( identifier[self] , identifier[id_or_uri] , identifier[count] =- literal[int] , identifier[start] = literal[int] ):
literal[string]
identifier[uri] = identifier[self] . identifier[_client] . identifier[build_uri] ( identifier[id_or_uri] )+ literal[string] . identifier[format] ( identifier[start] , identifier[count] )
keyword[return] identifier[self] . identifier[_client] . identifier[get_collection] ( identifier[uri] ) | def get_allocated_fragments(self, id_or_uri, count=-1, start=0):
"""
Gets all fragments that have been allocated in range.
Args:
id_or_uri:
ID or URI of range.
count:
The number of resources to return. A count of -1 requests all items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceed the total number
of items.
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
Returns:
list: A list with the allocated fragements.
"""
uri = self._client.build_uri(id_or_uri) + '/allocated-fragments?start={0}&count={1}'.format(start, count)
return self._client.get_collection(uri) |
def tgread_bytes(self):
"""
Reads a Telegram-encoded byte array, without the need of
specifying its length.
"""
first_byte = self.read_byte()
if first_byte == 254:
length = self.read_byte() | (self.read_byte() << 8) | (
self.read_byte() << 16)
padding = length % 4
else:
length = first_byte
padding = (length + 1) % 4
data = self.read(length)
if padding > 0:
padding = 4 - padding
self.read(padding)
return data | def function[tgread_bytes, parameter[self]]:
constant[
Reads a Telegram-encoded byte array, without the need of
specifying its length.
]
variable[first_byte] assign[=] call[name[self].read_byte, parameter[]]
if compare[name[first_byte] equal[==] constant[254]] begin[:]
variable[length] assign[=] binary_operation[binary_operation[call[name[self].read_byte, parameter[]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[self].read_byte, parameter[]] <ast.LShift object at 0x7da2590d69e0> constant[8]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[self].read_byte, parameter[]] <ast.LShift object at 0x7da2590d69e0> constant[16]]]
variable[padding] assign[=] binary_operation[name[length] <ast.Mod object at 0x7da2590d6920> constant[4]]
variable[data] assign[=] call[name[self].read, parameter[name[length]]]
if compare[name[padding] greater[>] constant[0]] begin[:]
variable[padding] assign[=] binary_operation[constant[4] - name[padding]]
call[name[self].read, parameter[name[padding]]]
return[name[data]] | keyword[def] identifier[tgread_bytes] ( identifier[self] ):
literal[string]
identifier[first_byte] = identifier[self] . identifier[read_byte] ()
keyword[if] identifier[first_byte] == literal[int] :
identifier[length] = identifier[self] . identifier[read_byte] ()|( identifier[self] . identifier[read_byte] ()<< literal[int] )|(
identifier[self] . identifier[read_byte] ()<< literal[int] )
identifier[padding] = identifier[length] % literal[int]
keyword[else] :
identifier[length] = identifier[first_byte]
identifier[padding] =( identifier[length] + literal[int] )% literal[int]
identifier[data] = identifier[self] . identifier[read] ( identifier[length] )
keyword[if] identifier[padding] > literal[int] :
identifier[padding] = literal[int] - identifier[padding]
identifier[self] . identifier[read] ( identifier[padding] )
keyword[return] identifier[data] | def tgread_bytes(self):
"""
Reads a Telegram-encoded byte array, without the need of
specifying its length.
"""
first_byte = self.read_byte()
if first_byte == 254:
length = self.read_byte() | self.read_byte() << 8 | self.read_byte() << 16
padding = length % 4 # depends on [control=['if'], data=[]]
else:
length = first_byte
padding = (length + 1) % 4
data = self.read(length)
if padding > 0:
padding = 4 - padding
self.read(padding) # depends on [control=['if'], data=['padding']]
return data |
def calc_requiredremoterelease_v1(self):
"""Guess the required release necessary to not fall below the threshold
value at a cross section far downstream with a certain level of certainty.
Required control parameter:
|RemoteDischargeSafety|
Required derived parameters:
|RemoteDischargeSmoothPar|
|dam_derived.TOY|
Required flux sequence:
|RemoteDemand|
|RemoteFailure|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = RemoteDemand + RemoteDischargeSafety
\\cdot smooth_{logistic1}(RemoteFailure, RemoteDischargeSmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
As in the examples above, define a short simulation time period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> derived.toy.update()
Define a safety factor of 0.5 m³/s for the summer months and
no safety factor at all for the winter months:
>>> remotedischargesafety(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.remotedischargesmoothpar.update()
Assume the actual demand at the cross section downsstream has actually
been estimated to be 2 m³/s:
>>> fluxes.remotedemand = 2.0
Prepare a test function, that calculates the required discharge
based on the parameter values defined above and for a "remote
failure" values ranging between -4 and 4 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_requiredremoterelease_v1,
... last_example=9,
... parseqs=(fluxes.remotefailure,
... fluxes.requiredremoterelease))
>>> test.nexts.remotefailure = range(-4, 5)
On May 31, the safety factor is 0 m³/s. Hence no discharge is
added to the estimated remote demand of 2 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.0 |
| 3 | -2.0 | 2.0 |
| 4 | -1.0 | 2.0 |
| 5 | 0.0 | 2.0 |
| 6 | 1.0 | 2.0 |
| 7 | 2.0 | 2.0 |
| 8 | 3.0 | 2.0 |
| 9 | 4.0 | 2.0 |
On April 1, the safety factor is 1 m³/s. If the remote failure was
exactly zero in the past, meaning the control of the dam was perfect,
only 0.5 m³/s are added to the estimated remote demand of 2 m³/s.
If the actual recharge did actually fall below the threshold value,
up to 1 m³/s is added. If the the actual discharge exceeded the
threshold value by 2 or 3 m³/s, virtually nothing is added:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.000001 |
| 3 | -2.0 | 2.000102 |
| 4 | -1.0 | 2.01 |
| 5 | 0.0 | 2.5 |
| 6 | 1.0 | 2.99 |
| 7 | 2.0 | 2.999898 |
| 8 | 3.0 | 2.999999 |
| 9 | 4.0 | 3.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.requiredremoterelease = (
flu.remotedemand+con.remotedischargesafety[der.toy[self.idx_sim]] *
smoothutils.smooth_logistic1(
flu.remotefailure,
der.remotedischargesmoothpar[der.toy[self.idx_sim]])) | def function[calc_requiredremoterelease_v1, parameter[self]]:
constant[Guess the required release necessary to not fall below the threshold
value at a cross section far downstream with a certain level of certainty.
Required control parameter:
|RemoteDischargeSafety|
Required derived parameters:
|RemoteDischargeSmoothPar|
|dam_derived.TOY|
Required flux sequence:
|RemoteDemand|
|RemoteFailure|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = RemoteDemand + RemoteDischargeSafety
\cdot smooth_{logistic1}(RemoteFailure, RemoteDischargeSmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
As in the examples above, define a short simulation time period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> derived.toy.update()
Define a safety factor of 0.5 m³/s for the summer months and
no safety factor at all for the winter months:
>>> remotedischargesafety(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.remotedischargesmoothpar.update()
Assume the actual demand at the cross section downsstream has actually
been estimated to be 2 m³/s:
>>> fluxes.remotedemand = 2.0
Prepare a test function, that calculates the required discharge
based on the parameter values defined above and for a "remote
failure" values ranging between -4 and 4 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_requiredremoterelease_v1,
... last_example=9,
... parseqs=(fluxes.remotefailure,
... fluxes.requiredremoterelease))
>>> test.nexts.remotefailure = range(-4, 5)
On May 31, the safety factor is 0 m³/s. Hence no discharge is
added to the estimated remote demand of 2 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.0 |
| 3 | -2.0 | 2.0 |
| 4 | -1.0 | 2.0 |
| 5 | 0.0 | 2.0 |
| 6 | 1.0 | 2.0 |
| 7 | 2.0 | 2.0 |
| 8 | 3.0 | 2.0 |
| 9 | 4.0 | 2.0 |
On April 1, the safety factor is 1 m³/s. If the remote failure was
exactly zero in the past, meaning the control of the dam was perfect,
only 0.5 m³/s are added to the estimated remote demand of 2 m³/s.
If the actual recharge did actually fall below the threshold value,
up to 1 m³/s is added. If the the actual discharge exceeded the
threshold value by 2 or 3 m³/s, virtually nothing is added:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.000001 |
| 3 | -2.0 | 2.000102 |
| 4 | -1.0 | 2.01 |
| 5 | 0.0 | 2.5 |
| 6 | 1.0 | 2.99 |
| 7 | 2.0 | 2.999898 |
| 8 | 3.0 | 2.999999 |
| 9 | 4.0 | 3.0 |
]
variable[con] assign[=] name[self].parameters.control.fastaccess
variable[der] assign[=] name[self].parameters.derived.fastaccess
variable[flu] assign[=] name[self].sequences.fluxes.fastaccess
name[flu].requiredremoterelease assign[=] binary_operation[name[flu].remotedemand + binary_operation[call[name[con].remotedischargesafety][call[name[der].toy][name[self].idx_sim]] * call[name[smoothutils].smooth_logistic1, parameter[name[flu].remotefailure, call[name[der].remotedischargesmoothpar][call[name[der].toy][name[self].idx_sim]]]]]] | keyword[def] identifier[calc_requiredremoterelease_v1] ( identifier[self] ):
literal[string]
identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess]
identifier[der] = identifier[self] . identifier[parameters] . identifier[derived] . identifier[fastaccess]
identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess]
identifier[flu] . identifier[requiredremoterelease] =(
identifier[flu] . identifier[remotedemand] + identifier[con] . identifier[remotedischargesafety] [ identifier[der] . identifier[toy] [ identifier[self] . identifier[idx_sim] ]]*
identifier[smoothutils] . identifier[smooth_logistic1] (
identifier[flu] . identifier[remotefailure] ,
identifier[der] . identifier[remotedischargesmoothpar] [ identifier[der] . identifier[toy] [ identifier[self] . identifier[idx_sim] ]])) | def calc_requiredremoterelease_v1(self):
"""Guess the required release necessary to not fall below the threshold
value at a cross section far downstream with a certain level of certainty.
Required control parameter:
|RemoteDischargeSafety|
Required derived parameters:
|RemoteDischargeSmoothPar|
|dam_derived.TOY|
Required flux sequence:
|RemoteDemand|
|RemoteFailure|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = RemoteDemand + RemoteDischargeSafety
\\cdot smooth_{logistic1}(RemoteFailure, RemoteDischargeSmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
As in the examples above, define a short simulation time period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> derived.toy.update()
Define a safety factor of 0.5 m³/s for the summer months and
no safety factor at all for the winter months:
>>> remotedischargesafety(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.remotedischargesmoothpar.update()
Assume the actual demand at the cross section downsstream has actually
been estimated to be 2 m³/s:
>>> fluxes.remotedemand = 2.0
Prepare a test function, that calculates the required discharge
based on the parameter values defined above and for a "remote
failure" values ranging between -4 and 4 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_requiredremoterelease_v1,
... last_example=9,
... parseqs=(fluxes.remotefailure,
... fluxes.requiredremoterelease))
>>> test.nexts.remotefailure = range(-4, 5)
On May 31, the safety factor is 0 m³/s. Hence no discharge is
added to the estimated remote demand of 2 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.0 |
| 3 | -2.0 | 2.0 |
| 4 | -1.0 | 2.0 |
| 5 | 0.0 | 2.0 |
| 6 | 1.0 | 2.0 |
| 7 | 2.0 | 2.0 |
| 8 | 3.0 | 2.0 |
| 9 | 4.0 | 2.0 |
On April 1, the safety factor is 1 m³/s. If the remote failure was
exactly zero in the past, meaning the control of the dam was perfect,
only 0.5 m³/s are added to the estimated remote demand of 2 m³/s.
If the actual recharge did actually fall below the threshold value,
up to 1 m³/s is added. If the the actual discharge exceeded the
threshold value by 2 or 3 m³/s, virtually nothing is added:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.000001 |
| 3 | -2.0 | 2.000102 |
| 4 | -1.0 | 2.01 |
| 5 | 0.0 | 2.5 |
| 6 | 1.0 | 2.99 |
| 7 | 2.0 | 2.999898 |
| 8 | 3.0 | 2.999999 |
| 9 | 4.0 | 3.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.requiredremoterelease = flu.remotedemand + con.remotedischargesafety[der.toy[self.idx_sim]] * smoothutils.smooth_logistic1(flu.remotefailure, der.remotedischargesmoothpar[der.toy[self.idx_sim]]) |
def walk_instructions(self, mapping=identity):
"""Iterate over instructions.
:return: an iterator over :class:`instructions in grid
<InstructionInGrid>`
:param mapping: funcion to map the result
.. code:: python
for pos, c in layout.walk_instructions(lambda i: (i.xy, i.color)):
print("color {} at {}".format(c, pos))
"""
instructions = chain(*self.walk_rows(lambda row: row.instructions))
return map(mapping, instructions) | def function[walk_instructions, parameter[self, mapping]]:
constant[Iterate over instructions.
:return: an iterator over :class:`instructions in grid
<InstructionInGrid>`
:param mapping: funcion to map the result
.. code:: python
for pos, c in layout.walk_instructions(lambda i: (i.xy, i.color)):
print("color {} at {}".format(c, pos))
]
variable[instructions] assign[=] call[name[chain], parameter[<ast.Starred object at 0x7da1affc3e50>]]
return[call[name[map], parameter[name[mapping], name[instructions]]]] | keyword[def] identifier[walk_instructions] ( identifier[self] , identifier[mapping] = identifier[identity] ):
literal[string]
identifier[instructions] = identifier[chain] (* identifier[self] . identifier[walk_rows] ( keyword[lambda] identifier[row] : identifier[row] . identifier[instructions] ))
keyword[return] identifier[map] ( identifier[mapping] , identifier[instructions] ) | def walk_instructions(self, mapping=identity):
"""Iterate over instructions.
:return: an iterator over :class:`instructions in grid
<InstructionInGrid>`
:param mapping: funcion to map the result
.. code:: python
for pos, c in layout.walk_instructions(lambda i: (i.xy, i.color)):
print("color {} at {}".format(c, pos))
"""
instructions = chain(*self.walk_rows(lambda row: row.instructions))
return map(mapping, instructions) |
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | def function[plot_results, parameter[allresults]]:
constant[
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
]
if compare[name[split_fn] is constant[None]] begin[:]
variable[split_fn] assign[=] <ast.Lambda object at 0x7da18f722800>
if compare[name[group_fn] is constant[None]] begin[:]
variable[group_fn] assign[=] <ast.Lambda object at 0x7da18f720b50>
variable[sk2r] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[result]] in starred[name[allresults]] begin[:]
variable[splitkey] assign[=] call[name[split_fn], parameter[name[result]]]
call[call[name[sk2r]][name[splitkey]].append, parameter[name[result]]]
assert[compare[call[name[len], parameter[name[sk2r]]] greater[>] constant[0]]]
assert[call[name[isinstance], parameter[name[resample], name[int]]]]
variable[nrows] assign[=] call[name[len], parameter[name[sk2r]]]
variable[ncols] assign[=] constant[1]
variable[figsize] assign[=] <ast.BoolOp object at 0x7da18f720730>
<ast.Tuple object at 0x7da18f7224d0> assign[=] call[name[plt].subplots, parameter[name[nrows], name[ncols]]]
variable[groups] assign[=] call[name[list], parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da18f721660>]]]]
variable[default_samples] assign[=] constant[512]
if name[average_group] begin[:]
variable[resample] assign[=] <ast.BoolOp object at 0x7da18f722380>
for taget[tuple[[<ast.Name object at 0x7da18f723e20>, <ast.Name object at 0x7da18f7213f0>]]] in starred[call[name[enumerate], parameter[call[name[sorted], parameter[call[name[sk2r].keys, parameter[]]]]]]] begin[:]
variable[g2l] assign[=] dictionary[[], []]
variable[g2c] assign[=] call[name[defaultdict], parameter[name[int]]]
variable[sresults] assign[=] call[name[sk2r]][name[sk]]
variable[gresults] assign[=] call[name[defaultdict], parameter[name[list]]]
variable[ax] assign[=] call[call[name[axarr]][name[isplit]]][constant[0]]
for taget[name[result]] in starred[name[sresults]] begin[:]
variable[group] assign[=] call[name[group_fn], parameter[name[result]]]
<ast.AugAssign object at 0x7da20c7c9f30>
<ast.Tuple object at 0x7da20c7c9330> assign[=] call[name[xy_fn], parameter[name[result]]]
if compare[name[x] is constant[None]] begin[:]
variable[x] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[y]]]]]
<ast.Tuple object at 0x7da20c7c9960> assign[=] call[name[map], parameter[name[np].asarray, tuple[[<ast.Name object at 0x7da20c7c8c40>, <ast.Name object at 0x7da20c7c9ae0>]]]]
if name[average_group] begin[:]
call[call[name[gresults]][name[group]].append, parameter[tuple[[<ast.Name object at 0x7da20c7c81f0>, <ast.Name object at 0x7da20c7c92d0>]]]]
if name[average_group] begin[:]
for taget[name[group]] in starred[call[name[sorted], parameter[name[groups]]]] begin[:]
variable[xys] assign[=] call[name[gresults]][name[group]]
if <ast.UnaryOp object at 0x7da20c7caec0> begin[:]
continue
variable[color] assign[=] call[name[COLORS]][binary_operation[call[name[groups].index, parameter[name[group]]] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[COLORS]]]]]
variable[origxs] assign[=] <ast.ListComp object at 0x7da204347f70>
variable[minxlen] assign[=] call[name[min], parameter[call[name[map], parameter[name[len], name[origxs]]]]]
def function[allequal, parameter[qs]]:
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da204345810>]]]
if name[resample] begin[:]
variable[low] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da204347d90>]]
variable[high] assign[=] call[name[min], parameter[<ast.GeneratorExp object at 0x7da204346ad0>]]
variable[usex] assign[=] call[name[np].linspace, parameter[name[low], name[high], name[resample]]]
variable[ys] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c6530>, <ast.Name object at 0x7da20c6c5e70>]]] in starred[name[xys]] begin[:]
call[name[ys].append, parameter[call[call[name[symmetric_ema], parameter[name[x], name[y], name[low], name[high], name[resample]]]][constant[1]]]]
variable[ymean] assign[=] call[name[np].mean, parameter[name[ys]]]
variable[ystd] assign[=] call[name[np].std, parameter[name[ys]]]
variable[ystderr] assign[=] binary_operation[name[ystd] / call[name[np].sqrt, parameter[call[name[len], parameter[name[ys]]]]]]
<ast.Tuple object at 0x7da20c6c4670> assign[=] call[call[call[name[axarr]][name[isplit]]][constant[0]].plot, parameter[name[usex], name[ymean]]]
call[name[g2l]][name[group]] assign[=] name[l]
if name[shaded_err] begin[:]
call[name[ax].fill_between, parameter[name[usex], binary_operation[name[ymean] - name[ystderr]], binary_operation[name[ymean] + name[ystderr]]]]
if name[shaded_std] begin[:]
call[name[ax].fill_between, parameter[name[usex], binary_operation[name[ymean] - name[ystd]], binary_operation[name[ymean] + name[ystd]]]]
call[name[plt].tight_layout, parameter[]]
if call[name[any], parameter[call[name[g2l].keys, parameter[]]]] begin[:]
call[name[ax].legend, parameter[call[name[g2l].values, parameter[]], <ast.IfExp object at 0x7da18c4ce7d0>]]
call[name[ax].set_title, parameter[name[sk]]]
return[tuple[[<ast.Name object at 0x7da18c4cd0c0>, <ast.Name object at 0x7da18c4cdf30>]]] | keyword[def] identifier[plot_results] (
identifier[allresults] ,*,
identifier[xy_fn] = identifier[default_xy_fn] ,
identifier[split_fn] = identifier[default_split_fn] ,
identifier[group_fn] = identifier[default_split_fn] ,
identifier[average_group] = keyword[False] ,
identifier[shaded_std] = keyword[True] ,
identifier[shaded_err] = keyword[True] ,
identifier[figsize] = keyword[None] ,
identifier[legend_outside] = keyword[False] ,
identifier[resample] = literal[int] ,
identifier[smooth_step] = literal[int]
):
literal[string]
keyword[if] identifier[split_fn] keyword[is] keyword[None] : identifier[split_fn] = keyword[lambda] identifier[_] : literal[string]
keyword[if] identifier[group_fn] keyword[is] keyword[None] : identifier[group_fn] = keyword[lambda] identifier[_] : literal[string]
identifier[sk2r] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[result] keyword[in] identifier[allresults] :
identifier[splitkey] = identifier[split_fn] ( identifier[result] )
identifier[sk2r] [ identifier[splitkey] ]. identifier[append] ( identifier[result] )
keyword[assert] identifier[len] ( identifier[sk2r] )> literal[int]
keyword[assert] identifier[isinstance] ( identifier[resample] , identifier[int] ), literal[string]
identifier[nrows] = identifier[len] ( identifier[sk2r] )
identifier[ncols] = literal[int]
identifier[figsize] = identifier[figsize] keyword[or] ( literal[int] , literal[int] * identifier[nrows] )
identifier[f] , identifier[axarr] = identifier[plt] . identifier[subplots] ( identifier[nrows] , identifier[ncols] , identifier[sharex] = keyword[False] , identifier[squeeze] = keyword[False] , identifier[figsize] = identifier[figsize] )
identifier[groups] = identifier[list] ( identifier[set] ( identifier[group_fn] ( identifier[result] ) keyword[for] identifier[result] keyword[in] identifier[allresults] ))
identifier[default_samples] = literal[int]
keyword[if] identifier[average_group] :
identifier[resample] = identifier[resample] keyword[or] identifier[default_samples]
keyword[for] ( identifier[isplit] , identifier[sk] ) keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[sk2r] . identifier[keys] ())):
identifier[g2l] ={}
identifier[g2c] = identifier[defaultdict] ( identifier[int] )
identifier[sresults] = identifier[sk2r] [ identifier[sk] ]
identifier[gresults] = identifier[defaultdict] ( identifier[list] )
identifier[ax] = identifier[axarr] [ identifier[isplit] ][ literal[int] ]
keyword[for] identifier[result] keyword[in] identifier[sresults] :
identifier[group] = identifier[group_fn] ( identifier[result] )
identifier[g2c] [ identifier[group] ]+= literal[int]
identifier[x] , identifier[y] = identifier[xy_fn] ( identifier[result] )
keyword[if] identifier[x] keyword[is] keyword[None] : identifier[x] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[y] ))
identifier[x] , identifier[y] = identifier[map] ( identifier[np] . identifier[asarray] ,( identifier[x] , identifier[y] ))
keyword[if] identifier[average_group] :
identifier[gresults] [ identifier[group] ]. identifier[append] (( identifier[x] , identifier[y] ))
keyword[else] :
keyword[if] identifier[resample] :
identifier[x] , identifier[y] , identifier[counts] = identifier[symmetric_ema] ( identifier[x] , identifier[y] , identifier[x] [ literal[int] ], identifier[x] [- literal[int] ], identifier[resample] , identifier[decay_steps] = identifier[smooth_step] )
identifier[l] ,= identifier[ax] . identifier[plot] ( identifier[x] , identifier[y] , identifier[color] = identifier[COLORS] [ identifier[groups] . identifier[index] ( identifier[group] )% identifier[len] ( identifier[COLORS] )])
identifier[g2l] [ identifier[group] ]= identifier[l]
keyword[if] identifier[average_group] :
keyword[for] identifier[group] keyword[in] identifier[sorted] ( identifier[groups] ):
identifier[xys] = identifier[gresults] [ identifier[group] ]
keyword[if] keyword[not] identifier[any] ( identifier[xys] ):
keyword[continue]
identifier[color] = identifier[COLORS] [ identifier[groups] . identifier[index] ( identifier[group] )% identifier[len] ( identifier[COLORS] )]
identifier[origxs] =[ identifier[xy] [ literal[int] ] keyword[for] identifier[xy] keyword[in] identifier[xys] ]
identifier[minxlen] = identifier[min] ( identifier[map] ( identifier[len] , identifier[origxs] ))
keyword[def] identifier[allequal] ( identifier[qs] ):
keyword[return] identifier[all] (( identifier[q] == identifier[qs] [ literal[int] ]). identifier[all] () keyword[for] identifier[q] keyword[in] identifier[qs] [ literal[int] :])
keyword[if] identifier[resample] :
identifier[low] = identifier[max] ( identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[origxs] )
identifier[high] = identifier[min] ( identifier[x] [- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[origxs] )
identifier[usex] = identifier[np] . identifier[linspace] ( identifier[low] , identifier[high] , identifier[resample] )
identifier[ys] =[]
keyword[for] ( identifier[x] , identifier[y] ) keyword[in] identifier[xys] :
identifier[ys] . identifier[append] ( identifier[symmetric_ema] ( identifier[x] , identifier[y] , identifier[low] , identifier[high] , identifier[resample] , identifier[decay_steps] = identifier[smooth_step] )[ literal[int] ])
keyword[else] :
keyword[assert] identifier[allequal] ([ identifier[x] [: identifier[minxlen] ] keyword[for] identifier[x] keyword[in] identifier[origxs] ]), literal[string]
identifier[usex] = identifier[origxs] [ literal[int] ]
identifier[ys] =[ identifier[xy] [ literal[int] ][: identifier[minxlen] ] keyword[for] identifier[xy] keyword[in] identifier[xys] ]
identifier[ymean] = identifier[np] . identifier[mean] ( identifier[ys] , identifier[axis] = literal[int] )
identifier[ystd] = identifier[np] . identifier[std] ( identifier[ys] , identifier[axis] = literal[int] )
identifier[ystderr] = identifier[ystd] / identifier[np] . identifier[sqrt] ( identifier[len] ( identifier[ys] ))
identifier[l] ,= identifier[axarr] [ identifier[isplit] ][ literal[int] ]. identifier[plot] ( identifier[usex] , identifier[ymean] , identifier[color] = identifier[color] )
identifier[g2l] [ identifier[group] ]= identifier[l]
keyword[if] identifier[shaded_err] :
identifier[ax] . identifier[fill_between] ( identifier[usex] , identifier[ymean] - identifier[ystderr] , identifier[ymean] + identifier[ystderr] , identifier[color] = identifier[color] , identifier[alpha] = literal[int] )
keyword[if] identifier[shaded_std] :
identifier[ax] . identifier[fill_between] ( identifier[usex] , identifier[ymean] - identifier[ystd] , identifier[ymean] + identifier[ystd] , identifier[color] = identifier[color] , identifier[alpha] = literal[int] )
identifier[plt] . identifier[tight_layout] ()
keyword[if] identifier[any] ( identifier[g2l] . identifier[keys] ()):
identifier[ax] . identifier[legend] (
identifier[g2l] . identifier[values] (),
[ literal[string] %( identifier[g] , identifier[g2c] [ identifier[g] ]) keyword[for] identifier[g] keyword[in] identifier[g2l] ] keyword[if] identifier[average_group] keyword[else] identifier[g2l] . identifier[keys] (),
identifier[loc] = literal[int] keyword[if] identifier[legend_outside] keyword[else] keyword[None] ,
identifier[bbox_to_anchor] =( literal[int] , literal[int] ) keyword[if] identifier[legend_outside] keyword[else] keyword[None] )
identifier[ax] . identifier[set_title] ( identifier[sk] )
keyword[return] identifier[f] , identifier[axarr] | def plot_results(allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, figsize=None, legend_outside=False, resample=0, smooth_step=1.0):
"""
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
"""
if split_fn is None:
split_fn = lambda _: '' # depends on [control=['if'], data=['split_fn']]
if group_fn is None:
group_fn = lambda _: '' # depends on [control=['if'], data=['group_fn']]
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result) # depends on [control=['for'], data=['result']]
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
(f, axarr) = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set((group_fn(result) for result in allresults)))
default_samples = 512
if average_group:
resample = resample or default_samples # depends on [control=['if'], data=[]]
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
(x, y) = xy_fn(result)
if x is None:
x = np.arange(len(y)) # depends on [control=['if'], data=['x']]
(x, y) = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x, y)) # depends on [control=['if'], data=[]]
else:
if resample:
(x, y, counts) = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step) # depends on [control=['if'], data=[]]
(l,) = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l # depends on [control=['for'], data=['result']]
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue # depends on [control=['if'], data=[]]
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all(((q == qs[0]).all() for q in qs[1:]))
if resample:
low = max((x[0] for x in origxs))
high = min((x[-1] for x in origxs))
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
assert allequal([x[:minxlen] for x in origxs]), 'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
(l,) = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=0.4) # depends on [control=['if'], data=[]]
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=0.2) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['group']] # depends on [control=['if'], data=[]]
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(g2l.values(), ['%s (%i)' % (g, g2c[g]) for g in g2l] if average_group else g2l.keys(), loc=2 if legend_outside else None, bbox_to_anchor=(1, 1) if legend_outside else None) # depends on [control=['if'], data=[]]
ax.set_title(sk) # depends on [control=['for'], data=[]]
return (f, axarr) |
def validate(filename):
"""
Use W3C validator service: https://bitbucket.org/nmb10/py_w3c/ .
:param filename: the filename to validate
"""
import HTMLParser
from py_w3c.validators.html.validator import HTMLValidator
h = HTMLParser.HTMLParser() # for unescaping WC3 messages
vld = HTMLValidator()
LOG.info("Validating: {0}".format(filename))
# call w3c webservice
vld.validate_file(filename)
# display errors and warning
for err in vld.errors:
LOG.error(u'line: {0}; col: {1}; message: {2}'.
format(err['line'], err['col'], h.unescape(err['message']))
)
for err in vld.warnings:
LOG.warning(u'line: {0}; col: {1}; message: {2}'.
format(err['line'], err['col'], h.unescape(err['message']))
) | def function[validate, parameter[filename]]:
constant[
Use W3C validator service: https://bitbucket.org/nmb10/py_w3c/ .
:param filename: the filename to validate
]
import module[HTMLParser]
from relative_module[py_w3c.validators.html.validator] import module[HTMLValidator]
variable[h] assign[=] call[name[HTMLParser].HTMLParser, parameter[]]
variable[vld] assign[=] call[name[HTMLValidator], parameter[]]
call[name[LOG].info, parameter[call[constant[Validating: {0}].format, parameter[name[filename]]]]]
call[name[vld].validate_file, parameter[name[filename]]]
for taget[name[err]] in starred[name[vld].errors] begin[:]
call[name[LOG].error, parameter[call[constant[line: {0}; col: {1}; message: {2}].format, parameter[call[name[err]][constant[line]], call[name[err]][constant[col]], call[name[h].unescape, parameter[call[name[err]][constant[message]]]]]]]]
for taget[name[err]] in starred[name[vld].warnings] begin[:]
call[name[LOG].warning, parameter[call[constant[line: {0}; col: {1}; message: {2}].format, parameter[call[name[err]][constant[line]], call[name[err]][constant[col]], call[name[h].unescape, parameter[call[name[err]][constant[message]]]]]]]] | keyword[def] identifier[validate] ( identifier[filename] ):
literal[string]
keyword[import] identifier[HTMLParser]
keyword[from] identifier[py_w3c] . identifier[validators] . identifier[html] . identifier[validator] keyword[import] identifier[HTMLValidator]
identifier[h] = identifier[HTMLParser] . identifier[HTMLParser] ()
identifier[vld] = identifier[HTMLValidator] ()
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] ( identifier[filename] ))
identifier[vld] . identifier[validate_file] ( identifier[filename] )
keyword[for] identifier[err] keyword[in] identifier[vld] . identifier[errors] :
identifier[LOG] . identifier[error] ( literal[string] .
identifier[format] ( identifier[err] [ literal[string] ], identifier[err] [ literal[string] ], identifier[h] . identifier[unescape] ( identifier[err] [ literal[string] ]))
)
keyword[for] identifier[err] keyword[in] identifier[vld] . identifier[warnings] :
identifier[LOG] . identifier[warning] ( literal[string] .
identifier[format] ( identifier[err] [ literal[string] ], identifier[err] [ literal[string] ], identifier[h] . identifier[unescape] ( identifier[err] [ literal[string] ]))
) | def validate(filename):
"""
Use W3C validator service: https://bitbucket.org/nmb10/py_w3c/ .
:param filename: the filename to validate
"""
import HTMLParser
from py_w3c.validators.html.validator import HTMLValidator
h = HTMLParser.HTMLParser() # for unescaping WC3 messages
vld = HTMLValidator()
LOG.info('Validating: {0}'.format(filename))
# call w3c webservice
vld.validate_file(filename)
# display errors and warning
for err in vld.errors:
LOG.error(u'line: {0}; col: {1}; message: {2}'.format(err['line'], err['col'], h.unescape(err['message']))) # depends on [control=['for'], data=['err']]
for err in vld.warnings:
LOG.warning(u'line: {0}; col: {1}; message: {2}'.format(err['line'], err['col'], h.unescape(err['message']))) # depends on [control=['for'], data=['err']] |
def delete(self, key):
"""Delete method of CRUD operation for all data types.
Args:
key (string): The variable to write to the DB.
Returns:
(string): Result of DB write.
"""
data = None
if key is not None:
data = self.db.delete(key.strip())
else:
self.tcex.log.warning(u'The key field was None.')
return data | def function[delete, parameter[self, key]]:
constant[Delete method of CRUD operation for all data types.
Args:
key (string): The variable to write to the DB.
Returns:
(string): Result of DB write.
]
variable[data] assign[=] constant[None]
if compare[name[key] is_not constant[None]] begin[:]
variable[data] assign[=] call[name[self].db.delete, parameter[call[name[key].strip, parameter[]]]]
return[name[data]] | keyword[def] identifier[delete] ( identifier[self] , identifier[key] ):
literal[string]
identifier[data] = keyword[None]
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[data] = identifier[self] . identifier[db] . identifier[delete] ( identifier[key] . identifier[strip] ())
keyword[else] :
identifier[self] . identifier[tcex] . identifier[log] . identifier[warning] ( literal[string] )
keyword[return] identifier[data] | def delete(self, key):
"""Delete method of CRUD operation for all data types.
Args:
key (string): The variable to write to the DB.
Returns:
(string): Result of DB write.
"""
data = None
if key is not None:
data = self.db.delete(key.strip()) # depends on [control=['if'], data=['key']]
else:
self.tcex.log.warning(u'The key field was None.')
return data |
def get_job_amounts(agent, project_name, spider_name=None):
"""
Get amounts that pending job amount, running job amount, finished job amount.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list)
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts | def function[get_job_amounts, parameter[agent, project_name, spider_name]]:
constant[
Get amounts that pending job amount, running job amount, finished job amount.
]
variable[job_list] assign[=] call[name[agent].get_job_list, parameter[name[project_name]]]
variable[pending_job_list] assign[=] call[name[job_list]][constant[pending]]
variable[running_job_list] assign[=] call[name[job_list]][constant[running]]
variable[finished_job_list] assign[=] call[name[job_list]][constant[finished]]
variable[job_amounts] assign[=] dictionary[[], []]
if compare[name[spider_name] is constant[None]] begin[:]
call[name[job_amounts]][constant[pending]] assign[=] call[name[len], parameter[name[pending_job_list]]]
call[name[job_amounts]][constant[running]] assign[=] call[name[len], parameter[name[running_job_list]]]
call[name[job_amounts]][constant[finished]] assign[=] call[name[len], parameter[name[finished_job_list]]]
return[name[job_amounts]] | keyword[def] identifier[get_job_amounts] ( identifier[agent] , identifier[project_name] , identifier[spider_name] = keyword[None] ):
literal[string]
identifier[job_list] = identifier[agent] . identifier[get_job_list] ( identifier[project_name] )
identifier[pending_job_list] = identifier[job_list] [ literal[string] ]
identifier[running_job_list] = identifier[job_list] [ literal[string] ]
identifier[finished_job_list] = identifier[job_list] [ literal[string] ]
identifier[job_amounts] ={}
keyword[if] identifier[spider_name] keyword[is] keyword[None] :
identifier[job_amounts] [ literal[string] ]= identifier[len] ( identifier[pending_job_list] )
identifier[job_amounts] [ literal[string] ]= identifier[len] ( identifier[running_job_list] )
identifier[job_amounts] [ literal[string] ]= identifier[len] ( identifier[finished_job_list] )
keyword[else] :
identifier[job_amounts] [ literal[string] ]= identifier[len] ([ identifier[j] keyword[for] identifier[j] keyword[in] identifier[pending_job_list] keyword[if] identifier[j] [ literal[string] ]== identifier[spider_name] ])
identifier[job_amounts] [ literal[string] ]= identifier[len] ([ identifier[j] keyword[for] identifier[j] keyword[in] identifier[running_job_list] keyword[if] identifier[j] [ literal[string] ]== identifier[spider_name] ])
identifier[job_amounts] [ literal[string] ]= identifier[len] ([ identifier[j] keyword[for] identifier[j] keyword[in] identifier[finished_job_list] keyword[if] identifier[j] [ literal[string] ]== identifier[spider_name] ])
keyword[return] identifier[job_amounts] | def get_job_amounts(agent, project_name, spider_name=None):
"""
Get amounts that pending job amount, running job amount, finished job amount.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list) # depends on [control=['if'], data=[]]
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts |
def extend(self, other):
"""
Adds the values from the iterable *other* to the end of this
collection.
"""
def extend_trans(pipe):
values = list(other.__iter__(pipe)) if use_redis else other
len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values))
if self.writeback:
for i, v in enumerate(values, len_self - len(values)):
self.cache[i] = v
if self._same_redis(other, RedisCollection):
use_redis = True
self._transaction(extend_trans, other.key)
else:
use_redis = False
self._transaction(extend_trans) | def function[extend, parameter[self, other]]:
constant[
Adds the values from the iterable *other* to the end of this
collection.
]
def function[extend_trans, parameter[pipe]]:
variable[values] assign[=] <ast.IfExp object at 0x7da20e9b08b0>
variable[len_self] assign[=] call[name[pipe].rpush, parameter[name[self].key, <ast.Starred object at 0x7da20e9b1900>]]
if name[self].writeback begin[:]
for taget[tuple[[<ast.Name object at 0x7da20e9b0220>, <ast.Name object at 0x7da20e9b35b0>]]] in starred[call[name[enumerate], parameter[name[values], binary_operation[name[len_self] - call[name[len], parameter[name[values]]]]]]] begin[:]
call[name[self].cache][name[i]] assign[=] name[v]
if call[name[self]._same_redis, parameter[name[other], name[RedisCollection]]] begin[:]
variable[use_redis] assign[=] constant[True]
call[name[self]._transaction, parameter[name[extend_trans], name[other].key]] | keyword[def] identifier[extend] ( identifier[self] , identifier[other] ):
literal[string]
keyword[def] identifier[extend_trans] ( identifier[pipe] ):
identifier[values] = identifier[list] ( identifier[other] . identifier[__iter__] ( identifier[pipe] )) keyword[if] identifier[use_redis] keyword[else] identifier[other]
identifier[len_self] = identifier[pipe] . identifier[rpush] ( identifier[self] . identifier[key] ,*( identifier[self] . identifier[_pickle] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ))
keyword[if] identifier[self] . identifier[writeback] :
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[values] , identifier[len_self] - identifier[len] ( identifier[values] )):
identifier[self] . identifier[cache] [ identifier[i] ]= identifier[v]
keyword[if] identifier[self] . identifier[_same_redis] ( identifier[other] , identifier[RedisCollection] ):
identifier[use_redis] = keyword[True]
identifier[self] . identifier[_transaction] ( identifier[extend_trans] , identifier[other] . identifier[key] )
keyword[else] :
identifier[use_redis] = keyword[False]
identifier[self] . identifier[_transaction] ( identifier[extend_trans] ) | def extend(self, other):
"""
Adds the values from the iterable *other* to the end of this
collection.
"""
def extend_trans(pipe):
values = list(other.__iter__(pipe)) if use_redis else other
len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values))
if self.writeback:
for (i, v) in enumerate(values, len_self - len(values)):
self.cache[i] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if self._same_redis(other, RedisCollection):
use_redis = True
self._transaction(extend_trans, other.key) # depends on [control=['if'], data=[]]
else:
use_redis = False
self._transaction(extend_trans) |
def transfer_causal_edges(graph, source: BaseEntity, target: BaseEntity) -> Iterable[str]:
"""Transfer causal edges that the source has to the target and yield the resulting hashes."""
for _, v, data in graph.out_edges(source, data=True):
if data[RELATION] not in CAUSAL_RELATIONS:
continue
yield graph.add_qualified_edge(
target,
v,
relation=data[RELATION],
evidence=data[EVIDENCE],
citation=data[CITATION],
annotations=data.get(ANNOTATIONS),
subject_modifier=data.get(SUBJECT),
object_modifier=data.get(OBJECT),
)
for u, _, data in graph.in_edges(source, data=True):
if data[RELATION] not in CAUSAL_RELATIONS:
continue
yield graph.add_qualified_edge(
u,
target,
relation=data[RELATION],
evidence=data[EVIDENCE],
citation=data[CITATION],
annotations=data.get(ANNOTATIONS),
subject_modifier=data.get(SUBJECT),
object_modifier=data.get(OBJECT),
) | def function[transfer_causal_edges, parameter[graph, source, target]]:
constant[Transfer causal edges that the source has to the target and yield the resulting hashes.]
for taget[tuple[[<ast.Name object at 0x7da1b0cb46a0>, <ast.Name object at 0x7da1b0cb78b0>, <ast.Name object at 0x7da1b0cb6c80>]]] in starred[call[name[graph].out_edges, parameter[name[source]]]] begin[:]
if compare[call[name[data]][name[RELATION]] <ast.NotIn object at 0x7da2590d7190> name[CAUSAL_RELATIONS]] begin[:]
continue
<ast.Yield object at 0x7da1b0cb7760>
for taget[tuple[[<ast.Name object at 0x7da1b0cb5ff0>, <ast.Name object at 0x7da1b0cb4e20>, <ast.Name object at 0x7da1b0cb43d0>]]] in starred[call[name[graph].in_edges, parameter[name[source]]]] begin[:]
if compare[call[name[data]][name[RELATION]] <ast.NotIn object at 0x7da2590d7190> name[CAUSAL_RELATIONS]] begin[:]
continue
<ast.Yield object at 0x7da1b0cb4040> | keyword[def] identifier[transfer_causal_edges] ( identifier[graph] , identifier[source] : identifier[BaseEntity] , identifier[target] : identifier[BaseEntity] )-> identifier[Iterable] [ identifier[str] ]:
literal[string]
keyword[for] identifier[_] , identifier[v] , identifier[data] keyword[in] identifier[graph] . identifier[out_edges] ( identifier[source] , identifier[data] = keyword[True] ):
keyword[if] identifier[data] [ identifier[RELATION] ] keyword[not] keyword[in] identifier[CAUSAL_RELATIONS] :
keyword[continue]
keyword[yield] identifier[graph] . identifier[add_qualified_edge] (
identifier[target] ,
identifier[v] ,
identifier[relation] = identifier[data] [ identifier[RELATION] ],
identifier[evidence] = identifier[data] [ identifier[EVIDENCE] ],
identifier[citation] = identifier[data] [ identifier[CITATION] ],
identifier[annotations] = identifier[data] . identifier[get] ( identifier[ANNOTATIONS] ),
identifier[subject_modifier] = identifier[data] . identifier[get] ( identifier[SUBJECT] ),
identifier[object_modifier] = identifier[data] . identifier[get] ( identifier[OBJECT] ),
)
keyword[for] identifier[u] , identifier[_] , identifier[data] keyword[in] identifier[graph] . identifier[in_edges] ( identifier[source] , identifier[data] = keyword[True] ):
keyword[if] identifier[data] [ identifier[RELATION] ] keyword[not] keyword[in] identifier[CAUSAL_RELATIONS] :
keyword[continue]
keyword[yield] identifier[graph] . identifier[add_qualified_edge] (
identifier[u] ,
identifier[target] ,
identifier[relation] = identifier[data] [ identifier[RELATION] ],
identifier[evidence] = identifier[data] [ identifier[EVIDENCE] ],
identifier[citation] = identifier[data] [ identifier[CITATION] ],
identifier[annotations] = identifier[data] . identifier[get] ( identifier[ANNOTATIONS] ),
identifier[subject_modifier] = identifier[data] . identifier[get] ( identifier[SUBJECT] ),
identifier[object_modifier] = identifier[data] . identifier[get] ( identifier[OBJECT] ),
) | def transfer_causal_edges(graph, source: BaseEntity, target: BaseEntity) -> Iterable[str]:
"""Transfer causal edges that the source has to the target and yield the resulting hashes."""
for (_, v, data) in graph.out_edges(source, data=True):
if data[RELATION] not in CAUSAL_RELATIONS:
continue # depends on [control=['if'], data=[]]
yield graph.add_qualified_edge(target, v, relation=data[RELATION], evidence=data[EVIDENCE], citation=data[CITATION], annotations=data.get(ANNOTATIONS), subject_modifier=data.get(SUBJECT), object_modifier=data.get(OBJECT)) # depends on [control=['for'], data=[]]
for (u, _, data) in graph.in_edges(source, data=True):
if data[RELATION] not in CAUSAL_RELATIONS:
continue # depends on [control=['if'], data=[]]
yield graph.add_qualified_edge(u, target, relation=data[RELATION], evidence=data[EVIDENCE], citation=data[CITATION], annotations=data.get(ANNOTATIONS), subject_modifier=data.get(SUBJECT), object_modifier=data.get(OBJECT)) # depends on [control=['for'], data=[]] |
def size(self):
"""Returns total number of gate operations in circuit.
Returns:
int: Total number of gate operations.
"""
gate_ops = 0
for instr, _, _ in self.data:
if instr.name not in ['barrier', 'snapshot']:
gate_ops += 1
return gate_ops | def function[size, parameter[self]]:
constant[Returns total number of gate operations in circuit.
Returns:
int: Total number of gate operations.
]
variable[gate_ops] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0535b40>, <ast.Name object at 0x7da1b0536680>, <ast.Name object at 0x7da1b0536530>]]] in starred[name[self].data] begin[:]
if compare[name[instr].name <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b0536b60>, <ast.Constant object at 0x7da1b0534ee0>]]] begin[:]
<ast.AugAssign object at 0x7da1b0537a00>
return[name[gate_ops]] | keyword[def] identifier[size] ( identifier[self] ):
literal[string]
identifier[gate_ops] = literal[int]
keyword[for] identifier[instr] , identifier[_] , identifier[_] keyword[in] identifier[self] . identifier[data] :
keyword[if] identifier[instr] . identifier[name] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[gate_ops] += literal[int]
keyword[return] identifier[gate_ops] | def size(self):
"""Returns total number of gate operations in circuit.
Returns:
int: Total number of gate operations.
"""
gate_ops = 0
for (instr, _, _) in self.data:
if instr.name not in ['barrier', 'snapshot']:
gate_ops += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return gate_ops |
def root_sa_root_access(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
access = ET.SubElement(root, "access")
access.text = kwargs.pop('access')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[root_sa_root_access, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[root_sa] assign[=] call[name[ET].SubElement, parameter[name[config], constant[root-sa]]]
variable[root] assign[=] call[name[ET].SubElement, parameter[name[root_sa], constant[root]]]
variable[access] assign[=] call[name[ET].SubElement, parameter[name[root], constant[access]]]
name[access].text assign[=] call[name[kwargs].pop, parameter[constant[access]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[root_sa_root_access] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[root_sa] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[root] = identifier[ET] . identifier[SubElement] ( identifier[root_sa] , literal[string] )
identifier[access] = identifier[ET] . identifier[SubElement] ( identifier[root] , literal[string] )
identifier[access] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def root_sa_root_access(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
root_sa = ET.SubElement(config, 'root-sa', xmlns='urn:brocade.com:mgmt:brocade-aaa')
root = ET.SubElement(root_sa, 'root')
access = ET.SubElement(root, 'access')
access.text = kwargs.pop('access')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def recv_msg(body,
message):
"""recv_msg
Handler method - fires when a messages is consumed from
the ``FORWARD_QUEUE`` queue running in the ``FORWARD_BROKER_URL``
broker.
:param body: message body
:param message: message object can ack, requeue or reject
"""
log.info(("callback received msg "))
agg.handle_msg(
body=body,
org_message=message) | def function[recv_msg, parameter[body, message]]:
constant[recv_msg
Handler method - fires when a messages is consumed from
the ``FORWARD_QUEUE`` queue running in the ``FORWARD_BROKER_URL``
broker.
:param body: message body
:param message: message object can ack, requeue or reject
]
call[name[log].info, parameter[constant[callback received msg ]]]
call[name[agg].handle_msg, parameter[]] | keyword[def] identifier[recv_msg] ( identifier[body] ,
identifier[message] ):
literal[string]
identifier[log] . identifier[info] (( literal[string] ))
identifier[agg] . identifier[handle_msg] (
identifier[body] = identifier[body] ,
identifier[org_message] = identifier[message] ) | def recv_msg(body, message):
"""recv_msg
Handler method - fires when a messages is consumed from
the ``FORWARD_QUEUE`` queue running in the ``FORWARD_BROKER_URL``
broker.
:param body: message body
:param message: message object can ack, requeue or reject
"""
log.info('callback received msg ')
agg.handle_msg(body=body, org_message=message) |
def _GetVal(self, obj, key):
"""Recurse down an attribute chain to the actual result data."""
if "." in key:
lhs, rhs = key.split(".", 1)
obj2 = getattr(obj, lhs, None)
if obj2 is None:
return None
return self._GetVal(obj2, rhs)
else:
return getattr(obj, key, None) | def function[_GetVal, parameter[self, obj, key]]:
constant[Recurse down an attribute chain to the actual result data.]
if compare[constant[.] in name[key]] begin[:]
<ast.Tuple object at 0x7da1b1c1aa70> assign[=] call[name[key].split, parameter[constant[.], constant[1]]]
variable[obj2] assign[=] call[name[getattr], parameter[name[obj], name[lhs], constant[None]]]
if compare[name[obj2] is constant[None]] begin[:]
return[constant[None]]
return[call[name[self]._GetVal, parameter[name[obj2], name[rhs]]]] | keyword[def] identifier[_GetVal] ( identifier[self] , identifier[obj] , identifier[key] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[key] :
identifier[lhs] , identifier[rhs] = identifier[key] . identifier[split] ( literal[string] , literal[int] )
identifier[obj2] = identifier[getattr] ( identifier[obj] , identifier[lhs] , keyword[None] )
keyword[if] identifier[obj2] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[_GetVal] ( identifier[obj2] , identifier[rhs] )
keyword[else] :
keyword[return] identifier[getattr] ( identifier[obj] , identifier[key] , keyword[None] ) | def _GetVal(self, obj, key):
"""Recurse down an attribute chain to the actual result data."""
if '.' in key:
(lhs, rhs) = key.split('.', 1)
obj2 = getattr(obj, lhs, None)
if obj2 is None:
return None # depends on [control=['if'], data=[]]
return self._GetVal(obj2, rhs) # depends on [control=['if'], data=['key']]
else:
return getattr(obj, key, None) |
def set_centralized_assembled_values(self, a):
"""Set assembled matrix values on processor 0."""
if self.myid != 0:
return
assert a.size == self.id.nz
self._refs.update(a=a)
self.id.a = self.cast_array(a) | def function[set_centralized_assembled_values, parameter[self, a]]:
constant[Set assembled matrix values on processor 0.]
if compare[name[self].myid not_equal[!=] constant[0]] begin[:]
return[None]
assert[compare[name[a].size equal[==] name[self].id.nz]]
call[name[self]._refs.update, parameter[]]
name[self].id.a assign[=] call[name[self].cast_array, parameter[name[a]]] | keyword[def] identifier[set_centralized_assembled_values] ( identifier[self] , identifier[a] ):
literal[string]
keyword[if] identifier[self] . identifier[myid] != literal[int] :
keyword[return]
keyword[assert] identifier[a] . identifier[size] == identifier[self] . identifier[id] . identifier[nz]
identifier[self] . identifier[_refs] . identifier[update] ( identifier[a] = identifier[a] )
identifier[self] . identifier[id] . identifier[a] = identifier[self] . identifier[cast_array] ( identifier[a] ) | def set_centralized_assembled_values(self, a):
"""Set assembled matrix values on processor 0."""
if self.myid != 0:
return # depends on [control=['if'], data=[]]
assert a.size == self.id.nz
self._refs.update(a=a)
self.id.a = self.cast_array(a) |
def delete(args):
"""Delete NApps from server."""
mgr = NAppsManager()
for napp in args['<napp>']:
mgr.set_napp(*napp)
LOG.info('Deleting NApp %s from server...', mgr.napp_id)
try:
mgr.delete()
LOG.info(' Deleted.')
except requests.HTTPError as exception:
if exception.response.status_code == 405:
LOG.error('Delete Napp is not allowed yet.')
else:
msg = json.loads(exception.response.content)
LOG.error(' Server error: %s - ', msg['error']) | def function[delete, parameter[args]]:
constant[Delete NApps from server.]
variable[mgr] assign[=] call[name[NAppsManager], parameter[]]
for taget[name[napp]] in starred[call[name[args]][constant[<napp>]]] begin[:]
call[name[mgr].set_napp, parameter[<ast.Starred object at 0x7da20c6c6380>]]
call[name[LOG].info, parameter[constant[Deleting NApp %s from server...], name[mgr].napp_id]]
<ast.Try object at 0x7da20c6c51b0> | keyword[def] identifier[delete] ( identifier[args] ):
literal[string]
identifier[mgr] = identifier[NAppsManager] ()
keyword[for] identifier[napp] keyword[in] identifier[args] [ literal[string] ]:
identifier[mgr] . identifier[set_napp] (* identifier[napp] )
identifier[LOG] . identifier[info] ( literal[string] , identifier[mgr] . identifier[napp_id] )
keyword[try] :
identifier[mgr] . identifier[delete] ()
identifier[LOG] . identifier[info] ( literal[string] )
keyword[except] identifier[requests] . identifier[HTTPError] keyword[as] identifier[exception] :
keyword[if] identifier[exception] . identifier[response] . identifier[status_code] == literal[int] :
identifier[LOG] . identifier[error] ( literal[string] )
keyword[else] :
identifier[msg] = identifier[json] . identifier[loads] ( identifier[exception] . identifier[response] . identifier[content] )
identifier[LOG] . identifier[error] ( literal[string] , identifier[msg] [ literal[string] ]) | def delete(args):
"""Delete NApps from server."""
mgr = NAppsManager()
for napp in args['<napp>']:
mgr.set_napp(*napp)
LOG.info('Deleting NApp %s from server...', mgr.napp_id)
try:
mgr.delete()
LOG.info(' Deleted.') # depends on [control=['try'], data=[]]
except requests.HTTPError as exception:
if exception.response.status_code == 405:
LOG.error('Delete Napp is not allowed yet.') # depends on [control=['if'], data=[]]
else:
msg = json.loads(exception.response.content)
LOG.error(' Server error: %s - ', msg['error']) # depends on [control=['except'], data=['exception']] # depends on [control=['for'], data=['napp']] |
def update_record(self, domain, record, data=None, priority=None, ttl=None,
comment=None):
"""
Modifies an existing record for a domain.
"""
return domain.update_record(record, data=data, priority=priority,
ttl=ttl, comment=comment) | def function[update_record, parameter[self, domain, record, data, priority, ttl, comment]]:
constant[
Modifies an existing record for a domain.
]
return[call[name[domain].update_record, parameter[name[record]]]] | keyword[def] identifier[update_record] ( identifier[self] , identifier[domain] , identifier[record] , identifier[data] = keyword[None] , identifier[priority] = keyword[None] , identifier[ttl] = keyword[None] ,
identifier[comment] = keyword[None] ):
literal[string]
keyword[return] identifier[domain] . identifier[update_record] ( identifier[record] , identifier[data] = identifier[data] , identifier[priority] = identifier[priority] ,
identifier[ttl] = identifier[ttl] , identifier[comment] = identifier[comment] ) | def update_record(self, domain, record, data=None, priority=None, ttl=None, comment=None):
"""
Modifies an existing record for a domain.
"""
return domain.update_record(record, data=data, priority=priority, ttl=ttl, comment=comment) |
def _validate_tileset(self, tileset):
"""Validate the tileset name and
ensure that it includes the username
"""
if '.' not in tileset:
tileset = "{0}.{1}".format(self.username, tileset)
pattern = '^[a-z0-9-_]{1,32}\.[a-z0-9-_]{1,32}$'
if not re.match(pattern, tileset, flags=re.IGNORECASE):
raise ValidationError(
'tileset {0} is invalid, must match r"{1}"'.format(
tileset, pattern))
return tileset | def function[_validate_tileset, parameter[self, tileset]]:
constant[Validate the tileset name and
ensure that it includes the username
]
if compare[constant[.] <ast.NotIn object at 0x7da2590d7190> name[tileset]] begin[:]
variable[tileset] assign[=] call[constant[{0}.{1}].format, parameter[name[self].username, name[tileset]]]
variable[pattern] assign[=] constant[^[a-z0-9-_]{1,32}\.[a-z0-9-_]{1,32}$]
if <ast.UnaryOp object at 0x7da1b17fa6e0> begin[:]
<ast.Raise object at 0x7da1b17fa800>
return[name[tileset]] | keyword[def] identifier[_validate_tileset] ( identifier[self] , identifier[tileset] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[tileset] :
identifier[tileset] = literal[string] . identifier[format] ( identifier[self] . identifier[username] , identifier[tileset] )
identifier[pattern] = literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[pattern] , identifier[tileset] , identifier[flags] = identifier[re] . identifier[IGNORECASE] ):
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] (
identifier[tileset] , identifier[pattern] ))
keyword[return] identifier[tileset] | def _validate_tileset(self, tileset):
"""Validate the tileset name and
ensure that it includes the username
"""
if '.' not in tileset:
tileset = '{0}.{1}'.format(self.username, tileset) # depends on [control=['if'], data=['tileset']]
pattern = '^[a-z0-9-_]{1,32}\\.[a-z0-9-_]{1,32}$'
if not re.match(pattern, tileset, flags=re.IGNORECASE):
raise ValidationError('tileset {0} is invalid, must match r"{1}"'.format(tileset, pattern)) # depends on [control=['if'], data=[]]
return tileset |
def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password) | def function[authenticate, parameter[self, transport, account_name, password]]:
constant[
Authenticates account using soap method.
]
call[name[Authenticator].authenticate, parameter[name[self], name[transport], name[account_name], name[password]]]
if compare[name[password] equal[==] constant[None]] begin[:]
return[call[name[self].pre_auth, parameter[name[transport], name[account_name]]]] | keyword[def] identifier[authenticate] ( identifier[self] , identifier[transport] , identifier[account_name] , identifier[password] = keyword[None] ):
literal[string]
identifier[Authenticator] . identifier[authenticate] ( identifier[self] , identifier[transport] , identifier[account_name] , identifier[password] )
keyword[if] identifier[password] == keyword[None] :
keyword[return] identifier[self] . identifier[pre_auth] ( identifier[transport] , identifier[account_name] )
keyword[else] :
keyword[return] identifier[self] . identifier[auth] ( identifier[transport] , identifier[account_name] , identifier[password] ) | def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name) # depends on [control=['if'], data=[]]
else:
return self.auth(transport, account_name, password) |
def send(self, obj):
"""Send object"""
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
self.send_bytes(buf.getvalue()) | def function[send, parameter[self, obj]]:
constant[Send object]
variable[buf] assign[=] call[name[io].BytesIO, parameter[]]
call[call[name[ForkingPickler], parameter[name[buf], name[pickle].HIGHEST_PROTOCOL]].dump, parameter[name[obj]]]
call[name[self].send_bytes, parameter[call[name[buf].getvalue, parameter[]]]] | keyword[def] identifier[send] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[buf] = identifier[io] . identifier[BytesIO] ()
identifier[ForkingPickler] ( identifier[buf] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] ). identifier[dump] ( identifier[obj] )
identifier[self] . identifier[send_bytes] ( identifier[buf] . identifier[getvalue] ()) | def send(self, obj):
"""Send object"""
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
self.send_bytes(buf.getvalue()) |
async def set_chat_description(self, chat_id: typing.Union[base.Integer, base.String],
description: typing.Union[base.String, None] = None) -> base.Boolean:
"""
Use this method to change the description of a supergroup or a channel.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#setchatdescription
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param description: New chat description, 0-255 characters
:type description: :obj:`typing.Union[base.String, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_DESCRIPTION, payload)
return result | <ast.AsyncFunctionDef object at 0x7da1b17a94e0> | keyword[async] keyword[def] identifier[set_chat_description] ( identifier[self] , identifier[chat_id] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Integer] , identifier[base] . identifier[String] ],
identifier[description] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[String] , keyword[None] ]= keyword[None] )-> identifier[base] . identifier[Boolean] :
literal[string]
identifier[payload] = identifier[generate_payload] (** identifier[locals] ())
identifier[result] = keyword[await] identifier[self] . identifier[request] ( identifier[api] . identifier[Methods] . identifier[SET_CHAT_DESCRIPTION] , identifier[payload] )
keyword[return] identifier[result] | async def set_chat_description(self, chat_id: typing.Union[base.Integer, base.String], description: typing.Union[base.String, None]=None) -> base.Boolean:
"""
Use this method to change the description of a supergroup or a channel.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#setchatdescription
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param description: New chat description, 0-255 characters
:type description: :obj:`typing.Union[base.String, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_DESCRIPTION, payload)
return result |
def average_fft(self, fftlength=None, overlap=0, window=None):
"""Compute the averaged one-dimensional DFT of this `TimeSeries`.
This method computes a number of FFTs of duration ``fftlength``
and ``overlap`` (both given in seconds), and returns the mean
average. This method is analogous to the Welch average method
for power spectra.
Parameters
----------
fftlength : `float`
number of seconds in single FFT, default, use
whole `TimeSeries`
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
Returns
-------
out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
the transformed output, with populated frequencies array
metadata
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
"""
from gwpy.spectrogram import Spectrogram
# format lengths
if fftlength is None:
fftlength = self.duration
if isinstance(fftlength, units.Quantity):
fftlength = fftlength.value
nfft = int((fftlength * self.sample_rate).decompose().value)
noverlap = int((overlap * self.sample_rate).decompose().value)
navg = divmod(self.size-noverlap, (nfft-noverlap))[0]
# format window
if window is None:
window = 'boxcar'
if isinstance(window, (str, tuple)):
win = signal.get_window(window, nfft)
else:
win = numpy.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
elif win.shape[0] != nfft:
raise ValueError('Window is the wrong size.')
win = win.astype(self.dtype)
scaling = 1. / numpy.absolute(win).mean()
if nfft % 2:
nfreqs = (nfft + 1) // 2
else:
nfreqs = nfft // 2 + 1
ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=numpy.complex),
channel=self.channel, epoch=self.epoch, f0=0,
df=1 / fftlength, dt=1, copy=True)
# stride through TimeSeries, recording FFTs as columns of Spectrogram
idx = 0
for i in range(navg):
# find step TimeSeries
idx_end = idx + nfft
if idx_end > self.size:
continue
stepseries = self[idx:idx_end].detrend() * win
# calculated FFT, weight, and stack
fft_ = stepseries.fft(nfft=nfft) * scaling
ffts.value[i, :] = fft_.value
idx += (nfft - noverlap)
mean = ffts.mean(0)
mean.name = self.name
mean.epoch = self.epoch
mean.channel = self.channel
return mean | def function[average_fft, parameter[self, fftlength, overlap, window]]:
constant[Compute the averaged one-dimensional DFT of this `TimeSeries`.
This method computes a number of FFTs of duration ``fftlength``
and ``overlap`` (both given in seconds), and returns the mean
average. This method is analogous to the Welch average method
for power spectra.
Parameters
----------
fftlength : `float`
number of seconds in single FFT, default, use
whole `TimeSeries`
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
Returns
-------
out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
the transformed output, with populated frequencies array
metadata
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
]
from relative_module[gwpy.spectrogram] import module[Spectrogram]
if compare[name[fftlength] is constant[None]] begin[:]
variable[fftlength] assign[=] name[self].duration
if call[name[isinstance], parameter[name[fftlength], name[units].Quantity]] begin[:]
variable[fftlength] assign[=] name[fftlength].value
variable[nfft] assign[=] call[name[int], parameter[call[binary_operation[name[fftlength] * name[self].sample_rate].decompose, parameter[]].value]]
variable[noverlap] assign[=] call[name[int], parameter[call[binary_operation[name[overlap] * name[self].sample_rate].decompose, parameter[]].value]]
variable[navg] assign[=] call[call[name[divmod], parameter[binary_operation[name[self].size - name[noverlap]], binary_operation[name[nfft] - name[noverlap]]]]][constant[0]]
if compare[name[window] is constant[None]] begin[:]
variable[window] assign[=] constant[boxcar]
if call[name[isinstance], parameter[name[window], tuple[[<ast.Name object at 0x7da18f09de10>, <ast.Name object at 0x7da18f09f7f0>]]]] begin[:]
variable[win] assign[=] call[name[signal].get_window, parameter[name[window], name[nfft]]]
variable[win] assign[=] call[name[win].astype, parameter[name[self].dtype]]
variable[scaling] assign[=] binary_operation[constant[1.0] / call[call[name[numpy].absolute, parameter[name[win]]].mean, parameter[]]]
if binary_operation[name[nfft] <ast.Mod object at 0x7da2590d6920> constant[2]] begin[:]
variable[nfreqs] assign[=] binary_operation[binary_operation[name[nfft] + constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[ffts] assign[=] call[name[Spectrogram], parameter[call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da18f09fd30>, <ast.Name object at 0x7da18f09cdf0>]]]]]]
variable[idx] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[navg]]]] begin[:]
variable[idx_end] assign[=] binary_operation[name[idx] + name[nfft]]
if compare[name[idx_end] greater[>] name[self].size] begin[:]
continue
variable[stepseries] assign[=] binary_operation[call[call[name[self]][<ast.Slice object at 0x7da1b0608e20>].detrend, parameter[]] * name[win]]
variable[fft_] assign[=] binary_operation[call[name[stepseries].fft, parameter[]] * name[scaling]]
call[name[ffts].value][tuple[[<ast.Name object at 0x7da18f09f310>, <ast.Slice object at 0x7da18f09ce50>]]] assign[=] name[fft_].value
<ast.AugAssign object at 0x7da18f09f550>
variable[mean] assign[=] call[name[ffts].mean, parameter[constant[0]]]
name[mean].name assign[=] name[self].name
name[mean].epoch assign[=] name[self].epoch
name[mean].channel assign[=] name[self].channel
return[name[mean]] | keyword[def] identifier[average_fft] ( identifier[self] , identifier[fftlength] = keyword[None] , identifier[overlap] = literal[int] , identifier[window] = keyword[None] ):
literal[string]
keyword[from] identifier[gwpy] . identifier[spectrogram] keyword[import] identifier[Spectrogram]
keyword[if] identifier[fftlength] keyword[is] keyword[None] :
identifier[fftlength] = identifier[self] . identifier[duration]
keyword[if] identifier[isinstance] ( identifier[fftlength] , identifier[units] . identifier[Quantity] ):
identifier[fftlength] = identifier[fftlength] . identifier[value]
identifier[nfft] = identifier[int] (( identifier[fftlength] * identifier[self] . identifier[sample_rate] ). identifier[decompose] (). identifier[value] )
identifier[noverlap] = identifier[int] (( identifier[overlap] * identifier[self] . identifier[sample_rate] ). identifier[decompose] (). identifier[value] )
identifier[navg] = identifier[divmod] ( identifier[self] . identifier[size] - identifier[noverlap] ,( identifier[nfft] - identifier[noverlap] ))[ literal[int] ]
keyword[if] identifier[window] keyword[is] keyword[None] :
identifier[window] = literal[string]
keyword[if] identifier[isinstance] ( identifier[window] ,( identifier[str] , identifier[tuple] )):
identifier[win] = identifier[signal] . identifier[get_window] ( identifier[window] , identifier[nfft] )
keyword[else] :
identifier[win] = identifier[numpy] . identifier[asarray] ( identifier[window] )
keyword[if] identifier[len] ( identifier[win] . identifier[shape] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[win] . identifier[shape] [ literal[int] ]!= identifier[nfft] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[win] = identifier[win] . identifier[astype] ( identifier[self] . identifier[dtype] )
identifier[scaling] = literal[int] / identifier[numpy] . identifier[absolute] ( identifier[win] ). identifier[mean] ()
keyword[if] identifier[nfft] % literal[int] :
identifier[nfreqs] =( identifier[nfft] + literal[int] )// literal[int]
keyword[else] :
identifier[nfreqs] = identifier[nfft] // literal[int] + literal[int]
identifier[ffts] = identifier[Spectrogram] ( identifier[numpy] . identifier[zeros] (( identifier[navg] , identifier[nfreqs] ), identifier[dtype] = identifier[numpy] . identifier[complex] ),
identifier[channel] = identifier[self] . identifier[channel] , identifier[epoch] = identifier[self] . identifier[epoch] , identifier[f0] = literal[int] ,
identifier[df] = literal[int] / identifier[fftlength] , identifier[dt] = literal[int] , identifier[copy] = keyword[True] )
identifier[idx] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[navg] ):
identifier[idx_end] = identifier[idx] + identifier[nfft]
keyword[if] identifier[idx_end] > identifier[self] . identifier[size] :
keyword[continue]
identifier[stepseries] = identifier[self] [ identifier[idx] : identifier[idx_end] ]. identifier[detrend] ()* identifier[win]
identifier[fft_] = identifier[stepseries] . identifier[fft] ( identifier[nfft] = identifier[nfft] )* identifier[scaling]
identifier[ffts] . identifier[value] [ identifier[i] ,:]= identifier[fft_] . identifier[value]
identifier[idx] +=( identifier[nfft] - identifier[noverlap] )
identifier[mean] = identifier[ffts] . identifier[mean] ( literal[int] )
identifier[mean] . identifier[name] = identifier[self] . identifier[name]
identifier[mean] . identifier[epoch] = identifier[self] . identifier[epoch]
identifier[mean] . identifier[channel] = identifier[self] . identifier[channel]
keyword[return] identifier[mean] | def average_fft(self, fftlength=None, overlap=0, window=None):
"""Compute the averaged one-dimensional DFT of this `TimeSeries`.
This method computes a number of FFTs of duration ``fftlength``
and ``overlap`` (both given in seconds), and returns the mean
average. This method is analogous to the Welch average method
for power spectra.
Parameters
----------
fftlength : `float`
number of seconds in single FFT, default, use
whole `TimeSeries`
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
Returns
-------
out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
the transformed output, with populated frequencies array
metadata
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
"""
from gwpy.spectrogram import Spectrogram
# format lengths
if fftlength is None:
fftlength = self.duration # depends on [control=['if'], data=['fftlength']]
if isinstance(fftlength, units.Quantity):
fftlength = fftlength.value # depends on [control=['if'], data=[]]
nfft = int((fftlength * self.sample_rate).decompose().value)
noverlap = int((overlap * self.sample_rate).decompose().value)
navg = divmod(self.size - noverlap, nfft - noverlap)[0]
# format window
if window is None:
window = 'boxcar' # depends on [control=['if'], data=['window']]
if isinstance(window, (str, tuple)):
win = signal.get_window(window, nfft) # depends on [control=['if'], data=[]]
else:
win = numpy.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D') # depends on [control=['if'], data=[]]
elif win.shape[0] != nfft:
raise ValueError('Window is the wrong size.') # depends on [control=['if'], data=[]]
win = win.astype(self.dtype)
scaling = 1.0 / numpy.absolute(win).mean()
if nfft % 2:
nfreqs = (nfft + 1) // 2 # depends on [control=['if'], data=[]]
else:
nfreqs = nfft // 2 + 1
ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=numpy.complex), channel=self.channel, epoch=self.epoch, f0=0, df=1 / fftlength, dt=1, copy=True)
# stride through TimeSeries, recording FFTs as columns of Spectrogram
idx = 0
for i in range(navg):
# find step TimeSeries
idx_end = idx + nfft
if idx_end > self.size:
continue # depends on [control=['if'], data=[]]
stepseries = self[idx:idx_end].detrend() * win
# calculated FFT, weight, and stack
fft_ = stepseries.fft(nfft=nfft) * scaling
ffts.value[i, :] = fft_.value
idx += nfft - noverlap # depends on [control=['for'], data=['i']]
mean = ffts.mean(0)
mean.name = self.name
mean.epoch = self.epoch
mean.channel = self.channel
return mean |
def purge_db(self):
"""
Purge all database records for the current user.
"""
with self.engine.begin() as db:
purge_remote_checkpoints(db, self.user_id) | def function[purge_db, parameter[self]]:
constant[
Purge all database records for the current user.
]
with call[name[self].engine.begin, parameter[]] begin[:]
call[name[purge_remote_checkpoints], parameter[name[db], name[self].user_id]] | keyword[def] identifier[purge_db] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[engine] . identifier[begin] () keyword[as] identifier[db] :
identifier[purge_remote_checkpoints] ( identifier[db] , identifier[self] . identifier[user_id] ) | def purge_db(self):
"""
Purge all database records for the current user.
"""
with self.engine.begin() as db:
purge_remote_checkpoints(db, self.user_id) # depends on [control=['with'], data=['db']] |
def keyserverprefs(self):
"""
A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
"""
if 'KeyServerPreferences' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_KeyServerPreferences'])).flags
return [] | def function[keyserverprefs, parameter[self]]:
constant[
A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
]
if compare[constant[KeyServerPreferences] in name[self]._signature.subpackets] begin[:]
return[call[name[next], parameter[call[name[iter], parameter[call[name[self]._signature.subpackets][constant[h_KeyServerPreferences]]]]]].flags]
return[list[[]]] | keyword[def] identifier[keyserverprefs] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_signature] . identifier[subpackets] :
keyword[return] identifier[next] ( identifier[iter] ( identifier[self] . identifier[_signature] . identifier[subpackets] [ literal[string] ])). identifier[flags]
keyword[return] [] | def keyserverprefs(self):
"""
A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
"""
if 'KeyServerPreferences' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_KeyServerPreferences'])).flags # depends on [control=['if'], data=[]]
return [] |
def segment(text: str, custom_dict: Trie = None) -> List[str]:
"""
Dictionary-based word segmentation, using maximal matching algorithm and Thai Character Cluster
:param str text: text to be tokenized to words
:return: list of words, tokenized from the text
"""
if not text or not isinstance(text, str):
return []
if not custom_dict:
custom_dict = DEFAULT_DICT_TRIE
return list(_onecut(text, custom_dict)) | def function[segment, parameter[text, custom_dict]]:
constant[
Dictionary-based word segmentation, using maximal matching algorithm and Thai Character Cluster
:param str text: text to be tokenized to words
:return: list of words, tokenized from the text
]
if <ast.BoolOp object at 0x7da1b19daa40> begin[:]
return[list[[]]]
if <ast.UnaryOp object at 0x7da1b19d8f70> begin[:]
variable[custom_dict] assign[=] name[DEFAULT_DICT_TRIE]
return[call[name[list], parameter[call[name[_onecut], parameter[name[text], name[custom_dict]]]]]] | keyword[def] identifier[segment] ( identifier[text] : identifier[str] , identifier[custom_dict] : identifier[Trie] = keyword[None] )-> identifier[List] [ identifier[str] ]:
literal[string]
keyword[if] keyword[not] identifier[text] keyword[or] keyword[not] identifier[isinstance] ( identifier[text] , identifier[str] ):
keyword[return] []
keyword[if] keyword[not] identifier[custom_dict] :
identifier[custom_dict] = identifier[DEFAULT_DICT_TRIE]
keyword[return] identifier[list] ( identifier[_onecut] ( identifier[text] , identifier[custom_dict] )) | def segment(text: str, custom_dict: Trie=None) -> List[str]:
"""
Dictionary-based word segmentation, using maximal matching algorithm and Thai Character Cluster
:param str text: text to be tokenized to words
:return: list of words, tokenized from the text
"""
if not text or not isinstance(text, str):
return [] # depends on [control=['if'], data=[]]
if not custom_dict:
custom_dict = DEFAULT_DICT_TRIE # depends on [control=['if'], data=[]]
return list(_onecut(text, custom_dict)) |
def close(self):
"""Close this connection."""
logger('HttpConnection').debug('Closing connection....')
self.writer.close()
self._closed = True | def function[close, parameter[self]]:
constant[Close this connection.]
call[call[name[logger], parameter[constant[HttpConnection]]].debug, parameter[constant[Closing connection....]]]
call[name[self].writer.close, parameter[]]
name[self]._closed assign[=] constant[True] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[logger] ( literal[string] ). identifier[debug] ( literal[string] )
identifier[self] . identifier[writer] . identifier[close] ()
identifier[self] . identifier[_closed] = keyword[True] | def close(self):
"""Close this connection."""
logger('HttpConnection').debug('Closing connection....')
self.writer.close()
self._closed = True |
def render_log_filename(ti, try_number, filename_template):
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number) | def function[render_log_filename, parameter[ti, try_number, filename_template]]:
constant[
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
]
<ast.Tuple object at 0x7da18f00f760> assign[=] call[name[parse_template_string], parameter[name[filename_template]]]
if name[filename_jinja_template] begin[:]
variable[jinja_context] assign[=] call[name[ti].get_template_context, parameter[]]
call[name[jinja_context]][constant[try_number]] assign[=] name[try_number]
return[call[name[filename_jinja_template].render, parameter[]]]
return[call[name[filename_template].format, parameter[]]] | keyword[def] identifier[render_log_filename] ( identifier[ti] , identifier[try_number] , identifier[filename_template] ):
literal[string]
identifier[filename_template] , identifier[filename_jinja_template] = identifier[parse_template_string] ( identifier[filename_template] )
keyword[if] identifier[filename_jinja_template] :
identifier[jinja_context] = identifier[ti] . identifier[get_template_context] ()
identifier[jinja_context] [ literal[string] ]= identifier[try_number]
keyword[return] identifier[filename_jinja_template] . identifier[render] (** identifier[jinja_context] )
keyword[return] identifier[filename_template] . identifier[format] ( identifier[dag_id] = identifier[ti] . identifier[dag_id] ,
identifier[task_id] = identifier[ti] . identifier[task_id] ,
identifier[execution_date] = identifier[ti] . identifier[execution_date] . identifier[isoformat] (),
identifier[try_number] = identifier[try_number] ) | def render_log_filename(ti, try_number, filename_template):
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
(filename_template, filename_jinja_template) = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context) # depends on [control=['if'], data=[]]
return filename_template.format(dag_id=ti.dag_id, task_id=ti.task_id, execution_date=ti.execution_date.isoformat(), try_number=try_number) |
def push_notebook(document=None, state=None, handle=None):
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
'''
from ..protocol import Protocol
if state is None:
state = curstate()
if not document:
document = state.document
if not document:
warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
events = list(handle.doc._held_events)
# This is to avoid having an exception raised for attempting to create a
# PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return
handle.doc._held_events = []
msg = Protocol("1.0").create("PATCH-DOC", events)
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for header, payload in msg.buffers:
handle.comms.send(json.dumps(header))
handle.comms.send(buffers=[payload]) | def function[push_notebook, parameter[document, state, handle]]:
constant[ Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
]
from relative_module[protocol] import module[Protocol]
if compare[name[state] is constant[None]] begin[:]
variable[state] assign[=] call[name[curstate], parameter[]]
if <ast.UnaryOp object at 0x7da1b21d7490> begin[:]
variable[document] assign[=] name[state].document
if <ast.UnaryOp object at 0x7da1b21d4700> begin[:]
call[name[warn], parameter[constant[No document to push]]]
return[None]
if compare[name[handle] is constant[None]] begin[:]
variable[handle] assign[=] name[state].last_comms_handle
if <ast.UnaryOp object at 0x7da1b21d5990> begin[:]
call[name[warn], parameter[constant[Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()]]]
return[None]
variable[events] assign[=] call[name[list], parameter[name[handle].doc._held_events]]
if compare[call[name[len], parameter[name[events]]] equal[==] constant[0]] begin[:]
return[None]
name[handle].doc._held_events assign[=] list[[]]
variable[msg] assign[=] call[call[name[Protocol], parameter[constant[1.0]]].create, parameter[constant[PATCH-DOC], name[events]]]
call[name[handle].comms.send, parameter[name[msg].header_json]]
call[name[handle].comms.send, parameter[name[msg].metadata_json]]
call[name[handle].comms.send, parameter[name[msg].content_json]]
for taget[tuple[[<ast.Name object at 0x7da1b1f63550>, <ast.Name object at 0x7da1b1f623e0>]]] in starred[name[msg].buffers] begin[:]
call[name[handle].comms.send, parameter[call[name[json].dumps, parameter[name[header]]]]]
call[name[handle].comms.send, parameter[]] | keyword[def] identifier[push_notebook] ( identifier[document] = keyword[None] , identifier[state] = keyword[None] , identifier[handle] = keyword[None] ):
literal[string]
keyword[from] .. identifier[protocol] keyword[import] identifier[Protocol]
keyword[if] identifier[state] keyword[is] keyword[None] :
identifier[state] = identifier[curstate] ()
keyword[if] keyword[not] identifier[document] :
identifier[document] = identifier[state] . identifier[document]
keyword[if] keyword[not] identifier[document] :
identifier[warn] ( literal[string] )
keyword[return]
keyword[if] identifier[handle] keyword[is] keyword[None] :
identifier[handle] = identifier[state] . identifier[last_comms_handle]
keyword[if] keyword[not] identifier[handle] :
identifier[warn] ( literal[string] )
keyword[return]
identifier[events] = identifier[list] ( identifier[handle] . identifier[doc] . identifier[_held_events] )
keyword[if] identifier[len] ( identifier[events] )== literal[int] :
keyword[return]
identifier[handle] . identifier[doc] . identifier[_held_events] =[]
identifier[msg] = identifier[Protocol] ( literal[string] ). identifier[create] ( literal[string] , identifier[events] )
identifier[handle] . identifier[comms] . identifier[send] ( identifier[msg] . identifier[header_json] )
identifier[handle] . identifier[comms] . identifier[send] ( identifier[msg] . identifier[metadata_json] )
identifier[handle] . identifier[comms] . identifier[send] ( identifier[msg] . identifier[content_json] )
keyword[for] identifier[header] , identifier[payload] keyword[in] identifier[msg] . identifier[buffers] :
identifier[handle] . identifier[comms] . identifier[send] ( identifier[json] . identifier[dumps] ( identifier[header] ))
identifier[handle] . identifier[comms] . identifier[send] ( identifier[buffers] =[ identifier[payload] ]) | def push_notebook(document=None, state=None, handle=None):
""" Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
"""
from ..protocol import Protocol
if state is None:
state = curstate() # depends on [control=['if'], data=['state']]
if not document:
document = state.document # depends on [control=['if'], data=[]]
if not document:
warn('No document to push')
return # depends on [control=['if'], data=[]]
if handle is None:
handle = state.last_comms_handle # depends on [control=['if'], data=['handle']]
if not handle:
warn('Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()')
return # depends on [control=['if'], data=[]]
events = list(handle.doc._held_events)
# This is to avoid having an exception raised for attempting to create a
# PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return # depends on [control=['if'], data=[]]
handle.doc._held_events = []
msg = Protocol('1.0').create('PATCH-DOC', events)
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for (header, payload) in msg.buffers:
handle.comms.send(json.dumps(header))
handle.comms.send(buffers=[payload]) # depends on [control=['for'], data=[]] |
def connect(self, *, db=None):
"""
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
"""
if not self.properties.network:
self.new_state(DeviceFromDB)
else:
try:
name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
segmentation = self.properties.network.read(
"{} device {} segmentationSupported".format(
self.properties.address, self.properties.device_id
)
)
if not self.segmentation_supported or segmentation not in (
"segmentedTransmit",
"segmentedBoth",
):
segmentation_supported = False
self._log.debug("Segmentation not supported")
else:
segmentation_supported = True
if name:
if segmentation_supported:
self.new_state(RPMDeviceConnected)
else:
self.new_state(RPDeviceConnected)
except SegmentationNotSupported:
self.segmentation_supported = False
self._log.warning(
"Segmentation not supported.... expect slow responses."
)
self.new_state(RPDeviceConnected)
except (NoResponseFromController, AttributeError) as error:
if self.properties.db_name:
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Offline: provide database name to load stored data."
)
self._log.warning("Ex. controller.connect(db = 'backup')") | def function[connect, parameter[self]]:
constant[
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
]
if <ast.UnaryOp object at 0x7da1b040a020> begin[:]
call[name[self].new_state, parameter[name[DeviceFromDB]]] | keyword[def] identifier[connect] ( identifier[self] ,*, identifier[db] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[properties] . identifier[network] :
identifier[self] . identifier[new_state] ( identifier[DeviceFromDB] )
keyword[else] :
keyword[try] :
identifier[name] = identifier[self] . identifier[properties] . identifier[network] . identifier[read] (
literal[string] . identifier[format] (
identifier[self] . identifier[properties] . identifier[address] , identifier[self] . identifier[properties] . identifier[device_id]
)
)
identifier[segmentation] = identifier[self] . identifier[properties] . identifier[network] . identifier[read] (
literal[string] . identifier[format] (
identifier[self] . identifier[properties] . identifier[address] , identifier[self] . identifier[properties] . identifier[device_id]
)
)
keyword[if] keyword[not] identifier[self] . identifier[segmentation_supported] keyword[or] identifier[segmentation] keyword[not] keyword[in] (
literal[string] ,
literal[string] ,
):
identifier[segmentation_supported] = keyword[False]
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[segmentation_supported] = keyword[True]
keyword[if] identifier[name] :
keyword[if] identifier[segmentation_supported] :
identifier[self] . identifier[new_state] ( identifier[RPMDeviceConnected] )
keyword[else] :
identifier[self] . identifier[new_state] ( identifier[RPDeviceConnected] )
keyword[except] identifier[SegmentationNotSupported] :
identifier[self] . identifier[segmentation_supported] = keyword[False]
identifier[self] . identifier[_log] . identifier[warning] (
literal[string]
)
identifier[self] . identifier[new_state] ( identifier[RPDeviceConnected] )
keyword[except] ( identifier[NoResponseFromController] , identifier[AttributeError] ) keyword[as] identifier[error] :
keyword[if] identifier[self] . identifier[properties] . identifier[db_name] :
identifier[self] . identifier[new_state] ( identifier[DeviceFromDB] )
keyword[else] :
identifier[self] . identifier[_log] . identifier[warning] (
literal[string]
)
identifier[self] . identifier[_log] . identifier[warning] ( literal[string] ) | def connect(self, *, db=None):
"""
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
"""
if not self.properties.network:
self.new_state(DeviceFromDB) # depends on [control=['if'], data=[]]
else:
try:
name = self.properties.network.read('{} device {} objectName'.format(self.properties.address, self.properties.device_id))
segmentation = self.properties.network.read('{} device {} segmentationSupported'.format(self.properties.address, self.properties.device_id))
if not self.segmentation_supported or segmentation not in ('segmentedTransmit', 'segmentedBoth'):
segmentation_supported = False
self._log.debug('Segmentation not supported') # depends on [control=['if'], data=[]]
else:
segmentation_supported = True
if name:
if segmentation_supported:
self.new_state(RPMDeviceConnected) # depends on [control=['if'], data=[]]
else:
self.new_state(RPDeviceConnected) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except SegmentationNotSupported:
self.segmentation_supported = False
self._log.warning('Segmentation not supported.... expect slow responses.')
self.new_state(RPDeviceConnected) # depends on [control=['except'], data=[]]
except (NoResponseFromController, AttributeError) as error:
if self.properties.db_name:
self.new_state(DeviceFromDB) # depends on [control=['if'], data=[]]
else:
self._log.warning('Offline: provide database name to load stored data.')
self._log.warning("Ex. controller.connect(db = 'backup')") # depends on [control=['except'], data=[]] |
def clean_locks(root=None):
'''
Remove unused locks that do not currently (with regard to repositories
used) lock any package.
root
Operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_locks
'''
LCK = "removed"
out = {LCK: 0}
locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
if not os.path.exists(locks):
return out
for node in __zypper__(root=root).xml.call('cl').getElementsByTagName("message"):
text = node.childNodes[0].nodeValue.lower()
if text.startswith(LCK):
out[LCK] = text.split(" ")[1]
break
return out | def function[clean_locks, parameter[root]]:
constant[
Remove unused locks that do not currently (with regard to repositories
used) lock any package.
root
Operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_locks
]
variable[LCK] assign[=] constant[removed]
variable[out] assign[=] dictionary[[<ast.Name object at 0x7da1b1c11720>], [<ast.Constant object at 0x7da1b1c13340>]]
variable[locks] assign[=] <ast.IfExp object at 0x7da1b1c10cd0>
if <ast.UnaryOp object at 0x7da1b1c123b0> begin[:]
return[name[out]]
for taget[name[node]] in starred[call[call[call[name[__zypper__], parameter[]].xml.call, parameter[constant[cl]]].getElementsByTagName, parameter[constant[message]]]] begin[:]
variable[text] assign[=] call[call[name[node].childNodes][constant[0]].nodeValue.lower, parameter[]]
if call[name[text].startswith, parameter[name[LCK]]] begin[:]
call[name[out]][name[LCK]] assign[=] call[call[name[text].split, parameter[constant[ ]]]][constant[1]]
break
return[name[out]] | keyword[def] identifier[clean_locks] ( identifier[root] = keyword[None] ):
literal[string]
identifier[LCK] = literal[string]
identifier[out] ={ identifier[LCK] : literal[int] }
identifier[locks] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[os] . identifier[path] . identifier[relpath] ( identifier[LOCKS] , identifier[os] . identifier[path] . identifier[sep] )) keyword[if] identifier[root] keyword[else] identifier[LOCKS]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[locks] ):
keyword[return] identifier[out]
keyword[for] identifier[node] keyword[in] identifier[__zypper__] ( identifier[root] = identifier[root] ). identifier[xml] . identifier[call] ( literal[string] ). identifier[getElementsByTagName] ( literal[string] ):
identifier[text] = identifier[node] . identifier[childNodes] [ literal[int] ]. identifier[nodeValue] . identifier[lower] ()
keyword[if] identifier[text] . identifier[startswith] ( identifier[LCK] ):
identifier[out] [ identifier[LCK] ]= identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[break]
keyword[return] identifier[out] | def clean_locks(root=None):
"""
Remove unused locks that do not currently (with regard to repositories
used) lock any package.
root
Operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_locks
"""
LCK = 'removed'
out = {LCK: 0}
locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
if not os.path.exists(locks):
return out # depends on [control=['if'], data=[]]
for node in __zypper__(root=root).xml.call('cl').getElementsByTagName('message'):
text = node.childNodes[0].nodeValue.lower()
if text.startswith(LCK):
out[LCK] = text.split(' ')[1]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return out |
def make(keyvals):
"""
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
"""
kwargs = {}
for key, value in keyvals:
if key == "columns":
kwargs["col_formats"] = [c["format"] for c in value]
kwargs["col_types"] = [c["type"] for c in value]
kwargs["col_header"] = [c["name"] for c in value]
kwargs["row_header"] = len(value)
if key == "name": kwargs["table_header"] = value
if key == "description": kwargs["table_description"] = value
if key == "data": kwargs["raw_cell_values"] = value
return H2OTwoDimTable(**kwargs) | def function[make, parameter[keyvals]]:
constant[
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
]
variable[kwargs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e7910>, <ast.Name object at 0x7da20c6e4d90>]]] in starred[name[keyvals]] begin[:]
if compare[name[key] equal[==] constant[columns]] begin[:]
call[name[kwargs]][constant[col_formats]] assign[=] <ast.ListComp object at 0x7da20c6e66e0>
call[name[kwargs]][constant[col_types]] assign[=] <ast.ListComp object at 0x7da20c6e6110>
call[name[kwargs]][constant[col_header]] assign[=] <ast.ListComp object at 0x7da20c6e46a0>
call[name[kwargs]][constant[row_header]] assign[=] call[name[len], parameter[name[value]]]
if compare[name[key] equal[==] constant[name]] begin[:]
call[name[kwargs]][constant[table_header]] assign[=] name[value]
if compare[name[key] equal[==] constant[description]] begin[:]
call[name[kwargs]][constant[table_description]] assign[=] name[value]
if compare[name[key] equal[==] constant[data]] begin[:]
call[name[kwargs]][constant[raw_cell_values]] assign[=] name[value]
return[call[name[H2OTwoDimTable], parameter[]]] | keyword[def] identifier[make] ( identifier[keyvals] ):
literal[string]
identifier[kwargs] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[keyvals] :
keyword[if] identifier[key] == literal[string] :
identifier[kwargs] [ literal[string] ]=[ identifier[c] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[value] ]
identifier[kwargs] [ literal[string] ]=[ identifier[c] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[value] ]
identifier[kwargs] [ literal[string] ]=[ identifier[c] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[value] ]
identifier[kwargs] [ literal[string] ]= identifier[len] ( identifier[value] )
keyword[if] identifier[key] == literal[string] : identifier[kwargs] [ literal[string] ]= identifier[value]
keyword[if] identifier[key] == literal[string] : identifier[kwargs] [ literal[string] ]= identifier[value]
keyword[if] identifier[key] == literal[string] : identifier[kwargs] [ literal[string] ]= identifier[value]
keyword[return] identifier[H2OTwoDimTable] (** identifier[kwargs] ) | def make(keyvals):
"""
Create new H2OTwoDimTable object from list of (key,value) tuples which are a pre-cursor to JSON dict.
:param keyvals: list of (key, value) tuples
:return: new H2OTwoDimTable object
"""
kwargs = {}
for (key, value) in keyvals:
if key == 'columns':
kwargs['col_formats'] = [c['format'] for c in value]
kwargs['col_types'] = [c['type'] for c in value]
kwargs['col_header'] = [c['name'] for c in value]
kwargs['row_header'] = len(value) # depends on [control=['if'], data=[]]
if key == 'name':
kwargs['table_header'] = value # depends on [control=['if'], data=[]]
if key == 'description':
kwargs['table_description'] = value # depends on [control=['if'], data=[]]
if key == 'data':
kwargs['raw_cell_values'] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return H2OTwoDimTable(**kwargs) |
def bundles():
"""Display bundles."""
per_page = int(request.args.get('per_page', 30))
page = int(request.args.get('page', 1))
query = store.bundles()
query_page = query.paginate(page, per_page=per_page)
data = []
for bundle_obj in query_page.items:
bundle_data = bundle_obj.to_dict()
bundle_data['versions'] = [version.to_dict() for version in bundle_obj.versions]
data.append(bundle_data)
return jsonify(bundles=data) | def function[bundles, parameter[]]:
constant[Display bundles.]
variable[per_page] assign[=] call[name[int], parameter[call[name[request].args.get, parameter[constant[per_page], constant[30]]]]]
variable[page] assign[=] call[name[int], parameter[call[name[request].args.get, parameter[constant[page], constant[1]]]]]
variable[query] assign[=] call[name[store].bundles, parameter[]]
variable[query_page] assign[=] call[name[query].paginate, parameter[name[page]]]
variable[data] assign[=] list[[]]
for taget[name[bundle_obj]] in starred[name[query_page].items] begin[:]
variable[bundle_data] assign[=] call[name[bundle_obj].to_dict, parameter[]]
call[name[bundle_data]][constant[versions]] assign[=] <ast.ListComp object at 0x7da1b0949a80>
call[name[data].append, parameter[name[bundle_data]]]
return[call[name[jsonify], parameter[]]] | keyword[def] identifier[bundles] ():
literal[string]
identifier[per_page] = identifier[int] ( identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[int] ))
identifier[page] = identifier[int] ( identifier[request] . identifier[args] . identifier[get] ( literal[string] , literal[int] ))
identifier[query] = identifier[store] . identifier[bundles] ()
identifier[query_page] = identifier[query] . identifier[paginate] ( identifier[page] , identifier[per_page] = identifier[per_page] )
identifier[data] =[]
keyword[for] identifier[bundle_obj] keyword[in] identifier[query_page] . identifier[items] :
identifier[bundle_data] = identifier[bundle_obj] . identifier[to_dict] ()
identifier[bundle_data] [ literal[string] ]=[ identifier[version] . identifier[to_dict] () keyword[for] identifier[version] keyword[in] identifier[bundle_obj] . identifier[versions] ]
identifier[data] . identifier[append] ( identifier[bundle_data] )
keyword[return] identifier[jsonify] ( identifier[bundles] = identifier[data] ) | def bundles():
"""Display bundles."""
per_page = int(request.args.get('per_page', 30))
page = int(request.args.get('page', 1))
query = store.bundles()
query_page = query.paginate(page, per_page=per_page)
data = []
for bundle_obj in query_page.items:
bundle_data = bundle_obj.to_dict()
bundle_data['versions'] = [version.to_dict() for version in bundle_obj.versions]
data.append(bundle_data) # depends on [control=['for'], data=['bundle_obj']]
return jsonify(bundles=data) |
def daemons_stop(self, timeout=30, kill_children=False):
"""Stop the Alignak daemons
Iterate over the self-launched daemons and their children list to send a TERM
Wait for daemons to terminate and then send a KILL for those that are not yet stopped
As a default behavior, only the launched daemons are killed, not their children.
Each daemon will manage its children killing
:param timeout: delay to wait before killing a daemon
:type timeout: int
:param kill_children: also kill the children (defaults to False)
:type kill_children: bool
:return: True if all daemons stopped
"""
def on_terminate(proc):
"""Process termination callback function"""
logger.debug("process %s terminated with exit code %s", proc.pid, proc.returncode)
result = True
if self.my_daemons:
logger.info("Alignak self-launched daemons stop:")
start = time.time()
for daemon in list(self.my_daemons.values()):
# Terminate the daemon and its children process
procs = []
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
for process in procs:
try:
logger.info("- terminating process %s", process.name())
process.terminate()
except psutil.AccessDenied:
logger.warning("Process %s is %s", process.name(), process.status())
procs = []
for daemon in list(self.my_daemons.values()):
# Stop the daemon and its children process
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
_, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# Kill processes
for process in alive:
logger.warning("Process %s did not stopped, trying to kill", process.name())
process.kill()
_, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for process in alive:
logger.warning("process %s survived SIGKILL; giving up", process.name())
result = False
logger.debug("Stopping daemons duration: %.2f seconds", time.time() - start)
return result | def function[daemons_stop, parameter[self, timeout, kill_children]]:
constant[Stop the Alignak daemons
Iterate over the self-launched daemons and their children list to send a TERM
Wait for daemons to terminate and then send a KILL for those that are not yet stopped
As a default behavior, only the launched daemons are killed, not their children.
Each daemon will manage its children killing
:param timeout: delay to wait before killing a daemon
:type timeout: int
:param kill_children: also kill the children (defaults to False)
:type kill_children: bool
:return: True if all daemons stopped
]
def function[on_terminate, parameter[proc]]:
constant[Process termination callback function]
call[name[logger].debug, parameter[constant[process %s terminated with exit code %s], name[proc].pid, name[proc].returncode]]
variable[result] assign[=] constant[True]
if name[self].my_daemons begin[:]
call[name[logger].info, parameter[constant[Alignak self-launched daemons stop:]]]
variable[start] assign[=] call[name[time].time, parameter[]]
for taget[name[daemon]] in starred[call[name[list], parameter[call[name[self].my_daemons.values, parameter[]]]]] begin[:]
variable[procs] assign[=] list[[]]
if name[kill_children] begin[:]
variable[procs] assign[=] call[call[name[daemon]][constant[process]].children, parameter[]]
call[name[procs].append, parameter[call[name[daemon]][constant[process]]]]
for taget[name[process]] in starred[name[procs]] begin[:]
<ast.Try object at 0x7da18dc04f70>
variable[procs] assign[=] list[[]]
for taget[name[daemon]] in starred[call[name[list], parameter[call[name[self].my_daemons.values, parameter[]]]]] begin[:]
if name[kill_children] begin[:]
variable[procs] assign[=] call[call[name[daemon]][constant[process]].children, parameter[]]
call[name[procs].append, parameter[call[name[daemon]][constant[process]]]]
<ast.Tuple object at 0x7da20e9b2b90> assign[=] call[name[psutil].wait_procs, parameter[name[procs]]]
if name[alive] begin[:]
for taget[name[process]] in starred[name[alive]] begin[:]
call[name[logger].warning, parameter[constant[Process %s did not stopped, trying to kill], call[name[process].name, parameter[]]]]
call[name[process].kill, parameter[]]
<ast.Tuple object at 0x7da18bc73730> assign[=] call[name[psutil].wait_procs, parameter[name[alive]]]
if name[alive] begin[:]
for taget[name[process]] in starred[name[alive]] begin[:]
call[name[logger].warning, parameter[constant[process %s survived SIGKILL; giving up], call[name[process].name, parameter[]]]]
variable[result] assign[=] constant[False]
call[name[logger].debug, parameter[constant[Stopping daemons duration: %.2f seconds], binary_operation[call[name[time].time, parameter[]] - name[start]]]]
return[name[result]] | keyword[def] identifier[daemons_stop] ( identifier[self] , identifier[timeout] = literal[int] , identifier[kill_children] = keyword[False] ):
literal[string]
keyword[def] identifier[on_terminate] ( identifier[proc] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[proc] . identifier[pid] , identifier[proc] . identifier[returncode] )
identifier[result] = keyword[True]
keyword[if] identifier[self] . identifier[my_daemons] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[start] = identifier[time] . identifier[time] ()
keyword[for] identifier[daemon] keyword[in] identifier[list] ( identifier[self] . identifier[my_daemons] . identifier[values] ()):
identifier[procs] =[]
keyword[if] identifier[kill_children] :
identifier[procs] = identifier[daemon] [ literal[string] ]. identifier[children] ()
identifier[procs] . identifier[append] ( identifier[daemon] [ literal[string] ])
keyword[for] identifier[process] keyword[in] identifier[procs] :
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] , identifier[process] . identifier[name] ())
identifier[process] . identifier[terminate] ()
keyword[except] identifier[psutil] . identifier[AccessDenied] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[process] . identifier[name] (), identifier[process] . identifier[status] ())
identifier[procs] =[]
keyword[for] identifier[daemon] keyword[in] identifier[list] ( identifier[self] . identifier[my_daemons] . identifier[values] ()):
keyword[if] identifier[kill_children] :
identifier[procs] = identifier[daemon] [ literal[string] ]. identifier[children] ()
identifier[procs] . identifier[append] ( identifier[daemon] [ literal[string] ])
identifier[_] , identifier[alive] = identifier[psutil] . identifier[wait_procs] ( identifier[procs] , identifier[timeout] = identifier[timeout] , identifier[callback] = identifier[on_terminate] )
keyword[if] identifier[alive] :
keyword[for] identifier[process] keyword[in] identifier[alive] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[process] . identifier[name] ())
identifier[process] . identifier[kill] ()
identifier[_] , identifier[alive] = identifier[psutil] . identifier[wait_procs] ( identifier[alive] , identifier[timeout] = identifier[timeout] , identifier[callback] = identifier[on_terminate] )
keyword[if] identifier[alive] :
keyword[for] identifier[process] keyword[in] identifier[alive] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[process] . identifier[name] ())
identifier[result] = keyword[False]
identifier[logger] . identifier[debug] ( literal[string] , identifier[time] . identifier[time] ()- identifier[start] )
keyword[return] identifier[result] | def daemons_stop(self, timeout=30, kill_children=False):
"""Stop the Alignak daemons
Iterate over the self-launched daemons and their children list to send a TERM
Wait for daemons to terminate and then send a KILL for those that are not yet stopped
As a default behavior, only the launched daemons are killed, not their children.
Each daemon will manage its children killing
:param timeout: delay to wait before killing a daemon
:type timeout: int
:param kill_children: also kill the children (defaults to False)
:type kill_children: bool
:return: True if all daemons stopped
"""
def on_terminate(proc):
"""Process termination callback function"""
logger.debug('process %s terminated with exit code %s', proc.pid, proc.returncode)
result = True
if self.my_daemons:
logger.info('Alignak self-launched daemons stop:')
start = time.time()
for daemon in list(self.my_daemons.values()):
# Terminate the daemon and its children process
procs = []
if kill_children:
procs = daemon['process'].children() # depends on [control=['if'], data=[]]
procs.append(daemon['process'])
for process in procs:
try:
logger.info('- terminating process %s', process.name())
process.terminate() # depends on [control=['try'], data=[]]
except psutil.AccessDenied:
logger.warning('Process %s is %s', process.name(), process.status()) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['process']] # depends on [control=['for'], data=['daemon']]
procs = []
for daemon in list(self.my_daemons.values()):
# Stop the daemon and its children process
if kill_children:
procs = daemon['process'].children() # depends on [control=['if'], data=[]]
procs.append(daemon['process']) # depends on [control=['for'], data=['daemon']]
(_, alive) = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# Kill processes
for process in alive:
logger.warning('Process %s did not stopped, trying to kill', process.name())
process.kill() # depends on [control=['for'], data=['process']]
(_, alive) = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for process in alive:
logger.warning('process %s survived SIGKILL; giving up', process.name())
result = False # depends on [control=['for'], data=['process']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
logger.debug('Stopping daemons duration: %.2f seconds', time.time() - start) # depends on [control=['if'], data=[]]
return result |
def get_json(self):
""" Gets market chart data from today to a previous date """
today = dt.now()
DIRECTION = 'last'
epochs = date.get_end_start_epochs(today.year, today.month, today.day,
DIRECTION, self.unit, self.count)
return poloniex.chart_json(epochs['shifted'], epochs['initial'],
self.period, self.symbol)[0] | def function[get_json, parameter[self]]:
constant[ Gets market chart data from today to a previous date ]
variable[today] assign[=] call[name[dt].now, parameter[]]
variable[DIRECTION] assign[=] constant[last]
variable[epochs] assign[=] call[name[date].get_end_start_epochs, parameter[name[today].year, name[today].month, name[today].day, name[DIRECTION], name[self].unit, name[self].count]]
return[call[call[name[poloniex].chart_json, parameter[call[name[epochs]][constant[shifted]], call[name[epochs]][constant[initial]], name[self].period, name[self].symbol]]][constant[0]]] | keyword[def] identifier[get_json] ( identifier[self] ):
literal[string]
identifier[today] = identifier[dt] . identifier[now] ()
identifier[DIRECTION] = literal[string]
identifier[epochs] = identifier[date] . identifier[get_end_start_epochs] ( identifier[today] . identifier[year] , identifier[today] . identifier[month] , identifier[today] . identifier[day] ,
identifier[DIRECTION] , identifier[self] . identifier[unit] , identifier[self] . identifier[count] )
keyword[return] identifier[poloniex] . identifier[chart_json] ( identifier[epochs] [ literal[string] ], identifier[epochs] [ literal[string] ],
identifier[self] . identifier[period] , identifier[self] . identifier[symbol] )[ literal[int] ] | def get_json(self):
""" Gets market chart data from today to a previous date """
today = dt.now()
DIRECTION = 'last'
epochs = date.get_end_start_epochs(today.year, today.month, today.day, DIRECTION, self.unit, self.count)
return poloniex.chart_json(epochs['shifted'], epochs['initial'], self.period, self.symbol)[0] |
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict | def function[_set_default_format, parameter[self, vmin, vmax]]:
constant[Returns the default ticks spacing.]
if compare[name[self].plot_obj.date_axis_info is constant[None]] begin[:]
name[self].plot_obj.date_axis_info assign[=] call[name[self].finder, parameter[name[vmin], name[vmax], name[self].freq]]
variable[info] assign[=] name[self].plot_obj.date_axis_info
if name[self].isminor begin[:]
variable[format] assign[=] call[name[np].compress, parameter[binary_operation[call[name[info]][constant[min]] <ast.BitAnd object at 0x7da2590d6b60> call[name[np].logical_not, parameter[call[name[info]][constant[maj]]]]], name[info]]]
name[self].formatdict assign[=] <ast.DictComp object at 0x7da18f00f4f0>
return[name[self].formatdict] | keyword[def] identifier[_set_default_format] ( identifier[self] , identifier[vmin] , identifier[vmax] ):
literal[string]
keyword[if] identifier[self] . identifier[plot_obj] . identifier[date_axis_info] keyword[is] keyword[None] :
identifier[self] . identifier[plot_obj] . identifier[date_axis_info] = identifier[self] . identifier[finder] ( identifier[vmin] , identifier[vmax] , identifier[self] . identifier[freq] )
identifier[info] = identifier[self] . identifier[plot_obj] . identifier[date_axis_info]
keyword[if] identifier[self] . identifier[isminor] :
identifier[format] = identifier[np] . identifier[compress] ( identifier[info] [ literal[string] ]& identifier[np] . identifier[logical_not] ( identifier[info] [ literal[string] ]),
identifier[info] )
keyword[else] :
identifier[format] = identifier[np] . identifier[compress] ( identifier[info] [ literal[string] ], identifier[info] )
identifier[self] . identifier[formatdict] ={ identifier[x] : identifier[f] keyword[for] ( identifier[x] , identifier[_] , identifier[_] , identifier[f] ) keyword[in] identifier[format] }
keyword[return] identifier[self] . identifier[formatdict] | def _set_default_format(self, vmin, vmax):
"""Returns the default ticks spacing."""
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) # depends on [control=['if'], data=[]]
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']), info) # depends on [control=['if'], data=[]]
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict |
def image_resize(file_paths, new_dir, width, height):
'''Resizes all images with given paths to new dimensions.
Uses up/downscaling with antialiasing.
Parameters
----------
file_paths : List[str]
List of path strings for image to resize.
new_dir : str
Directory to place resized images.
width : int
Target width of new resized images.
height : int
Target height of new resized images.
'''
if new_dir[-1] != '/':
new_dir += '/'
if not os.path.exists(os.path.dirname(new_dir)):
os.makedirs(os.path.dirname(new_dir))
for f in tqdm.tqdm(file_paths):
img = Image.open(f).resize((width, height), Image.ANTIALIAS).convert('RGB')
new = os.path.join(new_dir, os.path.basename(f))
img.save(new) | def function[image_resize, parameter[file_paths, new_dir, width, height]]:
constant[Resizes all images with given paths to new dimensions.
Uses up/downscaling with antialiasing.
Parameters
----------
file_paths : List[str]
List of path strings for image to resize.
new_dir : str
Directory to place resized images.
width : int
Target width of new resized images.
height : int
Target height of new resized images.
]
if compare[call[name[new_dir]][<ast.UnaryOp object at 0x7da1b184b970>] not_equal[!=] constant[/]] begin[:]
<ast.AugAssign object at 0x7da1b184bcd0>
if <ast.UnaryOp object at 0x7da1b184add0> begin[:]
call[name[os].makedirs, parameter[call[name[os].path.dirname, parameter[name[new_dir]]]]]
for taget[name[f]] in starred[call[name[tqdm].tqdm, parameter[name[file_paths]]]] begin[:]
variable[img] assign[=] call[call[call[name[Image].open, parameter[name[f]]].resize, parameter[tuple[[<ast.Name object at 0x7da1b184a770>, <ast.Name object at 0x7da1b184bb20>]], name[Image].ANTIALIAS]].convert, parameter[constant[RGB]]]
variable[new] assign[=] call[name[os].path.join, parameter[name[new_dir], call[name[os].path.basename, parameter[name[f]]]]]
call[name[img].save, parameter[name[new]]] | keyword[def] identifier[image_resize] ( identifier[file_paths] , identifier[new_dir] , identifier[width] , identifier[height] ):
literal[string]
keyword[if] identifier[new_dir] [- literal[int] ]!= literal[string] :
identifier[new_dir] += literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[new_dir] )):
identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[new_dir] ))
keyword[for] identifier[f] keyword[in] identifier[tqdm] . identifier[tqdm] ( identifier[file_paths] ):
identifier[img] = identifier[Image] . identifier[open] ( identifier[f] ). identifier[resize] (( identifier[width] , identifier[height] ), identifier[Image] . identifier[ANTIALIAS] ). identifier[convert] ( literal[string] )
identifier[new] = identifier[os] . identifier[path] . identifier[join] ( identifier[new_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[f] ))
identifier[img] . identifier[save] ( identifier[new] ) | def image_resize(file_paths, new_dir, width, height):
"""Resizes all images with given paths to new dimensions.
Uses up/downscaling with antialiasing.
Parameters
----------
file_paths : List[str]
List of path strings for image to resize.
new_dir : str
Directory to place resized images.
width : int
Target width of new resized images.
height : int
Target height of new resized images.
"""
if new_dir[-1] != '/':
new_dir += '/' # depends on [control=['if'], data=[]]
if not os.path.exists(os.path.dirname(new_dir)):
os.makedirs(os.path.dirname(new_dir)) # depends on [control=['if'], data=[]]
for f in tqdm.tqdm(file_paths):
img = Image.open(f).resize((width, height), Image.ANTIALIAS).convert('RGB')
new = os.path.join(new_dir, os.path.basename(f))
img.save(new) # depends on [control=['for'], data=['f']] |
def link_down(self, ofp_port):
""" DESIGNATED_PORT/NON_DESIGNATED_PORT: change status to DISABLE.
ROOT_PORT: change status to DISABLE and recalculate STP. """
port = self.ports[ofp_port.port_no]
init_stp_flg = bool(port.role is ROOT_PORT)
port.down(PORT_STATE_DISABLE, msg_init=True)
self.ports_state[ofp_port.port_no] = ofp_port.state
if init_stp_flg:
self.recalculate_spanning_tree() | def function[link_down, parameter[self, ofp_port]]:
constant[ DESIGNATED_PORT/NON_DESIGNATED_PORT: change status to DISABLE.
ROOT_PORT: change status to DISABLE and recalculate STP. ]
variable[port] assign[=] call[name[self].ports][name[ofp_port].port_no]
variable[init_stp_flg] assign[=] call[name[bool], parameter[compare[name[port].role is name[ROOT_PORT]]]]
call[name[port].down, parameter[name[PORT_STATE_DISABLE]]]
call[name[self].ports_state][name[ofp_port].port_no] assign[=] name[ofp_port].state
if name[init_stp_flg] begin[:]
call[name[self].recalculate_spanning_tree, parameter[]] | keyword[def] identifier[link_down] ( identifier[self] , identifier[ofp_port] ):
literal[string]
identifier[port] = identifier[self] . identifier[ports] [ identifier[ofp_port] . identifier[port_no] ]
identifier[init_stp_flg] = identifier[bool] ( identifier[port] . identifier[role] keyword[is] identifier[ROOT_PORT] )
identifier[port] . identifier[down] ( identifier[PORT_STATE_DISABLE] , identifier[msg_init] = keyword[True] )
identifier[self] . identifier[ports_state] [ identifier[ofp_port] . identifier[port_no] ]= identifier[ofp_port] . identifier[state]
keyword[if] identifier[init_stp_flg] :
identifier[self] . identifier[recalculate_spanning_tree] () | def link_down(self, ofp_port):
""" DESIGNATED_PORT/NON_DESIGNATED_PORT: change status to DISABLE.
ROOT_PORT: change status to DISABLE and recalculate STP. """
port = self.ports[ofp_port.port_no]
init_stp_flg = bool(port.role is ROOT_PORT)
port.down(PORT_STATE_DISABLE, msg_init=True)
self.ports_state[ofp_port.port_no] = ofp_port.state
if init_stp_flg:
self.recalculate_spanning_tree() # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.