code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def __shouldSysExit(self, iteration):
"""
Checks to see if the model should exit based on the exitAfter dummy
parameter
"""
if self._exitAfter is None \
or iteration < self._exitAfter:
return False
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
zip(modelIDs, modelNums))
firstModelID = min(zip(*sameModelNumbers)[0])
return firstModelID == self._modelID | def function[__shouldSysExit, parameter[self, iteration]]:
constant[
Checks to see if the model should exit based on the exitAfter dummy
parameter
]
if <ast.BoolOp object at 0x7da18c4cfc70> begin[:]
return[constant[False]]
variable[results] assign[=] call[name[self]._jobsDAO.modelsGetFieldsForJob, parameter[name[self]._jobID, list[[<ast.Constant object at 0x7da18c4cdae0>]]]]
variable[modelIDs] assign[=] <ast.ListComp object at 0x7da18c4cdc00>
variable[modelNums] assign[=] <ast.ListComp object at 0x7da18c4ce9b0>
variable[sameModelNumbers] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da18c4cdd80>, call[name[zip], parameter[name[modelIDs], name[modelNums]]]]]
variable[firstModelID] assign[=] call[name[min], parameter[call[call[name[zip], parameter[<ast.Starred object at 0x7da18c4ced10>]]][constant[0]]]]
return[compare[name[firstModelID] equal[==] name[self]._modelID]] | keyword[def] identifier[__shouldSysExit] ( identifier[self] , identifier[iteration] ):
literal[string]
keyword[if] identifier[self] . identifier[_exitAfter] keyword[is] keyword[None] keyword[or] identifier[iteration] < identifier[self] . identifier[_exitAfter] :
keyword[return] keyword[False]
identifier[results] = identifier[self] . identifier[_jobsDAO] . identifier[modelsGetFieldsForJob] ( identifier[self] . identifier[_jobID] ,[ literal[string] ])
identifier[modelIDs] =[ identifier[e] [ literal[int] ] keyword[for] identifier[e] keyword[in] identifier[results] ]
identifier[modelNums] =[ identifier[json] . identifier[loads] ( identifier[e] [ literal[int] ][ literal[int] ])[ literal[string] ][ literal[string] ] keyword[for] identifier[e] keyword[in] identifier[results] ]
identifier[sameModelNumbers] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]== identifier[self] . identifier[modelIndex] ,
identifier[zip] ( identifier[modelIDs] , identifier[modelNums] ))
identifier[firstModelID] = identifier[min] ( identifier[zip] (* identifier[sameModelNumbers] )[ literal[int] ])
keyword[return] identifier[firstModelID] == identifier[self] . identifier[_modelID] | def __shouldSysExit(self, iteration):
"""
Checks to see if the model should exit based on the exitAfter dummy
parameter
"""
if self._exitAfter is None or iteration < self._exitAfter:
return False # depends on [control=['if'], data=[]]
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
sameModelNumbers = filter(lambda x: x[1] == self.modelIndex, zip(modelIDs, modelNums))
firstModelID = min(zip(*sameModelNumbers)[0])
return firstModelID == self._modelID |
def _le_ropenarrow(self, annot, p1, p2, lr):
"""Make stream commands for right open arrow line end symbol. "lr" denotes left (False) or right point.
"""
m, im, L, R, w, scol, fcol, opacity = self._le_annot_parms(annot, p1, p2)
shift = 2.5
d = shift * max(1, w)
p2 = R - (d/3., 0) if lr else L + (d/3., 0)
p1 = p2 + (2*d, -d) if lr else p2 + (-2*d, -d)
p3 = p2 + (2*d, d) if lr else p2 + (-2*d, d)
p1 *= im
p2 *= im
p3 *= im
ap = "\nq\n%s%f %f m\n" % (opacity, p1.x, p1.y)
ap += "%f %f l\n" % (p2.x, p2.y)
ap += "%f %f l\n" % (p3.x, p3.y)
ap += "%g w\n" % w
ap += scol + fcol + "S\nQ\n"
return ap | def function[_le_ropenarrow, parameter[self, annot, p1, p2, lr]]:
constant[Make stream commands for right open arrow line end symbol. "lr" denotes left (False) or right point.
]
<ast.Tuple object at 0x7da18f00d660> assign[=] call[name[self]._le_annot_parms, parameter[name[annot], name[p1], name[p2]]]
variable[shift] assign[=] constant[2.5]
variable[d] assign[=] binary_operation[name[shift] * call[name[max], parameter[constant[1], name[w]]]]
variable[p2] assign[=] <ast.IfExp object at 0x7da18f00c6d0>
variable[p1] assign[=] <ast.IfExp object at 0x7da18f00d600>
variable[p3] assign[=] <ast.IfExp object at 0x7da18f00c5e0>
<ast.AugAssign object at 0x7da18f00cc70>
<ast.AugAssign object at 0x7da18f00c6a0>
<ast.AugAssign object at 0x7da18f00ce50>
variable[ap] assign[=] binary_operation[constant[
q
%s%f %f m
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812f80>, <ast.Attribute object at 0x7da18f812020>, <ast.Attribute object at 0x7da18f811750>]]]
<ast.AugAssign object at 0x7da18f810070>
<ast.AugAssign object at 0x7da18f813940>
<ast.AugAssign object at 0x7da1b26acb80>
<ast.AugAssign object at 0x7da1b26ac6d0>
return[name[ap]] | keyword[def] identifier[_le_ropenarrow] ( identifier[self] , identifier[annot] , identifier[p1] , identifier[p2] , identifier[lr] ):
literal[string]
identifier[m] , identifier[im] , identifier[L] , identifier[R] , identifier[w] , identifier[scol] , identifier[fcol] , identifier[opacity] = identifier[self] . identifier[_le_annot_parms] ( identifier[annot] , identifier[p1] , identifier[p2] )
identifier[shift] = literal[int]
identifier[d] = identifier[shift] * identifier[max] ( literal[int] , identifier[w] )
identifier[p2] = identifier[R] -( identifier[d] / literal[int] , literal[int] ) keyword[if] identifier[lr] keyword[else] identifier[L] +( identifier[d] / literal[int] , literal[int] )
identifier[p1] = identifier[p2] +( literal[int] * identifier[d] ,- identifier[d] ) keyword[if] identifier[lr] keyword[else] identifier[p2] +(- literal[int] * identifier[d] ,- identifier[d] )
identifier[p3] = identifier[p2] +( literal[int] * identifier[d] , identifier[d] ) keyword[if] identifier[lr] keyword[else] identifier[p2] +(- literal[int] * identifier[d] , identifier[d] )
identifier[p1] *= identifier[im]
identifier[p2] *= identifier[im]
identifier[p3] *= identifier[im]
identifier[ap] = literal[string] %( identifier[opacity] , identifier[p1] . identifier[x] , identifier[p1] . identifier[y] )
identifier[ap] += literal[string] %( identifier[p2] . identifier[x] , identifier[p2] . identifier[y] )
identifier[ap] += literal[string] %( identifier[p3] . identifier[x] , identifier[p3] . identifier[y] )
identifier[ap] += literal[string] % identifier[w]
identifier[ap] += identifier[scol] + identifier[fcol] + literal[string]
keyword[return] identifier[ap] | def _le_ropenarrow(self, annot, p1, p2, lr):
"""Make stream commands for right open arrow line end symbol. "lr" denotes left (False) or right point.
"""
(m, im, L, R, w, scol, fcol, opacity) = self._le_annot_parms(annot, p1, p2)
shift = 2.5
d = shift * max(1, w)
p2 = R - (d / 3.0, 0) if lr else L + (d / 3.0, 0)
p1 = p2 + (2 * d, -d) if lr else p2 + (-2 * d, -d)
p3 = p2 + (2 * d, d) if lr else p2 + (-2 * d, d)
p1 *= im
p2 *= im
p3 *= im
ap = '\nq\n%s%f %f m\n' % (opacity, p1.x, p1.y)
ap += '%f %f l\n' % (p2.x, p2.y)
ap += '%f %f l\n' % (p3.x, p3.y)
ap += '%g w\n' % w
ap += scol + fcol + 'S\nQ\n'
return ap |
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs) | def function[delete_cookie, parameter[self, key]]:
constant[ Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. ]
call[name[kwargs]][constant[max_age]] assign[=] <ast.UnaryOp object at 0x7da1b18ac460>
call[name[kwargs]][constant[expires]] assign[=] constant[0]
call[name[self].set_cookie, parameter[name[key], constant[]]] | keyword[def] identifier[delete_cookie] ( identifier[self] , identifier[key] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]=- literal[int]
identifier[kwargs] [ literal[string] ]= literal[int]
identifier[self] . identifier[set_cookie] ( identifier[key] , literal[string] ,** identifier[kwargs] ) | def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs) |
def timeout_selecting(self):
"""Timeout of selecting on SELECTING state.
Not specifiyed in [:rfc:`7844`].
See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
"""
logger.debug('C2.1: T In %s, timeout receiving response to select.',
self.current_state)
if len(self.offers) >= MAX_OFFERS_COLLECTED:
logger.debug('C2.2: T Maximum number of offers reached, '
'raise REQUESTING.')
raise self.REQUESTING()
if self.discover_attempts >= MAX_ATTEMPTS_DISCOVER:
logger.debug('C2.3: T Maximum number of discover retries is %s'
' and already sent %s.',
MAX_ATTEMPTS_DISCOVER, self.discover_attempts)
if len(self.offers) <= 0:
logger.debug('C2.4: T. But no OFFERS where received, '
'raise ERROR.')
raise self.ERROR()
logger.debug('C2.4: F. But there is some OFFERS, '
'raise REQUESTING.')
raise self.REQUESTING()
logger.debug('C2.2: F. Still not received all OFFERS, but not '
'max # attemps reached, raise SELECTING.')
raise self.SELECTING() | def function[timeout_selecting, parameter[self]]:
constant[Timeout of selecting on SELECTING state.
Not specifiyed in [:rfc:`7844`].
See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
]
call[name[logger].debug, parameter[constant[C2.1: T In %s, timeout receiving response to select.], name[self].current_state]]
if compare[call[name[len], parameter[name[self].offers]] greater_or_equal[>=] name[MAX_OFFERS_COLLECTED]] begin[:]
call[name[logger].debug, parameter[constant[C2.2: T Maximum number of offers reached, raise REQUESTING.]]]
<ast.Raise object at 0x7da1b03b95d0>
if compare[name[self].discover_attempts greater_or_equal[>=] name[MAX_ATTEMPTS_DISCOVER]] begin[:]
call[name[logger].debug, parameter[constant[C2.3: T Maximum number of discover retries is %s and already sent %s.], name[MAX_ATTEMPTS_DISCOVER], name[self].discover_attempts]]
if compare[call[name[len], parameter[name[self].offers]] less_or_equal[<=] constant[0]] begin[:]
call[name[logger].debug, parameter[constant[C2.4: T. But no OFFERS where received, raise ERROR.]]]
<ast.Raise object at 0x7da1b03ba500>
call[name[logger].debug, parameter[constant[C2.4: F. But there is some OFFERS, raise REQUESTING.]]]
<ast.Raise object at 0x7da1b03ba3b0>
call[name[logger].debug, parameter[constant[C2.2: F. Still not received all OFFERS, but not max # attemps reached, raise SELECTING.]]]
<ast.Raise object at 0x7da1b03ba710> | keyword[def] identifier[timeout_selecting] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[current_state] )
keyword[if] identifier[len] ( identifier[self] . identifier[offers] )>= identifier[MAX_OFFERS_COLLECTED] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[raise] identifier[self] . identifier[REQUESTING] ()
keyword[if] identifier[self] . identifier[discover_attempts] >= identifier[MAX_ATTEMPTS_DISCOVER] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] ,
identifier[MAX_ATTEMPTS_DISCOVER] , identifier[self] . identifier[discover_attempts] )
keyword[if] identifier[len] ( identifier[self] . identifier[offers] )<= literal[int] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[raise] identifier[self] . identifier[ERROR] ()
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[raise] identifier[self] . identifier[REQUESTING] ()
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[raise] identifier[self] . identifier[SELECTING] () | def timeout_selecting(self):
"""Timeout of selecting on SELECTING state.
Not specifiyed in [:rfc:`7844`].
See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
"""
logger.debug('C2.1: T In %s, timeout receiving response to select.', self.current_state)
if len(self.offers) >= MAX_OFFERS_COLLECTED:
logger.debug('C2.2: T Maximum number of offers reached, raise REQUESTING.')
raise self.REQUESTING() # depends on [control=['if'], data=[]]
if self.discover_attempts >= MAX_ATTEMPTS_DISCOVER:
logger.debug('C2.3: T Maximum number of discover retries is %s and already sent %s.', MAX_ATTEMPTS_DISCOVER, self.discover_attempts)
if len(self.offers) <= 0:
logger.debug('C2.4: T. But no OFFERS where received, raise ERROR.')
raise self.ERROR() # depends on [control=['if'], data=[]]
logger.debug('C2.4: F. But there is some OFFERS, raise REQUESTING.')
raise self.REQUESTING() # depends on [control=['if'], data=['MAX_ATTEMPTS_DISCOVER']]
logger.debug('C2.2: F. Still not received all OFFERS, but not max # attemps reached, raise SELECTING.')
raise self.SELECTING() |
def exists(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
'''
return self._aggregation_op(tf.reduce_any, self, vars_list) | def function[exists, parameter[self, vars_list]]:
constant[Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
]
return[call[name[self]._aggregation_op, parameter[name[tf].reduce_any, name[self], name[vars_list]]]] | keyword[def] identifier[exists] ( identifier[self] , identifier[vars_list] : identifier[List] [ identifier[str] ])-> literal[string] :
literal[string]
keyword[return] identifier[self] . identifier[_aggregation_op] ( identifier[tf] . identifier[reduce_any] , identifier[self] , identifier[vars_list] ) | def exists(self, vars_list: List[str]) -> 'TensorFluent':
"""Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
"""
return self._aggregation_op(tf.reduce_any, self, vars_list) |
def depends(func=None, after=None, before=None, priority=None):
"""Decorator to specify test dependencies
:param after: The test needs to run after this/these tests. String or list of strings.
:param before: The test needs to run before this/these tests. String or list of strings.
"""
if not (func is None or inspect.ismethod(func) or inspect.isfunction(func)):
raise ValueError("depends decorator can only be used on functions or methods")
if not (after or before or priority):
raise ValueError("depends decorator needs at least one argument")
# This avoids some nesting in the decorator
# If called without func the decorator was called with optional args
# so we'll return a function with those args filled in.
if func is None:
return partial(depends, after=after, before=before, priority=priority)
def self_check(a, b):
if a == b:
raise ValueError("Test '{}' cannot depend on itself".format(a))
def handle_dep(conditions, _before=True):
if conditions:
if type(conditions) is not list:
conditions = [conditions]
for cond in conditions:
if hasattr(cond, '__call__'):
cond = cond.__name__
self_check(func.__name__, cond)
if _before:
soft_dependencies[cond].add(func.__name__)
else:
dependencies[func.__name__].add(cond)
handle_dep(before)
handle_dep(after, False)
if priority:
priorities[func.__name__] = priority
@wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner | def function[depends, parameter[func, after, before, priority]]:
constant[Decorator to specify test dependencies
:param after: The test needs to run after this/these tests. String or list of strings.
:param before: The test needs to run before this/these tests. String or list of strings.
]
if <ast.UnaryOp object at 0x7da1b253afe0> begin[:]
<ast.Raise object at 0x7da1b253ae00>
if <ast.UnaryOp object at 0x7da1b2539a80> begin[:]
<ast.Raise object at 0x7da1b253beb0>
if compare[name[func] is constant[None]] begin[:]
return[call[name[partial], parameter[name[depends]]]]
def function[self_check, parameter[a, b]]:
if compare[name[a] equal[==] name[b]] begin[:]
<ast.Raise object at 0x7da1b25384f0>
def function[handle_dep, parameter[conditions, _before]]:
if name[conditions] begin[:]
if compare[call[name[type], parameter[name[conditions]]] is_not name[list]] begin[:]
variable[conditions] assign[=] list[[<ast.Name object at 0x7da1b2538dc0>]]
for taget[name[cond]] in starred[name[conditions]] begin[:]
if call[name[hasattr], parameter[name[cond], constant[__call__]]] begin[:]
variable[cond] assign[=] name[cond].__name__
call[name[self_check], parameter[name[func].__name__, name[cond]]]
if name[_before] begin[:]
call[call[name[soft_dependencies]][name[cond]].add, parameter[name[func].__name__]]
call[name[handle_dep], parameter[name[before]]]
call[name[handle_dep], parameter[name[after], constant[False]]]
if name[priority] begin[:]
call[name[priorities]][name[func].__name__] assign[=] name[priority]
def function[inner, parameter[]]:
return[call[name[func], parameter[<ast.Starred object at 0x7da1b253af80>]]]
return[name[inner]] | keyword[def] identifier[depends] ( identifier[func] = keyword[None] , identifier[after] = keyword[None] , identifier[before] = keyword[None] , identifier[priority] = keyword[None] ):
literal[string]
keyword[if] keyword[not] ( identifier[func] keyword[is] keyword[None] keyword[or] identifier[inspect] . identifier[ismethod] ( identifier[func] ) keyword[or] identifier[inspect] . identifier[isfunction] ( identifier[func] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] ( identifier[after] keyword[or] identifier[before] keyword[or] identifier[priority] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[func] keyword[is] keyword[None] :
keyword[return] identifier[partial] ( identifier[depends] , identifier[after] = identifier[after] , identifier[before] = identifier[before] , identifier[priority] = identifier[priority] )
keyword[def] identifier[self_check] ( identifier[a] , identifier[b] ):
keyword[if] identifier[a] == identifier[b] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[a] ))
keyword[def] identifier[handle_dep] ( identifier[conditions] , identifier[_before] = keyword[True] ):
keyword[if] identifier[conditions] :
keyword[if] identifier[type] ( identifier[conditions] ) keyword[is] keyword[not] identifier[list] :
identifier[conditions] =[ identifier[conditions] ]
keyword[for] identifier[cond] keyword[in] identifier[conditions] :
keyword[if] identifier[hasattr] ( identifier[cond] , literal[string] ):
identifier[cond] = identifier[cond] . identifier[__name__]
identifier[self_check] ( identifier[func] . identifier[__name__] , identifier[cond] )
keyword[if] identifier[_before] :
identifier[soft_dependencies] [ identifier[cond] ]. identifier[add] ( identifier[func] . identifier[__name__] )
keyword[else] :
identifier[dependencies] [ identifier[func] . identifier[__name__] ]. identifier[add] ( identifier[cond] )
identifier[handle_dep] ( identifier[before] )
identifier[handle_dep] ( identifier[after] , keyword[False] )
keyword[if] identifier[priority] :
identifier[priorities] [ identifier[func] . identifier[__name__] ]= identifier[priority]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[inner] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[inner] | def depends(func=None, after=None, before=None, priority=None):
"""Decorator to specify test dependencies
:param after: The test needs to run after this/these tests. String or list of strings.
:param before: The test needs to run before this/these tests. String or list of strings.
"""
if not (func is None or inspect.ismethod(func) or inspect.isfunction(func)):
raise ValueError('depends decorator can only be used on functions or methods') # depends on [control=['if'], data=[]]
if not (after or before or priority):
raise ValueError('depends decorator needs at least one argument') # depends on [control=['if'], data=[]]
# This avoids some nesting in the decorator
# If called without func the decorator was called with optional args
# so we'll return a function with those args filled in.
if func is None:
return partial(depends, after=after, before=before, priority=priority) # depends on [control=['if'], data=[]]
def self_check(a, b):
if a == b:
raise ValueError("Test '{}' cannot depend on itself".format(a)) # depends on [control=['if'], data=['a']]
def handle_dep(conditions, _before=True):
if conditions:
if type(conditions) is not list:
conditions = [conditions] # depends on [control=['if'], data=[]]
for cond in conditions:
if hasattr(cond, '__call__'):
cond = cond.__name__ # depends on [control=['if'], data=[]]
self_check(func.__name__, cond)
if _before:
soft_dependencies[cond].add(func.__name__) # depends on [control=['if'], data=[]]
else:
dependencies[func.__name__].add(cond) # depends on [control=['for'], data=['cond']] # depends on [control=['if'], data=[]]
handle_dep(before)
handle_dep(after, False)
if priority:
priorities[func.__name__] = priority # depends on [control=['if'], data=[]]
@wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner |
def add_extruded_obstacles(self, top_polys, make_ccw=True):
"""
Add polyhedras to the Place by giving their top polygon and
applying extrusion along the z axis. The resulting polygon
from the intersection will be declared as a hole in the Surface.
:param top_polys: Polygons to be extruded to the Surface.
:type top_polys: list of ``pyny.Polygon``
:param make_ccw: If True, points will be sorted ccw.
:type make_ccw: bool
:returns: None
.. note:: When a top polygon is projected and it
instersects multiple Surface's polygons, a independent
polyhedron will be created for each individual
intersection\*.
.. warning:: The top polygons have to be over the Surface, that
is, their z=0 projection have to be inside of Surface's z=0
projection.
.. warning:: If the Polyhedra are not created with this method
or ``Polyhedron.by_two_polygons()``, holes will not be
added.
"""
if type(top_polys) != list: top_polys = [top_polys]
for poly1 in top_polys:
if type(poly1) != Polygon:
obstacle = Polygon(poly1, make_ccw)
intersections_dict = self.surface.intersect_with(obstacle)
base = []
for i, xy in intersections_dict.items():
base.append(self.surface[i].get_height(xy, full=True))
base_surf = Surface(base)
base_surf.melt()
for base_poly in base_surf:
obst_points = obstacle.get_height(base_poly.points,
full=True)
self.surface.holes.append(base_poly)
self.polyhedra.append(Polyhedron.by_two_polygons(
base_poly.points,
obst_points,
make_ccw)) | def function[add_extruded_obstacles, parameter[self, top_polys, make_ccw]]:
constant[
Add polyhedras to the Place by giving their top polygon and
applying extrusion along the z axis. The resulting polygon
from the intersection will be declared as a hole in the Surface.
:param top_polys: Polygons to be extruded to the Surface.
:type top_polys: list of ``pyny.Polygon``
:param make_ccw: If True, points will be sorted ccw.
:type make_ccw: bool
:returns: None
.. note:: When a top polygon is projected and it
instersects multiple Surface's polygons, a independent
polyhedron will be created for each individual
intersection\*.
.. warning:: The top polygons have to be over the Surface, that
is, their z=0 projection have to be inside of Surface's z=0
projection.
.. warning:: If the Polyhedra are not created with this method
or ``Polyhedron.by_two_polygons()``, holes will not be
added.
]
if compare[call[name[type], parameter[name[top_polys]]] not_equal[!=] name[list]] begin[:]
variable[top_polys] assign[=] list[[<ast.Name object at 0x7da1b24ad4b0>]]
for taget[name[poly1]] in starred[name[top_polys]] begin[:]
if compare[call[name[type], parameter[name[poly1]]] not_equal[!=] name[Polygon]] begin[:]
variable[obstacle] assign[=] call[name[Polygon], parameter[name[poly1], name[make_ccw]]]
variable[intersections_dict] assign[=] call[name[self].surface.intersect_with, parameter[name[obstacle]]]
variable[base] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b24ae290>, <ast.Name object at 0x7da1b24afdc0>]]] in starred[call[name[intersections_dict].items, parameter[]]] begin[:]
call[name[base].append, parameter[call[call[name[self].surface][name[i]].get_height, parameter[name[xy]]]]]
variable[base_surf] assign[=] call[name[Surface], parameter[name[base]]]
call[name[base_surf].melt, parameter[]]
for taget[name[base_poly]] in starred[name[base_surf]] begin[:]
variable[obst_points] assign[=] call[name[obstacle].get_height, parameter[name[base_poly].points]]
call[name[self].surface.holes.append, parameter[name[base_poly]]]
call[name[self].polyhedra.append, parameter[call[name[Polyhedron].by_two_polygons, parameter[name[base_poly].points, name[obst_points], name[make_ccw]]]]] | keyword[def] identifier[add_extruded_obstacles] ( identifier[self] , identifier[top_polys] , identifier[make_ccw] = keyword[True] ):
literal[string]
keyword[if] identifier[type] ( identifier[top_polys] )!= identifier[list] : identifier[top_polys] =[ identifier[top_polys] ]
keyword[for] identifier[poly1] keyword[in] identifier[top_polys] :
keyword[if] identifier[type] ( identifier[poly1] )!= identifier[Polygon] :
identifier[obstacle] = identifier[Polygon] ( identifier[poly1] , identifier[make_ccw] )
identifier[intersections_dict] = identifier[self] . identifier[surface] . identifier[intersect_with] ( identifier[obstacle] )
identifier[base] =[]
keyword[for] identifier[i] , identifier[xy] keyword[in] identifier[intersections_dict] . identifier[items] ():
identifier[base] . identifier[append] ( identifier[self] . identifier[surface] [ identifier[i] ]. identifier[get_height] ( identifier[xy] , identifier[full] = keyword[True] ))
identifier[base_surf] = identifier[Surface] ( identifier[base] )
identifier[base_surf] . identifier[melt] ()
keyword[for] identifier[base_poly] keyword[in] identifier[base_surf] :
identifier[obst_points] = identifier[obstacle] . identifier[get_height] ( identifier[base_poly] . identifier[points] ,
identifier[full] = keyword[True] )
identifier[self] . identifier[surface] . identifier[holes] . identifier[append] ( identifier[base_poly] )
identifier[self] . identifier[polyhedra] . identifier[append] ( identifier[Polyhedron] . identifier[by_two_polygons] (
identifier[base_poly] . identifier[points] ,
identifier[obst_points] ,
identifier[make_ccw] )) | def add_extruded_obstacles(self, top_polys, make_ccw=True):
"""
Add polyhedras to the Place by giving their top polygon and
applying extrusion along the z axis. The resulting polygon
from the intersection will be declared as a hole in the Surface.
:param top_polys: Polygons to be extruded to the Surface.
:type top_polys: list of ``pyny.Polygon``
:param make_ccw: If True, points will be sorted ccw.
:type make_ccw: bool
:returns: None
.. note:: When a top polygon is projected and it
instersects multiple Surface's polygons, a independent
polyhedron will be created for each individual
intersection\\*.
.. warning:: The top polygons have to be over the Surface, that
is, their z=0 projection have to be inside of Surface's z=0
projection.
.. warning:: If the Polyhedra are not created with this method
or ``Polyhedron.by_two_polygons()``, holes will not be
added.
"""
if type(top_polys) != list:
top_polys = [top_polys] # depends on [control=['if'], data=[]]
for poly1 in top_polys:
if type(poly1) != Polygon:
obstacle = Polygon(poly1, make_ccw) # depends on [control=['if'], data=['Polygon']]
intersections_dict = self.surface.intersect_with(obstacle)
base = []
for (i, xy) in intersections_dict.items():
base.append(self.surface[i].get_height(xy, full=True)) # depends on [control=['for'], data=[]]
base_surf = Surface(base)
base_surf.melt()
for base_poly in base_surf:
obst_points = obstacle.get_height(base_poly.points, full=True)
self.surface.holes.append(base_poly)
self.polyhedra.append(Polyhedron.by_two_polygons(base_poly.points, obst_points, make_ccw)) # depends on [control=['for'], data=['base_poly']] # depends on [control=['for'], data=['poly1']] |
def _discarded_reads1_out_file_name(self):
"""Checks if file name is set for discarded reads1 output.
Returns absolute path."""
if self.Parameters['-3'].isOn():
discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value))
else:
raise ValueError(
"No discarded-reads1 (flag -3) output path specified")
return discarded_reads1 | def function[_discarded_reads1_out_file_name, parameter[self]]:
constant[Checks if file name is set for discarded reads1 output.
Returns absolute path.]
if call[call[name[self].Parameters][constant[-3]].isOn, parameter[]] begin[:]
variable[discarded_reads1] assign[=] call[name[self]._absolute, parameter[call[name[str], parameter[call[name[self].Parameters][constant[-3]].Value]]]]
return[name[discarded_reads1]] | keyword[def] identifier[_discarded_reads1_out_file_name] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[Parameters] [ literal[string] ]. identifier[isOn] ():
identifier[discarded_reads1] = identifier[self] . identifier[_absolute] ( identifier[str] ( identifier[self] . identifier[Parameters] [ literal[string] ]. identifier[Value] ))
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[return] identifier[discarded_reads1] | def _discarded_reads1_out_file_name(self):
"""Checks if file name is set for discarded reads1 output.
Returns absolute path."""
if self.Parameters['-3'].isOn():
discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value)) # depends on [control=['if'], data=[]]
else:
raise ValueError('No discarded-reads1 (flag -3) output path specified')
return discarded_reads1 |
def constraints_stmt(stmt, env=None):
    """
    Generate type constraints for a single statement.

    Since a statement may define new names or return an expression,
    the constraints that result are in a ConstrainedEnv mapping names
    to types, with constraints, and maybe having a return type (which
    is a constrained type).

    :param stmt: the ``ast`` statement node to analyze.
    :param env: optional mapping of names to types; mutated in place by
        name-introducing statements and shared with the returned result.
    :return: a ``ConstrainedEnv`` holding the (possibly extended) env,
        the generated constraints, and an optional return type.
    :raises NotImplementedError: for unsupported statement forms
        (multi-target assignments, non-name assignment targets, and any
        statement type not handled below).
    """
    env = env or {}
    if isinstance(stmt, ast.FunctionDef):
        # Check the body in an environment extended with the argument
        # bindings; constraints from each body statement accumulate.
        arg_env = fn_env(stmt.args)
        body_env = extended_env(env, arg_env)
        constraints = []
        return_type = None  # TODO: should be fresh and constrained?
        for body_stmt in stmt.body:
            cs = constraints_stmt(body_stmt, env=body_env)
            body_env.update(cs.env)
            constraints += cs.constraints
            # A function may return from several branches; the overall
            # return type is the union of all of them.
            return_type = union(return_type, cs.return_type)
        env[stmt.name] = Function(arg_types=[arg_env[arg.id] for arg in stmt.args.args],
                                  return_type=return_type)
        return ConstrainedEnv(env=env, constraints=constraints)
    elif isinstance(stmt, ast.Expr):
        # Bare expression statement: keep its constraints, drop its type.
        constrained_ty = constraints_expr(stmt.value, env=env)
        return ConstrainedEnv(env=env, constraints=constrained_ty.constraints)
    elif isinstance(stmt, ast.Return):
        if stmt.value:
            expr_result = constraints_expr(stmt.value, env=env)
            return ConstrainedEnv(env=env, constraints=expr_result.constraints, return_type=expr_result.type)
        else:
            # A bare ``return`` produces None.
            result = fresh()
            return ConstrainedEnv(env=env, constraints=[Constraint(subtype=result, supertype=NamedType('NoneType'))])
    elif isinstance(stmt, ast.Assign):
        if len(stmt.targets) > 1:
            raise NotImplementedError('Cannot generate constraints for multi-target assignments yet')
        target_node = stmt.targets[0]
        if not isinstance(target_node, ast.Name):
            # e.g. attribute or subscript assignment; previously this
            # crashed with AttributeError on the missing ``.id``.
            raise NotImplementedError('Cannot generate constraints for assignment target %s' % target_node)
        expr_result = constraints_expr(stmt.value, env=env)
        target = target_node.id
        # For an assignment, we actually generate a fresh variable so that it can be the union of all things assigned
        # to it. We do not do any typestate funkiness.
        if target not in env:
            env[target] = fresh()
        return ConstrainedEnv(env=env,
                              constraints=expr_result.constraints + [Constraint(subtype=expr_result.type,
                                                                                supertype=env[target])])
    else:
        raise NotImplementedError('Constraint gen for stmt %s' % stmt)
constant[
Since a statement may define new names or return an expression ,
the constraints that result are in a
ConstrainedEnv mapping names to types, with constraints, and maybe
having a return type (which is a constrained type)
]
variable[env] assign[=] <ast.BoolOp object at 0x7da1b0a4a7a0>
if call[name[isinstance], parameter[name[stmt], name[ast].FunctionDef]] begin[:]
variable[arg_env] assign[=] call[name[fn_env], parameter[name[stmt].args]]
variable[body_env] assign[=] call[name[extended_env], parameter[name[env], name[arg_env]]]
variable[constraints] assign[=] list[[]]
variable[return_type] assign[=] constant[None]
for taget[name[body_stmt]] in starred[name[stmt].body] begin[:]
variable[cs] assign[=] call[name[constraints_stmt], parameter[name[body_stmt]]]
call[name[body_env].update, parameter[name[cs].env]]
<ast.AugAssign object at 0x7da1b0a4a9b0>
variable[return_type] assign[=] call[name[union], parameter[name[return_type], name[cs].return_type]]
call[name[env]][name[stmt].name] assign[=] call[name[Function], parameter[]]
return[call[name[ConstrainedEnv], parameter[]]] | keyword[def] identifier[constraints_stmt] ( identifier[stmt] , identifier[env] = keyword[None] ):
literal[string]
identifier[env] = identifier[env] keyword[or] {}
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[FunctionDef] ):
identifier[arg_env] = identifier[fn_env] ( identifier[stmt] . identifier[args] )
identifier[body_env] = identifier[extended_env] ( identifier[env] , identifier[arg_env] )
identifier[constraints] =[]
identifier[return_type] = keyword[None]
keyword[for] identifier[body_stmt] keyword[in] identifier[stmt] . identifier[body] :
identifier[cs] = identifier[constraints_stmt] ( identifier[body_stmt] , identifier[env] = identifier[body_env] )
identifier[body_env] . identifier[update] ( identifier[cs] . identifier[env] )
identifier[constraints] += identifier[cs] . identifier[constraints]
identifier[return_type] = identifier[union] ( identifier[return_type] , identifier[cs] . identifier[return_type] )
identifier[env] [ identifier[stmt] . identifier[name] ]= identifier[Function] ( identifier[arg_types] =[ identifier[arg_env] [ identifier[arg] . identifier[id] ] keyword[for] identifier[arg] keyword[in] identifier[stmt] . identifier[args] . identifier[args] ],
identifier[return_type] = identifier[return_type] )
keyword[return] identifier[ConstrainedEnv] ( identifier[env] = identifier[env] , identifier[constraints] = identifier[constraints] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[Expr] ):
identifier[constrained_ty] = identifier[constraints_expr] ( identifier[stmt] . identifier[value] , identifier[env] = identifier[env] )
keyword[return] identifier[ConstrainedEnv] ( identifier[env] = identifier[env] , identifier[constraints] = identifier[constrained_ty] . identifier[constraints] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[Return] ):
keyword[if] identifier[stmt] . identifier[value] :
identifier[expr_result] = identifier[constraints_expr] ( identifier[stmt] . identifier[value] , identifier[env] = identifier[env] )
keyword[return] identifier[ConstrainedEnv] ( identifier[env] = identifier[env] , identifier[constraints] = identifier[expr_result] . identifier[constraints] , identifier[return_type] = identifier[expr_result] . identifier[type] )
keyword[else] :
identifier[result] = identifier[fresh] ()
keyword[return] identifier[ConstrainedEnv] ( identifier[env] = identifier[env] , identifier[constraints] =[ identifier[Constraint] ( identifier[subtype] = identifier[result] , identifier[supertype] = identifier[NamedType] ( literal[string] ))])
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[Assign] ):
keyword[if] identifier[len] ( identifier[stmt] . identifier[targets] )> literal[int] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[expr_result] = identifier[constraints_expr] ( identifier[stmt] . identifier[value] , identifier[env] = identifier[env] )
identifier[target] = identifier[stmt] . identifier[targets] [ literal[int] ]. identifier[id]
keyword[if] identifier[target] keyword[not] keyword[in] identifier[env] :
identifier[env] [ identifier[target] ]= identifier[fresh] ()
keyword[return] identifier[ConstrainedEnv] ( identifier[env] = identifier[env] ,
identifier[constraints] = identifier[expr_result] . identifier[constraints] +[ identifier[Constraint] ( identifier[subtype] = identifier[expr_result] . identifier[type] ,
identifier[supertype] = identifier[env] [ identifier[target] ])])
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] % identifier[stmt] ) | def constraints_stmt(stmt, env=None):
"""
Since a statement may define new names or return an expression ,
the constraints that result are in a
ConstrainedEnv mapping names to types, with constraints, and maybe
having a return type (which is a constrained type)
"""
env = env or {}
if isinstance(stmt, ast.FunctionDef):
arg_env = fn_env(stmt.args)
body_env = extended_env(env, arg_env)
constraints = []
return_type = None # TODO: should be fresh and constrained?
for body_stmt in stmt.body:
cs = constraints_stmt(body_stmt, env=body_env)
body_env.update(cs.env)
constraints += cs.constraints
return_type = union(return_type, cs.return_type) # depends on [control=['for'], data=['body_stmt']]
env[stmt.name] = Function(arg_types=[arg_env[arg.id] for arg in stmt.args.args], return_type=return_type)
return ConstrainedEnv(env=env, constraints=constraints) # depends on [control=['if'], data=[]]
elif isinstance(stmt, ast.Expr):
constrained_ty = constraints_expr(stmt.value, env=env)
return ConstrainedEnv(env=env, constraints=constrained_ty.constraints) # depends on [control=['if'], data=[]]
elif isinstance(stmt, ast.Return):
if stmt.value:
expr_result = constraints_expr(stmt.value, env=env)
return ConstrainedEnv(env=env, constraints=expr_result.constraints, return_type=expr_result.type) # depends on [control=['if'], data=[]]
else:
result = fresh()
return ConstrainedEnv(env=env, constraints=[Constraint(subtype=result, supertype=NamedType('NoneType'))]) # depends on [control=['if'], data=[]]
elif isinstance(stmt, ast.Assign):
if len(stmt.targets) > 1:
raise NotImplementedError('Cannot generate constraints for multi-target assignments yet') # depends on [control=['if'], data=[]]
expr_result = constraints_expr(stmt.value, env=env)
target = stmt.targets[0].id
# For an assignment, we actually generate a fresh variable so that it can be the union of all things assigned
# to it. We do not do any typestate funkiness.
if target not in env:
env[target] = fresh() # depends on [control=['if'], data=['target', 'env']]
return ConstrainedEnv(env=env, constraints=expr_result.constraints + [Constraint(subtype=expr_result.type, supertype=env[target])]) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Constraint gen for stmt %s' % stmt) |
def collect(self, order_ref):
"""Collect the progress status of the order with the specified
order reference.
:param order_ref: The UUID string specifying which order to
collect status from.
:type order_ref: str
:return: The CollectResponse parsed to a dictionary.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
"""
try:
out = self.client.service.Collect(order_ref)
except Error as e:
raise get_error_class(e, "Could not complete Collect call.")
return self._dictify(out) | def function[collect, parameter[self, order_ref]]:
constant[Collect the progress status of the order with the specified
order reference.
:param order_ref: The UUID string specifying which order to
collect status from.
:type order_ref: str
:return: The CollectResponse parsed to a dictionary.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
]
<ast.Try object at 0x7da2047ebd30>
return[call[name[self]._dictify, parameter[name[out]]]] | keyword[def] identifier[collect] ( identifier[self] , identifier[order_ref] ):
literal[string]
keyword[try] :
identifier[out] = identifier[self] . identifier[client] . identifier[service] . identifier[Collect] ( identifier[order_ref] )
keyword[except] identifier[Error] keyword[as] identifier[e] :
keyword[raise] identifier[get_error_class] ( identifier[e] , literal[string] )
keyword[return] identifier[self] . identifier[_dictify] ( identifier[out] ) | def collect(self, order_ref):
"""Collect the progress status of the order with the specified
order reference.
:param order_ref: The UUID string specifying which order to
collect status from.
:type order_ref: str
:return: The CollectResponse parsed to a dictionary.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
"""
try:
out = self.client.service.Collect(order_ref) # depends on [control=['try'], data=[]]
except Error as e:
raise get_error_class(e, 'Could not complete Collect call.') # depends on [control=['except'], data=['e']]
return self._dictify(out) |
def send_alert_to_configured_integration(integration_alert):
    """Send IntegrationAlert to configured integration.

    Collects the alert's fields, dispatches them through the integration
    module's ``send_event``, and updates ``integration_alert.status``.
    On send failures the call retries itself (bounded by
    ``MAX_SEND_RETRIES``), sleeping ``SEND_ALERT_DATA_INTERVAL`` seconds
    between attempts.

    :param integration_alert: the IntegrationAlert to deliver; mutated in
        place (``status``, ``send_time``, ``output_data``, ``retries``).
    """
    try:
        alert = integration_alert.alert
        configured_integration = integration_alert.configured_integration
        integration = configured_integration.integration
        integration_actions_instance = configured_integration.integration.module
        if integration.required_fields:
            # Skip quietly when the alert cannot satisfy the integration's
            # declared field contract; expected for unrelated alert types.
            if not all([hasattr(alert, _) for _ in integration.required_fields]):
                logger.debug("Alert does not have all required_fields (%s) for integration %s, skipping",
                             integration.required_fields,
                             integration.name)
                return
        exclude_fields = ["alert_type", "service_type"]
        alert_fields = {}
        for field in alert.__slots__:
            if hasattr(alert, field) and field not in exclude_fields:
                alert_fields[field] = getattr(alert, field)
        logger.debug("Sending alert %s to %s", alert_fields, integration.name)
        output_data, output_file_content = integration_actions_instance.send_event(alert_fields)
        if integration.polling_enabled:
            # Polled integrations are finalized later by the polling loop.
            integration_alert.status = IntegrationAlertStatuses.POLLING.name
            polling_integration_alerts.append(integration_alert)
        else:
            integration_alert.status = IntegrationAlertStatuses.DONE.name
            integration_alert.send_time = get_current_datetime_utc()
            integration_alert.output_data = json.dumps(output_data)
        # TODO: do something with successfully handled alerts? They are all written to debug log file
    except exceptions.IntegrationMissingRequiredFieldError as exc:
        # Bug fix: the format string previously had one %s placeholder for
        # two arguments, so the log record itself failed to format.
        logger.exception("Send response formatting for integration alert %s failed. "
                         "Missing required fields: %s",
                         integration_alert,
                         exc.message)
        integration_alert.status = IntegrationAlertStatuses.ERROR_MISSING_SEND_FIELDS.name
    except exceptions.IntegrationOutputFormatError:
        logger.exception("Send response formatting for integration alert %s failed", integration_alert)
        integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING_FORMATTING.name
    except exceptions.IntegrationSendEventError as exc:
        # Cap at MAX_SEND_RETRIES so we never exceed celery max retries,
        # then decrement for this failed attempt.
        integration_send_retries = integration_alert.retries if integration_alert.retries <= MAX_SEND_RETRIES \
            else MAX_SEND_RETRIES
        send_retries_left = integration_send_retries - 1
        integration_alert.retries = send_retries_left
        logger.error("Sending integration alert %s failed. Message: %s. Retries left: %s",
                     integration_alert,
                     exc.message,
                     send_retries_left)
        if send_retries_left == 0:
            integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING.name
        if send_retries_left > 0:
            sleep(SEND_ALERT_DATA_INTERVAL)
            send_alert_to_configured_integration(integration_alert)
constant[Send IntegrationAlert to configured integration.]
<ast.Try object at 0x7da1b12922c0> | keyword[def] identifier[send_alert_to_configured_integration] ( identifier[integration_alert] ):
literal[string]
keyword[try] :
identifier[alert] = identifier[integration_alert] . identifier[alert]
identifier[configured_integration] = identifier[integration_alert] . identifier[configured_integration]
identifier[integration] = identifier[configured_integration] . identifier[integration]
identifier[integration_actions_instance] = identifier[configured_integration] . identifier[integration] . identifier[module]
identifier[alert_fields] = identifier[dict] ()
keyword[if] identifier[integration] . identifier[required_fields] :
keyword[if] keyword[not] identifier[all] ([ identifier[hasattr] ( identifier[alert] , identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[integration] . identifier[required_fields] ]):
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[integration] . identifier[required_fields] ,
identifier[integration] . identifier[name] )
keyword[return]
identifier[exclude_fields] =[ literal[string] , literal[string] ]
identifier[alert_fields] ={}
keyword[for] identifier[field] keyword[in] identifier[alert] . identifier[__slots__] :
keyword[if] identifier[hasattr] ( identifier[alert] , identifier[field] ) keyword[and] identifier[field] keyword[not] keyword[in] identifier[exclude_fields] :
identifier[alert_fields] [ identifier[field] ]= identifier[getattr] ( identifier[alert] , identifier[field] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[alert_fields] , identifier[integration] . identifier[name] )
identifier[output_data] , identifier[output_file_content] = identifier[integration_actions_instance] . identifier[send_event] ( identifier[alert_fields] )
keyword[if] identifier[integration] . identifier[polling_enabled] :
identifier[integration_alert] . identifier[status] = identifier[IntegrationAlertStatuses] . identifier[POLLING] . identifier[name]
identifier[polling_integration_alerts] . identifier[append] ( identifier[integration_alert] )
keyword[else] :
identifier[integration_alert] . identifier[status] = identifier[IntegrationAlertStatuses] . identifier[DONE] . identifier[name]
identifier[integration_alert] . identifier[send_time] = identifier[get_current_datetime_utc] ()
identifier[integration_alert] . identifier[output_data] = identifier[json] . identifier[dumps] ( identifier[output_data] )
keyword[except] identifier[exceptions] . identifier[IntegrationMissingRequiredFieldError] keyword[as] identifier[exc] :
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[integration_alert] ,
identifier[exc] . identifier[message] )
identifier[integration_alert] . identifier[status] = identifier[IntegrationAlertStatuses] . identifier[ERROR_MISSING_SEND_FIELDS] . identifier[name]
keyword[except] identifier[exceptions] . identifier[IntegrationOutputFormatError] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[integration_alert] )
identifier[integration_alert] . identifier[status] = identifier[IntegrationAlertStatuses] . identifier[ERROR_SENDING_FORMATTING] . identifier[name]
keyword[except] identifier[exceptions] . identifier[IntegrationSendEventError] keyword[as] identifier[exc] :
identifier[integration_send_retries] = identifier[integration_alert] . identifier[retries] keyword[if] identifier[integration_alert] . identifier[retries] <= identifier[MAX_SEND_RETRIES] keyword[else] identifier[MAX_SEND_RETRIES]
identifier[send_retries_left] = identifier[integration_send_retries] - literal[int]
identifier[integration_alert] . identifier[retries] = identifier[send_retries_left]
identifier[logger] . identifier[error] ( literal[string] ,
identifier[integration_alert] ,
identifier[exc] . identifier[message] ,
identifier[send_retries_left] )
keyword[if] identifier[send_retries_left] == literal[int] :
identifier[integration_alert] . identifier[status] = identifier[IntegrationAlertStatuses] . identifier[ERROR_SENDING] . identifier[name]
keyword[if] identifier[send_retries_left] > literal[int] :
identifier[sleep] ( identifier[SEND_ALERT_DATA_INTERVAL] )
identifier[send_alert_to_configured_integration] ( identifier[integration_alert] ) | def send_alert_to_configured_integration(integration_alert):
"""Send IntegrationAlert to configured integration."""
try:
alert = integration_alert.alert
configured_integration = integration_alert.configured_integration
integration = configured_integration.integration
integration_actions_instance = configured_integration.integration.module
alert_fields = dict()
if integration.required_fields:
if not all([hasattr(alert, _) for _ in integration.required_fields]):
logger.debug('Alert does not have all required_fields (%s) for integration %s, skipping', integration.required_fields, integration.name)
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
exclude_fields = ['alert_type', 'service_type']
alert_fields = {}
for field in alert.__slots__:
if hasattr(alert, field) and field not in exclude_fields:
alert_fields[field] = getattr(alert, field) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
logger.debug('Sending alert %s to %s', alert_fields, integration.name)
(output_data, output_file_content) = integration_actions_instance.send_event(alert_fields)
if integration.polling_enabled:
integration_alert.status = IntegrationAlertStatuses.POLLING.name
polling_integration_alerts.append(integration_alert) # depends on [control=['if'], data=[]]
else:
integration_alert.status = IntegrationAlertStatuses.DONE.name
integration_alert.send_time = get_current_datetime_utc()
integration_alert.output_data = json.dumps(output_data) # depends on [control=['try'], data=[]]
# TODO: do something with successfully handled alerts? They are all written to debug log file
except exceptions.IntegrationMissingRequiredFieldError as exc:
logger.exception('Send response formatting for integration alert %s failed. Missing required fields', integration_alert, exc.message)
integration_alert.status = IntegrationAlertStatuses.ERROR_MISSING_SEND_FIELDS.name # depends on [control=['except'], data=['exc']]
except exceptions.IntegrationOutputFormatError:
logger.exception('Send response formatting for integration alert %s failed', integration_alert)
integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING_FORMATTING.name # depends on [control=['except'], data=[]]
except exceptions.IntegrationSendEventError as exc:
integration_send_retries = integration_alert.retries if integration_alert.retries <= MAX_SEND_RETRIES else MAX_SEND_RETRIES # making sure we do not exceed celery max retries
send_retries_left = integration_send_retries - 1
integration_alert.retries = send_retries_left
logger.error('Sending integration alert %s failed. Message: %s. Retries left: %s', integration_alert, exc.message, send_retries_left)
if send_retries_left == 0:
integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING.name # depends on [control=['if'], data=[]]
if send_retries_left > 0:
sleep(SEND_ALERT_DATA_INTERVAL)
send_alert_to_configured_integration(integration_alert) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']] |
def setPermanence(self, columnIndex, permanence):
  """
  Assign new permanence values to the synapses of a single column. The
  size of ``permanence`` must match the number of inputs.

  :param columnIndex: (int) index of the column whose permanences are set.
  :param permanence: (list) the permanence values to install.
  """
  assert columnIndex < self._numColumns
  # Install the values without clipping/raising the permanences.
  self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False)
constant[
Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set.
]
assert[compare[name[columnIndex] less[<] name[self]._numColumns]]
call[name[self]._updatePermanencesForColumn, parameter[name[permanence], name[columnIndex]]] | keyword[def] identifier[setPermanence] ( identifier[self] , identifier[columnIndex] , identifier[permanence] ):
literal[string]
keyword[assert] ( identifier[columnIndex] < identifier[self] . identifier[_numColumns] )
identifier[self] . identifier[_updatePermanencesForColumn] ( identifier[permanence] , identifier[columnIndex] , identifier[raisePerm] = keyword[False] ) | def setPermanence(self, columnIndex, permanence):
"""
Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set.
"""
assert columnIndex < self._numColumns
self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False) |
def exebench(width):
    """
    Benchmark every available image-scaling backend on the same picture.

    benchorg.jpg is
    'http://upload.wikimedia.org/wikipedia/commons/d/df/SAND_LUE.jpg'
    """
    height = width * 2 / 3
    # Table of (label, scaling function); iterated in the same order as
    # the original hand-unrolled benchmark stanzas.
    scalers = (
        ('kaa.imlib2', imlib2_scale),
        ("PIL", pil_scale),
        ("pgmagick(blob-read)", pgmagick_scale_from_blob),
        ("pgmagick(normal-read)", pgmagick_scale),
        ("pgmagick(scale+sharpen)", pgmagick_scale_plus_sharpen),
        ("opencv", opencv_scale),
        ("pyimlib2", pyimlib2_scale),
        ("pyimlib2_with_pgsharpen", pyimlib2_scale_with_pgmagicksharpen),
    )
    with Benchmarker(width=30, loop=N) as bm:
        for label, scale in scalers:
            for i in bm(label):
                scale('benchorg.jpg', width, height)
    return bm.results
constant[
benchorg.jpg is
'http://upload.wikimedia.org/wikipedia/commons/d/df/SAND_LUE.jpg'
]
variable[height] assign[=] binary_operation[binary_operation[name[width] * constant[2]] / constant[3]]
with call[name[Benchmarker], parameter[]] begin[:]
for taget[name[i]] in starred[call[name[bm], parameter[constant[kaa.imlib2]]]] begin[:]
call[name[imlib2_scale], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[PIL]]]] begin[:]
call[name[pil_scale], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[pgmagick(blob-read)]]]] begin[:]
call[name[pgmagick_scale_from_blob], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[pgmagick(normal-read)]]]] begin[:]
call[name[pgmagick_scale], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[pgmagick(scale+sharpen)]]]] begin[:]
call[name[pgmagick_scale_plus_sharpen], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[opencv]]]] begin[:]
call[name[opencv_scale], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[pyimlib2]]]] begin[:]
call[name[pyimlib2_scale], parameter[constant[benchorg.jpg], name[width], name[height]]]
for taget[name[i]] in starred[call[name[bm], parameter[constant[pyimlib2_with_pgsharpen]]]] begin[:]
call[name[pyimlib2_scale_with_pgmagicksharpen], parameter[constant[benchorg.jpg], name[width], name[height]]]
return[name[bm].results] | keyword[def] identifier[exebench] ( identifier[width] ):
literal[string]
identifier[height] = identifier[width] * literal[int] / literal[int]
keyword[with] identifier[Benchmarker] ( identifier[width] = literal[int] , identifier[loop] = identifier[N] ) keyword[as] identifier[bm] :
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[imlib2_scale] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pil_scale] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pgmagick_scale_from_blob] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pgmagick_scale] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pgmagick_scale_plus_sharpen] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[opencv_scale] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pyimlib2_scale] ( literal[string] , identifier[width] , identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[bm] ( literal[string] ):
identifier[pyimlib2_scale_with_pgmagicksharpen] ( literal[string] , identifier[width] , identifier[height] )
keyword[return] identifier[bm] . identifier[results] | def exebench(width):
"""
benchorg.jpg is
'http://upload.wikimedia.org/wikipedia/commons/d/df/SAND_LUE.jpg'
"""
height = width * 2 / 3
with Benchmarker(width=30, loop=N) as bm:
for i in bm('kaa.imlib2'):
imlib2_scale('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('PIL'):
pil_scale('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('pgmagick(blob-read)'):
pgmagick_scale_from_blob('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('pgmagick(normal-read)'):
pgmagick_scale('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('pgmagick(scale+sharpen)'):
pgmagick_scale_plus_sharpen('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('opencv'):
opencv_scale('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('pyimlib2'):
pyimlib2_scale('benchorg.jpg', width, height) # depends on [control=['for'], data=[]]
for i in bm('pyimlib2_with_pgsharpen'):
pyimlib2_scale_with_pgmagicksharpen('benchorg.jpg', width, height) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['bm']]
return bm.results |
    def DeleteInstance(self, InstanceName, **extra):
        # pylint: disable=invalid-name
        """
        Delete an instance.
        This method performs the DeleteInstance operation
        (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
        methods performing such operations.
        If the operation succeeds, this method returns.
        Otherwise, this method raises an exception.
        Parameters:
          InstanceName (:class:`~pywbem.CIMInstanceName`):
            The instance path of the instance to be deleted.
            If this object does not specify a namespace, the default namespace
            of the connection is used.
            Its `host` attribute will be ignored.
          **extra :
            Additional keyword arguments are passed as additional operation
            parameters to the WBEM server.
            Note that :term:`DSP0200` does not define any additional parameters
            for this operation.
        Raises:
            Exceptions described in :class:`~pywbem.WBEMConnection`.
        """
        # ``exc`` carries the failure (or None on success) into the
        # ``finally`` block, for the statistics timer and the recorders.
        exc = None
        method_name = 'DeleteInstance'
        # Stage the call arguments with any configured operation recorders
        # before issuing the request.
        if self._operation_recorders:
            self.operation_recorder_reset()
            self.operation_recorder_stage_pywbem_args(
                method=method_name,
                InstanceName=InstanceName,
                **extra)
        try:
            stats = self.statistics.start_timer(method_name)
            # Resolve the target namespace from the instance path (falling
            # back to the connection default) and normalize the path itself.
            namespace = self._iparam_namespace_from_objectname(
                InstanceName, 'InstanceName')
            instancename = self._iparam_instancename(InstanceName)
            self._imethodcall(
                method_name,
                namespace,
                InstanceName=instancename,
                has_return_value=False,
                **extra)
            return
        except (CIMXMLParseError, XMLParseError) as exce:
            # Attach the raw request/reply to XML parse errors to ease
            # debugging, then re-raise unchanged.
            exce.request_data = self.last_raw_request
            exce.response_data = self.last_raw_reply
            exc = exce
            raise
        except Exception as exce:
            # Remember any other failure for the finally block; re-raise.
            exc = exce
            raise
        finally:
            # Always stop the timer and record the outcome, on success
            # (exc is None) as well as on failure.
            self._last_operation_time = stats.stop_timer(
                self.last_request_len, self.last_reply_len,
                self.last_server_response_time, exc)
            if self._operation_recorders:
                self.operation_recorder_stage_result(None, exc)
constant[
Delete an instance.
This method performs the DeleteInstance operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns.
Otherwise, this method raises an exception.
Parameters:
InstanceName (:class:`~pywbem.CIMInstanceName`):
The instance path of the instance to be deleted.
If this object does not specify a namespace, the default namespace
of the connection is used.
Its `host` attribute will be ignored.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
]
variable[exc] assign[=] constant[None]
variable[method_name] assign[=] constant[DeleteInstance]
if name[self]._operation_recorders begin[:]
call[name[self].operation_recorder_reset, parameter[]]
call[name[self].operation_recorder_stage_pywbem_args, parameter[]]
<ast.Try object at 0x7da20c6c7bb0> | keyword[def] identifier[DeleteInstance] ( identifier[self] , identifier[InstanceName] ,** identifier[extra] ):
literal[string]
identifier[exc] = keyword[None]
identifier[method_name] = literal[string]
keyword[if] identifier[self] . identifier[_operation_recorders] :
identifier[self] . identifier[operation_recorder_reset] ()
identifier[self] . identifier[operation_recorder_stage_pywbem_args] (
identifier[method] = identifier[method_name] ,
identifier[InstanceName] = identifier[InstanceName] ,
** identifier[extra] )
keyword[try] :
identifier[stats] = identifier[self] . identifier[statistics] . identifier[start_timer] ( identifier[method_name] )
identifier[namespace] = identifier[self] . identifier[_iparam_namespace_from_objectname] (
identifier[InstanceName] , literal[string] )
identifier[instancename] = identifier[self] . identifier[_iparam_instancename] ( identifier[InstanceName] )
identifier[self] . identifier[_imethodcall] (
identifier[method_name] ,
identifier[namespace] ,
identifier[InstanceName] = identifier[instancename] ,
identifier[has_return_value] = keyword[False] ,
** identifier[extra] )
keyword[return]
keyword[except] ( identifier[CIMXMLParseError] , identifier[XMLParseError] ) keyword[as] identifier[exce] :
identifier[exce] . identifier[request_data] = identifier[self] . identifier[last_raw_request]
identifier[exce] . identifier[response_data] = identifier[self] . identifier[last_raw_reply]
identifier[exc] = identifier[exce]
keyword[raise]
keyword[except] identifier[Exception] keyword[as] identifier[exce] :
identifier[exc] = identifier[exce]
keyword[raise]
keyword[finally] :
identifier[self] . identifier[_last_operation_time] = identifier[stats] . identifier[stop_timer] (
identifier[self] . identifier[last_request_len] , identifier[self] . identifier[last_reply_len] ,
identifier[self] . identifier[last_server_response_time] , identifier[exc] )
keyword[if] identifier[self] . identifier[_operation_recorders] :
identifier[self] . identifier[operation_recorder_stage_result] ( keyword[None] , identifier[exc] ) | def DeleteInstance(self, InstanceName, **extra):
# pylint: disable=invalid-name
'\n Delete an instance.\n\n This method performs the DeleteInstance operation\n (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all\n methods performing such operations.\n\n If the operation succeeds, this method returns.\n Otherwise, this method raises an exception.\n\n Parameters:\n\n InstanceName (:class:`~pywbem.CIMInstanceName`):\n The instance path of the instance to be deleted.\n If this object does not specify a namespace, the default namespace\n of the connection is used.\n Its `host` attribute will be ignored.\n\n **extra :\n Additional keyword arguments are passed as additional operation\n parameters to the WBEM server.\n Note that :term:`DSP0200` does not define any additional parameters\n for this operation.\n\n Raises:\n\n Exceptions described in :class:`~pywbem.WBEMConnection`.\n '
exc = None
method_name = 'DeleteInstance'
if self._operation_recorders:
self.operation_recorder_reset()
self.operation_recorder_stage_pywbem_args(method=method_name, InstanceName=InstanceName, **extra) # depends on [control=['if'], data=[]]
try:
stats = self.statistics.start_timer(method_name)
namespace = self._iparam_namespace_from_objectname(InstanceName, 'InstanceName')
instancename = self._iparam_instancename(InstanceName)
self._imethodcall(method_name, namespace, InstanceName=instancename, has_return_value=False, **extra)
return # depends on [control=['try'], data=[]]
except (CIMXMLParseError, XMLParseError) as exce:
exce.request_data = self.last_raw_request
exce.response_data = self.last_raw_reply
exc = exce
raise # depends on [control=['except'], data=['exce']]
except Exception as exce:
exc = exce
raise # depends on [control=['except'], data=['exce']]
finally:
self._last_operation_time = stats.stop_timer(self.last_request_len, self.last_reply_len, self.last_server_response_time, exc)
if self._operation_recorders:
self.operation_recorder_stage_result(None, exc) # depends on [control=['if'], data=[]] |
def in_file(self, filename: str) -> Iterator[FunctionDesc]:
"""
Returns an iterator over all of the functions definitions that are
contained within a given file.
"""
yield from self.__filename_to_functions.get(filename, []) | def function[in_file, parameter[self, filename]]:
constant[
Returns an iterator over all of the functions definitions that are
contained within a given file.
]
<ast.YieldFrom object at 0x7da20c6a9b40> | keyword[def] identifier[in_file] ( identifier[self] , identifier[filename] : identifier[str] )-> identifier[Iterator] [ identifier[FunctionDesc] ]:
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[__filename_to_functions] . identifier[get] ( identifier[filename] ,[]) | def in_file(self, filename: str) -> Iterator[FunctionDesc]:
"""
Returns an iterator over all of the functions definitions that are
contained within a given file.
"""
yield from self.__filename_to_functions.get(filename, []) |
def get(self, project_name, updatetime=None, md5sum=None):
'''get project data object, return None if not exists'''
if time.time() - self.last_check_projects > self.CHECK_PROJECTS_INTERVAL:
self._check_projects()
if self._need_update(project_name, updatetime, md5sum):
self._update_project(project_name)
return self.projects.get(project_name, None) | def function[get, parameter[self, project_name, updatetime, md5sum]]:
constant[get project data object, return None if not exists]
if compare[binary_operation[call[name[time].time, parameter[]] - name[self].last_check_projects] greater[>] name[self].CHECK_PROJECTS_INTERVAL] begin[:]
call[name[self]._check_projects, parameter[]]
if call[name[self]._need_update, parameter[name[project_name], name[updatetime], name[md5sum]]] begin[:]
call[name[self]._update_project, parameter[name[project_name]]]
return[call[name[self].projects.get, parameter[name[project_name], constant[None]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[project_name] , identifier[updatetime] = keyword[None] , identifier[md5sum] = keyword[None] ):
literal[string]
keyword[if] identifier[time] . identifier[time] ()- identifier[self] . identifier[last_check_projects] > identifier[self] . identifier[CHECK_PROJECTS_INTERVAL] :
identifier[self] . identifier[_check_projects] ()
keyword[if] identifier[self] . identifier[_need_update] ( identifier[project_name] , identifier[updatetime] , identifier[md5sum] ):
identifier[self] . identifier[_update_project] ( identifier[project_name] )
keyword[return] identifier[self] . identifier[projects] . identifier[get] ( identifier[project_name] , keyword[None] ) | def get(self, project_name, updatetime=None, md5sum=None):
"""get project data object, return None if not exists"""
if time.time() - self.last_check_projects > self.CHECK_PROJECTS_INTERVAL:
self._check_projects() # depends on [control=['if'], data=[]]
if self._need_update(project_name, updatetime, md5sum):
self._update_project(project_name) # depends on [control=['if'], data=[]]
return self.projects.get(project_name, None) |
def adapt(self, d, x):
"""
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
nu = self.mu / (self.eps + np.dot(x, x))
self.w += nu * x * e**3 | def function[adapt, parameter[self, d, x]]:
constant[
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
]
variable[y] assign[=] call[name[np].dot, parameter[name[self].w, name[x]]]
variable[e] assign[=] binary_operation[name[d] - name[y]]
variable[nu] assign[=] binary_operation[name[self].mu / binary_operation[name[self].eps + call[name[np].dot, parameter[name[x], name[x]]]]]
<ast.AugAssign object at 0x7da1b0e4ab30> | keyword[def] identifier[adapt] ( identifier[self] , identifier[d] , identifier[x] ):
literal[string]
identifier[y] = identifier[np] . identifier[dot] ( identifier[self] . identifier[w] , identifier[x] )
identifier[e] = identifier[d] - identifier[y]
identifier[nu] = identifier[self] . identifier[mu] /( identifier[self] . identifier[eps] + identifier[np] . identifier[dot] ( identifier[x] , identifier[x] ))
identifier[self] . identifier[w] += identifier[nu] * identifier[x] * identifier[e] ** literal[int] | def adapt(self, d, x):
"""
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
nu = self.mu / (self.eps + np.dot(x, x))
self.w += nu * x * e ** 3 |
def decls(
self,
name=None,
function=None,
decl_type=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of declarations, that are matched defined criteria"""
return (
self._find_multiple(
self._impl_matchers[
scopedef_t.decl],
name=name,
function=function,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
) | def function[decls, parameter[self, name, function, decl_type, header_dir, header_file, recursive, allow_empty]]:
constant[returns a set of declarations, that are matched defined criteria]
return[call[name[self]._find_multiple, parameter[call[name[self]._impl_matchers][name[scopedef_t].decl]]]] | keyword[def] identifier[decls] (
identifier[self] ,
identifier[name] = keyword[None] ,
identifier[function] = keyword[None] ,
identifier[decl_type] = keyword[None] ,
identifier[header_dir] = keyword[None] ,
identifier[header_file] = keyword[None] ,
identifier[recursive] = keyword[None] ,
identifier[allow_empty] = keyword[None] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_find_multiple] (
identifier[self] . identifier[_impl_matchers] [
identifier[scopedef_t] . identifier[decl] ],
identifier[name] = identifier[name] ,
identifier[function] = identifier[function] ,
identifier[decl_type] = identifier[decl_type] ,
identifier[header_dir] = identifier[header_dir] ,
identifier[header_file] = identifier[header_file] ,
identifier[recursive] = identifier[recursive] ,
identifier[allow_empty] = identifier[allow_empty] )
) | def decls(self, name=None, function=None, decl_type=None, header_dir=None, header_file=None, recursive=None, allow_empty=None):
"""returns a set of declarations, that are matched defined criteria"""
return self._find_multiple(self._impl_matchers[scopedef_t.decl], name=name, function=function, decl_type=decl_type, header_dir=header_dir, header_file=header_file, recursive=recursive, allow_empty=allow_empty) |
def _prepare_init_params_from_job_description(cls, job_details):
"""Convert the transform job description to init params that can be handled by the class constructor
Args:
job_details (dict): the returned job details from a describe_transform_job API call.
Returns:
dict: The transformed init_params
"""
init_params = dict()
init_params['model_name'] = job_details['ModelName']
init_params['instance_count'] = job_details['TransformResources']['InstanceCount']
init_params['instance_type'] = job_details['TransformResources']['InstanceType']
init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId')
init_params['strategy'] = job_details.get('BatchStrategy')
init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith')
init_params['output_path'] = job_details['TransformOutput']['S3OutputPath']
init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId')
init_params['accept'] = job_details['TransformOutput'].get('Accept')
init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms')
init_params['max_payload'] = job_details.get('MaxPayloadInMB')
init_params['base_transform_job_name'] = job_details['TransformJobName']
return init_params | def function[_prepare_init_params_from_job_description, parameter[cls, job_details]]:
constant[Convert the transform job description to init params that can be handled by the class constructor
Args:
job_details (dict): the returned job details from a describe_transform_job API call.
Returns:
dict: The transformed init_params
]
variable[init_params] assign[=] call[name[dict], parameter[]]
call[name[init_params]][constant[model_name]] assign[=] call[name[job_details]][constant[ModelName]]
call[name[init_params]][constant[instance_count]] assign[=] call[call[name[job_details]][constant[TransformResources]]][constant[InstanceCount]]
call[name[init_params]][constant[instance_type]] assign[=] call[call[name[job_details]][constant[TransformResources]]][constant[InstanceType]]
call[name[init_params]][constant[volume_kms_key]] assign[=] call[call[name[job_details]][constant[TransformResources]].get, parameter[constant[VolumeKmsKeyId]]]
call[name[init_params]][constant[strategy]] assign[=] call[name[job_details].get, parameter[constant[BatchStrategy]]]
call[name[init_params]][constant[assemble_with]] assign[=] call[call[name[job_details]][constant[TransformOutput]].get, parameter[constant[AssembleWith]]]
call[name[init_params]][constant[output_path]] assign[=] call[call[name[job_details]][constant[TransformOutput]]][constant[S3OutputPath]]
call[name[init_params]][constant[output_kms_key]] assign[=] call[call[name[job_details]][constant[TransformOutput]].get, parameter[constant[KmsKeyId]]]
call[name[init_params]][constant[accept]] assign[=] call[call[name[job_details]][constant[TransformOutput]].get, parameter[constant[Accept]]]
call[name[init_params]][constant[max_concurrent_transforms]] assign[=] call[name[job_details].get, parameter[constant[MaxConcurrentTransforms]]]
call[name[init_params]][constant[max_payload]] assign[=] call[name[job_details].get, parameter[constant[MaxPayloadInMB]]]
call[name[init_params]][constant[base_transform_job_name]] assign[=] call[name[job_details]][constant[TransformJobName]]
return[name[init_params]] | keyword[def] identifier[_prepare_init_params_from_job_description] ( identifier[cls] , identifier[job_details] ):
literal[string]
identifier[init_params] = identifier[dict] ()
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ][ literal[string] ]
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ][ literal[string] ]
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] . identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ][ literal[string] ]
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] . identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] . identifier[get] ( literal[string] )
identifier[init_params] [ literal[string] ]= identifier[job_details] [ literal[string] ]
keyword[return] identifier[init_params] | def _prepare_init_params_from_job_description(cls, job_details):
"""Convert the transform job description to init params that can be handled by the class constructor
Args:
job_details (dict): the returned job details from a describe_transform_job API call.
Returns:
dict: The transformed init_params
"""
init_params = dict()
init_params['model_name'] = job_details['ModelName']
init_params['instance_count'] = job_details['TransformResources']['InstanceCount']
init_params['instance_type'] = job_details['TransformResources']['InstanceType']
init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId')
init_params['strategy'] = job_details.get('BatchStrategy')
init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith')
init_params['output_path'] = job_details['TransformOutput']['S3OutputPath']
init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId')
init_params['accept'] = job_details['TransformOutput'].get('Accept')
init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms')
init_params['max_payload'] = job_details.get('MaxPayloadInMB')
init_params['base_transform_job_name'] = job_details['TransformJobName']
return init_params |
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
"""
assert(znear != zfar)
h = math.tan(fovy / 360.0 * math.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar) | def function[perspective, parameter[fovy, aspect, znear, zfar]]:
constant[Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
]
assert[compare[name[znear] not_equal[!=] name[zfar]]]
variable[h] assign[=] binary_operation[call[name[math].tan, parameter[binary_operation[binary_operation[name[fovy] / constant[360.0]] * name[math].pi]]] * name[znear]]
variable[w] assign[=] binary_operation[name[h] * name[aspect]]
return[call[name[frustum], parameter[<ast.UnaryOp object at 0x7da18c4cf520>, name[w], <ast.UnaryOp object at 0x7da18c4ce9e0>, name[h], name[znear], name[zfar]]]] | keyword[def] identifier[perspective] ( identifier[fovy] , identifier[aspect] , identifier[znear] , identifier[zfar] ):
literal[string]
keyword[assert] ( identifier[znear] != identifier[zfar] )
identifier[h] = identifier[math] . identifier[tan] ( identifier[fovy] / literal[int] * identifier[math] . identifier[pi] )* identifier[znear]
identifier[w] = identifier[h] * identifier[aspect]
keyword[return] identifier[frustum] (- identifier[w] , identifier[w] ,- identifier[h] , identifier[h] , identifier[znear] , identifier[zfar] ) | def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
"""
assert znear != zfar
h = math.tan(fovy / 360.0 * math.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar) |
def start_logging(log_fpath=None, mode='a', appname='default', log_dir=None):
r"""
Overwrites utool print functions to use a logger
CommandLine:
python -m utool.util_logging --test-start_logging:0
python -m utool.util_logging --test-start_logging:1
Example0:
>>> # DISABLE_DOCTEST
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> ut.util_logging._utool_print()('hello world')
>>> ut.util_logging._utool_write()('writing1')
>>> ut.util_logging._utool_write()('writing2\n')
>>> ut.util_logging._utool_write()('writing3')
>>> ut.util_logging._utool_flush()()
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> print('current_log_text =\n%s' % (current_log_text,))
>>> assert current_log_text.find('hello world') > 0, 'cant hello world'
>>> assert current_log_text.find('writing1writing2') > 0, 'cant find writing1writing2'
>>> assert current_log_text.find('writing3') > 0, 'cant find writing3'
Example1:
>>> # DISABLE_DOCTEST
>>> # Ensure that progress is logged
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> [x for x in ut.ProgressIter(range(0, 1000), freq=4)]
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> assert current_log_text.find('rate') > 0, 'progress was not logged'
>>> print(current_log_text)
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
global __CURRENT_LOG_FPATH__
if LOGGING_VERBOSE:
print('[utool] start_logging()')
# FIXME: The test for doctest may not work
if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and not __inside_doctest():
if LOGGING_VERBOSE:
print('[utool] start_logging()... rootcheck OK')
#logging.config.dictConfig(LOGGING)
if log_fpath is None:
log_fpath = get_log_fpath(num='next', appname=appname, log_dir=log_dir)
__CURRENT_LOG_FPATH__ = log_fpath
# Print what is about to happen
if VERBOSE or LOGGING_VERBOSE:
startmsg = ('logging to log_fpath=%r' % log_fpath)
_utool_print()(startmsg)
# Create root logger
__UTOOL_ROOT_LOGGER__ = logging.getLogger('root')
__UTOOL_ROOT_LOGGER__.setLevel('DEBUG')
# create file handler which logs even debug messages
#fh = logging.handlers.WatchedFileHandler(log_fpath)
logfile_handler = logging.FileHandler(log_fpath, mode=mode)
#stdout_handler = logging.StreamHandler(__UTOOL_STDOUT__)
stdout_handler = CustomStreamHandler(__UTOOL_STDOUT__)
stdout_handler.terminator = ''
# http://stackoverflow.com/questions/7168790/suppress-newline-in-python-logging-module
#stdout_handler.terminator = ''
add_logging_handler(logfile_handler, format_='file')
add_logging_handler(stdout_handler, format_='stdout')
__UTOOL_ROOT_LOGGER__.propagate = False
__UTOOL_ROOT_LOGGER__.setLevel(logging.DEBUG)
# Overwrite utool functions with the logging functions
def utool_flush(*args):
""" flushes whatever is in the current utool write buffer """
# Flushes only the stdout handler
stdout_handler.flush()
#__UTOOL_ROOT_LOGGER__.flush()
#global __UTOOL_WRITE_BUFFER__
#if len(__UTOOL_WRITE_BUFFER__) > 0:
# msg = ''.join(__UTOOL_WRITE_BUFFER__)
# #sys.stdout.write('FLUSHING %r\n' % (len(__UTOOL_WRITE_BUFFER__)))
# __UTOOL_WRITE_BUFFER__ = []
# return __UTOOL_ROOT_LOGGER__.info(msg)
#__PYTHON_FLUSH__()
def utool_write(*args):
""" writes to current utool logs and to sys.stdout.write """
#global __UTOOL_WRITE_BUFFER__
#sys.stdout.write('WRITEING\n')
msg = ', '.join(map(six.text_type, args))
#__UTOOL_WRITE_BUFFER__.append(msg)
__UTOOL_ROOT_LOGGER__.info(msg)
#if msg.endswith('\n'):
# # Flush on newline, and remove newline
# __UTOOL_WRITE_BUFFER__[-1] = __UTOOL_WRITE_BUFFER__[-1][:-1]
# utool_flush()
#elif len(__UTOOL_WRITE_BUFFER__) > 32:
# # Flush if buffer is too large
# utool_flush()
if not PRINT_ALL_CALLERS:
def utool_print(*args):
""" standard utool print function """
#sys.stdout.write('PRINT\n')
endline = '\n'
try:
msg = ', '.join(map(six.text_type, args))
return __UTOOL_ROOT_LOGGER__.info(msg + endline)
except UnicodeDecodeError:
new_msg = ', '.join(map(meta_util_six.ensure_unicode, args))
#print(new_msg)
return __UTOOL_ROOT_LOGGER__.info(new_msg + endline)
else:
def utool_print(*args):
""" debugging utool print function """
import utool as ut
utool_flush()
endline = '\n'
__UTOOL_ROOT_LOGGER__.info('\n\n----------')
__UTOOL_ROOT_LOGGER__.info(ut.get_caller_name(range(0, 20)))
return __UTOOL_ROOT_LOGGER__.info(', '.join(map(six.text_type, args)) + endline)
def utool_printdbg(*args):
""" DRPRICATE standard utool print debug function """
return __UTOOL_ROOT_LOGGER__.debug(', '.join(map(six.text_type, args)))
# overwrite the utool printers
__UTOOL_WRITE__ = utool_write
__UTOOL_FLUSH__ = utool_flush
__UTOOL_PRINT__ = utool_print
# Test out our shiney new logger
if VERBOSE or LOGGING_VERBOSE:
__UTOOL_PRINT__('<__LOG_START__>')
__UTOOL_PRINT__(startmsg)
else:
if LOGGING_VERBOSE:
print('[utool] start_logging()... FAILED TO START')
print('DEBUG INFO')
print('__inside_doctest() = %r' % (__inside_doctest(),))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,)) | def function[start_logging, parameter[log_fpath, mode, appname, log_dir]]:
constant[
Overwrites utool print functions to use a logger
CommandLine:
python -m utool.util_logging --test-start_logging:0
python -m utool.util_logging --test-start_logging:1
Example0:
>>> # DISABLE_DOCTEST
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> ut.util_logging._utool_print()('hello world')
>>> ut.util_logging._utool_write()('writing1')
>>> ut.util_logging._utool_write()('writing2\n')
>>> ut.util_logging._utool_write()('writing3')
>>> ut.util_logging._utool_flush()()
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> print('current_log_text =\n%s' % (current_log_text,))
>>> assert current_log_text.find('hello world') > 0, 'cant hello world'
>>> assert current_log_text.find('writing1writing2') > 0, 'cant find writing1writing2'
>>> assert current_log_text.find('writing3') > 0, 'cant find writing3'
Example1:
>>> # DISABLE_DOCTEST
>>> # Ensure that progress is logged
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> [x for x in ut.ProgressIter(range(0, 1000), freq=4)]
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> assert current_log_text.find('rate') > 0, 'progress was not logged'
>>> print(current_log_text)
]
<ast.Global object at 0x7da1b24eb340>
<ast.Global object at 0x7da1b24eaf50>
<ast.Global object at 0x7da1b24ea590>
<ast.Global object at 0x7da1b24eb190>
<ast.Global object at 0x7da1b24eb370>
if name[LOGGING_VERBOSE] begin[:]
call[name[print], parameter[constant[[utool] start_logging()]]]
if <ast.BoolOp object at 0x7da1b24eac80> begin[:]
if name[LOGGING_VERBOSE] begin[:]
call[name[print], parameter[constant[[utool] start_logging()... rootcheck OK]]]
if compare[name[log_fpath] is constant[None]] begin[:]
variable[log_fpath] assign[=] call[name[get_log_fpath], parameter[]]
variable[__CURRENT_LOG_FPATH__] assign[=] name[log_fpath]
if <ast.BoolOp object at 0x7da1b24ebe80> begin[:]
variable[startmsg] assign[=] binary_operation[constant[logging to log_fpath=%r] <ast.Mod object at 0x7da2590d6920> name[log_fpath]]
call[call[name[_utool_print], parameter[]], parameter[name[startmsg]]]
variable[__UTOOL_ROOT_LOGGER__] assign[=] call[name[logging].getLogger, parameter[constant[root]]]
call[name[__UTOOL_ROOT_LOGGER__].setLevel, parameter[constant[DEBUG]]]
variable[logfile_handler] assign[=] call[name[logging].FileHandler, parameter[name[log_fpath]]]
variable[stdout_handler] assign[=] call[name[CustomStreamHandler], parameter[name[__UTOOL_STDOUT__]]]
name[stdout_handler].terminator assign[=] constant[]
call[name[add_logging_handler], parameter[name[logfile_handler]]]
call[name[add_logging_handler], parameter[name[stdout_handler]]]
name[__UTOOL_ROOT_LOGGER__].propagate assign[=] constant[False]
call[name[__UTOOL_ROOT_LOGGER__].setLevel, parameter[name[logging].DEBUG]]
def function[utool_flush, parameter[]]:
constant[ flushes whatever is in the current utool write buffer ]
call[name[stdout_handler].flush, parameter[]]
def function[utool_write, parameter[]]:
constant[ writes to current utool logs and to sys.stdout.write ]
variable[msg] assign[=] call[constant[, ].join, parameter[call[name[map], parameter[name[six].text_type, name[args]]]]]
call[name[__UTOOL_ROOT_LOGGER__].info, parameter[name[msg]]]
if <ast.UnaryOp object at 0x7da1b24ebc70> begin[:]
def function[utool_print, parameter[]]:
constant[ standard utool print function ]
variable[endline] assign[=] constant[
]
<ast.Try object at 0x7da1b24e9e40>
def function[utool_printdbg, parameter[]]:
constant[ DRPRICATE standard utool print debug function ]
return[call[name[__UTOOL_ROOT_LOGGER__].debug, parameter[call[constant[, ].join, parameter[call[name[map], parameter[name[six].text_type, name[args]]]]]]]]
variable[__UTOOL_WRITE__] assign[=] name[utool_write]
variable[__UTOOL_FLUSH__] assign[=] name[utool_flush]
variable[__UTOOL_PRINT__] assign[=] name[utool_print]
if <ast.BoolOp object at 0x7da1b24ebd00> begin[:]
call[name[__UTOOL_PRINT__], parameter[constant[<__LOG_START__>]]]
call[name[__UTOOL_PRINT__], parameter[name[startmsg]]] | keyword[def] identifier[start_logging] ( identifier[log_fpath] = keyword[None] , identifier[mode] = literal[string] , identifier[appname] = literal[string] , identifier[log_dir] = keyword[None] ):
literal[string]
keyword[global] identifier[__UTOOL_ROOT_LOGGER__]
keyword[global] identifier[__UTOOL_PRINT__]
keyword[global] identifier[__UTOOL_WRITE__]
keyword[global] identifier[__UTOOL_FLUSH__]
keyword[global] identifier[__CURRENT_LOG_FPATH__]
keyword[if] identifier[LOGGING_VERBOSE] :
identifier[print] ( literal[string] )
keyword[if] identifier[__UTOOL_ROOT_LOGGER__] keyword[is] keyword[None] keyword[and] identifier[__IN_MAIN_PROCESS__] keyword[and] keyword[not] identifier[__inside_doctest] ():
keyword[if] identifier[LOGGING_VERBOSE] :
identifier[print] ( literal[string] )
keyword[if] identifier[log_fpath] keyword[is] keyword[None] :
identifier[log_fpath] = identifier[get_log_fpath] ( identifier[num] = literal[string] , identifier[appname] = identifier[appname] , identifier[log_dir] = identifier[log_dir] )
identifier[__CURRENT_LOG_FPATH__] = identifier[log_fpath]
keyword[if] identifier[VERBOSE] keyword[or] identifier[LOGGING_VERBOSE] :
identifier[startmsg] =( literal[string] % identifier[log_fpath] )
identifier[_utool_print] ()( identifier[startmsg] )
identifier[__UTOOL_ROOT_LOGGER__] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[__UTOOL_ROOT_LOGGER__] . identifier[setLevel] ( literal[string] )
identifier[logfile_handler] = identifier[logging] . identifier[FileHandler] ( identifier[log_fpath] , identifier[mode] = identifier[mode] )
identifier[stdout_handler] = identifier[CustomStreamHandler] ( identifier[__UTOOL_STDOUT__] )
identifier[stdout_handler] . identifier[terminator] = literal[string]
identifier[add_logging_handler] ( identifier[logfile_handler] , identifier[format_] = literal[string] )
identifier[add_logging_handler] ( identifier[stdout_handler] , identifier[format_] = literal[string] )
identifier[__UTOOL_ROOT_LOGGER__] . identifier[propagate] = keyword[False]
identifier[__UTOOL_ROOT_LOGGER__] . identifier[setLevel] ( identifier[logging] . identifier[DEBUG] )
keyword[def] identifier[utool_flush] (* identifier[args] ):
literal[string]
identifier[stdout_handler] . identifier[flush] ()
keyword[def] identifier[utool_write] (* identifier[args] ):
literal[string]
identifier[msg] = literal[string] . identifier[join] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[args] ))
identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( identifier[msg] )
keyword[if] keyword[not] identifier[PRINT_ALL_CALLERS] :
keyword[def] identifier[utool_print] (* identifier[args] ):
literal[string]
identifier[endline] = literal[string]
keyword[try] :
identifier[msg] = literal[string] . identifier[join] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[args] ))
keyword[return] identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( identifier[msg] + identifier[endline] )
keyword[except] identifier[UnicodeDecodeError] :
identifier[new_msg] = literal[string] . identifier[join] ( identifier[map] ( identifier[meta_util_six] . identifier[ensure_unicode] , identifier[args] ))
keyword[return] identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( identifier[new_msg] + identifier[endline] )
keyword[else] :
keyword[def] identifier[utool_print] (* identifier[args] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
identifier[utool_flush] ()
identifier[endline] = literal[string]
identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( literal[string] )
identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( identifier[ut] . identifier[get_caller_name] ( identifier[range] ( literal[int] , literal[int] )))
keyword[return] identifier[__UTOOL_ROOT_LOGGER__] . identifier[info] ( literal[string] . identifier[join] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[args] ))+ identifier[endline] )
keyword[def] identifier[utool_printdbg] (* identifier[args] ):
literal[string]
keyword[return] identifier[__UTOOL_ROOT_LOGGER__] . identifier[debug] ( literal[string] . identifier[join] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[args] )))
identifier[__UTOOL_WRITE__] = identifier[utool_write]
identifier[__UTOOL_FLUSH__] = identifier[utool_flush]
identifier[__UTOOL_PRINT__] = identifier[utool_print]
keyword[if] identifier[VERBOSE] keyword[or] identifier[LOGGING_VERBOSE] :
identifier[__UTOOL_PRINT__] ( literal[string] )
identifier[__UTOOL_PRINT__] ( identifier[startmsg] )
keyword[else] :
keyword[if] identifier[LOGGING_VERBOSE] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] %( identifier[__inside_doctest] (),))
identifier[print] ( literal[string] %( identifier[__IN_MAIN_PROCESS__] ,))
identifier[print] ( literal[string] %( identifier[__UTOOL_ROOT_LOGGER__] ,)) | def start_logging(log_fpath=None, mode='a', appname='default', log_dir=None):
"""
Overwrites utool print functions to use a logger
CommandLine:
python -m utool.util_logging --test-start_logging:0
python -m utool.util_logging --test-start_logging:1
Example0:
>>> # DISABLE_DOCTEST
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> ut.util_logging._utool_print()('hello world')
>>> ut.util_logging._utool_write()('writing1')
>>> ut.util_logging._utool_write()('writing2\\n')
>>> ut.util_logging._utool_write()('writing3')
>>> ut.util_logging._utool_flush()()
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> print('current_log_text =\\n%s' % (current_log_text,))
>>> assert current_log_text.find('hello world') > 0, 'cant hello world'
>>> assert current_log_text.find('writing1writing2') > 0, 'cant find writing1writing2'
>>> assert current_log_text.find('writing3') > 0, 'cant find writing3'
Example1:
>>> # DISABLE_DOCTEST
>>> # Ensure that progress is logged
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> [x for x in ut.ProgressIter(range(0, 1000), freq=4)]
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> assert current_log_text.find('rate') > 0, 'progress was not logged'
>>> print(current_log_text)
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
global __CURRENT_LOG_FPATH__
if LOGGING_VERBOSE:
print('[utool] start_logging()') # depends on [control=['if'], data=[]]
# FIXME: The test for doctest may not work
if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and (not __inside_doctest()):
if LOGGING_VERBOSE:
print('[utool] start_logging()... rootcheck OK') # depends on [control=['if'], data=[]]
#logging.config.dictConfig(LOGGING)
if log_fpath is None:
log_fpath = get_log_fpath(num='next', appname=appname, log_dir=log_dir) # depends on [control=['if'], data=['log_fpath']]
__CURRENT_LOG_FPATH__ = log_fpath
# Print what is about to happen
if VERBOSE or LOGGING_VERBOSE:
startmsg = 'logging to log_fpath=%r' % log_fpath
_utool_print()(startmsg) # depends on [control=['if'], data=[]]
# Create root logger
__UTOOL_ROOT_LOGGER__ = logging.getLogger('root')
__UTOOL_ROOT_LOGGER__.setLevel('DEBUG')
# create file handler which logs even debug messages
#fh = logging.handlers.WatchedFileHandler(log_fpath)
logfile_handler = logging.FileHandler(log_fpath, mode=mode)
#stdout_handler = logging.StreamHandler(__UTOOL_STDOUT__)
stdout_handler = CustomStreamHandler(__UTOOL_STDOUT__)
stdout_handler.terminator = ''
# http://stackoverflow.com/questions/7168790/suppress-newline-in-python-logging-module
#stdout_handler.terminator = ''
add_logging_handler(logfile_handler, format_='file')
add_logging_handler(stdout_handler, format_='stdout')
__UTOOL_ROOT_LOGGER__.propagate = False
__UTOOL_ROOT_LOGGER__.setLevel(logging.DEBUG)
# Overwrite utool functions with the logging functions
def utool_flush(*args):
""" flushes whatever is in the current utool write buffer """
# Flushes only the stdout handler
stdout_handler.flush()
#__UTOOL_ROOT_LOGGER__.flush()
#global __UTOOL_WRITE_BUFFER__
#if len(__UTOOL_WRITE_BUFFER__) > 0:
# msg = ''.join(__UTOOL_WRITE_BUFFER__)
# #sys.stdout.write('FLUSHING %r\n' % (len(__UTOOL_WRITE_BUFFER__)))
# __UTOOL_WRITE_BUFFER__ = []
# return __UTOOL_ROOT_LOGGER__.info(msg)
#__PYTHON_FLUSH__()
def utool_write(*args):
""" writes to current utool logs and to sys.stdout.write """
#global __UTOOL_WRITE_BUFFER__
#sys.stdout.write('WRITEING\n')
msg = ', '.join(map(six.text_type, args))
#__UTOOL_WRITE_BUFFER__.append(msg)
__UTOOL_ROOT_LOGGER__.info(msg)
#if msg.endswith('\n'):
# # Flush on newline, and remove newline
# __UTOOL_WRITE_BUFFER__[-1] = __UTOOL_WRITE_BUFFER__[-1][:-1]
# utool_flush()
#elif len(__UTOOL_WRITE_BUFFER__) > 32:
# # Flush if buffer is too large
# utool_flush()
if not PRINT_ALL_CALLERS:
def utool_print(*args):
""" standard utool print function """
#sys.stdout.write('PRINT\n')
endline = '\n'
try:
msg = ', '.join(map(six.text_type, args))
return __UTOOL_ROOT_LOGGER__.info(msg + endline) # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
new_msg = ', '.join(map(meta_util_six.ensure_unicode, args))
#print(new_msg)
return __UTOOL_ROOT_LOGGER__.info(new_msg + endline) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
def utool_print(*args):
""" debugging utool print function """
import utool as ut
utool_flush()
endline = '\n'
__UTOOL_ROOT_LOGGER__.info('\n\n----------')
__UTOOL_ROOT_LOGGER__.info(ut.get_caller_name(range(0, 20)))
return __UTOOL_ROOT_LOGGER__.info(', '.join(map(six.text_type, args)) + endline)
def utool_printdbg(*args):
""" DRPRICATE standard utool print debug function """
return __UTOOL_ROOT_LOGGER__.debug(', '.join(map(six.text_type, args)))
# overwrite the utool printers
__UTOOL_WRITE__ = utool_write
__UTOOL_FLUSH__ = utool_flush
__UTOOL_PRINT__ = utool_print
# Test out our shiney new logger
if VERBOSE or LOGGING_VERBOSE:
__UTOOL_PRINT__('<__LOG_START__>')
__UTOOL_PRINT__(startmsg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif LOGGING_VERBOSE:
print('[utool] start_logging()... FAILED TO START')
print('DEBUG INFO')
print('__inside_doctest() = %r' % (__inside_doctest(),))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,)) # depends on [control=['if'], data=[]] |
def _get_ports_list(app_name, port_specs):
""" Returns a list of formatted port mappings for an app """
if app_name not in port_specs['docker_compose']:
return []
return ["{}:{}".format(port_spec['mapped_host_port'], port_spec['in_container_port'])
for port_spec in port_specs['docker_compose'][app_name]] | def function[_get_ports_list, parameter[app_name, port_specs]]:
constant[ Returns a list of formatted port mappings for an app ]
if compare[name[app_name] <ast.NotIn object at 0x7da2590d7190> call[name[port_specs]][constant[docker_compose]]] begin[:]
return[list[[]]]
return[<ast.ListComp object at 0x7da20c990070>] | keyword[def] identifier[_get_ports_list] ( identifier[app_name] , identifier[port_specs] ):
literal[string]
keyword[if] identifier[app_name] keyword[not] keyword[in] identifier[port_specs] [ literal[string] ]:
keyword[return] []
keyword[return] [ literal[string] . identifier[format] ( identifier[port_spec] [ literal[string] ], identifier[port_spec] [ literal[string] ])
keyword[for] identifier[port_spec] keyword[in] identifier[port_specs] [ literal[string] ][ identifier[app_name] ]] | def _get_ports_list(app_name, port_specs):
""" Returns a list of formatted port mappings for an app """
if app_name not in port_specs['docker_compose']:
return [] # depends on [control=['if'], data=[]]
return ['{}:{}'.format(port_spec['mapped_host_port'], port_spec['in_container_port']) for port_spec in port_specs['docker_compose'][app_name]] |
def vec(data, dtype=float):
""" Makes GLfloat or GLuint vector containing float or uint args.
By default, newtype is 'float', but can be set to 'int' to make
uint list. """
gl_types = {float: gl.GLfloat, int: gl.GLuint}
try:
gl_dtype = gl_types[dtype]
except KeyError:
raise TypeError('dtype not recognized. Recognized types are int and float')
if gl_dtype == gl.GLuint:
for el in data:
if el < 0:
raise ValueError("integer ratcave.vec arrays are unsigned--negative values are not supported.")
return (gl_dtype * len(data))(*data) | def function[vec, parameter[data, dtype]]:
constant[ Makes GLfloat or GLuint vector containing float or uint args.
By default, newtype is 'float', but can be set to 'int' to make
uint list. ]
variable[gl_types] assign[=] dictionary[[<ast.Name object at 0x7da1b1c18d90>, <ast.Name object at 0x7da1b1c18eb0>], [<ast.Attribute object at 0x7da1b1c188e0>, <ast.Attribute object at 0x7da1b1c1bfa0>]]
<ast.Try object at 0x7da1b1c18cd0>
if compare[name[gl_dtype] equal[==] name[gl].GLuint] begin[:]
for taget[name[el]] in starred[name[data]] begin[:]
if compare[name[el] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1c19390>
return[call[binary_operation[name[gl_dtype] * call[name[len], parameter[name[data]]]], parameter[<ast.Starred object at 0x7da1b1c19600>]]] | keyword[def] identifier[vec] ( identifier[data] , identifier[dtype] = identifier[float] ):
literal[string]
identifier[gl_types] ={ identifier[float] : identifier[gl] . identifier[GLfloat] , identifier[int] : identifier[gl] . identifier[GLuint] }
keyword[try] :
identifier[gl_dtype] = identifier[gl_types] [ identifier[dtype] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[gl_dtype] == identifier[gl] . identifier[GLuint] :
keyword[for] identifier[el] keyword[in] identifier[data] :
keyword[if] identifier[el] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] ( identifier[gl_dtype] * identifier[len] ( identifier[data] ))(* identifier[data] ) | def vec(data, dtype=float):
""" Makes GLfloat or GLuint vector containing float or uint args.
By default, newtype is 'float', but can be set to 'int' to make
uint list. """
gl_types = {float: gl.GLfloat, int: gl.GLuint}
try:
gl_dtype = gl_types[dtype] # depends on [control=['try'], data=[]]
except KeyError:
raise TypeError('dtype not recognized. Recognized types are int and float') # depends on [control=['except'], data=[]]
if gl_dtype == gl.GLuint:
for el in data:
if el < 0:
raise ValueError('integer ratcave.vec arrays are unsigned--negative values are not supported.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']] # depends on [control=['if'], data=[]]
return (gl_dtype * len(data))(*data) |
def Attach(self, pid):
"""Attach to the process with the given pid."""
if self.inferior.is_running:
answer = raw_input('Already attached to process ' +
str(self.inferior.pid) +
'. Detach? [y]/n ')
if answer and answer != 'y' and answer != 'yes':
return None
self.Detach()
# Whatever position we had before will not make any sense now
for plugin in self.plugins:
plugin.position = None
self.inferior.Reinit(pid) | def function[Attach, parameter[self, pid]]:
constant[Attach to the process with the given pid.]
if name[self].inferior.is_running begin[:]
variable[answer] assign[=] call[name[raw_input], parameter[binary_operation[binary_operation[constant[Already attached to process ] + call[name[str], parameter[name[self].inferior.pid]]] + constant[. Detach? [y]/n ]]]]
if <ast.BoolOp object at 0x7da204963160> begin[:]
return[constant[None]]
call[name[self].Detach, parameter[]]
for taget[name[plugin]] in starred[name[self].plugins] begin[:]
name[plugin].position assign[=] constant[None]
call[name[self].inferior.Reinit, parameter[name[pid]]] | keyword[def] identifier[Attach] ( identifier[self] , identifier[pid] ):
literal[string]
keyword[if] identifier[self] . identifier[inferior] . identifier[is_running] :
identifier[answer] = identifier[raw_input] ( literal[string] +
identifier[str] ( identifier[self] . identifier[inferior] . identifier[pid] )+
literal[string] )
keyword[if] identifier[answer] keyword[and] identifier[answer] != literal[string] keyword[and] identifier[answer] != literal[string] :
keyword[return] keyword[None]
identifier[self] . identifier[Detach] ()
keyword[for] identifier[plugin] keyword[in] identifier[self] . identifier[plugins] :
identifier[plugin] . identifier[position] = keyword[None]
identifier[self] . identifier[inferior] . identifier[Reinit] ( identifier[pid] ) | def Attach(self, pid):
"""Attach to the process with the given pid."""
if self.inferior.is_running:
answer = raw_input('Already attached to process ' + str(self.inferior.pid) + '. Detach? [y]/n ')
if answer and answer != 'y' and (answer != 'yes'):
return None # depends on [control=['if'], data=[]]
self.Detach() # depends on [control=['if'], data=[]]
# Whatever position we had before will not make any sense now
for plugin in self.plugins:
plugin.position = None # depends on [control=['for'], data=['plugin']]
self.inferior.Reinit(pid) |
def load_post(wp_post_id):
"""
Called from load_post_webhook.
This builds a generic WPAPILoader and uses its load_post() to insert/update content for the post.
:param wp_post_id: the WordPress post ID to load
:return: None
"""
# wait a bit to give WordPress REST API a chance to catch up
time.sleep(1)
loader = WPAPILoader()
post = loader.load_post(wp_post_id)
if post:
logger.info("Successfully loaded post wp_post_id=%s, pk=%s", wp_post_id, post.pk)
else:
logger.warning("Error loading post wp_post_id=%s", wp_post_id) | def function[load_post, parameter[wp_post_id]]:
constant[
Called from load_post_webhook.
This builds a generic WPAPILoader and uses its load_post() to insert/update content for the post.
:param wp_post_id: the WordPress post ID to load
:return: None
]
call[name[time].sleep, parameter[constant[1]]]
variable[loader] assign[=] call[name[WPAPILoader], parameter[]]
variable[post] assign[=] call[name[loader].load_post, parameter[name[wp_post_id]]]
if name[post] begin[:]
call[name[logger].info, parameter[constant[Successfully loaded post wp_post_id=%s, pk=%s], name[wp_post_id], name[post].pk]] | keyword[def] identifier[load_post] ( identifier[wp_post_id] ):
literal[string]
identifier[time] . identifier[sleep] ( literal[int] )
identifier[loader] = identifier[WPAPILoader] ()
identifier[post] = identifier[loader] . identifier[load_post] ( identifier[wp_post_id] )
keyword[if] identifier[post] :
identifier[logger] . identifier[info] ( literal[string] , identifier[wp_post_id] , identifier[post] . identifier[pk] )
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[wp_post_id] ) | def load_post(wp_post_id):
"""
Called from load_post_webhook.
This builds a generic WPAPILoader and uses its load_post() to insert/update content for the post.
:param wp_post_id: the WordPress post ID to load
:return: None
"""
# wait a bit to give WordPress REST API a chance to catch up
time.sleep(1)
loader = WPAPILoader()
post = loader.load_post(wp_post_id)
if post:
logger.info('Successfully loaded post wp_post_id=%s, pk=%s', wp_post_id, post.pk) # depends on [control=['if'], data=[]]
else:
logger.warning('Error loading post wp_post_id=%s', wp_post_id) |
def acquire_authorization_header(self):
"""Acquire tokens from AAD."""
try:
return self._acquire_authorization_header()
except AdalError as error:
if self._authentication_method is AuthenticationMethod.aad_username_password:
kwargs = {"username": self._username, "client_id": self._client_id}
elif self._authentication_method is AuthenticationMethod.aad_application_key:
kwargs = {"client_id": self._client_id}
elif self._authentication_method is AuthenticationMethod.aad_device_login:
kwargs = {"client_id": self._client_id}
elif self._authentication_method is AuthenticationMethod.aad_application_certificate:
kwargs = {"client_id": self._client_id, "thumbprint": self._thumbprint}
else:
raise error
kwargs["resource"] = self._kusto_cluster
kwargs["authority"] = self._adal_context.authority.url
raise KustoAuthenticationError(self._authentication_method.value, error, **kwargs) | def function[acquire_authorization_header, parameter[self]]:
constant[Acquire tokens from AAD.]
<ast.Try object at 0x7da1b1669120> | keyword[def] identifier[acquire_authorization_header] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_acquire_authorization_header] ()
keyword[except] identifier[AdalError] keyword[as] identifier[error] :
keyword[if] identifier[self] . identifier[_authentication_method] keyword[is] identifier[AuthenticationMethod] . identifier[aad_username_password] :
identifier[kwargs] ={ literal[string] : identifier[self] . identifier[_username] , literal[string] : identifier[self] . identifier[_client_id] }
keyword[elif] identifier[self] . identifier[_authentication_method] keyword[is] identifier[AuthenticationMethod] . identifier[aad_application_key] :
identifier[kwargs] ={ literal[string] : identifier[self] . identifier[_client_id] }
keyword[elif] identifier[self] . identifier[_authentication_method] keyword[is] identifier[AuthenticationMethod] . identifier[aad_device_login] :
identifier[kwargs] ={ literal[string] : identifier[self] . identifier[_client_id] }
keyword[elif] identifier[self] . identifier[_authentication_method] keyword[is] identifier[AuthenticationMethod] . identifier[aad_application_certificate] :
identifier[kwargs] ={ literal[string] : identifier[self] . identifier[_client_id] , literal[string] : identifier[self] . identifier[_thumbprint] }
keyword[else] :
keyword[raise] identifier[error]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_kusto_cluster]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_adal_context] . identifier[authority] . identifier[url]
keyword[raise] identifier[KustoAuthenticationError] ( identifier[self] . identifier[_authentication_method] . identifier[value] , identifier[error] ,** identifier[kwargs] ) | def acquire_authorization_header(self):
"""Acquire tokens from AAD."""
try:
return self._acquire_authorization_header() # depends on [control=['try'], data=[]]
except AdalError as error:
if self._authentication_method is AuthenticationMethod.aad_username_password:
kwargs = {'username': self._username, 'client_id': self._client_id} # depends on [control=['if'], data=[]]
elif self._authentication_method is AuthenticationMethod.aad_application_key:
kwargs = {'client_id': self._client_id} # depends on [control=['if'], data=[]]
elif self._authentication_method is AuthenticationMethod.aad_device_login:
kwargs = {'client_id': self._client_id} # depends on [control=['if'], data=[]]
elif self._authentication_method is AuthenticationMethod.aad_application_certificate:
kwargs = {'client_id': self._client_id, 'thumbprint': self._thumbprint} # depends on [control=['if'], data=[]]
else:
raise error
kwargs['resource'] = self._kusto_cluster
kwargs['authority'] = self._adal_context.authority.url
raise KustoAuthenticationError(self._authentication_method.value, error, **kwargs) # depends on [control=['except'], data=['error']] |
def make_payment_script(address, blockchain='bitcoin', **blockchain_opts):
"""
High-level API call (meant to be blockchain agnostic)
Make a pay-to-address script.
"""
if blockchain == 'bitcoin':
return btc_make_payment_script(address, **blockchain_opts)
else:
raise ValueError("Unknown blockchain '{}'".format(blockchain)) | def function[make_payment_script, parameter[address, blockchain]]:
constant[
High-level API call (meant to be blockchain agnostic)
Make a pay-to-address script.
]
if compare[name[blockchain] equal[==] constant[bitcoin]] begin[:]
return[call[name[btc_make_payment_script], parameter[name[address]]]] | keyword[def] identifier[make_payment_script] ( identifier[address] , identifier[blockchain] = literal[string] ,** identifier[blockchain_opts] ):
literal[string]
keyword[if] identifier[blockchain] == literal[string] :
keyword[return] identifier[btc_make_payment_script] ( identifier[address] ,** identifier[blockchain_opts] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[blockchain] )) | def make_payment_script(address, blockchain='bitcoin', **blockchain_opts):
"""
High-level API call (meant to be blockchain agnostic)
Make a pay-to-address script.
"""
if blockchain == 'bitcoin':
return btc_make_payment_script(address, **blockchain_opts) # depends on [control=['if'], data=[]]
else:
raise ValueError("Unknown blockchain '{}'".format(blockchain)) |
def lifecycle_rules(self):
"""Retrieve or set lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's lifecycle rules mappings. Mutating the
list or one of its dicts has no effect unless you then re-assign
the dict via the setter. E.g.:
>>> rules = bucket.lifecycle_rules
>>> rules.append({'origin': '/foo', ...})
>>> rules[1]['rule']['action']['type'] = 'Delete'
>>> del rules[0]
>>> bucket.lifecycle_rules = rules
>>> bucket.update()
:setter: Set lifestyle rules for this bucket.
:getter: Gets the lifestyle rules for this bucket.
:rtype: generator(dict)
:returns: A sequence of mappings describing each lifecycle rule.
"""
info = self._properties.get("lifecycle", {})
for rule in info.get("rule", ()):
action_type = rule["action"]["type"]
if action_type == "Delete":
yield LifecycleRuleDelete.from_api_repr(rule)
elif action_type == "SetStorageClass":
yield LifecycleRuleSetStorageClass.from_api_repr(rule)
else:
raise ValueError("Unknown lifecycle rule: {}".format(rule)) | def function[lifecycle_rules, parameter[self]]:
constant[Retrieve or set lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's lifecycle rules mappings. Mutating the
list or one of its dicts has no effect unless you then re-assign
the dict via the setter. E.g.:
>>> rules = bucket.lifecycle_rules
>>> rules.append({'origin': '/foo', ...})
>>> rules[1]['rule']['action']['type'] = 'Delete'
>>> del rules[0]
>>> bucket.lifecycle_rules = rules
>>> bucket.update()
:setter: Set lifestyle rules for this bucket.
:getter: Gets the lifestyle rules for this bucket.
:rtype: generator(dict)
:returns: A sequence of mappings describing each lifecycle rule.
]
variable[info] assign[=] call[name[self]._properties.get, parameter[constant[lifecycle], dictionary[[], []]]]
for taget[name[rule]] in starred[call[name[info].get, parameter[constant[rule], tuple[[]]]]] begin[:]
variable[action_type] assign[=] call[call[name[rule]][constant[action]]][constant[type]]
if compare[name[action_type] equal[==] constant[Delete]] begin[:]
<ast.Yield object at 0x7da18f00c430> | keyword[def] identifier[lifecycle_rules] ( identifier[self] ):
literal[string]
identifier[info] = identifier[self] . identifier[_properties] . identifier[get] ( literal[string] ,{})
keyword[for] identifier[rule] keyword[in] identifier[info] . identifier[get] ( literal[string] ,()):
identifier[action_type] = identifier[rule] [ literal[string] ][ literal[string] ]
keyword[if] identifier[action_type] == literal[string] :
keyword[yield] identifier[LifecycleRuleDelete] . identifier[from_api_repr] ( identifier[rule] )
keyword[elif] identifier[action_type] == literal[string] :
keyword[yield] identifier[LifecycleRuleSetStorageClass] . identifier[from_api_repr] ( identifier[rule] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rule] )) | def lifecycle_rules(self):
"""Retrieve or set lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's lifecycle rules mappings. Mutating the
list or one of its dicts has no effect unless you then re-assign
the dict via the setter. E.g.:
>>> rules = bucket.lifecycle_rules
>>> rules.append({'origin': '/foo', ...})
>>> rules[1]['rule']['action']['type'] = 'Delete'
>>> del rules[0]
>>> bucket.lifecycle_rules = rules
>>> bucket.update()
:setter: Set lifestyle rules for this bucket.
:getter: Gets the lifestyle rules for this bucket.
:rtype: generator(dict)
:returns: A sequence of mappings describing each lifecycle rule.
"""
info = self._properties.get('lifecycle', {})
for rule in info.get('rule', ()):
action_type = rule['action']['type']
if action_type == 'Delete':
yield LifecycleRuleDelete.from_api_repr(rule) # depends on [control=['if'], data=[]]
elif action_type == 'SetStorageClass':
yield LifecycleRuleSetStorageClass.from_api_repr(rule) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown lifecycle rule: {}'.format(rule)) # depends on [control=['for'], data=['rule']] |
def get_attachments(self):
"""Return the objects from the UIDs given in the request
"""
# Create a mapping of source ARs for copy
uids = self.request.form.get("attachment_uids", [])
return map(self.get_object_by_uid, uids) | def function[get_attachments, parameter[self]]:
constant[Return the objects from the UIDs given in the request
]
variable[uids] assign[=] call[name[self].request.form.get, parameter[constant[attachment_uids], list[[]]]]
return[call[name[map], parameter[name[self].get_object_by_uid, name[uids]]]] | keyword[def] identifier[get_attachments] ( identifier[self] ):
literal[string]
identifier[uids] = identifier[self] . identifier[request] . identifier[form] . identifier[get] ( literal[string] ,[])
keyword[return] identifier[map] ( identifier[self] . identifier[get_object_by_uid] , identifier[uids] ) | def get_attachments(self):
"""Return the objects from the UIDs given in the request
"""
# Create a mapping of source ARs for copy
uids = self.request.form.get('attachment_uids', [])
return map(self.get_object_by_uid, uids) |
def update_check(package_name, package_version, bypass_cache=False, url=None,
**extra_data):
"""Convenience method that outputs to stdout if an update is available."""
checker = UpdateChecker(url)
checker.bypass_cache = bypass_cache
result = checker.check(package_name, package_version, **extra_data)
if result:
print(result) | def function[update_check, parameter[package_name, package_version, bypass_cache, url]]:
constant[Convenience method that outputs to stdout if an update is available.]
variable[checker] assign[=] call[name[UpdateChecker], parameter[name[url]]]
name[checker].bypass_cache assign[=] name[bypass_cache]
variable[result] assign[=] call[name[checker].check, parameter[name[package_name], name[package_version]]]
if name[result] begin[:]
call[name[print], parameter[name[result]]] | keyword[def] identifier[update_check] ( identifier[package_name] , identifier[package_version] , identifier[bypass_cache] = keyword[False] , identifier[url] = keyword[None] ,
** identifier[extra_data] ):
literal[string]
identifier[checker] = identifier[UpdateChecker] ( identifier[url] )
identifier[checker] . identifier[bypass_cache] = identifier[bypass_cache]
identifier[result] = identifier[checker] . identifier[check] ( identifier[package_name] , identifier[package_version] ,** identifier[extra_data] )
keyword[if] identifier[result] :
identifier[print] ( identifier[result] ) | def update_check(package_name, package_version, bypass_cache=False, url=None, **extra_data):
"""Convenience method that outputs to stdout if an update is available."""
checker = UpdateChecker(url)
checker.bypass_cache = bypass_cache
result = checker.check(package_name, package_version, **extra_data)
if result:
print(result) # depends on [control=['if'], data=[]] |
def sync_deps(self, path):
"""
Creates or updates a directory structure that can be used to avoid
doing a full rebuild whenever the configuration is changed, mirroring
include/config/ in the kernel.
This function is intended to be called during each build, before
compiling source files that depend on configuration symbols.
path:
Path to directory
sync_deps(path) does the following:
1. If the directory <path> does not exist, it is created.
2. If <path>/auto.conf exists, old symbol values are loaded from it,
which are then compared against the current symbol values. If a
symbol has changed value (would generate different output in
autoconf.h compared to before), the change is signaled by
touch'ing a file corresponding to the symbol.
The first time sync_deps() is run on a directory, <path>/auto.conf
won't exist, and no old symbol values will be available. This
logically has the same effect as updating the entire
configuration.
The path to a symbol's file is calculated from the symbol's name
by replacing all '_' with '/' and appending '.h'. For example, the
symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and FOO
gets the file <path>/foo.h.
This scheme matches the C tools. The point is to avoid having a
single directory with a huge number of files, which the underlying
filesystem might not handle well.
3. A new auto.conf with the current symbol values is written, to keep
track of them for the next build.
The last piece of the puzzle is knowing what symbols each source file
depends on. Knowing that, dependencies can be added from source files
to the files corresponding to the symbols they depends on. The source
file will then get recompiled (only) when the symbol value changes
(provided sync_deps() is run first during each build).
The tool in the kernel that extracts symbol dependencies from source
files is scripts/basic/fixdep.c. Missing symbol files also correspond
to "not changed", which fixdep deals with by using the $(wildcard) Make
function when adding symbol prerequisites to source files.
In case you need a different scheme for your project, the sync_deps()
implementation can be used as a template.
"""
if not exists(path):
os.mkdir(path, 0o755)
# Load old values from auto.conf, if any
self._load_old_vals(path)
for sym in self.unique_defined_syms:
# Note: _write_to_conf is determined when the value is
# calculated. This is a hidden function call due to
# property magic.
val = sym.str_value
# Note: n tristate values do not get written to auto.conf and
# autoconf.h, making a missing symbol logically equivalent to n
if sym._write_to_conf:
if sym._old_val is None and \
sym.orig_type in _BOOL_TRISTATE and \
val == "n":
# No old value (the symbol was missing or n), new value n.
# No change.
continue
if val == sym._old_val:
# New value matches old. No change.
continue
elif sym._old_val is None:
# The symbol wouldn't appear in autoconf.h (because
# _write_to_conf is false), and it wouldn't have appeared in
# autoconf.h previously either (because it didn't appear in
# auto.conf). No change.
continue
# 'sym' has a new value. Flag it.
_touch_dep_file(path, sym.name)
# Remember the current values as the "new old" values.
#
# This call could go anywhere after the call to _load_old_vals(), but
# putting it last means _sync_deps() can be safely rerun if it fails
# before this point.
self._write_old_vals(path) | def function[sync_deps, parameter[self, path]]:
constant[
Creates or updates a directory structure that can be used to avoid
doing a full rebuild whenever the configuration is changed, mirroring
include/config/ in the kernel.
This function is intended to be called during each build, before
compiling source files that depend on configuration symbols.
path:
Path to directory
sync_deps(path) does the following:
1. If the directory <path> does not exist, it is created.
2. If <path>/auto.conf exists, old symbol values are loaded from it,
which are then compared against the current symbol values. If a
symbol has changed value (would generate different output in
autoconf.h compared to before), the change is signaled by
touch'ing a file corresponding to the symbol.
The first time sync_deps() is run on a directory, <path>/auto.conf
won't exist, and no old symbol values will be available. This
logically has the same effect as updating the entire
configuration.
The path to a symbol's file is calculated from the symbol's name
by replacing all '_' with '/' and appending '.h'. For example, the
symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and FOO
gets the file <path>/foo.h.
This scheme matches the C tools. The point is to avoid having a
single directory with a huge number of files, which the underlying
filesystem might not handle well.
3. A new auto.conf with the current symbol values is written, to keep
track of them for the next build.
The last piece of the puzzle is knowing what symbols each source file
depends on. Knowing that, dependencies can be added from source files
to the files corresponding to the symbols they depends on. The source
file will then get recompiled (only) when the symbol value changes
(provided sync_deps() is run first during each build).
The tool in the kernel that extracts symbol dependencies from source
files is scripts/basic/fixdep.c. Missing symbol files also correspond
to "not changed", which fixdep deals with by using the $(wildcard) Make
function when adding symbol prerequisites to source files.
In case you need a different scheme for your project, the sync_deps()
implementation can be used as a template.
]
if <ast.UnaryOp object at 0x7da18f811e40> begin[:]
call[name[os].mkdir, parameter[name[path], constant[493]]]
call[name[self]._load_old_vals, parameter[name[path]]]
for taget[name[sym]] in starred[name[self].unique_defined_syms] begin[:]
variable[val] assign[=] name[sym].str_value
if name[sym]._write_to_conf begin[:]
if <ast.BoolOp object at 0x7da18f810d00> begin[:]
continue
if compare[name[val] equal[==] name[sym]._old_val] begin[:]
continue
call[name[_touch_dep_file], parameter[name[path], name[sym].name]]
call[name[self]._write_old_vals, parameter[name[path]]] | keyword[def] identifier[sync_deps] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[exists] ( identifier[path] ):
identifier[os] . identifier[mkdir] ( identifier[path] , literal[int] )
identifier[self] . identifier[_load_old_vals] ( identifier[path] )
keyword[for] identifier[sym] keyword[in] identifier[self] . identifier[unique_defined_syms] :
identifier[val] = identifier[sym] . identifier[str_value]
keyword[if] identifier[sym] . identifier[_write_to_conf] :
keyword[if] identifier[sym] . identifier[_old_val] keyword[is] keyword[None] keyword[and] identifier[sym] . identifier[orig_type] keyword[in] identifier[_BOOL_TRISTATE] keyword[and] identifier[val] == literal[string] :
keyword[continue]
keyword[if] identifier[val] == identifier[sym] . identifier[_old_val] :
keyword[continue]
keyword[elif] identifier[sym] . identifier[_old_val] keyword[is] keyword[None] :
keyword[continue]
identifier[_touch_dep_file] ( identifier[path] , identifier[sym] . identifier[name] )
identifier[self] . identifier[_write_old_vals] ( identifier[path] ) | def sync_deps(self, path):
"""
Creates or updates a directory structure that can be used to avoid
doing a full rebuild whenever the configuration is changed, mirroring
include/config/ in the kernel.
This function is intended to be called during each build, before
compiling source files that depend on configuration symbols.
path:
Path to directory
sync_deps(path) does the following:
1. If the directory <path> does not exist, it is created.
2. If <path>/auto.conf exists, old symbol values are loaded from it,
which are then compared against the current symbol values. If a
symbol has changed value (would generate different output in
autoconf.h compared to before), the change is signaled by
touch'ing a file corresponding to the symbol.
The first time sync_deps() is run on a directory, <path>/auto.conf
won't exist, and no old symbol values will be available. This
logically has the same effect as updating the entire
configuration.
The path to a symbol's file is calculated from the symbol's name
by replacing all '_' with '/' and appending '.h'. For example, the
symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and FOO
gets the file <path>/foo.h.
This scheme matches the C tools. The point is to avoid having a
single directory with a huge number of files, which the underlying
filesystem might not handle well.
3. A new auto.conf with the current symbol values is written, to keep
track of them for the next build.
The last piece of the puzzle is knowing what symbols each source file
depends on. Knowing that, dependencies can be added from source files
to the files corresponding to the symbols they depends on. The source
file will then get recompiled (only) when the symbol value changes
(provided sync_deps() is run first during each build).
The tool in the kernel that extracts symbol dependencies from source
files is scripts/basic/fixdep.c. Missing symbol files also correspond
to "not changed", which fixdep deals with by using the $(wildcard) Make
function when adding symbol prerequisites to source files.
In case you need a different scheme for your project, the sync_deps()
implementation can be used as a template.
"""
if not exists(path):
os.mkdir(path, 493) # depends on [control=['if'], data=[]]
# Load old values from auto.conf, if any
self._load_old_vals(path)
for sym in self.unique_defined_syms:
# Note: _write_to_conf is determined when the value is
# calculated. This is a hidden function call due to
# property magic.
val = sym.str_value
# Note: n tristate values do not get written to auto.conf and
# autoconf.h, making a missing symbol logically equivalent to n
if sym._write_to_conf:
if sym._old_val is None and sym.orig_type in _BOOL_TRISTATE and (val == 'n'):
# No old value (the symbol was missing or n), new value n.
# No change.
continue # depends on [control=['if'], data=[]]
if val == sym._old_val:
# New value matches old. No change.
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif sym._old_val is None:
# The symbol wouldn't appear in autoconf.h (because
# _write_to_conf is false), and it wouldn't have appeared in
# autoconf.h previously either (because it didn't appear in
# auto.conf). No change.
continue # depends on [control=['if'], data=[]]
# 'sym' has a new value. Flag it.
_touch_dep_file(path, sym.name) # depends on [control=['for'], data=['sym']]
# Remember the current values as the "new old" values.
#
# This call could go anywhere after the call to _load_old_vals(), but
# putting it last means _sync_deps() can be safely rerun if it fails
# before this point.
self._write_old_vals(path) |
def strftime(self, fmt):
    """Format using strftime(). The date part of the timestamp passed
    to underlying strftime should not be used.
    """
    # Substitute a fixed dummy date of 1900-01-01: years below 1000 can
    # make Python's strftime implementation raise a bogus exception.
    dummy_timetuple = (1900, 1, 1,
                       self._hour, self._minute, self._second,
                       0, 1, -1)
    return _wrap_strftime(self, fmt, dummy_timetuple)
constant[Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
]
variable[timetuple] assign[=] tuple[[<ast.Constant object at 0x7da18f8117e0>, <ast.Constant object at 0x7da18f813f40>, <ast.Constant object at 0x7da18f812b90>, <ast.Attribute object at 0x7da18f811750>, <ast.Attribute object at 0x7da18f810220>, <ast.Attribute object at 0x7da20c7cba00>, <ast.Constant object at 0x7da20c7caf20>, <ast.Constant object at 0x7da20c7c9360>, <ast.UnaryOp object at 0x7da20c7c9d80>]]
return[call[name[_wrap_strftime], parameter[name[self], name[fmt], name[timetuple]]]] | keyword[def] identifier[strftime] ( identifier[self] , identifier[fmt] ):
literal[string]
identifier[timetuple] =( literal[int] , literal[int] , literal[int] ,
identifier[self] . identifier[_hour] , identifier[self] . identifier[_minute] , identifier[self] . identifier[_second] ,
literal[int] , literal[int] ,- literal[int] )
keyword[return] identifier[_wrap_strftime] ( identifier[self] , identifier[fmt] , identifier[timetuple] ) | def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1, self._hour, self._minute, self._second, 0, 1, -1)
return _wrap_strftime(self, fmt, timetuple) |
def get_custom_value(self, field_name):
    """ Get a value for a specified custom field
    field_name - Name of the custom field you want.
    """
    field = self.get_custom_field(field_name)
    # get_or_create returns (instance, created); we only need the instance.
    value_row, _created = CustomFieldValue.objects.get_or_create(
        field=field, object_id=self.id)
    return value_row.value
constant[ Get a value for a specified custom field
field_name - Name of the custom field you want.
]
variable[custom_field] assign[=] call[name[self].get_custom_field, parameter[name[field_name]]]
return[call[call[name[CustomFieldValue].objects.get_or_create, parameter[]]][constant[0]].value] | keyword[def] identifier[get_custom_value] ( identifier[self] , identifier[field_name] ):
literal[string]
identifier[custom_field] = identifier[self] . identifier[get_custom_field] ( identifier[field_name] )
keyword[return] identifier[CustomFieldValue] . identifier[objects] . identifier[get_or_create] (
identifier[field] = identifier[custom_field] , identifier[object_id] = identifier[self] . identifier[id] )[ literal[int] ]. identifier[value] | def get_custom_value(self, field_name):
""" Get a value for a specified custom field
field_name - Name of the custom field you want.
"""
custom_field = self.get_custom_field(field_name)
return CustomFieldValue.objects.get_or_create(field=custom_field, object_id=self.id)[0].value |
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
    """Set of hyperparameters."""
    # Start from the base VQ configuration and enlarge batch/context/FFN
    # sizes for the dialog setting.
    hp = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
    hp.batch_size = 2048
    hp.max_length = 1024
    hp.filter_size = 3072
    return hp
constant[Set of hyperparameters.]
variable[hparams] assign[=] call[name[transformer_base_vq1_16_nb1_packed_nda_b01_scales], parameter[]]
name[hparams].batch_size assign[=] constant[2048]
name[hparams].max_length assign[=] constant[1024]
name[hparams].filter_size assign[=] constant[3072]
return[name[hparams]] | keyword[def] identifier[transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog] ():
literal[string]
identifier[hparams] = identifier[transformer_base_vq1_16_nb1_packed_nda_b01_scales] ()
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[max_length] = literal[int]
identifier[hparams] . identifier[filter_size] = literal[int]
keyword[return] identifier[hparams] | def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
hparams.batch_size = 2048
hparams.max_length = 1024
hparams.filter_size = 3072
return hparams |
def htmlize_list(items):
    """
    Turn a python list into an html list.
    """
    parts = ["<ul>"]
    parts.extend("<li>" + htmlize(entry) + "</li>" for entry in items)
    parts.append("</ul>")
    return "\n".join(parts)
constant[
Turn a python list into an html list.
]
variable[out] assign[=] list[[<ast.Constant object at 0x7da1b25857b0>]]
for taget[name[item]] in starred[name[items]] begin[:]
call[name[out].append, parameter[binary_operation[binary_operation[constant[<li>] + call[name[htmlize], parameter[name[item]]]] + constant[</li>]]]]
call[name[out].append, parameter[constant[</ul>]]]
return[call[constant[
].join, parameter[name[out]]]] | keyword[def] identifier[htmlize_list] ( identifier[items] ):
literal[string]
identifier[out] =[ literal[string] ]
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[out] . identifier[append] ( literal[string] + identifier[htmlize] ( identifier[item] )+ literal[string] )
identifier[out] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[out] ) | def htmlize_list(items):
"""
Turn a python list into an html list.
"""
out = ['<ul>']
for item in items:
out.append('<li>' + htmlize(item) + '</li>') # depends on [control=['for'], data=['item']]
out.append('</ul>')
return '\n'.join(out) |
def estimate_flux(self):
    """
    Estimate the star's flux by summing values in the input cutout
    array.
    Missing data is filled in by interpolation to better estimate
    the total flux.
    """
    from .epsf import _interpolate_missing_data
    if not np.any(self.mask):
        # No masked pixels: a plain sum suffices.
        return np.sum(self.data, dtype=np.float64)
    # Fill masked pixels before summing: cubic first, then a
    # nearest-neighbour pass over the same mask.
    filled = _interpolate_missing_data(self.data, method='cubic',
                                       mask=self.mask)
    filled = _interpolate_missing_data(filled, method='nearest',
                                       mask=self.mask)
    return np.sum(filled, dtype=np.float64)
constant[
Estimate the star's flux by summing values in the input cutout
array.
Missing data is filled in by interpolation to better estimate
the total flux.
]
from relative_module[epsf] import module[_interpolate_missing_data]
if call[name[np].any, parameter[name[self].mask]] begin[:]
variable[data_interp] assign[=] call[name[_interpolate_missing_data], parameter[name[self].data]]
variable[data_interp] assign[=] call[name[_interpolate_missing_data], parameter[name[data_interp]]]
variable[flux] assign[=] call[name[np].sum, parameter[name[data_interp]]]
return[name[flux]] | keyword[def] identifier[estimate_flux] ( identifier[self] ):
literal[string]
keyword[from] . identifier[epsf] keyword[import] identifier[_interpolate_missing_data]
keyword[if] identifier[np] . identifier[any] ( identifier[self] . identifier[mask] ):
identifier[data_interp] = identifier[_interpolate_missing_data] ( identifier[self] . identifier[data] , identifier[method] = literal[string] ,
identifier[mask] = identifier[self] . identifier[mask] )
identifier[data_interp] = identifier[_interpolate_missing_data] ( identifier[data_interp] ,
identifier[method] = literal[string] ,
identifier[mask] = identifier[self] . identifier[mask] )
identifier[flux] = identifier[np] . identifier[sum] ( identifier[data_interp] , identifier[dtype] = identifier[np] . identifier[float64] )
keyword[else] :
identifier[flux] = identifier[np] . identifier[sum] ( identifier[self] . identifier[data] , identifier[dtype] = identifier[np] . identifier[float64] )
keyword[return] identifier[flux] | def estimate_flux(self):
"""
Estimate the star's flux by summing values in the input cutout
array.
Missing data is filled in by interpolation to better estimate
the total flux.
"""
from .epsf import _interpolate_missing_data
if np.any(self.mask):
data_interp = _interpolate_missing_data(self.data, method='cubic', mask=self.mask)
data_interp = _interpolate_missing_data(data_interp, method='nearest', mask=self.mask)
flux = np.sum(data_interp, dtype=np.float64) # depends on [control=['if'], data=[]]
else:
flux = np.sum(self.data, dtype=np.float64)
return flux |
def text(self, text):
    """Put text in the entity.

    Runs of whitespace and newlines are collapsed to single spaces;
    text that is empty after normalisation is dropped entirely.
    """
    if text:
        text = utfstr(text)
        # Raw-string pattern: a bare '\s+' is an invalid escape sequence
        # and triggers a SyntaxWarning on modern Python.
        text = re.sub(r'\s+', ' ', text.strip())
        if text:
            self.dirty = True
            self.escpos.text(text)
constant[ puts text in the entity. Whitespace and newlines are stripped to single spaces. ]
if name[text] begin[:]
variable[text] assign[=] call[name[utfstr], parameter[name[text]]]
variable[text] assign[=] call[name[text].strip, parameter[]]
variable[text] assign[=] call[name[re].sub, parameter[constant[\s+], constant[ ], name[text]]]
if name[text] begin[:]
name[self].dirty assign[=] constant[True]
call[name[self].escpos.text, parameter[name[text]]] | keyword[def] identifier[text] ( identifier[self] , identifier[text] ):
literal[string]
keyword[if] identifier[text] :
identifier[text] = identifier[utfstr] ( identifier[text] )
identifier[text] = identifier[text] . identifier[strip] ()
identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] )
keyword[if] identifier[text] :
identifier[self] . identifier[dirty] = keyword[True]
identifier[self] . identifier[escpos] . identifier[text] ( identifier[text] ) | def text(self, text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\\s+', ' ', text)
if text:
self.dirty = True
self.escpos.text(text) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def get_show_name(self):
    """
    Get video show name from the website. It's located in the div with 'data-hover'
    attribute under the 'title' key.
    Returns:
        str: Video show name.
    """
    hover_div = self.soup.find('div', attrs={'data-hover': True})
    hover_data = json.loads(hover_div['data-hover'])
    return hover_data.get('title')
constant[
Get video show name from the website. It's located in the div with 'data-hover'
attribute under the 'title' key.
Returns:
str: Video show name.
]
variable[div] assign[=] call[name[self].soup.find, parameter[constant[div]]]
variable[data] assign[=] call[name[json].loads, parameter[call[name[div]][constant[data-hover]]]]
variable[show_name] assign[=] call[name[data].get, parameter[constant[title]]]
return[name[show_name]] | keyword[def] identifier[get_show_name] ( identifier[self] ):
literal[string]
identifier[div] = identifier[self] . identifier[soup] . identifier[find] ( literal[string] , identifier[attrs] ={ literal[string] : keyword[True] })
identifier[data] = identifier[json] . identifier[loads] ( identifier[div] [ literal[string] ])
identifier[show_name] = identifier[data] . identifier[get] ( literal[string] )
keyword[return] identifier[show_name] | def get_show_name(self):
"""
Get video show name from the website. It's located in the div with 'data-hover'
attribute under the 'title' key.
Returns:
str: Video show name.
"""
div = self.soup.find('div', attrs={'data-hover': True})
data = json.loads(div['data-hover'])
show_name = data.get('title')
return show_name |
def _lincomb(self, a, x1, b, x2, out):
    """Implement the linear combination of ``x1`` and ``x2``.
    Compute ``out = a*x1 + b*x2`` using optimized
    BLAS routines if possible.
    This function is part of the subclassing API. Do not
    call it directly.
    Parameters
    ----------
    a, b : `TensorSpace.field` element
        Scalars to multiply ``x1`` and ``x2`` with.
    x1, x2 : `NumpyTensor`
        Summands in the linear combination.
    out : `NumpyTensor`
        Tensor to which the result is written.
    Examples
    --------
    >>> space = odl.rn(3)
    >>> x = space.element([0, 1, 1])
    >>> y = space.element([0, 0, 1])
    >>> out = space.element()
    >>> result = space.lincomb(1, x, 2, y, out)
    >>> result
    rn(3).element([ 0.,  1.,  3.])
    >>> result is out
    True
    """
    # Delegate to the shared module-level implementation, which writes
    # into ``out`` in place (see docstring: BLAS-backed when possible).
    _lincomb_impl(a, x1, b, x2, out)
constant[Implement the linear combination of ``x1`` and ``x2``.
Compute ``out = a*x1 + b*x2`` using optimized
BLAS routines if possible.
This function is part of the subclassing API. Do not
call it directly.
Parameters
----------
a, b : `TensorSpace.field` element
Scalars to multiply ``x1`` and ``x2`` with.
x1, x2 : `NumpyTensor`
Summands in the linear combination.
out : `NumpyTensor`
Tensor to which the result is written.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.element([0, 1, 1])
>>> y = space.element([0, 0, 1])
>>> out = space.element()
>>> result = space.lincomb(1, x, 2, y, out)
>>> result
rn(3).element([ 0., 1., 3.])
>>> result is out
True
]
call[name[_lincomb_impl], parameter[name[a], name[x1], name[b], name[x2], name[out]]] | keyword[def] identifier[_lincomb] ( identifier[self] , identifier[a] , identifier[x1] , identifier[b] , identifier[x2] , identifier[out] ):
literal[string]
identifier[_lincomb_impl] ( identifier[a] , identifier[x1] , identifier[b] , identifier[x2] , identifier[out] ) | def _lincomb(self, a, x1, b, x2, out):
"""Implement the linear combination of ``x1`` and ``x2``.
Compute ``out = a*x1 + b*x2`` using optimized
BLAS routines if possible.
This function is part of the subclassing API. Do not
call it directly.
Parameters
----------
a, b : `TensorSpace.field` element
Scalars to multiply ``x1`` and ``x2`` with.
x1, x2 : `NumpyTensor`
Summands in the linear combination.
out : `NumpyTensor`
Tensor to which the result is written.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.element([0, 1, 1])
>>> y = space.element([0, 0, 1])
>>> out = space.element()
>>> result = space.lincomb(1, x, 2, y, out)
>>> result
rn(3).element([ 0., 1., 3.])
>>> result is out
True
"""
_lincomb_impl(a, x1, b, x2, out) |
def defaults_cluster_role_env(cluster_role_env):
    """
    Fill in defaults for a (cluster, role, environ) triple:
    if neither role nor environ is provided, supply the current
    userid as role and the default ENVIRON as environ.

    Returns the (possibly completed) 3-tuple.
    """
    cluster = cluster_role_env[0]
    role = cluster_role_env[1]
    env = cluster_role_env[2]
    # Truthiness test replaces the original len(...) == 0 checks.
    if not role and not env:
        return (cluster, getpass.getuser(), ENVIRON)
    return (cluster, role, env)
constant[
if role is not provided, supply userid
if environ is not provided, supply 'default'
]
if <ast.BoolOp object at 0x7da18f58c0d0> begin[:]
return[tuple[[<ast.Subscript object at 0x7da20c993160>, <ast.Call object at 0x7da20c992e90>, <ast.Name object at 0x7da20c991f60>]]]
return[tuple[[<ast.Subscript object at 0x7da20c6c59c0>, <ast.Subscript object at 0x7da20c6c5ea0>, <ast.Subscript object at 0x7da20c6c7d30>]]] | keyword[def] identifier[defaults_cluster_role_env] ( identifier[cluster_role_env] ):
literal[string]
keyword[if] identifier[len] ( identifier[cluster_role_env] [ literal[int] ])== literal[int] keyword[and] identifier[len] ( identifier[cluster_role_env] [ literal[int] ])== literal[int] :
keyword[return] ( identifier[cluster_role_env] [ literal[int] ], identifier[getpass] . identifier[getuser] (), identifier[ENVIRON] )
keyword[return] ( identifier[cluster_role_env] [ literal[int] ], identifier[cluster_role_env] [ literal[int] ], identifier[cluster_role_env] [ literal[int] ]) | def defaults_cluster_role_env(cluster_role_env):
"""
if role is not provided, supply userid
if environ is not provided, supply 'default'
"""
if len(cluster_role_env[1]) == 0 and len(cluster_role_env[2]) == 0:
return (cluster_role_env[0], getpass.getuser(), ENVIRON) # depends on [control=['if'], data=[]]
return (cluster_role_env[0], cluster_role_env[1], cluster_role_env[2]) |
def align_image_with_openpnm(im):
    r"""
    Rotates an image to agree with the coordinates used in OpenPNM. It is
    unclear why they are not in agreement to start with. This is necessary
    for overlaying the image and the network in Paraview.
    Parameters
    ----------
    im : ND-array
        The image to be rotated. Can be the Boolean image of the pore space or
        any other image of interest.
    Returns
    -------
    image : ND-array
        Returns a copy of ``im`` rotated accordingly.
    """
    # Use numpy directly: the top-level SciPy aliases of NumPy functions
    # (sp.copy, sp.swapaxes) are deprecated and removed in recent SciPy.
    import numpy as np
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    im = np.copy(im)
    if im.ndim == 2:
        im = np.swapaxes(im, 1, 0)
        im = im[-1::-1, :]
    elif im.ndim == 3:
        im = np.swapaxes(im, 2, 0)
        im = im[:, -1::-1, :]
    return im
constant[
Rotates an image to agree with the coordinates used in OpenPNM. It is
unclear why they are not in agreement to start with. This is necessary
for overlaying the image and the network in Paraview.
Parameters
----------
im : ND-array
The image to be rotated. Can be the Boolean image of the pore space or
any other image of interest.
Returns
-------
image : ND-array
Returns a copy of ``im`` rotated accordingly.
]
if compare[name[im].ndim not_equal[!=] call[name[im].squeeze, parameter[]].ndim] begin[:]
call[name[warnings].warn, parameter[binary_operation[binary_operation[binary_operation[constant[Input image conains a singleton axis:] + call[name[str], parameter[name[im].shape]]] + constant[ Reduce dimensionality with np.squeeze(im) to avoid]] + constant[ unexpected behavior.]]]]
variable[im] assign[=] call[name[sp].copy, parameter[name[im]]]
if compare[name[im].ndim equal[==] constant[2]] begin[:]
variable[im] assign[=] call[name[sp].swapaxes, parameter[name[im], constant[1], constant[0]]]
variable[im] assign[=] call[name[im]][tuple[[<ast.Slice object at 0x7da1b0650250>, <ast.Slice object at 0x7da1b0651c00>]]]
return[name[im]] | keyword[def] identifier[align_image_with_openpnm] ( identifier[im] ):
literal[string]
keyword[if] identifier[im] . identifier[ndim] != identifier[im] . identifier[squeeze] (). identifier[ndim] :
identifier[warnings] . identifier[warn] ( literal[string] + identifier[str] ( identifier[im] . identifier[shape] )+
literal[string] +
literal[string] )
identifier[im] = identifier[sp] . identifier[copy] ( identifier[im] )
keyword[if] identifier[im] . identifier[ndim] == literal[int] :
identifier[im] =( identifier[sp] . identifier[swapaxes] ( identifier[im] , literal[int] , literal[int] ))
identifier[im] = identifier[im] [- literal[int] ::- literal[int] ,:]
keyword[elif] identifier[im] . identifier[ndim] == literal[int] :
identifier[im] =( identifier[sp] . identifier[swapaxes] ( identifier[im] , literal[int] , literal[int] ))
identifier[im] = identifier[im] [:,- literal[int] ::- literal[int] ,:]
keyword[return] identifier[im] | def align_image_with_openpnm(im):
"""
Rotates an image to agree with the coordinates used in OpenPNM. It is
unclear why they are not in agreement to start with. This is necessary
for overlaying the image and the network in Paraview.
Parameters
----------
im : ND-array
The image to be rotated. Can be the Boolean image of the pore space or
any other image of interest.
Returns
-------
image : ND-array
Returns a copy of ``im`` rotated accordingly.
"""
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) + ' Reduce dimensionality with np.squeeze(im) to avoid' + ' unexpected behavior.') # depends on [control=['if'], data=[]]
im = sp.copy(im)
if im.ndim == 2:
im = sp.swapaxes(im, 1, 0)
im = im[-1::-1, :] # depends on [control=['if'], data=[]]
elif im.ndim == 3:
im = sp.swapaxes(im, 2, 0)
im = im[:, -1::-1, :] # depends on [control=['if'], data=[]]
return im |
def add_imt(fname, imt):
    """
    >>> add_imt('/path/to/hcurve_23.csv', 'SA(0.1)')
    '/path/to/hcurve-SA(0.1)_23.csv'
    """
    dirname, basename = os.path.split(fname)
    # Insert '-<imt>' immediately before the trailing '_<num>.' suffix.
    newbase = re.sub(r'(_\d+\.)', '-%s\\1' % imt, basename)
    return os.path.join(dirname, newbase)
constant[
>>> add_imt('/path/to/hcurve_23.csv', 'SA(0.1)')
'/path/to/hcurve-SA(0.1)_23.csv'
]
variable[name] assign[=] call[name[os].path.basename, parameter[name[fname]]]
variable[newname] assign[=] call[name[re].sub, parameter[constant[(_\d+\.)], binary_operation[constant[-%s\1] <ast.Mod object at 0x7da2590d6920> name[imt]], name[name]]]
return[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[fname]]], name[newname]]]] | keyword[def] identifier[add_imt] ( identifier[fname] , identifier[imt] ):
literal[string]
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fname] )
identifier[newname] = identifier[re] . identifier[sub] ( literal[string] , literal[string] % identifier[imt] , identifier[name] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fname] ), identifier[newname] ) | def add_imt(fname, imt):
"""
>>> add_imt('/path/to/hcurve_23.csv', 'SA(0.1)')
'/path/to/hcurve-SA(0.1)_23.csv'
"""
name = os.path.basename(fname)
newname = re.sub('(_\\d+\\.)', '-%s\\1' % imt, name)
return os.path.join(os.path.dirname(fname), newname) |
def validate_layout_display(self, table, display_condition):
    """Check to see if the display condition passes.
    Args:
        table (str): The name of the DB table which hold the App data.
        display_condition (str): The "where" clause of the DB SQL statement.
    Returns:
        bool: True if the row count is greater than 0.
    """
    # No condition means the layout is always displayed.
    if display_condition is None:
        return True
    display_query = 'select count(*) from {} where {}'.format(table, display_condition)
    try:
        cursor = self.db_conn.cursor()
        cursor.execute(display_query.replace('"', ''))
        rows = cursor.fetchall()
        return rows[0][0] > 0
    except sqlite3.Error as e:
        print('"{}" query returned an error: ({}).'.format(display_query, e))
        sys.exit(1)
constant[Check to see if the display condition passes.
Args:
table (str): The name of the DB table which hold the App data.
display_condition (str): The "where" clause of the DB SQL statement.
Returns:
bool: True if the row count is greater than 0.
]
variable[display] assign[=] constant[False]
if compare[name[display_condition] is constant[None]] begin[:]
variable[display] assign[=] constant[True]
return[name[display]] | keyword[def] identifier[validate_layout_display] ( identifier[self] , identifier[table] , identifier[display_condition] ):
literal[string]
identifier[display] = keyword[False]
keyword[if] identifier[display_condition] keyword[is] keyword[None] :
identifier[display] = keyword[True]
keyword[else] :
identifier[display_query] = literal[string] . identifier[format] ( identifier[table] , identifier[display_condition] )
keyword[try] :
identifier[cur] = identifier[self] . identifier[db_conn] . identifier[cursor] ()
identifier[cur] . identifier[execute] ( identifier[display_query] . identifier[replace] ( literal[string] , literal[string] ))
identifier[rows] = identifier[cur] . identifier[fetchall] ()
keyword[if] identifier[rows] [ literal[int] ][ literal[int] ]> literal[int] :
identifier[display] = keyword[True]
keyword[except] identifier[sqlite3] . identifier[Error] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[display_query] , identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[display] | def validate_layout_display(self, table, display_condition):
"""Check to see if the display condition passes.
Args:
table (str): The name of the DB table which hold the App data.
display_condition (str): The "where" clause of the DB SQL statement.
Returns:
bool: True if the row count is greater than 0.
"""
display = False
if display_condition is None:
display = True # depends on [control=['if'], data=[]]
else:
display_query = 'select count(*) from {} where {}'.format(table, display_condition)
try:
cur = self.db_conn.cursor()
cur.execute(display_query.replace('"', ''))
rows = cur.fetchall()
if rows[0][0] > 0:
display = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except sqlite3.Error as e:
print('"{}" query returned an error: ({}).'.format(display_query, e))
sys.exit(1) # depends on [control=['except'], data=['e']]
return display |
def addItem(self, child, href):
    """Add a new item (a catalogue or resource) as a child of this catalogue."""
    assert isinstance(child, Base), "child must be a hypercat Catalogue or Resource"
    child.setHref(href)
    # Every item in a catalogue must carry a distinct href.
    for existing in self.items:
        assert existing.href != href, "All items in a catalogue must have unique hrefs : " + href
    self.items.append(child)
    return
constant[Add a new item (a catalogue or resource) as a child of this catalogue.]
assert[call[name[isinstance], parameter[name[child], name[Base]]]]
call[name[child].setHref, parameter[name[href]]]
for taget[name[item]] in starred[name[self].items] begin[:]
assert[compare[name[item].href not_equal[!=] name[href]]]
<ast.AugAssign object at 0x7da1b1594c70>
return[None] | keyword[def] identifier[addItem] ( identifier[self] , identifier[child] , identifier[href] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[child] , identifier[Base] ), literal[string]
identifier[child] . identifier[setHref] ( identifier[href] )
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[items] :
keyword[assert] identifier[item] . identifier[href] != identifier[href] , literal[string] + identifier[href]
identifier[self] . identifier[items] +=[ identifier[child] ]
keyword[return] | def addItem(self, child, href):
"""Add a new item (a catalogue or resource) as a child of this catalogue."""
assert isinstance(child, Base), 'child must be a hypercat Catalogue or Resource'
child.setHref(href)
for item in self.items:
assert item.href != href, 'All items in a catalogue must have unique hrefs : ' + href # depends on [control=['for'], data=['item']]
self.items += [child] # Add new
return |
def pre(*content, sep='\n'):
    """
    Make mono-width text block (Markdown)
    :param content:
    :param sep:
    :return:
    """
    joined = _join(*content, sep=sep)
    return _md(joined, symbols=MD_SYMBOLS[3])
constant[
Make mono-width text block (Markdown)
:param content:
:param sep:
:return:
]
return[call[name[_md], parameter[call[name[_join], parameter[<ast.Starred object at 0x7da1b18fe200>]]]]] | keyword[def] identifier[pre] (* identifier[content] , identifier[sep] = literal[string] ):
literal[string]
keyword[return] identifier[_md] ( identifier[_join] (* identifier[content] , identifier[sep] = identifier[sep] ), identifier[symbols] = identifier[MD_SYMBOLS] [ literal[int] ]) | def pre(*content, sep='\n'):
"""
Make mono-width text block (Markdown)
:param content:
:param sep:
:return:
"""
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[3]) |
def ansi_format_iter(self, x_start=0, y_start=0, width=None, height=None, frame=0, columns=1, downsample=1, frame_index=None, frame_flip_v=0, frame_flip_h=0):
    """Yield the ANSI escape sequences that render the image.

    x_start
        Offset from the left of the image data to render from. Defaults to 0.
    y_start
        Offset from the top of the image data to render from. Defaults to 0.
    width
        Width of the image data to render. Defaults to the image width.
    height
        Height of the image data to render. Defaults to the image height.
    frame
        Single frame number/object, or a list of frames to render in sequence. Defaults to frame 0.
    columns
        Number of frames to render per line (useful for printing tilemaps!). Defaults to 1.
    downsample
        Shrink larger images by printing every nth pixel only. Defaults to 1.
    frame_index
        Constant or mrc.Ref for a frame object property denoting the index. Defaults to None
        (i.e. frame itself should be an index).
    frame_flip_v
        Constant or mrc.Ref for a frame object property for whether to mirror vertically.
        Defaults to 0.
    frame_flip_h
        Constant or mrc.Ref for a frame object property for whether to mirror horizontally.
        Defaults to 0.
    """
    assert x_start in range(0, self.width)
    assert y_start in range(0, self.height)

    # Build small resolvers so the per-pixel callback stays compact.
    if frame_index is not None:
        resolve_index = lambda fr: mrc.property_get(frame_index, fr)
    else:
        resolve_index = lambda fr: fr if fr in range(0, self.frame_count) else None
    resolve_flip_v = lambda fr: mrc.property_get(frame_flip_v, fr)
    resolve_flip_h = lambda fr: mrc.property_get(frame_flip_h, fr)

    # Accept either a single frame or any iterable of frames.
    try:
        frames = list(iter(frame))
    except TypeError:
        frames = [frame]

    # Falsy width/height means "render to the far edge of the image".
    width = width or self.width - x_start
    height = height or self.height - y_start
    stride = width * height

    def sample_pixel(px, py, frame_obj):
        # Resolve the frame; missing frames and out-of-bounds coordinates
        # render as transparent.
        frame_no = resolve_index(frame_obj)
        if frame_no is None:
            return Transparent()
        if not (0 <= px < self.width) or not (0 <= py < self.height):
            return Transparent()
        if resolve_flip_h(frame_obj):
            px = self.width - px - 1
        if resolve_flip_v(frame_obj):
            py = self.height - py - 1
        offset = stride * frame_no + self.width * py + px
        pixel = self.source[offset]
        if self.mask and not self.mask[offset]:
            pixel = None
        return Transparent() if pixel is None else self.palette[pixel]

    yield from ansi.format_image_iter(sample_pixel, x_start, y_start, width, height, frames, columns, downsample)
constant[Return the ANSI escape sequence to render the image.
x_start
Offset from the left of the image data to render from. Defaults to 0.
y_start
Offset from the top of the image data to render from. Defaults to 0.
width
Width of the image data to render. Defaults to the image width.
height
Height of the image data to render. Defaults to the image height.
frame
Single frame number/object, or a list of frames to render in sequence. Defaults to frame 0.
columns
Number of frames to render per line (useful for printing tilemaps!). Defaults to 1.
downsample
Shrink larger images by printing every nth pixel only. Defaults to 1.
frame_index
Constant or mrc.Ref for a frame object property denoting the index. Defaults to None
(i.e. frame itself should be an index).
frame_flip_v
Constant or mrc.Ref for a frame object property for whether to mirror vertically.
Defaults to 0.
frame_flip_h
Constant or mrc.Ref for a frame object property for whether to mirror horizontally.
Defaults to 0.
]
assert[compare[name[x_start] in call[name[range], parameter[constant[0], name[self].width]]]]
assert[compare[name[y_start] in call[name[range], parameter[constant[0], name[self].height]]]]
if compare[name[frame_index] is_not constant[None]] begin[:]
variable[fn_index] assign[=] <ast.Lambda object at 0x7da1b1110b20>
variable[fn_flip_v] assign[=] <ast.Lambda object at 0x7da1b11a2260>
variable[fn_flip_h] assign[=] <ast.Lambda object at 0x7da1b11a08b0>
variable[frames] assign[=] list[[]]
<ast.Try object at 0x7da1b11a0790>
if <ast.UnaryOp object at 0x7da1b11a2b60> begin[:]
variable[width] assign[=] binary_operation[name[self].width - name[x_start]]
if <ast.UnaryOp object at 0x7da1b11a0f70> begin[:]
variable[height] assign[=] binary_operation[name[self].height - name[y_start]]
variable[stride] assign[=] binary_operation[name[width] * name[height]]
def function[data_fetch, parameter[x, y, fr_obj]]:
variable[fr] assign[=] call[name[fn_index], parameter[name[fr_obj]]]
if compare[name[fr] is constant[None]] begin[:]
return[call[name[Transparent], parameter[]]]
if <ast.UnaryOp object at 0x7da1b11a0640> begin[:]
return[call[name[Transparent], parameter[]]]
if call[name[fn_flip_h], parameter[name[fr_obj]]] begin[:]
variable[x] assign[=] binary_operation[binary_operation[name[self].width - name[x]] - constant[1]]
if call[name[fn_flip_v], parameter[name[fr_obj]]] begin[:]
variable[y] assign[=] binary_operation[binary_operation[name[self].height - name[y]] - constant[1]]
variable[index] assign[=] binary_operation[binary_operation[name[self].width * name[y]] + name[x]]
variable[p] assign[=] call[name[self].source][binary_operation[binary_operation[name[stride] * name[fr]] + name[index]]]
if name[self].mask begin[:]
variable[p] assign[=] <ast.IfExp object at 0x7da1b11a1cf0>
return[<ast.IfExp object at 0x7da1b11a00d0>]
for taget[name[x]] in starred[call[name[ansi].format_image_iter, parameter[name[data_fetch], name[x_start], name[y_start], name[width], name[height], name[frames], name[columns], name[downsample]]]] begin[:]
<ast.Yield object at 0x7da1b11a12a0>
return[None] | keyword[def] identifier[ansi_format_iter] ( identifier[self] , identifier[x_start] = literal[int] , identifier[y_start] = literal[int] , identifier[width] = keyword[None] , identifier[height] = keyword[None] , identifier[frame] = literal[int] , identifier[columns] = literal[int] , identifier[downsample] = literal[int] , identifier[frame_index] = keyword[None] , identifier[frame_flip_v] = literal[int] , identifier[frame_flip_h] = literal[int] ):
literal[string]
keyword[assert] identifier[x_start] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[width] )
keyword[assert] identifier[y_start] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[height] )
keyword[if] identifier[frame_index] keyword[is] keyword[not] keyword[None] :
identifier[fn_index] = keyword[lambda] identifier[fr] : identifier[mrc] . identifier[property_get] ( identifier[frame_index] , identifier[fr] )
keyword[else] :
identifier[fn_index] = keyword[lambda] identifier[fr] : identifier[fr] keyword[if] identifier[fr] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[frame_count] ) keyword[else] keyword[None]
identifier[fn_flip_v] = keyword[lambda] identifier[fr] : identifier[mrc] . identifier[property_get] ( identifier[frame_flip_v] , identifier[fr] )
identifier[fn_flip_h] = keyword[lambda] identifier[fr] : identifier[mrc] . identifier[property_get] ( identifier[frame_flip_h] , identifier[fr] )
identifier[frames] =[]
keyword[try] :
identifier[frame_iter] = identifier[iter] ( identifier[frame] )
identifier[frames] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[frame_iter] ]
keyword[except] identifier[TypeError] :
identifier[frames] =[ identifier[frame] ]
keyword[if] keyword[not] identifier[width] :
identifier[width] = identifier[self] . identifier[width] - identifier[x_start]
keyword[if] keyword[not] identifier[height] :
identifier[height] = identifier[self] . identifier[height] - identifier[y_start]
identifier[stride] = identifier[width] * identifier[height]
keyword[def] identifier[data_fetch] ( identifier[x] , identifier[y] , identifier[fr_obj] ):
identifier[fr] = identifier[fn_index] ( identifier[fr_obj] )
keyword[if] identifier[fr] keyword[is] keyword[None] :
keyword[return] identifier[Transparent] ()
keyword[if] keyword[not] (( literal[int] <= identifier[x] < identifier[self] . identifier[width] ) keyword[and] ( literal[int] <= identifier[y] < identifier[self] . identifier[height] )):
keyword[return] identifier[Transparent] ()
keyword[if] identifier[fn_flip_h] ( identifier[fr_obj] ):
identifier[x] = identifier[self] . identifier[width] - identifier[x] - literal[int]
keyword[if] identifier[fn_flip_v] ( identifier[fr_obj] ):
identifier[y] = identifier[self] . identifier[height] - identifier[y] - literal[int]
identifier[index] = identifier[self] . identifier[width] * identifier[y] + identifier[x]
identifier[p] = identifier[self] . identifier[source] [ identifier[stride] * identifier[fr] + identifier[index] ]
keyword[if] identifier[self] . identifier[mask] :
identifier[p] = identifier[p] keyword[if] identifier[self] . identifier[mask] [ identifier[stride] * identifier[fr] + identifier[index] ] keyword[else] keyword[None]
keyword[return] identifier[self] . identifier[palette] [ identifier[p] ] keyword[if] identifier[p] keyword[is] keyword[not] keyword[None] keyword[else] identifier[Transparent] ()
keyword[for] identifier[x] keyword[in] identifier[ansi] . identifier[format_image_iter] ( identifier[data_fetch] , identifier[x_start] , identifier[y_start] , identifier[width] , identifier[height] , identifier[frames] , identifier[columns] , identifier[downsample] ):
keyword[yield] identifier[x]
keyword[return] | def ansi_format_iter(self, x_start=0, y_start=0, width=None, height=None, frame=0, columns=1, downsample=1, frame_index=None, frame_flip_v=0, frame_flip_h=0):
"""Return the ANSI escape sequence to render the image.
x_start
Offset from the left of the image data to render from. Defaults to 0.
y_start
Offset from the top of the image data to render from. Defaults to 0.
width
Width of the image data to render. Defaults to the image width.
height
Height of the image data to render. Defaults to the image height.
frame
Single frame number/object, or a list of frames to render in sequence. Defaults to frame 0.
columns
Number of frames to render per line (useful for printing tilemaps!). Defaults to 1.
downsample
Shrink larger images by printing every nth pixel only. Defaults to 1.
frame_index
Constant or mrc.Ref for a frame object property denoting the index. Defaults to None
(i.e. frame itself should be an index).
frame_flip_v
Constant or mrc.Ref for a frame object property for whether to mirror vertically.
Defaults to 0.
frame_flip_h
Constant or mrc.Ref for a frame object property for whether to mirror horizontally.
Defaults to 0.
"""
assert x_start in range(0, self.width)
assert y_start in range(0, self.height)
if frame_index is not None:
fn_index = lambda fr: mrc.property_get(frame_index, fr) # depends on [control=['if'], data=['frame_index']]
else:
fn_index = lambda fr: fr if fr in range(0, self.frame_count) else None
fn_flip_v = lambda fr: mrc.property_get(frame_flip_v, fr)
fn_flip_h = lambda fr: mrc.property_get(frame_flip_h, fr)
frames = []
try:
frame_iter = iter(frame)
frames = [f for f in frame_iter] # depends on [control=['try'], data=[]]
except TypeError:
frames = [frame] # depends on [control=['except'], data=[]]
if not width:
width = self.width - x_start # depends on [control=['if'], data=[]]
if not height:
height = self.height - y_start # depends on [control=['if'], data=[]]
stride = width * height
def data_fetch(x, y, fr_obj):
fr = fn_index(fr_obj)
if fr is None:
return Transparent() # depends on [control=['if'], data=[]]
if not (0 <= x < self.width and 0 <= y < self.height):
return Transparent() # depends on [control=['if'], data=[]]
if fn_flip_h(fr_obj):
x = self.width - x - 1 # depends on [control=['if'], data=[]]
if fn_flip_v(fr_obj):
y = self.height - y - 1 # depends on [control=['if'], data=[]]
index = self.width * y + x
p = self.source[stride * fr + index]
if self.mask:
p = p if self.mask[stride * fr + index] else None # depends on [control=['if'], data=[]]
return self.palette[p] if p is not None else Transparent()
for x in ansi.format_image_iter(data_fetch, x_start, y_start, width, height, frames, columns, downsample):
yield x # depends on [control=['for'], data=['x']]
return |
def _output_path(self, input_path, to_format, archive=False):
"""Construct an output path string from an input path string.
:param str input_path: Input path string.
:return: Output path string.
:rtype: :py:class:`str`
"""
indirpath, fname = os.path.split(os.path.abspath(os.path.normpath(input_path)))
commonprefix = os.path.commonprefix([os.path.abspath(self.file_generator.from_path),
os.path.abspath(indirpath)])
commonparts = commonprefix.split(os.sep)
inparts = indirpath.split(os.sep)
outparts = inparts[len(commonparts):]
if archive:
outdirpath = os.path.join(*outparts) if outparts else ""
else:
outdirpath = os.path.join(self.file_generator.to_path, *outparts)
return os.path.join(outdirpath, fname + self.file_generator.file_extension[to_format]) | def function[_output_path, parameter[self, input_path, to_format, archive]]:
constant[Construct an output path string from an input path string.
:param str input_path: Input path string.
:return: Output path string.
:rtype: :py:class:`str`
]
<ast.Tuple object at 0x7da1b2240e80> assign[=] call[name[os].path.split, parameter[call[name[os].path.abspath, parameter[call[name[os].path.normpath, parameter[name[input_path]]]]]]]
variable[commonprefix] assign[=] call[name[os].path.commonprefix, parameter[list[[<ast.Call object at 0x7da1b2240d00>, <ast.Call object at 0x7da1b2240580>]]]]
variable[commonparts] assign[=] call[name[commonprefix].split, parameter[name[os].sep]]
variable[inparts] assign[=] call[name[indirpath].split, parameter[name[os].sep]]
variable[outparts] assign[=] call[name[inparts]][<ast.Slice object at 0x7da1b2241750>]
if name[archive] begin[:]
variable[outdirpath] assign[=] <ast.IfExp object at 0x7da1b2240430>
return[call[name[os].path.join, parameter[name[outdirpath], binary_operation[name[fname] + call[name[self].file_generator.file_extension][name[to_format]]]]]] | keyword[def] identifier[_output_path] ( identifier[self] , identifier[input_path] , identifier[to_format] , identifier[archive] = keyword[False] ):
literal[string]
identifier[indirpath] , identifier[fname] = identifier[os] . identifier[path] . identifier[split] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[normpath] ( identifier[input_path] )))
identifier[commonprefix] = identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[os] . identifier[path] . identifier[abspath] ( identifier[self] . identifier[file_generator] . identifier[from_path] ),
identifier[os] . identifier[path] . identifier[abspath] ( identifier[indirpath] )])
identifier[commonparts] = identifier[commonprefix] . identifier[split] ( identifier[os] . identifier[sep] )
identifier[inparts] = identifier[indirpath] . identifier[split] ( identifier[os] . identifier[sep] )
identifier[outparts] = identifier[inparts] [ identifier[len] ( identifier[commonparts] ):]
keyword[if] identifier[archive] :
identifier[outdirpath] = identifier[os] . identifier[path] . identifier[join] (* identifier[outparts] ) keyword[if] identifier[outparts] keyword[else] literal[string]
keyword[else] :
identifier[outdirpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[file_generator] . identifier[to_path] ,* identifier[outparts] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[outdirpath] , identifier[fname] + identifier[self] . identifier[file_generator] . identifier[file_extension] [ identifier[to_format] ]) | def _output_path(self, input_path, to_format, archive=False):
"""Construct an output path string from an input path string.
:param str input_path: Input path string.
:return: Output path string.
:rtype: :py:class:`str`
"""
(indirpath, fname) = os.path.split(os.path.abspath(os.path.normpath(input_path)))
commonprefix = os.path.commonprefix([os.path.abspath(self.file_generator.from_path), os.path.abspath(indirpath)])
commonparts = commonprefix.split(os.sep)
inparts = indirpath.split(os.sep)
outparts = inparts[len(commonparts):]
if archive:
outdirpath = os.path.join(*outparts) if outparts else '' # depends on [control=['if'], data=[]]
else:
outdirpath = os.path.join(self.file_generator.to_path, *outparts)
return os.path.join(outdirpath, fname + self.file_generator.file_extension[to_format]) |
def get_processes(process_dir, base_source_uri):
    """Find processes in path.

    :param str process_dir: Path to the directory where to search for processes
    :param str base_source_uri: Base URL of the source code repository with process definitions
    :return: List of process definitions parsed from the YAML files found under
        ``process_dir``; each definition gains a ``source_uri`` key pointing at
        the line where it is defined, and a default ``category`` if missing
    :rtype: list
    """
    global PROCESS_CACHE  # pylint: disable=global-statement
    # NOTE(review): the cache ignores the arguments, so a second call with a
    # different process_dir/base_source_uri returns stale results -- confirm
    # the function is only ever called with one set of arguments.
    if PROCESS_CACHE is not None:
        return PROCESS_CACHE

    # Collect every YAML file below process_dir.
    all_process_files = []
    for root, _, filenames in os.walk(process_dir):
        for extension in ('*.yaml', '*.yml'):
            for filename in fnmatch.filter(filenames, extension):
                all_process_files.append(os.path.join(root, filename))

    def read_yaml_file(fname):
        """Read and parse a single YAML file."""
        with open(fname) as f:
            return yaml.load(f, Loader=yaml.FullLoader)

    processes = []
    for process_file in all_process_files:
        for process in read_yaml_file(process_file):
            # Find the line where this process' definition starts (a file may
            # contain multiple process definitions).
            startline = get_process_definition_start(process_file, process['slug'])
            # os.path.relpath is robust against a trailing separator on
            # process_dir; the previous slicing (len(process_dir) + 1) silently
            # dropped a character in that case.
            relative_path = os.path.relpath(process_file, process_dir)
            # Put together URL to starting line of process definition.
            process['source_uri'] = base_source_uri + relative_path + '#L' + str(startline)
            if 'category' not in process:
                process['category'] = 'uncategorized'
            processes.append(process)

    PROCESS_CACHE = processes
    return processes
constant[Find processes in path.
:param str process_dir: Path to the directory where to search for processes
:param str base_source_uri: Base URL of the source code repository with process definitions
:return: Dictionary of processes where keys are URLs pointing to processes'
source code and values are processes' definitions parsed from YAML files
:rtype: dict
:raises: ValueError: if multiple processes with the same slug are found
]
<ast.Global object at 0x7da1b1add9f0>
if compare[name[PROCESS_CACHE] is_not constant[None]] begin[:]
return[name[PROCESS_CACHE]]
variable[all_process_files] assign[=] list[[]]
variable[process_file_extensions] assign[=] list[[<ast.Constant object at 0x7da1b1add8a0>, <ast.Constant object at 0x7da1b1adc3d0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1adc880>, <ast.Name object at 0x7da1b1adc5e0>, <ast.Name object at 0x7da1b1addcc0>]]] in starred[call[name[os].walk, parameter[name[process_dir]]]] begin[:]
for taget[name[extension]] in starred[name[process_file_extensions]] begin[:]
for taget[name[filename]] in starred[call[name[fnmatch].filter, parameter[name[filenames], name[extension]]]] begin[:]
call[name[all_process_files].append, parameter[call[name[os].path.join, parameter[name[root], name[filename]]]]]
def function[read_yaml_file, parameter[fname]]:
constant[Read the yaml file.]
with call[name[open], parameter[name[fname]]] begin[:]
return[call[name[yaml].load, parameter[name[f]]]]
variable[processes] assign[=] list[[]]
for taget[name[process_file]] in starred[name[all_process_files]] begin[:]
variable[processes_in_file] assign[=] call[name[read_yaml_file], parameter[name[process_file]]]
for taget[name[process]] in starred[name[processes_in_file]] begin[:]
variable[startline] assign[=] call[name[get_process_definition_start], parameter[name[process_file], call[name[process]][constant[slug]]]]
call[name[process]][constant[source_uri]] assign[=] binary_operation[binary_operation[binary_operation[name[base_source_uri] + call[name[process_file]][<ast.Slice object at 0x7da1b1b695a0>]] + constant[#L]] + call[name[str], parameter[name[startline]]]]
if compare[constant[category] <ast.NotIn object at 0x7da2590d7190> name[process]] begin[:]
call[name[process]][constant[category]] assign[=] constant[uncategorized]
call[name[processes].append, parameter[name[process]]]
variable[PROCESS_CACHE] assign[=] name[processes]
return[name[processes]] | keyword[def] identifier[get_processes] ( identifier[process_dir] , identifier[base_source_uri] ):
literal[string]
keyword[global] identifier[PROCESS_CACHE]
keyword[if] identifier[PROCESS_CACHE] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[PROCESS_CACHE]
identifier[all_process_files] =[]
identifier[process_file_extensions] =[ literal[string] , literal[string] ]
keyword[for] identifier[root] , identifier[_] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[process_dir] ):
keyword[for] identifier[extension] keyword[in] identifier[process_file_extensions] :
keyword[for] identifier[filename] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[filenames] , identifier[extension] ):
identifier[all_process_files] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] ))
keyword[def] identifier[read_yaml_file] ( identifier[fname] ):
literal[string]
keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[f] :
keyword[return] identifier[yaml] . identifier[load] ( identifier[f] , identifier[Loader] = identifier[yaml] . identifier[FullLoader] )
identifier[processes] =[]
keyword[for] identifier[process_file] keyword[in] identifier[all_process_files] :
identifier[processes_in_file] = identifier[read_yaml_file] ( identifier[process_file] )
keyword[for] identifier[process] keyword[in] identifier[processes_in_file] :
identifier[startline] = identifier[get_process_definition_start] ( identifier[process_file] , identifier[process] [ literal[string] ])
identifier[process] [ literal[string] ]= identifier[base_source_uri] + identifier[process_file] [ identifier[len] ( identifier[process_dir] )+ literal[int] :]+ literal[string] + identifier[str] ( identifier[startline] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[process] :
identifier[process] [ literal[string] ]= literal[string]
identifier[processes] . identifier[append] ( identifier[process] )
identifier[PROCESS_CACHE] = identifier[processes]
keyword[return] identifier[processes] | def get_processes(process_dir, base_source_uri):
"""Find processes in path.
:param str process_dir: Path to the directory where to search for processes
:param str base_source_uri: Base URL of the source code repository with process definitions
:return: Dictionary of processes where keys are URLs pointing to processes'
source code and values are processes' definitions parsed from YAML files
:rtype: dict
:raises: ValueError: if multiple processes with the same slug are found
"""
global PROCESS_CACHE # pylint: disable=global-statement
if PROCESS_CACHE is not None:
return PROCESS_CACHE # depends on [control=['if'], data=['PROCESS_CACHE']]
all_process_files = []
process_file_extensions = ['*.yaml', '*.yml']
for (root, _, filenames) in os.walk(process_dir):
for extension in process_file_extensions:
for filename in fnmatch.filter(filenames, extension):
all_process_files.append(os.path.join(root, filename)) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=['extension']] # depends on [control=['for'], data=[]]
def read_yaml_file(fname):
"""Read the yaml file."""
with open(fname) as f:
return yaml.load(f, Loader=yaml.FullLoader) # depends on [control=['with'], data=['f']]
processes = []
for process_file in all_process_files:
processes_in_file = read_yaml_file(process_file)
for process in processes_in_file:
# This section finds the line in file where the
# defintion of the process starts. (there are
# multiple process definition in some files).
startline = get_process_definition_start(process_file, process['slug'])
# Put together URL to starting line of process definition.
process['source_uri'] = base_source_uri + process_file[len(process_dir) + 1:] + '#L' + str(startline)
if 'category' not in process:
process['category'] = 'uncategorized' # depends on [control=['if'], data=['process']]
processes.append(process) # depends on [control=['for'], data=['process']] # depends on [control=['for'], data=['process_file']]
PROCESS_CACHE = processes
return processes |
def _ReadUUIDDataTypeDefinition(
    self, definitions_registry, definition_values, definition_name,
    is_member=False):
  """Reads an UUID data type definition.

  UUIDs are fixed-size values, so this delegates to the generic fixed-size
  reader with a size locked to 16 bytes (the only valid UUID size).

  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type definitions
        registry.
    definition_values (dict[str, object]): definition values.
    definition_name (str): name of the definition.
    is_member (Optional[bool]): True if the data type definition is a member
        data type definition.

  Returns:
    UUIDDataTypeDefinition: UUID data type definition.

  Raises:
    DefinitionReaderError: if the definitions values are missing or if
        the format is incorrect.
  """
  supported_attributes = self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE
  return self._ReadFixedSizeDataTypeDefinition(
      definitions_registry, definition_values, data_types.UUIDDefinition,
      definition_name, supported_attributes, default_size=16,
      is_member=is_member, supported_size_values=(16, ))
constant[Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
]
return[call[name[self]._ReadFixedSizeDataTypeDefinition, parameter[name[definitions_registry], name[definition_values], name[data_types].UUIDDefinition, name[definition_name], name[self]._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE]]] | keyword[def] identifier[_ReadUUIDDataTypeDefinition] (
identifier[self] , identifier[definitions_registry] , identifier[definition_values] , identifier[definition_name] ,
identifier[is_member] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_ReadFixedSizeDataTypeDefinition] (
identifier[definitions_registry] , identifier[definition_values] ,
identifier[data_types] . identifier[UUIDDefinition] , identifier[definition_name] ,
identifier[self] . identifier[_SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE] , identifier[default_size] = literal[int] ,
identifier[is_member] = identifier[is_member] , identifier[supported_size_values] =( literal[int] ,)) | def _ReadUUIDDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
"""Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.UUIDDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16, is_member=is_member, supported_size_values=(16,)) |
def ObjectModifiedEventHandler(obj, event):
    """Event subscriber: record an audit snapshot after an object is modified.
    """
    # Objects without snapshot support are ignored entirely.
    if supports_snapshots(obj):
        # Record an "edit" snapshot for the audit trail ...
        take_snapshot(obj, action="edit")
        # ... and keep the auditlog catalog in sync with the new state.
        reindex_object(obj)
constant[Object has been modified
]
if <ast.UnaryOp object at 0x7da18eb55510> begin[:]
return[None]
call[name[take_snapshot], parameter[name[obj]]]
call[name[reindex_object], parameter[name[obj]]] | keyword[def] identifier[ObjectModifiedEventHandler] ( identifier[obj] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[supports_snapshots] ( identifier[obj] ):
keyword[return]
identifier[take_snapshot] ( identifier[obj] , identifier[action] = literal[string] )
identifier[reindex_object] ( identifier[obj] ) | def ObjectModifiedEventHandler(obj, event):
"""Object has been modified
"""
# only snapshot supported objects
if not supports_snapshots(obj):
return # depends on [control=['if'], data=[]]
# take a new snapshot
take_snapshot(obj, action='edit')
# reindex the object in the auditlog catalog
reindex_object(obj) |
def convert_values(self, value, field):
    """
    Coerce the value returned by the database backend into a consistent
    type that is compatible with the field type.
    In our case, cater for the fact that SQL Server < 2008 has no
    separate Date and Time data types.
    TODO: See how we'll handle this for SQL Server >= 2008
    """
    if value is None:
        return None
    # Resolve the field's internal type once; a missing field acts like an
    # unknown type. (Assumes get_internal_type() is a pure getter.)
    internal_type = field.get_internal_type() if field else None
    if internal_type == 'DateTimeField':
        if isinstance(value, string_types) and value:
            value = parse_datetime(value)
        return value
    if internal_type == 'DateField':
        if isinstance(value, datetime.datetime):
            value = value.date()  # extract date
        elif isinstance(value, string_types):
            value = parse_date(value)
    elif internal_type == 'TimeField':
        # SQL Server < 2008 stores times as datetimes pinned to 1900-01-01.
        if (isinstance(value, datetime.datetime) and value.year == 1900
                and value.month == value.day == 1):
            value = value.time()  # extract time
        elif isinstance(value, string_types):
            # If the value is a string, parse it using parse_time.
            value = parse_time(value)
    elif (isinstance(value, datetime.datetime)
            and value.hour == value.minute == value.second == value.microsecond == 0):
        # Some cases (for example when select_related() is used) aren't
        # caught by the DateField case above and date fields arrive from
        # the DB as datetime instances.
        # Implement a workaround stealing the idea from the Oracle
        # backend. It's not perfect so the same warning applies (i.e. if a
        # query results in valid date+time values with the time part set
        # to midnight, this workaround can surprise us by converting them
        # to the datetime.date Python type).
        value = value.date()
    elif internal_type == 'FloatField':
        # Force floats to the correct type
        value = float(value)
    return value
constant[
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
]
if compare[name[value] is constant[None]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b0c4d3c0> begin[:]
if <ast.BoolOp object at 0x7da1b0c4d960> begin[:]
variable[value] assign[=] call[name[parse_datetime], parameter[name[value]]]
return[name[value]]
return[name[value]] | keyword[def] identifier[convert_values] ( identifier[self] , identifier[value] , identifier[field] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[field] keyword[and] identifier[field] . identifier[get_internal_type] ()== literal[string] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[string_types] ) keyword[and] identifier[value] :
identifier[value] = identifier[parse_datetime] ( identifier[value] )
keyword[return] identifier[value]
keyword[elif] identifier[field] keyword[and] identifier[field] . identifier[get_internal_type] ()== literal[string] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[datetime] ):
identifier[value] = identifier[value] . identifier[date] ()
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
identifier[value] = identifier[parse_date] ( identifier[value] )
keyword[elif] identifier[field] keyword[and] identifier[field] . identifier[get_internal_type] ()== literal[string] :
keyword[if] ( identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[datetime] ) keyword[and] identifier[value] . identifier[year] == literal[int] keyword[and] identifier[value] . identifier[month] == identifier[value] . identifier[day] == literal[int] ):
identifier[value] = identifier[value] . identifier[time] ()
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
identifier[value] = identifier[parse_time] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[datetime] ) keyword[and] identifier[value] . identifier[hour] == identifier[value] . identifier[minute] == identifier[value] . identifier[second] == identifier[value] . identifier[microsecond] == literal[int] :
identifier[value] = identifier[value] . identifier[date] ()
keyword[elif] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[field] keyword[and] identifier[field] . identifier[get_internal_type] ()== literal[string] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[return] identifier[value] | def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None # depends on [control=['if'], data=[]]
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value) # depends on [control=['if'], data=[]]
return value # depends on [control=['if'], data=[]]
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date # depends on [control=['if'], data=[]]
elif isinstance(value, string_types):
value = parse_date(value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field and field.get_internal_type() == 'TimeField':
if isinstance(value, datetime.datetime) and value.year == 1900 and (value.month == value.day == 1):
value = value.time() # extract time # depends on [control=['if'], data=[]]
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date() # depends on [control=['if'], data=[]]
# Force floats to the correct type
elif value is not None and field and (field.get_internal_type() == 'FloatField'):
value = float(value) # depends on [control=['if'], data=[]]
return value |
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy) | def function[_update_inplace, parameter[self, result, verify_is_copy]]:
constant[
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
]
call[name[self]._reset_cache, parameter[]]
call[name[self]._clear_item_cache, parameter[]]
name[self]._data assign[=] call[name[getattr], parameter[name[result], constant[_data], name[result]]]
call[name[self]._maybe_update_cacher, parameter[]] | keyword[def] identifier[_update_inplace] ( identifier[self] , identifier[result] , identifier[verify_is_copy] = keyword[True] ):
literal[string]
identifier[self] . identifier[_reset_cache] ()
identifier[self] . identifier[_clear_item_cache] ()
identifier[self] . identifier[_data] = identifier[getattr] ( identifier[result] , literal[string] , identifier[result] )
identifier[self] . identifier[_maybe_update_cacher] ( identifier[verify_is_copy] = identifier[verify_is_copy] ) | def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy) |
def getEstTraitCovar(self,term_i=None):
"""
Returns explicitly the estimated trait covariance matrix
Args:
term_i: index of the term we are interested in
"""
assert self.P>1, 'Trait covars not defined for single trait analysis'
if term_i==None:
RV=SP.zeros((self.P,self.P))
for term_i in range(self.n_terms): RV+=self.vd.getTerm(term_i).getTraitCovar().K()
else:
assert term_i<self.n_terms, 'Term index non valid'
RV = self.vd.getTerm(term_i).getTraitCovar().K()
return RV | def function[getEstTraitCovar, parameter[self, term_i]]:
constant[
Returns explicitly the estimated trait covariance matrix
Args:
term_i: index of the term we are interested in
]
assert[compare[name[self].P greater[>] constant[1]]]
if compare[name[term_i] equal[==] constant[None]] begin[:]
variable[RV] assign[=] call[name[SP].zeros, parameter[tuple[[<ast.Attribute object at 0x7da20c795600>, <ast.Attribute object at 0x7da20c795ff0>]]]]
for taget[name[term_i]] in starred[call[name[range], parameter[name[self].n_terms]]] begin[:]
<ast.AugAssign object at 0x7da20c796200>
return[name[RV]] | keyword[def] identifier[getEstTraitCovar] ( identifier[self] , identifier[term_i] = keyword[None] ):
literal[string]
keyword[assert] identifier[self] . identifier[P] > literal[int] , literal[string]
keyword[if] identifier[term_i] == keyword[None] :
identifier[RV] = identifier[SP] . identifier[zeros] (( identifier[self] . identifier[P] , identifier[self] . identifier[P] ))
keyword[for] identifier[term_i] keyword[in] identifier[range] ( identifier[self] . identifier[n_terms] ): identifier[RV] += identifier[self] . identifier[vd] . identifier[getTerm] ( identifier[term_i] ). identifier[getTraitCovar] (). identifier[K] ()
keyword[else] :
keyword[assert] identifier[term_i] < identifier[self] . identifier[n_terms] , literal[string]
identifier[RV] = identifier[self] . identifier[vd] . identifier[getTerm] ( identifier[term_i] ). identifier[getTraitCovar] (). identifier[K] ()
keyword[return] identifier[RV] | def getEstTraitCovar(self, term_i=None):
"""
Returns explicitly the estimated trait covariance matrix
Args:
term_i: index of the term we are interested in
"""
assert self.P > 1, 'Trait covars not defined for single trait analysis'
if term_i == None:
RV = SP.zeros((self.P, self.P))
for term_i in range(self.n_terms):
RV += self.vd.getTerm(term_i).getTraitCovar().K() # depends on [control=['for'], data=['term_i']] # depends on [control=['if'], data=['term_i']]
else:
assert term_i < self.n_terms, 'Term index non valid'
RV = self.vd.getTerm(term_i).getTraitCovar().K()
return RV |
def on(self, val=None):
"""Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag.
"""
if val is False:
raise ParameterError("Turning the ValuedParameter on with value "
"False is the same as turning it off. Use "
"another value.")
elif self.IsPath:
self.Value = FilePath(val)
else:
self.Value = val | def function[on, parameter[self, val]]:
constant[Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag.
]
if compare[name[val] is constant[False]] begin[:]
<ast.Raise object at 0x7da1b0a32bf0> | keyword[def] identifier[on] ( identifier[self] , identifier[val] = keyword[None] ):
literal[string]
keyword[if] identifier[val] keyword[is] keyword[False] :
keyword[raise] identifier[ParameterError] ( literal[string]
literal[string]
literal[string] )
keyword[elif] identifier[self] . identifier[IsPath] :
identifier[self] . identifier[Value] = identifier[FilePath] ( identifier[val] )
keyword[else] :
identifier[self] . identifier[Value] = identifier[val] | def on(self, val=None):
"""Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag.
"""
if val is False:
raise ParameterError('Turning the ValuedParameter on with value False is the same as turning it off. Use another value.') # depends on [control=['if'], data=[]]
elif self.IsPath:
self.Value = FilePath(val) # depends on [control=['if'], data=[]]
else:
self.Value = val |
def inet_ntop(address_family, packed_ip):
""" A platform independent version of inet_ntop """
global __inet_ntop
if __inet_ntop is None:
if hasattr(socket, 'inet_ntop'):
__inet_ntop = socket.inet_ntop
else:
from ospd import win_socket
__inet_ntop = win_socket.inet_ntop
return __inet_ntop(address_family, packed_ip) | def function[inet_ntop, parameter[address_family, packed_ip]]:
constant[ A platform independent version of inet_ntop ]
<ast.Global object at 0x7da20c7cbdf0>
if compare[name[__inet_ntop] is constant[None]] begin[:]
if call[name[hasattr], parameter[name[socket], constant[inet_ntop]]] begin[:]
variable[__inet_ntop] assign[=] name[socket].inet_ntop
return[call[name[__inet_ntop], parameter[name[address_family], name[packed_ip]]]] | keyword[def] identifier[inet_ntop] ( identifier[address_family] , identifier[packed_ip] ):
literal[string]
keyword[global] identifier[__inet_ntop]
keyword[if] identifier[__inet_ntop] keyword[is] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[socket] , literal[string] ):
identifier[__inet_ntop] = identifier[socket] . identifier[inet_ntop]
keyword[else] :
keyword[from] identifier[ospd] keyword[import] identifier[win_socket]
identifier[__inet_ntop] = identifier[win_socket] . identifier[inet_ntop]
keyword[return] identifier[__inet_ntop] ( identifier[address_family] , identifier[packed_ip] ) | def inet_ntop(address_family, packed_ip):
""" A platform independent version of inet_ntop """
global __inet_ntop
if __inet_ntop is None:
if hasattr(socket, 'inet_ntop'):
__inet_ntop = socket.inet_ntop # depends on [control=['if'], data=[]]
else:
from ospd import win_socket
__inet_ntop = win_socket.inet_ntop # depends on [control=['if'], data=['__inet_ntop']]
return __inet_ntop(address_family, packed_ip) |
def create_request_comment(self, issue_id_or_key, body, public=True):
"""
Creating request comment
:param issue_id_or_key: str
:param body: str
:param public: OPTIONAL: bool (default is True)
:return: New comment
"""
log.warning('Creating comment...')
data = {"body": body, "public": public}
return self.post('rest/servicedeskapi/request/{}/comment'.format(issue_id_or_key), data=data) | def function[create_request_comment, parameter[self, issue_id_or_key, body, public]]:
constant[
Creating request comment
:param issue_id_or_key: str
:param body: str
:param public: OPTIONAL: bool (default is True)
:return: New comment
]
call[name[log].warning, parameter[constant[Creating comment...]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18f720160>, <ast.Constant object at 0x7da18f723c40>], [<ast.Name object at 0x7da18f7203d0>, <ast.Name object at 0x7da18f721480>]]
return[call[name[self].post, parameter[call[constant[rest/servicedeskapi/request/{}/comment].format, parameter[name[issue_id_or_key]]]]]] | keyword[def] identifier[create_request_comment] ( identifier[self] , identifier[issue_id_or_key] , identifier[body] , identifier[public] = keyword[True] ):
literal[string]
identifier[log] . identifier[warning] ( literal[string] )
identifier[data] ={ literal[string] : identifier[body] , literal[string] : identifier[public] }
keyword[return] identifier[self] . identifier[post] ( literal[string] . identifier[format] ( identifier[issue_id_or_key] ), identifier[data] = identifier[data] ) | def create_request_comment(self, issue_id_or_key, body, public=True):
"""
Creating request comment
:param issue_id_or_key: str
:param body: str
:param public: OPTIONAL: bool (default is True)
:return: New comment
"""
log.warning('Creating comment...')
data = {'body': body, 'public': public}
return self.post('rest/servicedeskapi/request/{}/comment'.format(issue_id_or_key), data=data) |
def summarise_file_as_html(fname):
"""
takes a large data file and produces a HTML summary as html
"""
txt = '<H1>' + fname + '</H1>'
num_lines = 0
print('Reading OpenCyc file - ', fname)
with open(ip_folder + os.sep + fname, 'r') as f:
txt += '<PRE>'
for line in f:
if line.strip() != '':
num_lines += 1
if num_lines < 80:
txt += str(num_lines) + ': ' + escape_html(line) + ''
txt += '</PRE>'
txt += 'Total lines = ' + str(num_lines) + '<BR><BR>'
return txt | def function[summarise_file_as_html, parameter[fname]]:
constant[
takes a large data file and produces a HTML summary as html
]
variable[txt] assign[=] binary_operation[binary_operation[constant[<H1>] + name[fname]] + constant[</H1>]]
variable[num_lines] assign[=] constant[0]
call[name[print], parameter[constant[Reading OpenCyc file - ], name[fname]]]
with call[name[open], parameter[binary_operation[binary_operation[name[ip_folder] + name[os].sep] + name[fname]], constant[r]]] begin[:]
<ast.AugAssign object at 0x7da204565a50>
for taget[name[line]] in starred[name[f]] begin[:]
if compare[call[name[line].strip, parameter[]] not_equal[!=] constant[]] begin[:]
<ast.AugAssign object at 0x7da204565390>
if compare[name[num_lines] less[<] constant[80]] begin[:]
<ast.AugAssign object at 0x7da204564fa0>
<ast.AugAssign object at 0x7da204565120>
<ast.AugAssign object at 0x7da18fe908b0>
return[name[txt]] | keyword[def] identifier[summarise_file_as_html] ( identifier[fname] ):
literal[string]
identifier[txt] = literal[string] + identifier[fname] + literal[string]
identifier[num_lines] = literal[int]
identifier[print] ( literal[string] , identifier[fname] )
keyword[with] identifier[open] ( identifier[ip_folder] + identifier[os] . identifier[sep] + identifier[fname] , literal[string] ) keyword[as] identifier[f] :
identifier[txt] += literal[string]
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] identifier[line] . identifier[strip] ()!= literal[string] :
identifier[num_lines] += literal[int]
keyword[if] identifier[num_lines] < literal[int] :
identifier[txt] += identifier[str] ( identifier[num_lines] )+ literal[string] + identifier[escape_html] ( identifier[line] )+ literal[string]
identifier[txt] += literal[string]
identifier[txt] += literal[string] + identifier[str] ( identifier[num_lines] )+ literal[string]
keyword[return] identifier[txt] | def summarise_file_as_html(fname):
"""
takes a large data file and produces a HTML summary as html
"""
txt = '<H1>' + fname + '</H1>'
num_lines = 0
print('Reading OpenCyc file - ', fname)
with open(ip_folder + os.sep + fname, 'r') as f:
txt += '<PRE>'
for line in f:
if line.strip() != '':
num_lines += 1
if num_lines < 80:
txt += str(num_lines) + ': ' + escape_html(line) + '' # depends on [control=['if'], data=['num_lines']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
txt += '</PRE>'
txt += 'Total lines = ' + str(num_lines) + '<BR><BR>' # depends on [control=['with'], data=['f']]
return txt |
def query(self, minhash, size):
'''
Giving the MinHash and size of the query set, retrieve
keys that references sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
'''
for i, index in enumerate(self.indexes):
u = self.uppers[i]
if u is None:
continue
b, r = self._get_optimal_param(u, size)
for key in index[r]._query_b(minhash, b):
yield key | def function[query, parameter[self, minhash, size]]:
constant[
Giving the MinHash and size of the query set, retrieve
keys that references sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
]
for taget[tuple[[<ast.Name object at 0x7da1b03aad40>, <ast.Name object at 0x7da1b03a8e80>]]] in starred[call[name[enumerate], parameter[name[self].indexes]]] begin[:]
variable[u] assign[=] call[name[self].uppers][name[i]]
if compare[name[u] is constant[None]] begin[:]
continue
<ast.Tuple object at 0x7da1b03a8c40> assign[=] call[name[self]._get_optimal_param, parameter[name[u], name[size]]]
for taget[name[key]] in starred[call[call[name[index]][name[r]]._query_b, parameter[name[minhash], name[b]]]] begin[:]
<ast.Yield object at 0x7da1b03aa080> | keyword[def] identifier[query] ( identifier[self] , identifier[minhash] , identifier[size] ):
literal[string]
keyword[for] identifier[i] , identifier[index] keyword[in] identifier[enumerate] ( identifier[self] . identifier[indexes] ):
identifier[u] = identifier[self] . identifier[uppers] [ identifier[i] ]
keyword[if] identifier[u] keyword[is] keyword[None] :
keyword[continue]
identifier[b] , identifier[r] = identifier[self] . identifier[_get_optimal_param] ( identifier[u] , identifier[size] )
keyword[for] identifier[key] keyword[in] identifier[index] [ identifier[r] ]. identifier[_query_b] ( identifier[minhash] , identifier[b] ):
keyword[yield] identifier[key] | def query(self, minhash, size):
"""
Giving the MinHash and size of the query set, retrieve
keys that references sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
"""
for (i, index) in enumerate(self.indexes):
u = self.uppers[i]
if u is None:
continue # depends on [control=['if'], data=[]]
(b, r) = self._get_optimal_param(u, size)
for key in index[r]._query_b(minhash, b):
yield key # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=[]] |
def trace():
"""Enables and disables request tracing."""
def fget(self):
return self._options.get('trace', None)
def fset(self, value):
self._options['trace'] = value
return locals() | def function[trace, parameter[]]:
constant[Enables and disables request tracing.]
def function[fget, parameter[self]]:
return[call[name[self]._options.get, parameter[constant[trace], constant[None]]]]
def function[fset, parameter[self, value]]:
call[name[self]._options][constant[trace]] assign[=] name[value]
return[call[name[locals], parameter[]]] | keyword[def] identifier[trace] ():
literal[string]
keyword[def] identifier[fget] ( identifier[self] ):
keyword[return] identifier[self] . identifier[_options] . identifier[get] ( literal[string] , keyword[None] )
keyword[def] identifier[fset] ( identifier[self] , identifier[value] ):
identifier[self] . identifier[_options] [ literal[string] ]= identifier[value]
keyword[return] identifier[locals] () | def trace():
"""Enables and disables request tracing."""
def fget(self):
return self._options.get('trace', None)
def fset(self, value):
self._options['trace'] = value
return locals() |
def set_index(self, keys):
"""Set the index of the DataFrame to be the keys columns.
Note this means that the old index is removed.
Parameters
----------
keys : str or list of str
Which column(s) to set as the index.
Returns
-------
DataFrame
DataFrame with the index set to the column(s) corresponding to the keys.
"""
if isinstance(keys, str):
column = self._data[keys]
new_index = Index(column.values, column.dtype, column.name)
new_data = OrderedDict((sr.name, Series(sr.values, new_index, sr.dtype, sr.name))
for sr in self._iter())
del new_data[keys]
return DataFrame(new_data, new_index)
elif isinstance(keys, list):
check_inner_types(keys, str)
new_index_data = []
for column_name in keys:
column = self._data[column_name]
new_index_data.append(Index(column.values, column.dtype, column.name))
new_index = MultiIndex(new_index_data, keys)
new_data = OrderedDict((sr.name, Series(sr.values, new_index, sr.dtype, sr.name))
for sr in self._iter())
for column_name in keys:
del new_data[column_name]
return DataFrame(new_data, new_index)
else:
raise TypeError('Expected a string or a list of strings') | def function[set_index, parameter[self, keys]]:
constant[Set the index of the DataFrame to be the keys columns.
Note this means that the old index is removed.
Parameters
----------
keys : str or list of str
Which column(s) to set as the index.
Returns
-------
DataFrame
DataFrame with the index set to the column(s) corresponding to the keys.
]
if call[name[isinstance], parameter[name[keys], name[str]]] begin[:]
variable[column] assign[=] call[name[self]._data][name[keys]]
variable[new_index] assign[=] call[name[Index], parameter[name[column].values, name[column].dtype, name[column].name]]
variable[new_data] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b0956f20>]]
<ast.Delete object at 0x7da1b0955d20>
return[call[name[DataFrame], parameter[name[new_data], name[new_index]]]] | keyword[def] identifier[set_index] ( identifier[self] , identifier[keys] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[keys] , identifier[str] ):
identifier[column] = identifier[self] . identifier[_data] [ identifier[keys] ]
identifier[new_index] = identifier[Index] ( identifier[column] . identifier[values] , identifier[column] . identifier[dtype] , identifier[column] . identifier[name] )
identifier[new_data] = identifier[OrderedDict] (( identifier[sr] . identifier[name] , identifier[Series] ( identifier[sr] . identifier[values] , identifier[new_index] , identifier[sr] . identifier[dtype] , identifier[sr] . identifier[name] ))
keyword[for] identifier[sr] keyword[in] identifier[self] . identifier[_iter] ())
keyword[del] identifier[new_data] [ identifier[keys] ]
keyword[return] identifier[DataFrame] ( identifier[new_data] , identifier[new_index] )
keyword[elif] identifier[isinstance] ( identifier[keys] , identifier[list] ):
identifier[check_inner_types] ( identifier[keys] , identifier[str] )
identifier[new_index_data] =[]
keyword[for] identifier[column_name] keyword[in] identifier[keys] :
identifier[column] = identifier[self] . identifier[_data] [ identifier[column_name] ]
identifier[new_index_data] . identifier[append] ( identifier[Index] ( identifier[column] . identifier[values] , identifier[column] . identifier[dtype] , identifier[column] . identifier[name] ))
identifier[new_index] = identifier[MultiIndex] ( identifier[new_index_data] , identifier[keys] )
identifier[new_data] = identifier[OrderedDict] (( identifier[sr] . identifier[name] , identifier[Series] ( identifier[sr] . identifier[values] , identifier[new_index] , identifier[sr] . identifier[dtype] , identifier[sr] . identifier[name] ))
keyword[for] identifier[sr] keyword[in] identifier[self] . identifier[_iter] ())
keyword[for] identifier[column_name] keyword[in] identifier[keys] :
keyword[del] identifier[new_data] [ identifier[column_name] ]
keyword[return] identifier[DataFrame] ( identifier[new_data] , identifier[new_index] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def set_index(self, keys):
"""Set the index of the DataFrame to be the keys columns.
Note this means that the old index is removed.
Parameters
----------
keys : str or list of str
Which column(s) to set as the index.
Returns
-------
DataFrame
DataFrame with the index set to the column(s) corresponding to the keys.
"""
if isinstance(keys, str):
column = self._data[keys]
new_index = Index(column.values, column.dtype, column.name)
new_data = OrderedDict(((sr.name, Series(sr.values, new_index, sr.dtype, sr.name)) for sr in self._iter()))
del new_data[keys]
return DataFrame(new_data, new_index) # depends on [control=['if'], data=[]]
elif isinstance(keys, list):
check_inner_types(keys, str)
new_index_data = []
for column_name in keys:
column = self._data[column_name]
new_index_data.append(Index(column.values, column.dtype, column.name)) # depends on [control=['for'], data=['column_name']]
new_index = MultiIndex(new_index_data, keys)
new_data = OrderedDict(((sr.name, Series(sr.values, new_index, sr.dtype, sr.name)) for sr in self._iter()))
for column_name in keys:
del new_data[column_name] # depends on [control=['for'], data=['column_name']]
return DataFrame(new_data, new_index) # depends on [control=['if'], data=[]]
else:
raise TypeError('Expected a string or a list of strings') |
def keypoint_random_rotate(image, annos, mask=None, rg=15.):
"""Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask
"""
def _rotate_coord(shape, newxy, point, angle):
angle = -1 * angle / 180.0 * math.pi
ox, oy = shape
px, py = point
ox /= 2
oy /= 2
qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
new_x, new_y = newxy
qx += ox - new_x
qy += oy - new_y
return int(qx + 0.5), int(qy + 0.5)
def _largest_rotated_rect(w, h, angle):
"""
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
angle = angle / 180.0 * math.pi
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# if suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return int(np.round(wr)), int(np.round(hr))
img_shape = np.shape(image)
height = img_shape[0]
width = img_shape[1]
deg = np.random.uniform(-rg, rg)
img = image
center = (img.shape[1] * 0.5, img.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
img = ret[newy:newy + newh, newx:newx + neww]
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
x, y = _rotate_coord((width, height), (newx, newy), point, deg)
if x > neww - 1 or y > newh - 1:
adjust_joint.append((-1000, -1000))
continue
if x < 0 or y < 0:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((x, y))
adjust_joint_list.append(adjust_joint)
joint_list = adjust_joint_list
if mask is not None:
msk = mask
center = (msk.shape[1] * 0.5, msk.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if msk.ndim == 3 and msk.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
msk = ret[newy:newy + newh, newx:newx + neww]
return img, joint_list, msk
else:
return img, joint_list, None | def function[keypoint_random_rotate, parameter[image, annos, mask, rg]]:
constant[Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask
]
def function[_rotate_coord, parameter[shape, newxy, point, angle]]:
variable[angle] assign[=] binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da2045661a0> * name[angle]] / constant[180.0]] * name[math].pi]
<ast.Tuple object at 0x7da2045671f0> assign[=] name[shape]
<ast.Tuple object at 0x7da2045651e0> assign[=] name[point]
<ast.AugAssign object at 0x7da204567ca0>
<ast.AugAssign object at 0x7da204567790>
variable[qx] assign[=] binary_operation[binary_operation[call[name[math].cos, parameter[name[angle]]] * binary_operation[name[px] - name[ox]]] - binary_operation[call[name[math].sin, parameter[name[angle]]] * binary_operation[name[py] - name[oy]]]]
variable[qy] assign[=] binary_operation[binary_operation[call[name[math].sin, parameter[name[angle]]] * binary_operation[name[px] - name[ox]]] + binary_operation[call[name[math].cos, parameter[name[angle]]] * binary_operation[name[py] - name[oy]]]]
<ast.Tuple object at 0x7da204566080> assign[=] name[newxy]
<ast.AugAssign object at 0x7da204567010>
<ast.AugAssign object at 0x7da204567be0>
return[tuple[[<ast.Call object at 0x7da204564070>, <ast.Call object at 0x7da204565ae0>]]]
def function[_largest_rotated_rect, parameter[w, h, angle]]:
constant[
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
]
variable[angle] assign[=] binary_operation[binary_operation[name[angle] / constant[180.0]] * name[math].pi]
if <ast.BoolOp object at 0x7da2045644f0> begin[:]
return[tuple[[<ast.Constant object at 0x7da204567e50>, <ast.Constant object at 0x7da2045652d0>]]]
variable[width_is_longer] assign[=] compare[name[w] greater_or_equal[>=] name[h]]
<ast.Tuple object at 0x7da2045678b0> assign[=] <ast.IfExp object at 0x7da204566590>
<ast.Tuple object at 0x7da204564370> assign[=] tuple[[<ast.Call object at 0x7da204566290>, <ast.Call object at 0x7da204566bf0>]]
if compare[name[side_short] less_or_equal[<=] binary_operation[binary_operation[binary_operation[constant[2.0] * name[sin_a]] * name[cos_a]] * name[side_long]]] begin[:]
variable[x] assign[=] binary_operation[constant[0.5] * name[side_short]]
<ast.Tuple object at 0x7da204564f10> assign[=] <ast.IfExp object at 0x7da204564e80>
return[tuple[[<ast.Call object at 0x7da204564d90>, <ast.Call object at 0x7da204567e20>]]]
variable[img_shape] assign[=] call[name[np].shape, parameter[name[image]]]
variable[height] assign[=] call[name[img_shape]][constant[0]]
variable[width] assign[=] call[name[img_shape]][constant[1]]
variable[deg] assign[=] call[name[np].random.uniform, parameter[<ast.UnaryOp object at 0x7da204566b00>, name[rg]]]
variable[img] assign[=] name[image]
variable[center] assign[=] tuple[[<ast.BinOp object at 0x7da204566b60>, <ast.BinOp object at 0x7da204564af0>]]
variable[rot_m] assign[=] call[name[cv2].getRotationMatrix2D, parameter[tuple[[<ast.Call object at 0x7da204567a90>, <ast.Call object at 0x7da204567d30>]], name[deg], constant[1]]]
variable[ret] assign[=] call[name[cv2].warpAffine, parameter[name[img], name[rot_m], call[name[img].shape][<ast.Slice object at 0x7da204565120>]]]
if <ast.BoolOp object at 0x7da20c7966e0> begin[:]
variable[ret] assign[=] call[name[ret]][tuple[[<ast.Slice object at 0x7da20c794bb0>, <ast.Slice object at 0x7da20c795060>, <ast.Attribute object at 0x7da20c794070>]]]
<ast.Tuple object at 0x7da20c7945e0> assign[=] call[name[_largest_rotated_rect], parameter[call[name[ret].shape][constant[1]], call[name[ret].shape][constant[0]], name[deg]]]
variable[neww] assign[=] call[name[min], parameter[name[neww], call[name[ret].shape][constant[1]]]]
variable[newh] assign[=] call[name[min], parameter[name[newh], call[name[ret].shape][constant[0]]]]
variable[newx] assign[=] call[name[int], parameter[binary_operation[call[name[center]][constant[0]] - binary_operation[name[neww] * constant[0.5]]]]]
variable[newy] assign[=] call[name[int], parameter[binary_operation[call[name[center]][constant[1]] - binary_operation[name[newh] * constant[0.5]]]]]
variable[img] assign[=] call[name[ret]][tuple[[<ast.Slice object at 0x7da20c795f90>, <ast.Slice object at 0x7da20c7957e0>]]]
variable[adjust_joint_list] assign[=] list[[]]
for taget[name[joint]] in starred[name[annos]] begin[:]
variable[adjust_joint] assign[=] list[[]]
for taget[name[point]] in starred[name[joint]] begin[:]
if <ast.BoolOp object at 0x7da20c795960> begin[:]
call[name[adjust_joint].append, parameter[tuple[[<ast.UnaryOp object at 0x7da20c796b00>, <ast.UnaryOp object at 0x7da20c794e80>]]]]
continue
<ast.Tuple object at 0x7da20c7946d0> assign[=] call[name[_rotate_coord], parameter[tuple[[<ast.Name object at 0x7da20c795a80>, <ast.Name object at 0x7da20c796860>]], tuple[[<ast.Name object at 0x7da20c795120>, <ast.Name object at 0x7da20c796710>]], name[point], name[deg]]]
if <ast.BoolOp object at 0x7da20c794700> begin[:]
call[name[adjust_joint].append, parameter[tuple[[<ast.UnaryOp object at 0x7da20c796680>, <ast.UnaryOp object at 0x7da20c795930>]]]]
continue
if <ast.BoolOp object at 0x7da20c794ee0> begin[:]
call[name[adjust_joint].append, parameter[tuple[[<ast.UnaryOp object at 0x7da1b002a2c0>, <ast.UnaryOp object at 0x7da1b002bbb0>]]]]
continue
call[name[adjust_joint].append, parameter[tuple[[<ast.Name object at 0x7da1b0029360>, <ast.Name object at 0x7da1b00294b0>]]]]
call[name[adjust_joint_list].append, parameter[name[adjust_joint]]]
variable[joint_list] assign[=] name[adjust_joint_list]
if compare[name[mask] is_not constant[None]] begin[:]
variable[msk] assign[=] name[mask]
variable[center] assign[=] tuple[[<ast.BinOp object at 0x7da1b0029d20>, <ast.BinOp object at 0x7da1b00294e0>]]
variable[rot_m] assign[=] call[name[cv2].getRotationMatrix2D, parameter[tuple[[<ast.Call object at 0x7da1b002b550>, <ast.Call object at 0x7da1b0029450>]], name[deg], constant[1]]]
variable[ret] assign[=] call[name[cv2].warpAffine, parameter[name[msk], name[rot_m], call[name[msk].shape][<ast.Slice object at 0x7da1b0029480>]]]
if <ast.BoolOp object at 0x7da1b002b5b0> begin[:]
variable[ret] assign[=] call[name[ret]][tuple[[<ast.Slice object at 0x7da1b0028d60>, <ast.Slice object at 0x7da1b002a7d0>, <ast.Attribute object at 0x7da1b002a740>]]]
<ast.Tuple object at 0x7da1b002bd30> assign[=] call[name[_largest_rotated_rect], parameter[call[name[ret].shape][constant[1]], call[name[ret].shape][constant[0]], name[deg]]]
variable[neww] assign[=] call[name[min], parameter[name[neww], call[name[ret].shape][constant[1]]]]
variable[newh] assign[=] call[name[min], parameter[name[newh], call[name[ret].shape][constant[0]]]]
variable[newx] assign[=] call[name[int], parameter[binary_operation[call[name[center]][constant[0]] - binary_operation[name[neww] * constant[0.5]]]]]
variable[newy] assign[=] call[name[int], parameter[binary_operation[call[name[center]][constant[1]] - binary_operation[name[newh] * constant[0.5]]]]]
variable[msk] assign[=] call[name[ret]][tuple[[<ast.Slice object at 0x7da18bc735b0>, <ast.Slice object at 0x7da18bc72770>]]]
return[tuple[[<ast.Name object at 0x7da18bc70b20>, <ast.Name object at 0x7da18bc73490>, <ast.Name object at 0x7da18bc71f30>]]] | keyword[def] identifier[keypoint_random_rotate] ( identifier[image] , identifier[annos] , identifier[mask] = keyword[None] , identifier[rg] = literal[int] ):
literal[string]
keyword[def] identifier[_rotate_coord] ( identifier[shape] , identifier[newxy] , identifier[point] , identifier[angle] ):
identifier[angle] =- literal[int] * identifier[angle] / literal[int] * identifier[math] . identifier[pi]
identifier[ox] , identifier[oy] = identifier[shape]
identifier[px] , identifier[py] = identifier[point]
identifier[ox] /= literal[int]
identifier[oy] /= literal[int]
identifier[qx] = identifier[math] . identifier[cos] ( identifier[angle] )*( identifier[px] - identifier[ox] )- identifier[math] . identifier[sin] ( identifier[angle] )*( identifier[py] - identifier[oy] )
identifier[qy] = identifier[math] . identifier[sin] ( identifier[angle] )*( identifier[px] - identifier[ox] )+ identifier[math] . identifier[cos] ( identifier[angle] )*( identifier[py] - identifier[oy] )
identifier[new_x] , identifier[new_y] = identifier[newxy]
identifier[qx] += identifier[ox] - identifier[new_x]
identifier[qy] += identifier[oy] - identifier[new_y]
keyword[return] identifier[int] ( identifier[qx] + literal[int] ), identifier[int] ( identifier[qy] + literal[int] )
keyword[def] identifier[_largest_rotated_rect] ( identifier[w] , identifier[h] , identifier[angle] ):
literal[string]
identifier[angle] = identifier[angle] / literal[int] * identifier[math] . identifier[pi]
keyword[if] identifier[w] <= literal[int] keyword[or] identifier[h] <= literal[int] :
keyword[return] literal[int] , literal[int]
identifier[width_is_longer] = identifier[w] >= identifier[h]
identifier[side_long] , identifier[side_short] =( identifier[w] , identifier[h] ) keyword[if] identifier[width_is_longer] keyword[else] ( identifier[h] , identifier[w] )
identifier[sin_a] , identifier[cos_a] = identifier[abs] ( identifier[math] . identifier[sin] ( identifier[angle] )), identifier[abs] ( identifier[math] . identifier[cos] ( identifier[angle] ))
keyword[if] identifier[side_short] <= literal[int] * identifier[sin_a] * identifier[cos_a] * identifier[side_long] :
identifier[x] = literal[int] * identifier[side_short]
identifier[wr] , identifier[hr] =( identifier[x] / identifier[sin_a] , identifier[x] / identifier[cos_a] ) keyword[if] identifier[width_is_longer] keyword[else] ( identifier[x] / identifier[cos_a] , identifier[x] / identifier[sin_a] )
keyword[else] :
identifier[cos_2a] = identifier[cos_a] * identifier[cos_a] - identifier[sin_a] * identifier[sin_a]
identifier[wr] , identifier[hr] =( identifier[w] * identifier[cos_a] - identifier[h] * identifier[sin_a] )/ identifier[cos_2a] ,( identifier[h] * identifier[cos_a] - identifier[w] * identifier[sin_a] )/ identifier[cos_2a]
keyword[return] identifier[int] ( identifier[np] . identifier[round] ( identifier[wr] )), identifier[int] ( identifier[np] . identifier[round] ( identifier[hr] ))
identifier[img_shape] = identifier[np] . identifier[shape] ( identifier[image] )
identifier[height] = identifier[img_shape] [ literal[int] ]
identifier[width] = identifier[img_shape] [ literal[int] ]
identifier[deg] = identifier[np] . identifier[random] . identifier[uniform] (- identifier[rg] , identifier[rg] )
identifier[img] = identifier[image]
identifier[center] =( identifier[img] . identifier[shape] [ literal[int] ]* literal[int] , identifier[img] . identifier[shape] [ literal[int] ]* literal[int] )
identifier[rot_m] = identifier[cv2] . identifier[getRotationMatrix2D] (( identifier[int] ( identifier[center] [ literal[int] ]), identifier[int] ( identifier[center] [ literal[int] ])), identifier[deg] , literal[int] )
identifier[ret] = identifier[cv2] . identifier[warpAffine] ( identifier[img] , identifier[rot_m] , identifier[img] . identifier[shape] [ literal[int] ::- literal[int] ], identifier[flags] = identifier[cv2] . identifier[INTER_AREA] , identifier[borderMode] = identifier[cv2] . identifier[BORDER_CONSTANT] )
keyword[if] identifier[img] . identifier[ndim] == literal[int] keyword[and] identifier[ret] . identifier[ndim] == literal[int] :
identifier[ret] = identifier[ret] [:,:, identifier[np] . identifier[newaxis] ]
identifier[neww] , identifier[newh] = identifier[_largest_rotated_rect] ( identifier[ret] . identifier[shape] [ literal[int] ], identifier[ret] . identifier[shape] [ literal[int] ], identifier[deg] )
identifier[neww] = identifier[min] ( identifier[neww] , identifier[ret] . identifier[shape] [ literal[int] ])
identifier[newh] = identifier[min] ( identifier[newh] , identifier[ret] . identifier[shape] [ literal[int] ])
identifier[newx] = identifier[int] ( identifier[center] [ literal[int] ]- identifier[neww] * literal[int] )
identifier[newy] = identifier[int] ( identifier[center] [ literal[int] ]- identifier[newh] * literal[int] )
identifier[img] = identifier[ret] [ identifier[newy] : identifier[newy] + identifier[newh] , identifier[newx] : identifier[newx] + identifier[neww] ]
identifier[adjust_joint_list] =[]
keyword[for] identifier[joint] keyword[in] identifier[annos] :
identifier[adjust_joint] =[]
keyword[for] identifier[point] keyword[in] identifier[joint] :
keyword[if] identifier[point] [ literal[int] ]<- literal[int] keyword[or] identifier[point] [ literal[int] ]<- literal[int] :
identifier[adjust_joint] . identifier[append] ((- literal[int] ,- literal[int] ))
keyword[continue]
identifier[x] , identifier[y] = identifier[_rotate_coord] (( identifier[width] , identifier[height] ),( identifier[newx] , identifier[newy] ), identifier[point] , identifier[deg] )
keyword[if] identifier[x] > identifier[neww] - literal[int] keyword[or] identifier[y] > identifier[newh] - literal[int] :
identifier[adjust_joint] . identifier[append] ((- literal[int] ,- literal[int] ))
keyword[continue]
keyword[if] identifier[x] < literal[int] keyword[or] identifier[y] < literal[int] :
identifier[adjust_joint] . identifier[append] ((- literal[int] ,- literal[int] ))
keyword[continue]
identifier[adjust_joint] . identifier[append] (( identifier[x] , identifier[y] ))
identifier[adjust_joint_list] . identifier[append] ( identifier[adjust_joint] )
identifier[joint_list] = identifier[adjust_joint_list]
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[msk] = identifier[mask]
identifier[center] =( identifier[msk] . identifier[shape] [ literal[int] ]* literal[int] , identifier[msk] . identifier[shape] [ literal[int] ]* literal[int] )
identifier[rot_m] = identifier[cv2] . identifier[getRotationMatrix2D] (( identifier[int] ( identifier[center] [ literal[int] ]), identifier[int] ( identifier[center] [ literal[int] ])), identifier[deg] , literal[int] )
identifier[ret] = identifier[cv2] . identifier[warpAffine] ( identifier[msk] , identifier[rot_m] , identifier[msk] . identifier[shape] [ literal[int] ::- literal[int] ], identifier[flags] = identifier[cv2] . identifier[INTER_AREA] , identifier[borderMode] = identifier[cv2] . identifier[BORDER_CONSTANT] )
keyword[if] identifier[msk] . identifier[ndim] == literal[int] keyword[and] identifier[msk] . identifier[ndim] == literal[int] :
identifier[ret] = identifier[ret] [:,:, identifier[np] . identifier[newaxis] ]
identifier[neww] , identifier[newh] = identifier[_largest_rotated_rect] ( identifier[ret] . identifier[shape] [ literal[int] ], identifier[ret] . identifier[shape] [ literal[int] ], identifier[deg] )
identifier[neww] = identifier[min] ( identifier[neww] , identifier[ret] . identifier[shape] [ literal[int] ])
identifier[newh] = identifier[min] ( identifier[newh] , identifier[ret] . identifier[shape] [ literal[int] ])
identifier[newx] = identifier[int] ( identifier[center] [ literal[int] ]- identifier[neww] * literal[int] )
identifier[newy] = identifier[int] ( identifier[center] [ literal[int] ]- identifier[newh] * literal[int] )
identifier[msk] = identifier[ret] [ identifier[newy] : identifier[newy] + identifier[newh] , identifier[newx] : identifier[newx] + identifier[neww] ]
keyword[return] identifier[img] , identifier[joint_list] , identifier[msk]
keyword[else] :
keyword[return] identifier[img] , identifier[joint_list] , keyword[None] | def keypoint_random_rotate(image, annos, mask=None, rg=15.0):
"""Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask
"""
def _rotate_coord(shape, newxy, point, angle):
angle = -1 * angle / 180.0 * math.pi
(ox, oy) = shape
(px, py) = point
ox /= 2
oy /= 2
qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
(new_x, new_y) = newxy
qx += ox - new_x
qy += oy - new_y
return (int(qx + 0.5), int(qy + 0.5))
def _largest_rotated_rect(w, h, angle):
"""
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
angle = angle / 180.0 * math.pi
if w <= 0 or h <= 0:
return (0, 0) # depends on [control=['if'], data=[]]
width_is_longer = w >= h
(side_long, side_short) = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# if suffices to look at the first quadrant and the absolute values of sin,cos:
(sin_a, cos_a) = (abs(math.sin(angle)), abs(math.cos(angle)))
if side_short <= 2.0 * sin_a * cos_a * side_long:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5 * side_short
(wr, hr) = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) # depends on [control=['if'], data=['side_short']]
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
(wr, hr) = ((w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a)
return (int(np.round(wr)), int(np.round(hr)))
img_shape = np.shape(image)
height = img_shape[0]
width = img_shape[1]
deg = np.random.uniform(-rg, rg)
img = image
center = (img.shape[1] * 0.5, img.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis] # depends on [control=['if'], data=[]]
(neww, newh) = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
img = ret[newy:newy + newh, newx:newx + neww]
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue # depends on [control=['if'], data=[]]
(x, y) = _rotate_coord((width, height), (newx, newy), point, deg)
if x > neww - 1 or y > newh - 1:
adjust_joint.append((-1000, -1000))
continue # depends on [control=['if'], data=[]]
if x < 0 or y < 0:
adjust_joint.append((-1000, -1000))
continue # depends on [control=['if'], data=[]]
adjust_joint.append((x, y)) # depends on [control=['for'], data=['point']]
adjust_joint_list.append(adjust_joint) # depends on [control=['for'], data=['joint']]
joint_list = adjust_joint_list
if mask is not None:
msk = mask
center = (msk.shape[1] * 0.5, msk.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if msk.ndim == 3 and msk.ndim == 2:
ret = ret[:, :, np.newaxis] # depends on [control=['if'], data=[]]
(neww, newh) = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
msk = ret[newy:newy + newh, newx:newx + neww]
return (img, joint_list, msk) # depends on [control=['if'], data=['mask']]
else:
return (img, joint_list, None) |
def agent(cls, version=None):
    """Return a connection object used to make REST calls to QDS.

    Optionally override the `version` of the REST endpoint for advanced
    features available only in the newer version of the API available
    for certain resource end points eg: /v1.3/cluster. When version is
    None we default to v1.2.

    A connection built for an overridden version is never cached; the
    default-version connection is created once and reused.
    """
    # An explicit version bypasses the cached default-version agent.
    use_cache = not version
    if use_cache:
        endpoint_version = cls.version
    else:
        log.debug("api version changed to %s" % version)
        endpoint_version = version
    cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), endpoint_version])

    if cls.api_token is None:
        raise ConfigError("No API Token specified - please supply one via Qubole.configure()")

    if not use_cache:
        # One-off connection for the overridden API version.
        return Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)

    # Lazily build and reuse the default-version connection.
    if cls.cached_agent is None:
        cls.cached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
    return cls.cached_agent
constant[
Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2
]
variable[reuse_cached_agent] assign[=] constant[True]
if name[version] begin[:]
call[name[log].debug, parameter[binary_operation[constant[api version changed to %s] <ast.Mod object at 0x7da2590d6920> name[version]]]]
name[cls].rest_url assign[=] call[constant[/].join, parameter[list[[<ast.Call object at 0x7da20c6c6d10>, <ast.Name object at 0x7da20c6c67d0>]]]]
variable[reuse_cached_agent] assign[=] constant[False]
if compare[name[cls].api_token is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6c7a60>
if <ast.UnaryOp object at 0x7da20c6c4c40> begin[:]
variable[uncached_agent] assign[=] call[name[Connection], parameter[name[cls]._auth, name[cls].rest_url, name[cls].skip_ssl_cert_check]]
return[name[uncached_agent]]
if compare[name[cls].cached_agent is constant[None]] begin[:]
name[cls].cached_agent assign[=] call[name[Connection], parameter[name[cls]._auth, name[cls].rest_url, name[cls].skip_ssl_cert_check]]
return[name[cls].cached_agent] | keyword[def] identifier[agent] ( identifier[cls] , identifier[version] = keyword[None] ):
literal[string]
identifier[reuse_cached_agent] = keyword[True]
keyword[if] identifier[version] :
identifier[log] . identifier[debug] ( literal[string] % identifier[version] )
identifier[cls] . identifier[rest_url] = literal[string] . identifier[join] ([ identifier[cls] . identifier[baseurl] . identifier[rstrip] ( literal[string] ), identifier[version] ])
identifier[reuse_cached_agent] = keyword[False]
keyword[else] :
identifier[cls] . identifier[rest_url] = literal[string] . identifier[join] ([ identifier[cls] . identifier[baseurl] . identifier[rstrip] ( literal[string] ), identifier[cls] . identifier[version] ])
keyword[if] identifier[cls] . identifier[api_token] keyword[is] keyword[None] :
keyword[raise] identifier[ConfigError] ( literal[string] )
keyword[if] keyword[not] identifier[reuse_cached_agent] :
identifier[uncached_agent] = identifier[Connection] ( identifier[cls] . identifier[_auth] , identifier[cls] . identifier[rest_url] , identifier[cls] . identifier[skip_ssl_cert_check] )
keyword[return] identifier[uncached_agent]
keyword[if] identifier[cls] . identifier[cached_agent] keyword[is] keyword[None] :
identifier[cls] . identifier[cached_agent] = identifier[Connection] ( identifier[cls] . identifier[_auth] , identifier[cls] . identifier[rest_url] , identifier[cls] . identifier[skip_ssl_cert_check] )
keyword[return] identifier[cls] . identifier[cached_agent] | def agent(cls, version=None):
"""
Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2
"""
reuse_cached_agent = True
if version:
log.debug('api version changed to %s' % version)
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), version])
reuse_cached_agent = False # depends on [control=['if'], data=[]]
else:
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), cls.version])
if cls.api_token is None:
raise ConfigError('No API Token specified - please supply one via Qubole.configure()') # depends on [control=['if'], data=[]]
if not reuse_cached_agent:
uncached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
return uncached_agent # depends on [control=['if'], data=[]]
if cls.cached_agent is None:
cls.cached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check) # depends on [control=['if'], data=[]]
return cls.cached_agent |
def search_for_devices_by_serial_number(self, sn):
    """
    Returns a list of device objects that match the serial number
    in param 'sn'.

    This will match partial serial numbers.
    """
    import re
    pattern = re.compile(sn)

    found = []
    for device in self.get_all_devices_in_portal():
        # pattern.match() anchors at the start, so 'sn' may be a prefix
        # (or any regex) of the device serial number.
        try:
            if pattern.match(device['sn']):
                found.append(device)
        except TypeError as err:
            # Non-string serial (e.g. None): report and keep scanning.
            print("Problem checking device {!r}: {!r}".format(
                device['info']['description']['name'],
                str(err)))
    return found
constant[
Returns a list of device objects that match the serial number
in param 'sn'.
This will match partial serial numbers.
]
import module[re]
variable[sn_search] assign[=] call[name[re].compile, parameter[name[sn]]]
variable[matches] assign[=] list[[]]
for taget[name[dev_o]] in starred[call[name[self].get_all_devices_in_portal, parameter[]]] begin[:]
<ast.Try object at 0x7da2047ebaf0>
return[name[matches]] | keyword[def] identifier[search_for_devices_by_serial_number] ( identifier[self] , identifier[sn] ):
literal[string]
keyword[import] identifier[re]
identifier[sn_search] = identifier[re] . identifier[compile] ( identifier[sn] )
identifier[matches] =[]
keyword[for] identifier[dev_o] keyword[in] identifier[self] . identifier[get_all_devices_in_portal] ():
keyword[try] :
keyword[if] identifier[sn_search] . identifier[match] ( identifier[dev_o] [ literal[string] ]):
identifier[matches] . identifier[append] ( identifier[dev_o] )
keyword[except] identifier[TypeError] keyword[as] identifier[err] :
identifier[print] ( literal[string] . identifier[format] (
identifier[dev_o] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[str] ( identifier[err] )))
keyword[return] identifier[matches] | def search_for_devices_by_serial_number(self, sn):
"""
Returns a list of device objects that match the serial number
in param 'sn'.
This will match partial serial numbers.
"""
import re
sn_search = re.compile(sn)
matches = []
for dev_o in self.get_all_devices_in_portal():
# print("Checking {0}".format(dev_o['sn']))
try:
if sn_search.match(dev_o['sn']):
matches.append(dev_o) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError as err:
print('Problem checking device {!r}: {!r}'.format(dev_o['info']['description']['name'], str(err))) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['dev_o']]
return matches |
def _get_segment(self, start, request_size, check_response=True):
  """Get a segment of the file from Google Storage.

  This is an ndb tasklet: it yields an async RPC future and returns its
  result(s) by raising ndb.Return.

  Args:
    start: start offset of the segment. Inclusive. Have to be within the
      range of the file.
    request_size: number of bytes to request. Have to be small enough
      for a single urlfetch request. May go over the logical range of the
      file.
    check_response: True to check the validity of GCS response automatically
      before the future returns. False otherwise. See Yields section.

  Yields:
    If check_response is True, the segment [start, start + request_size)
    of the file.
    Otherwise, a tuple. The first element is the unverified file segment.
    The second element is a closure that checks response. Caller should
    first invoke the closure before consuming the file segment.

  Raises:
    ValueError: if the file has changed while reading.
  """
  # HTTP Range headers are inclusive on both ends, hence the -1.
  end = start + request_size - 1
  content_range = '%d-%d' % (start, end)
  headers = {'Range': 'bytes=' + content_range}
  # Tasklet protocol: yield the async call, resume with its result tuple.
  status, resp_headers, content = yield self._api.get_object_async(
      self._path, headers=headers)
  def _checker():
    # 200 = full object returned, 206 = partial content for the Range request.
    errors.check_status(status, [200, 206], self._path, headers,
                        resp_headers, body=content)
    # Detect concurrent modification: etag must match the one seen earlier.
    self._check_etag(resp_headers.get('etag'))
  if check_response:
    _checker()
    # ndb tasklets deliver their return value via raise ndb.Return(...).
    raise ndb.Return(content)
  raise ndb.Return(content, _checker)
constant[Get a segment of the file from Google Storage.
Args:
start: start offset of the segment. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request. Have to be small enough
for a single urlfetch request. May go over the logical range of the
file.
check_response: True to check the validity of GCS response automatically
before the future returns. False otherwise. See Yields section.
Yields:
If check_response is True, the segment [start, start + request_size)
of the file.
Otherwise, a tuple. The first element is the unverified file segment.
The second element is a closure that checks response. Caller should
first invoke the closure before consuing the file segment.
Raises:
ValueError: if the file has changed while reading.
]
variable[end] assign[=] binary_operation[binary_operation[name[start] + name[request_size]] - constant[1]]
variable[content_range] assign[=] binary_operation[constant[%d-%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0776c50>, <ast.Name object at 0x7da1b0774a30>]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0774af0>], [<ast.BinOp object at 0x7da1b0774b80>]]
<ast.Tuple object at 0x7da1b0775810> assign[=] <ast.Yield object at 0x7da1b0774cd0>
def function[_checker, parameter[]]:
call[name[errors].check_status, parameter[name[status], list[[<ast.Constant object at 0x7da1b0781150>, <ast.Constant object at 0x7da1b0781210>]], name[self]._path, name[headers], name[resp_headers]]]
call[name[self]._check_etag, parameter[call[name[resp_headers].get, parameter[constant[etag]]]]]
if name[check_response] begin[:]
call[name[_checker], parameter[]]
<ast.Raise object at 0x7da1b0781f90>
<ast.Raise object at 0x7da1b0780640> | keyword[def] identifier[_get_segment] ( identifier[self] , identifier[start] , identifier[request_size] , identifier[check_response] = keyword[True] ):
literal[string]
identifier[end] = identifier[start] + identifier[request_size] - literal[int]
identifier[content_range] = literal[string] %( identifier[start] , identifier[end] )
identifier[headers] ={ literal[string] : literal[string] + identifier[content_range] }
identifier[status] , identifier[resp_headers] , identifier[content] = keyword[yield] identifier[self] . identifier[_api] . identifier[get_object_async] (
identifier[self] . identifier[_path] , identifier[headers] = identifier[headers] )
keyword[def] identifier[_checker] ():
identifier[errors] . identifier[check_status] ( identifier[status] ,[ literal[int] , literal[int] ], identifier[self] . identifier[_path] , identifier[headers] ,
identifier[resp_headers] , identifier[body] = identifier[content] )
identifier[self] . identifier[_check_etag] ( identifier[resp_headers] . identifier[get] ( literal[string] ))
keyword[if] identifier[check_response] :
identifier[_checker] ()
keyword[raise] identifier[ndb] . identifier[Return] ( identifier[content] )
keyword[raise] identifier[ndb] . identifier[Return] ( identifier[content] , identifier[_checker] ) | def _get_segment(self, start, request_size, check_response=True):
"""Get a segment of the file from Google Storage.
Args:
start: start offset of the segment. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request. Have to be small enough
for a single urlfetch request. May go over the logical range of the
file.
check_response: True to check the validity of GCS response automatically
before the future returns. False otherwise. See Yields section.
Yields:
If check_response is True, the segment [start, start + request_size)
of the file.
Otherwise, a tuple. The first element is the unverified file segment.
The second element is a closure that checks response. Caller should
first invoke the closure before consuing the file segment.
Raises:
ValueError: if the file has changed while reading.
"""
end = start + request_size - 1
content_range = '%d-%d' % (start, end)
headers = {'Range': 'bytes=' + content_range}
(status, resp_headers, content) = (yield self._api.get_object_async(self._path, headers=headers))
def _checker():
errors.check_status(status, [200, 206], self._path, headers, resp_headers, body=content)
self._check_etag(resp_headers.get('etag'))
if check_response:
_checker()
raise ndb.Return(content) # depends on [control=['if'], data=[]]
raise ndb.Return(content, _checker) |
def set_user_agent_component(self, key, value, sanitize=True):
"""Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
- key and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
Note: When you set sanitize=false yuu need to make sure the formatting is correct yourself.
"""
if sanitize:
key = ''.join(_x.capitalize() for _x in re.findall(r'\S+', key))
if re.search(r'\s+', value):
value = '_'.join(re.findall(r'\S+', value))
self.user_agent_components[key] = value | def function[set_user_agent_component, parameter[self, key, value, sanitize]]:
constant[Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
- key and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
Note: When you set sanitize=false yuu need to make sure the formatting is correct yourself.
]
if name[sanitize] begin[:]
variable[key] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da204620130>]]
if call[name[re].search, parameter[constant[\s+], name[value]]] begin[:]
variable[value] assign[=] call[constant[_].join, parameter[call[name[re].findall, parameter[constant[\S+], name[value]]]]]
call[name[self].user_agent_components][name[key]] assign[=] name[value] | keyword[def] identifier[set_user_agent_component] ( identifier[self] , identifier[key] , identifier[value] , identifier[sanitize] = keyword[True] ):
literal[string]
keyword[if] identifier[sanitize] :
identifier[key] = literal[string] . identifier[join] ( identifier[_x] . identifier[capitalize] () keyword[for] identifier[_x] keyword[in] identifier[re] . identifier[findall] ( literal[string] , identifier[key] ))
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[value] ):
identifier[value] = literal[string] . identifier[join] ( identifier[re] . identifier[findall] ( literal[string] , identifier[value] ))
identifier[self] . identifier[user_agent_components] [ identifier[key] ]= identifier[value] | def set_user_agent_component(self, key, value, sanitize=True):
"""Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
- key and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
Note: When you set sanitize=false yuu need to make sure the formatting is correct yourself.
"""
if sanitize:
key = ''.join((_x.capitalize() for _x in re.findall('\\S+', key)))
if re.search('\\s+', value):
value = '_'.join(re.findall('\\S+', value)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.user_agent_components[key] = value |
def add_user_to_groups(self, user_id, body, **kwargs): # noqa: E501
"""Add user to a list of groups. # noqa: E501
An endpoint for adding user to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_user_to_groups(user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be added to the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_user_to_groups_with_http_info(user_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_user_to_groups_with_http_info(user_id, body, **kwargs) # noqa: E501
return data | def function[add_user_to_groups, parameter[self, user_id, body]]:
constant[Add user to a list of groups. # noqa: E501
An endpoint for adding user to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_user_to_groups(user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be added to the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].add_user_to_groups_with_http_info, parameter[name[user_id], name[body]]]] | keyword[def] identifier[add_user_to_groups] ( identifier[self] , identifier[user_id] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[add_user_to_groups_with_http_info] ( identifier[user_id] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[add_user_to_groups_with_http_info] ( identifier[user_id] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def add_user_to_groups(self, user_id, body, **kwargs): # noqa: E501
"Add user to a list of groups. # noqa: E501\n\n An endpoint for adding user to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.add_user_to_groups(user_id, body, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str user_id: The ID of the user to be added to the group. (required)\n :param list[str] body: A list of IDs of the groups to be updated. (required)\n :return: UpdatedResponse\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_user_to_groups_with_http_info(user_id, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.add_user_to_groups_with_http_info(user_id, body, **kwargs) # noqa: E501
return data |
def gp_rapp():
"""rho in-medium ratios by Rapp (based on protected data)"""
inDir, outDir = getWorkDirs()
# prepare data
yields = {}
for infile in os.listdir(inDir):
energy = re.compile('\d+').search(infile).group()
medium = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
getMassRangesSums(energy, medium, yields)
data = dict( # sort by energy
(k, np.array(sorted(v)))
for k, v in yields.iteritems()
)
for k in data: data[k][:,1] /= data[k][-1,1] # divide by 200
# make plot
nSets = len(data)
make_plot(
data = [ data[k][:-1] for k in data ],
properties = [
'with linespoints lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i]
for i in xrange(nSets)
],
titles = data.keys(), # TODO: titles order correct?
name = os.path.join(outDir, 'rapp'),
xlabel = '{/Symbol \326}s_{NN} (GeV)', ylabel = 'Rapp Ratio to 200 GeV',
lmargin = 0.1, key = ['left'], yr = [0.1, 0.8]
)
return 'done' | def function[gp_rapp, parameter[]]:
constant[rho in-medium ratios by Rapp (based on protected data)]
<ast.Tuple object at 0x7da18f09dbd0> assign[=] call[name[getWorkDirs], parameter[]]
variable[yields] assign[=] dictionary[[], []]
for taget[name[infile]] in starred[call[name[os].listdir, parameter[name[inDir]]]] begin[:]
variable[energy] assign[=] call[call[call[name[re].compile, parameter[constant[\d+]]].search, parameter[name[infile]]].group, parameter[]]
variable[medium] assign[=] call[name[np].loadtxt, parameter[call[name[open], parameter[call[name[os].path.join, parameter[name[inDir], name[infile]]], constant[rb]]]]]
call[name[getMassRangesSums], parameter[name[energy], name[medium], name[yields]]]
variable[data] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b15e5f00>]]
for taget[name[k]] in starred[name[data]] begin[:]
<ast.AugAssign object at 0x7da2043475e0>
variable[nSets] assign[=] call[name[len], parameter[name[data]]]
call[name[make_plot], parameter[]]
return[constant[done]] | keyword[def] identifier[gp_rapp] ():
literal[string]
identifier[inDir] , identifier[outDir] = identifier[getWorkDirs] ()
identifier[yields] ={}
keyword[for] identifier[infile] keyword[in] identifier[os] . identifier[listdir] ( identifier[inDir] ):
identifier[energy] = identifier[re] . identifier[compile] ( literal[string] ). identifier[search] ( identifier[infile] ). identifier[group] ()
identifier[medium] = identifier[np] . identifier[loadtxt] ( identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[inDir] , identifier[infile] ), literal[string] ))
identifier[getMassRangesSums] ( identifier[energy] , identifier[medium] , identifier[yields] )
identifier[data] = identifier[dict] (
( identifier[k] , identifier[np] . identifier[array] ( identifier[sorted] ( identifier[v] )))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[yields] . identifier[iteritems] ()
)
keyword[for] identifier[k] keyword[in] identifier[data] : identifier[data] [ identifier[k] ][:, literal[int] ]/= identifier[data] [ identifier[k] ][- literal[int] , literal[int] ]
identifier[nSets] = identifier[len] ( identifier[data] )
identifier[make_plot] (
identifier[data] =[ identifier[data] [ identifier[k] ][:- literal[int] ] keyword[for] identifier[k] keyword[in] identifier[data] ],
identifier[properties] =[
literal[string] % identifier[default_colors] [ identifier[i] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[nSets] )
],
identifier[titles] = identifier[data] . identifier[keys] (),
identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[outDir] , literal[string] ),
identifier[xlabel] = literal[string] , identifier[ylabel] = literal[string] ,
identifier[lmargin] = literal[int] , identifier[key] =[ literal[string] ], identifier[yr] =[ literal[int] , literal[int] ]
)
keyword[return] literal[string] | def gp_rapp():
"""rho in-medium ratios by Rapp (based on protected data)"""
(inDir, outDir) = getWorkDirs()
# prepare data
yields = {}
for infile in os.listdir(inDir):
energy = re.compile('\\d+').search(infile).group()
medium = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
getMassRangesSums(energy, medium, yields) # depends on [control=['for'], data=['infile']] # sort by energy
data = dict(((k, np.array(sorted(v))) for (k, v) in yields.iteritems()))
for k in data:
data[k][:, 1] /= data[k][-1, 1] # divide by 200 # depends on [control=['for'], data=['k']]
# make plot
nSets = len(data) # TODO: titles order correct?
make_plot(data=[data[k][:-1] for k in data], properties=['with linespoints lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i] for i in xrange(nSets)], titles=data.keys(), name=os.path.join(outDir, 'rapp'), xlabel='{/Symbol Ö}s_{NN} (GeV)', ylabel='Rapp Ratio to 200 GeV', lmargin=0.1, key=['left'], yr=[0.1, 0.8])
return 'done' |
def last(self, name, value):
"""
Records the last calculated measurement value.
Usually this method is used by metrics calculated externally.
:param name: a counter name of Last type.
:param value: a last value to record.
"""
counter = self.get(name, CounterType.LastValue)
counter.last = value
self._update() | def function[last, parameter[self, name, value]]:
constant[
Records the last calculated measurement value.
Usually this method is used by metrics calculated externally.
:param name: a counter name of Last type.
:param value: a last value to record.
]
variable[counter] assign[=] call[name[self].get, parameter[name[name], name[CounterType].LastValue]]
name[counter].last assign[=] name[value]
call[name[self]._update, parameter[]] | keyword[def] identifier[last] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[counter] = identifier[self] . identifier[get] ( identifier[name] , identifier[CounterType] . identifier[LastValue] )
identifier[counter] . identifier[last] = identifier[value]
identifier[self] . identifier[_update] () | def last(self, name, value):
"""
Records the last calculated measurement value.
Usually this method is used by metrics calculated externally.
:param name: a counter name of Last type.
:param value: a last value to record.
"""
counter = self.get(name, CounterType.LastValue)
counter.last = value
self._update() |
def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
WEATHER_XMLNS_PREFIX,
WEATHER_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration). \
encode('utf-8') | def function[to_XML, parameter[self, xml_declaration, xmlns]]:
constant[
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
]
variable[root_node] assign[=] call[name[self]._to_DOM, parameter[]]
if name[xmlns] begin[:]
call[name[xmlutils].annotate_with_XMLNS, parameter[name[root_node], name[WEATHER_XMLNS_PREFIX], name[WEATHER_XMLNS_URL]]]
return[call[call[name[xmlutils].DOM_node_to_XML, parameter[name[root_node], name[xml_declaration]]].encode, parameter[constant[utf-8]]]] | keyword[def] identifier[to_XML] ( identifier[self] , identifier[xml_declaration] = keyword[True] , identifier[xmlns] = keyword[True] ):
literal[string]
identifier[root_node] = identifier[self] . identifier[_to_DOM] ()
keyword[if] identifier[xmlns] :
identifier[xmlutils] . identifier[annotate_with_XMLNS] ( identifier[root_node] ,
identifier[WEATHER_XMLNS_PREFIX] ,
identifier[WEATHER_XMLNS_URL] )
keyword[return] identifier[xmlutils] . identifier[DOM_node_to_XML] ( identifier[root_node] , identifier[xml_declaration] ). identifier[encode] ( literal[string] ) | def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node, WEATHER_XMLNS_PREFIX, WEATHER_XMLNS_URL) # depends on [control=['if'], data=[]]
return xmlutils.DOM_node_to_XML(root_node, xml_declaration).encode('utf-8') |
def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:]) | def function[IsDeletedOrDefault, parameter[clean_lines, linenum]]:
constant[Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
]
variable[open_paren] assign[=] call[call[name[clean_lines].elided][name[linenum]].find, parameter[constant[(]]]
if compare[name[open_paren] less[<] constant[0]] begin[:]
return[constant[False]]
<ast.Tuple object at 0x7da1b1db7e50> assign[=] call[name[CloseExpression], parameter[name[clean_lines], name[linenum], name[open_paren]]]
if compare[name[close_paren] less[<] constant[0]] begin[:]
return[constant[False]]
return[call[name[Match], parameter[constant[\s*=\s*(?:delete|default)\b], call[name[close_line]][<ast.Slice object at 0x7da1b1db67d0>]]]] | keyword[def] identifier[IsDeletedOrDefault] ( identifier[clean_lines] , identifier[linenum] ):
literal[string]
identifier[open_paren] = identifier[clean_lines] . identifier[elided] [ identifier[linenum] ]. identifier[find] ( literal[string] )
keyword[if] identifier[open_paren] < literal[int] :
keyword[return] keyword[False]
( identifier[close_line] , identifier[_] , identifier[close_paren] )= identifier[CloseExpression] (
identifier[clean_lines] , identifier[linenum] , identifier[open_paren] )
keyword[if] identifier[close_paren] < literal[int] :
keyword[return] keyword[False]
keyword[return] identifier[Match] ( literal[string] , identifier[close_line] [ identifier[close_paren] :]) | def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False # depends on [control=['if'], data=[]]
(close_line, _, close_paren) = CloseExpression(clean_lines, linenum, open_paren)
if close_paren < 0:
return False # depends on [control=['if'], data=[]]
return Match('\\s*=\\s*(?:delete|default)\\b', close_line[close_paren:]) |
def _init_compile_patterns(optional_attrs):
"""Compile search patterns for optional attributes if needed."""
attr2cmp = {}
if optional_attrs is None:
return attr2cmp
# "peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
# "blood vessel formation from pre-existing blood vessels" EXACT systematic_synonym []
# "mitochondrial inheritance" EXACT []
# "tricarboxylate transport protein" RELATED [] {comment="WIkipedia:Mitochondrial_carrier"}
if 'synonym' in optional_attrs:
attr2cmp['synonym'] = re.compile(r'"(\S.*\S)" ([A-Z]+) (.*)\[(.*)\](.*)$')
attr2cmp['synonym nt'] = cx.namedtuple("synonym", "text scope typename dbxrefs")
# Wikipedia:Zygotene
# Reactome:REACT_27267 "DHAP from Ery4P and PEP, Mycobacterium tuberculosis"
if 'xref' in optional_attrs:
attr2cmp['xref'] = re.compile(r'^(\S+:\s*\S+)\b(.*)$')
return attr2cmp | def function[_init_compile_patterns, parameter[optional_attrs]]:
constant[Compile search patterns for optional attributes if needed.]
variable[attr2cmp] assign[=] dictionary[[], []]
if compare[name[optional_attrs] is constant[None]] begin[:]
return[name[attr2cmp]]
if compare[constant[synonym] in name[optional_attrs]] begin[:]
call[name[attr2cmp]][constant[synonym]] assign[=] call[name[re].compile, parameter[constant["(\S.*\S)" ([A-Z]+) (.*)\[(.*)\](.*)$]]]
call[name[attr2cmp]][constant[synonym nt]] assign[=] call[name[cx].namedtuple, parameter[constant[synonym], constant[text scope typename dbxrefs]]]
if compare[constant[xref] in name[optional_attrs]] begin[:]
call[name[attr2cmp]][constant[xref]] assign[=] call[name[re].compile, parameter[constant[^(\S+:\s*\S+)\b(.*)$]]]
return[name[attr2cmp]] | keyword[def] identifier[_init_compile_patterns] ( identifier[optional_attrs] ):
literal[string]
identifier[attr2cmp] ={}
keyword[if] identifier[optional_attrs] keyword[is] keyword[None] :
keyword[return] identifier[attr2cmp]
keyword[if] literal[string] keyword[in] identifier[optional_attrs] :
identifier[attr2cmp] [ literal[string] ]= identifier[re] . identifier[compile] ( literal[string] )
identifier[attr2cmp] [ literal[string] ]= identifier[cx] . identifier[namedtuple] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[optional_attrs] :
identifier[attr2cmp] [ literal[string] ]= identifier[re] . identifier[compile] ( literal[string] )
keyword[return] identifier[attr2cmp] | def _init_compile_patterns(optional_attrs):
"""Compile search patterns for optional attributes if needed."""
attr2cmp = {}
if optional_attrs is None:
return attr2cmp # depends on [control=['if'], data=[]]
# "peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
# "blood vessel formation from pre-existing blood vessels" EXACT systematic_synonym []
# "mitochondrial inheritance" EXACT []
# "tricarboxylate transport protein" RELATED [] {comment="WIkipedia:Mitochondrial_carrier"}
if 'synonym' in optional_attrs:
attr2cmp['synonym'] = re.compile('"(\\S.*\\S)" ([A-Z]+) (.*)\\[(.*)\\](.*)$')
attr2cmp['synonym nt'] = cx.namedtuple('synonym', 'text scope typename dbxrefs') # depends on [control=['if'], data=[]]
# Wikipedia:Zygotene
# Reactome:REACT_27267 "DHAP from Ery4P and PEP, Mycobacterium tuberculosis"
if 'xref' in optional_attrs:
attr2cmp['xref'] = re.compile('^(\\S+:\\s*\\S+)\\b(.*)$') # depends on [control=['if'], data=[]]
return attr2cmp |
def temporary_file(root_dir=None, cleanup=True, suffix='', permissions=None, binary_mode=True):
"""
A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param int permissions: If provided, sets the file to use these permissions.
:param bool binary_mode: Whether file opens in binary or text mode.
"""
mode = 'w+b' if binary_mode else 'w+' # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions)
yield fd
finally:
if cleanup:
safe_delete(fd.name) | def function[temporary_file, parameter[root_dir, cleanup, suffix, permissions, binary_mode]]:
constant[
A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param int permissions: If provided, sets the file to use these permissions.
:param bool binary_mode: Whether file opens in binary or text mode.
]
variable[mode] assign[=] <ast.IfExp object at 0x7da1b22a71c0>
with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:]
<ast.Try object at 0x7da1b22a66b0> | keyword[def] identifier[temporary_file] ( identifier[root_dir] = keyword[None] , identifier[cleanup] = keyword[True] , identifier[suffix] = literal[string] , identifier[permissions] = keyword[None] , identifier[binary_mode] = keyword[True] ):
literal[string]
identifier[mode] = literal[string] keyword[if] identifier[binary_mode] keyword[else] literal[string]
keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[suffix] = identifier[suffix] , identifier[dir] = identifier[root_dir] , identifier[delete] = keyword[False] , identifier[mode] = identifier[mode] ) keyword[as] identifier[fd] :
keyword[try] :
keyword[if] identifier[permissions] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[chmod] ( identifier[fd] . identifier[name] , identifier[permissions] )
keyword[yield] identifier[fd]
keyword[finally] :
keyword[if] identifier[cleanup] :
identifier[safe_delete] ( identifier[fd] . identifier[name] ) | def temporary_file(root_dir=None, cleanup=True, suffix='', permissions=None, binary_mode=True):
"""
A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param int permissions: If provided, sets the file to use these permissions.
:param bool binary_mode: Whether file opens in binary or text mode.
"""
mode = 'w+b' if binary_mode else 'w+' # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions) # depends on [control=['if'], data=['permissions']]
yield fd # depends on [control=['try'], data=[]]
finally:
if cleanup:
safe_delete(fd.name) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['fd']] |
def opls_notation(atom_key):
"""Return element for OPLS forcefield atom key."""
# warning for Ne, He, Na types overlap
conflicts = ['ne', 'he', 'na']
if atom_key in conflicts:
raise _AtomKeyConflict((
"One of the OPLS conflicting "
"atom_keys has occured '{0}'. "
"For how to solve this issue see the manual or "
"MolecularSystem._atom_key_swap() doc string.").format(atom_key))
for element in opls_atom_keys:
if atom_key in opls_atom_keys[element]:
return element
# In case if atom_key was not found in the OPLS keys dictionary
raise _AtomKeyError((
"OPLS atom key {0} was not found in OPLS keys dictionary.").format(
atom_key)) | def function[opls_notation, parameter[atom_key]]:
constant[Return element for OPLS forcefield atom key.]
variable[conflicts] assign[=] list[[<ast.Constant object at 0x7da20e9b18d0>, <ast.Constant object at 0x7da20e9b1ae0>, <ast.Constant object at 0x7da20e9b3160>]]
if compare[name[atom_key] in name[conflicts]] begin[:]
<ast.Raise object at 0x7da20e9b1a50>
for taget[name[element]] in starred[name[opls_atom_keys]] begin[:]
if compare[name[atom_key] in call[name[opls_atom_keys]][name[element]]] begin[:]
return[name[element]]
<ast.Raise object at 0x7da2047e98a0> | keyword[def] identifier[opls_notation] ( identifier[atom_key] ):
literal[string]
identifier[conflicts] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[atom_key] keyword[in] identifier[conflicts] :
keyword[raise] identifier[_AtomKeyConflict] ((
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[atom_key] ))
keyword[for] identifier[element] keyword[in] identifier[opls_atom_keys] :
keyword[if] identifier[atom_key] keyword[in] identifier[opls_atom_keys] [ identifier[element] ]:
keyword[return] identifier[element]
keyword[raise] identifier[_AtomKeyError] ((
literal[string] ). identifier[format] (
identifier[atom_key] )) | def opls_notation(atom_key):
"""Return element for OPLS forcefield atom key."""
# warning for Ne, He, Na types overlap
conflicts = ['ne', 'he', 'na']
if atom_key in conflicts:
raise _AtomKeyConflict("One of the OPLS conflicting atom_keys has occured '{0}'. For how to solve this issue see the manual or MolecularSystem._atom_key_swap() doc string.".format(atom_key)) # depends on [control=['if'], data=['atom_key']]
for element in opls_atom_keys:
if atom_key in opls_atom_keys[element]:
return element # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
# In case if atom_key was not found in the OPLS keys dictionary
raise _AtomKeyError('OPLS atom key {0} was not found in OPLS keys dictionary.'.format(atom_key)) |
def plot_gos(fout_png, goids, obo_dag, *args, **kws):
"""Given GO ids and the obo_dag, create a plot of paths from GO ids."""
engine = kws['engine'] if 'engine' in kws else 'pydot'
godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag
godagplot = GODagSmallPlot(godagsmall, *args, **kws)
godagplot.plt(fout_png, engine) | def function[plot_gos, parameter[fout_png, goids, obo_dag]]:
constant[Given GO ids and the obo_dag, create a plot of paths from GO ids.]
variable[engine] assign[=] <ast.IfExp object at 0x7da18f811b40>
variable[godagsmall] assign[=] call[name[OboToGoDagSmall], parameter[]].godag
variable[godagplot] assign[=] call[name[GODagSmallPlot], parameter[name[godagsmall], <ast.Starred object at 0x7da20c7c9ff0>]]
call[name[godagplot].plt, parameter[name[fout_png], name[engine]]] | keyword[def] identifier[plot_gos] ( identifier[fout_png] , identifier[goids] , identifier[obo_dag] ,* identifier[args] ,** identifier[kws] ):
literal[string]
identifier[engine] = identifier[kws] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kws] keyword[else] literal[string]
identifier[godagsmall] = identifier[OboToGoDagSmall] ( identifier[goids] = identifier[goids] , identifier[obodag] = identifier[obo_dag] ). identifier[godag]
identifier[godagplot] = identifier[GODagSmallPlot] ( identifier[godagsmall] ,* identifier[args] ,** identifier[kws] )
identifier[godagplot] . identifier[plt] ( identifier[fout_png] , identifier[engine] ) | def plot_gos(fout_png, goids, obo_dag, *args, **kws):
"""Given GO ids and the obo_dag, create a plot of paths from GO ids."""
engine = kws['engine'] if 'engine' in kws else 'pydot'
godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag
godagplot = GODagSmallPlot(godagsmall, *args, **kws)
godagplot.plt(fout_png, engine) |
def zoning_enabled_configuration_cfg_name(self, **kwargs):
    """Build the <config> XML tree for zoning/enabled-configuration/cfg-name.

    Requires ``cfg_name`` in *kwargs*; an optional ``callback`` overrides
    the instance's default handler and receives the assembled tree.
    """
    # Assemble config -> zoning -> enabled-configuration -> cfg-name.
    root = ET.Element("config")
    zoning_el = ET.SubElement(root, "zoning",
                              xmlns="urn:brocade.com:mgmt:brocade-zone")
    enabled_cfg = ET.SubElement(zoning_el, "enabled-configuration")
    name_el = ET.SubElement(enabled_cfg, "cfg-name")
    name_el.text = kwargs.pop('cfg_name')
    # Fall back to the instance's default callback when none is supplied.
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[zoning] assign[=] call[name[ET].SubElement, parameter[name[config], constant[zoning]]]
variable[enabled_configuration] assign[=] call[name[ET].SubElement, parameter[name[zoning], constant[enabled-configuration]]]
variable[cfg_name] assign[=] call[name[ET].SubElement, parameter[name[enabled_configuration], constant[cfg-name]]]
name[cfg_name].text assign[=] call[name[kwargs].pop, parameter[constant[cfg_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[zoning_enabled_configuration_cfg_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[zoning] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[enabled_configuration] = identifier[ET] . identifier[SubElement] ( identifier[zoning] , literal[string] )
identifier[cfg_name] = identifier[ET] . identifier[SubElement] ( identifier[enabled_configuration] , literal[string] )
identifier[cfg_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def zoning_enabled_configuration_cfg_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
zoning = ET.SubElement(config, 'zoning', xmlns='urn:brocade.com:mgmt:brocade-zone')
enabled_configuration = ET.SubElement(zoning, 'enabled-configuration')
cfg_name = ET.SubElement(enabled_configuration, 'cfg-name')
cfg_name.text = kwargs.pop('cfg_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_AV_infinity(ra, dec, frame='icrs'):
    """
    Return the line-of-sight A_V extinction at infinity, queried from NED.

    :param ra,dec:
        Coordinates of the sight line, in degrees.
    :param frame: (optional)
        Frame of the input coordinates (e.g., ``'icrs', 'galactic'``).
    """
    icrs = SkyCoord(ra, dec, unit='deg', frame=frame).transform_to('icrs')
    hh, hm, hs = icrs.ra.hms
    dd, dm, ds = icrs.dec.dms
    # NED expects the declination sign as a URL-escaped '+' or '-'.
    sign = '%2B' if dd > 0 else '%2D'
    url = ('http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial'
           '&in_equinox=J2000.0&obs_epoch=2010&lon='
           + '{:d}%3A{:d}%3A{:05.2f}'.format(int(hh), int(hm), hs)
           + '&lat=' + sign
           + '{:d}%3A{:d}%3A{:05.2f}'.format(int(abs(dd)), int(abs(dm)), abs(ds))
           + '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0')
    av = None
    # Scan the NED reply for the Landolt V extinction line.
    for line in urllib.request.urlopen(url).readlines():
        match = re.search(rb'^Landolt V \(0.54\)\s+(\d+\.\d+)', line)
        if match:
            av = float(match.group(1))
            break
    if av is None:
        raise RuntimeError('AV query fails! URL is {}'.format(url))
    return av
constant[
Gets the A_V exctinction at infinity for a given line of sight.
Queries the NED database.
:param ra,dec:
Desired coordinates, in degrees.
:param frame: (optional)
Frame of input coordinates (e.g., ``'icrs', 'galactic'``)
]
variable[coords] assign[=] call[call[name[SkyCoord], parameter[name[ra], name[dec]]].transform_to, parameter[constant[icrs]]]
<ast.Tuple object at 0x7da18c4cf310> assign[=] name[coords].ra.hms
<ast.Tuple object at 0x7da18dc99960> assign[=] name[coords].dec.dms
if compare[name[decd] greater[>] constant[0]] begin[:]
variable[decsign] assign[=] constant[%2B]
variable[url] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon=] + binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> name[rah]]] + constant[%3A]] + binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> name[ram]]] + constant[%3A]] + binary_operation[constant[%05.2f] <ast.Mod object at 0x7da2590d6920> name[ras]]] + binary_operation[constant[&lat=%s] <ast.Mod object at 0x7da2590d6920> name[decsign]]] + binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> call[name[abs], parameter[name[decd]]]]] + constant[%3A]] + binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> call[name[abs], parameter[name[decm]]]]] + constant[%3A]] + binary_operation[constant[%05.2f] <ast.Mod object at 0x7da2590d6920> call[name[abs], parameter[name[decs]]]]] + constant[&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0]]
variable[AV] assign[=] constant[None]
for taget[name[line]] in starred[call[call[name[urllib].request.urlopen, parameter[name[url]]].readlines, parameter[]]] begin[:]
variable[m] assign[=] call[name[re].search, parameter[constant[b'^Landolt V \\(0.54\\)\\s+(\\d+\\.\\d+)'], name[line]]]
if name[m] begin[:]
variable[AV] assign[=] call[name[float], parameter[call[name[m].group, parameter[constant[1]]]]]
break
if compare[name[AV] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc98b20>
return[name[AV]] | keyword[def] identifier[get_AV_infinity] ( identifier[ra] , identifier[dec] , identifier[frame] = literal[string] ):
literal[string]
identifier[coords] = identifier[SkyCoord] ( identifier[ra] , identifier[dec] , identifier[unit] = literal[string] , identifier[frame] = identifier[frame] ). identifier[transform_to] ( literal[string] )
identifier[rah] , identifier[ram] , identifier[ras] = identifier[coords] . identifier[ra] . identifier[hms]
identifier[decd] , identifier[decm] , identifier[decs] = identifier[coords] . identifier[dec] . identifier[dms]
keyword[if] identifier[decd] > literal[int] :
identifier[decsign] = literal[string]
keyword[else] :
identifier[decsign] = literal[string]
identifier[url] = literal[string] + literal[string] % identifier[rah] + literal[string] + literal[string] % identifier[ram] + literal[string] + literal[string] % identifier[ras] + literal[string] % identifier[decsign] + literal[string] % identifier[abs] ( identifier[decd] )+ literal[string] + literal[string] % identifier[abs] ( identifier[decm] )+ literal[string] + literal[string] % identifier[abs] ( identifier[decs] )+ literal[string]
identifier[AV] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[url] ). identifier[readlines] ():
identifier[m] = identifier[re] . identifier[search] ( literal[string] , identifier[line] )
keyword[if] identifier[m] :
identifier[AV] =( identifier[float] ( identifier[m] . identifier[group] ( literal[int] )))
keyword[break]
keyword[if] identifier[AV] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[url] ))
keyword[return] identifier[AV] | def get_AV_infinity(ra, dec, frame='icrs'):
"""
Gets the A_V exctinction at infinity for a given line of sight.
Queries the NED database.
:param ra,dec:
Desired coordinates, in degrees.
:param frame: (optional)
Frame of input coordinates (e.g., ``'icrs', 'galactic'``)
"""
coords = SkyCoord(ra, dec, unit='deg', frame=frame).transform_to('icrs')
(rah, ram, ras) = coords.ra.hms
(decd, decm, decs) = coords.dec.dms
if decd > 0:
decsign = '%2B' # depends on [control=['if'], data=[]]
else:
decsign = '%2D'
url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon=' + '%i' % rah + '%3A' + '%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) + '%3A' + '%05.2f' % abs(decs) + '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0'
AV = None
for line in urllib.request.urlopen(url).readlines():
m = re.search(b'^Landolt V \\(0.54\\)\\s+(\\d+\\.\\d+)', line)
if m:
AV = float(m.group(1))
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if AV is None:
raise RuntimeError('AV query fails! URL is {}'.format(url)) # depends on [control=['if'], data=[]]
return AV |
def schema_file(self):
    """Return the full path of the file holding the configuration schema."""
    # The schema lives inside the lazy folder, relative to the working dir.
    return '{}/{}{}'.format(os.getcwd(), self.lazy_folder, self.schema_filename)
constant[ Gets the full path to the file in which to load configuration schema. ]
variable[path] assign[=] binary_operation[binary_operation[call[name[os].getcwd, parameter[]] + constant[/]] + name[self].lazy_folder]
return[binary_operation[name[path] + name[self].schema_filename]] | keyword[def] identifier[schema_file] ( identifier[self] ):
literal[string]
identifier[path] = identifier[os] . identifier[getcwd] ()+ literal[string] + identifier[self] . identifier[lazy_folder]
keyword[return] identifier[path] + identifier[self] . identifier[schema_filename] | def schema_file(self):
""" Gets the full path to the file in which to load configuration schema. """
path = os.getcwd() + '/' + self.lazy_folder
return path + self.schema_filename |
def synchronized(cls, obj=None):
    """Decorator factory that serializes calls by locking on *obj*.

    :param obj: the object to lock on; when None, the lock is keyed to
        the decorated function itself.
    :return: the decorated function's return value.
    """
    def lock_for(func, target):
        # One lock per hash key: the target object when given, otherwise
        # the wrapped function.
        key = hash(func if target is None else target)
        if key not in cls.lock_map:
            # Double-checked creation so concurrent first callers end up
            # sharing a single lock instance.
            with cls.lock_map_lock:
                if key not in cls.lock_map:
                    cls.lock_map[key] = _init_lock()
        return cls.lock_map[key]

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            with lock_for(func, obj):
                return func(*args, **kw)
        return wrapper
    return decorator
constant[ synchronize on obj if obj is supplied.
:param obj: the obj to lock on. if none, lock to the function
:return: return of the func.
]
def function[get_key, parameter[f, o]]:
if compare[name[o] is constant[None]] begin[:]
variable[key] assign[=] call[name[hash], parameter[name[f]]]
return[name[key]]
def function[get_lock, parameter[f, o]]:
variable[key] assign[=] call[name[get_key], parameter[name[f], name[o]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[cls].lock_map] begin[:]
with name[cls].lock_map_lock begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[cls].lock_map] begin[:]
call[name[cls].lock_map][name[key]] assign[=] call[name[_init_lock], parameter[]]
return[call[name[cls].lock_map][name[key]]]
def function[wrap, parameter[f]]:
def function[new_func, parameter[]]:
with call[name[get_lock], parameter[name[f], name[obj]]] begin[:]
return[call[name[f], parameter[<ast.Starred object at 0x7da1b115efe0>]]]
return[name[new_func]]
return[name[wrap]] | keyword[def] identifier[synchronized] ( identifier[cls] , identifier[obj] = keyword[None] ):
literal[string]
keyword[def] identifier[get_key] ( identifier[f] , identifier[o] ):
keyword[if] identifier[o] keyword[is] keyword[None] :
identifier[key] = identifier[hash] ( identifier[f] )
keyword[else] :
identifier[key] = identifier[hash] ( identifier[o] )
keyword[return] identifier[key]
keyword[def] identifier[get_lock] ( identifier[f] , identifier[o] ):
identifier[key] = identifier[get_key] ( identifier[f] , identifier[o] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[cls] . identifier[lock_map] :
keyword[with] identifier[cls] . identifier[lock_map_lock] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[cls] . identifier[lock_map] :
identifier[cls] . identifier[lock_map] [ identifier[key] ]= identifier[_init_lock] ()
keyword[return] identifier[cls] . identifier[lock_map] [ identifier[key] ]
keyword[def] identifier[wrap] ( identifier[f] ):
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[new_func] (* identifier[args] ,** identifier[kw] ):
keyword[with] identifier[get_lock] ( identifier[f] , identifier[obj] ):
keyword[return] identifier[f] (* identifier[args] ,** identifier[kw] )
keyword[return] identifier[new_func]
keyword[return] identifier[wrap] | def synchronized(cls, obj=None):
""" synchronize on obj if obj is supplied.
:param obj: the obj to lock on. if none, lock to the function
:return: return of the func.
"""
def get_key(f, o):
if o is None:
key = hash(f) # depends on [control=['if'], data=[]]
else:
key = hash(o)
return key
def get_lock(f, o):
key = get_key(f, o)
if key not in cls.lock_map:
with cls.lock_map_lock:
if key not in cls.lock_map:
cls.lock_map[key] = _init_lock() # depends on [control=['if'], data=['key']] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=['key']]
return cls.lock_map[key]
def wrap(f):
@functools.wraps(f)
def new_func(*args, **kw):
with get_lock(f, obj):
return f(*args, **kw) # depends on [control=['with'], data=[]]
return new_func
return wrap |
def advance_shards(self):
    """Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
    Returns immediately if the buffer isn't empty.
    """
    # Pending records take priority; don't fetch more until they drain.
    if self.buffer:
        return
    # Pull the next batch from every active shard, pairing each record
    # with the shard it came from so consumers can track provenance.
    pairs = []
    for shard in self.active:
        batch = next(shard)
        if batch:
            pairs.extend([(record, shard) for record in batch])
    self.buffer.push_all(pairs)
    self.migrate_closed_shards()
constant[Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
Returns immediately if the buffer isn't empty.
]
if name[self].buffer begin[:]
return[None]
variable[record_shard_pairs] assign[=] list[[]]
for taget[name[shard]] in starred[name[self].active] begin[:]
variable[records] assign[=] call[name[next], parameter[name[shard]]]
if name[records] begin[:]
call[name[record_shard_pairs].extend, parameter[<ast.GeneratorExp object at 0x7da1b0fe9480>]]
call[name[self].buffer.push_all, parameter[name[record_shard_pairs]]]
call[name[self].migrate_closed_shards, parameter[]] | keyword[def] identifier[advance_shards] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[buffer] :
keyword[return]
identifier[record_shard_pairs] =[]
keyword[for] identifier[shard] keyword[in] identifier[self] . identifier[active] :
identifier[records] = identifier[next] ( identifier[shard] )
keyword[if] identifier[records] :
identifier[record_shard_pairs] . identifier[extend] (( identifier[record] , identifier[shard] ) keyword[for] identifier[record] keyword[in] identifier[records] )
identifier[self] . identifier[buffer] . identifier[push_all] ( identifier[record_shard_pairs] )
identifier[self] . identifier[migrate_closed_shards] () | def advance_shards(self):
"""Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
Returns immediately if the buffer isn't empty.
"""
# Don't poll shards when there are pending records.
if self.buffer:
return # depends on [control=['if'], data=[]]
# 0) Collect new records from all active shards.
record_shard_pairs = []
for shard in self.active:
records = next(shard)
if records:
record_shard_pairs.extend(((record, shard) for record in records)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['shard']]
self.buffer.push_all(record_shard_pairs)
self.migrate_closed_shards() |
def radialrange(self, origin, return_all_global_extrema=False):
    """returns the tuples (d_min, t_min, idx_min), (d_max, t_max, idx_max)
    which minimize and maximize, respectively, the distance
    d = |self[idx].point(t)-origin|."""
    if return_all_global_extrema:
        raise NotImplementedError
    # Track the best (distance, t, segment-index) triple seen so far.
    best_min = (np.inf, None, None)
    best_max = (0, None, None)
    for idx, segment in enumerate(self):
        seg_min, seg_max = segment.radialrange(origin)
        if seg_min[0] < best_min[0]:
            best_min = seg_min + (idx,)
        if seg_max[0] > best_max[0]:
            best_max = seg_max + (idx,)
    return best_min, best_max
constant[returns the tuples (d_min, t_min, idx_min), (d_max, t_max, idx_max)
which minimize and maximize, respectively, the distance
d = |self[idx].point(t)-origin|.]
if name[return_all_global_extrema] begin[:]
<ast.Raise object at 0x7da18dc9a7a0> | keyword[def] identifier[radialrange] ( identifier[self] , identifier[origin] , identifier[return_all_global_extrema] = keyword[False] ):
literal[string]
keyword[if] identifier[return_all_global_extrema] :
keyword[raise] identifier[NotImplementedError]
keyword[else] :
identifier[global_min] =( identifier[np] . identifier[inf] , keyword[None] , keyword[None] )
identifier[global_max] =( literal[int] , keyword[None] , keyword[None] )
keyword[for] identifier[seg_idx] , identifier[seg] keyword[in] identifier[enumerate] ( identifier[self] ):
identifier[seg_global_min] , identifier[seg_global_max] = identifier[seg] . identifier[radialrange] ( identifier[origin] )
keyword[if] identifier[seg_global_min] [ literal[int] ]< identifier[global_min] [ literal[int] ]:
identifier[global_min] = identifier[seg_global_min] +( identifier[seg_idx] ,)
keyword[if] identifier[seg_global_max] [ literal[int] ]> identifier[global_max] [ literal[int] ]:
identifier[global_max] = identifier[seg_global_max] +( identifier[seg_idx] ,)
keyword[return] identifier[global_min] , identifier[global_max] | def radialrange(self, origin, return_all_global_extrema=False):
"""returns the tuples (d_min, t_min, idx_min), (d_max, t_max, idx_max)
which minimize and maximize, respectively, the distance
d = |self[idx].point(t)-origin|."""
if return_all_global_extrema:
raise NotImplementedError # depends on [control=['if'], data=[]]
else:
global_min = (np.inf, None, None)
global_max = (0, None, None)
for (seg_idx, seg) in enumerate(self):
(seg_global_min, seg_global_max) = seg.radialrange(origin)
if seg_global_min[0] < global_min[0]:
global_min = seg_global_min + (seg_idx,) # depends on [control=['if'], data=[]]
if seg_global_max[0] > global_max[0]:
global_max = seg_global_max + (seg_idx,) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (global_min, global_max) |
def upload_files(selected_file, selected_host, only_link, file_name):
    """
    Uploads selected file to the host, thanks to the fact that
    every pomf.se based site has pretty much the same architecture.
    """
    try:
        response = requests.post(
            url=selected_host[0] + "upload.php",
            files={'files[]': selected_file})
        # Pull the uploaded file's name out of the JSON-ish reply; escaped
        # backslashes are stripped first so the URL field parses cleanly.
        uploaded_name = re.findall(
            r'"url": *"((h.+\/){0,1}(.+?))"[,\}]',
            response.text.replace("\\", ""))[0][2]
        if only_link:
            return [selected_host[1] + uploaded_name,
                    "{}: {}{}".format(file_name, selected_host[1], uploaded_name)]
        return "{}: {}{}".format(file_name, selected_host[1], uploaded_name)
    except requests.exceptions.ConnectionError:
        print(file_name + ' couldn\'t be uploaded to ' + selected_host[0])
constant[
Uploads selected file to the host, thanks to the fact that
every pomf.se based site has pretty much the same architecture.
]
<ast.Try object at 0x7da1afe6d6c0> | keyword[def] identifier[upload_files] ( identifier[selected_file] , identifier[selected_host] , identifier[only_link] , identifier[file_name] ):
literal[string]
keyword[try] :
identifier[answer] = identifier[requests] . identifier[post] (
identifier[url] = identifier[selected_host] [ literal[int] ]+ literal[string] ,
identifier[files] ={ literal[string] : identifier[selected_file] })
identifier[file_name_1] = identifier[re] . identifier[findall] ( literal[string] , identifier[answer] . identifier[text] . identifier[replace] ( literal[string] , literal[string] ))[ literal[int] ][ literal[int] ]
keyword[if] identifier[only_link] :
keyword[return] [ identifier[selected_host] [ literal[int] ]+ identifier[file_name_1] , literal[string] . identifier[format] ( identifier[file_name] , identifier[selected_host] [ literal[int] ], identifier[file_name_1] )]
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[file_name] , identifier[selected_host] [ literal[int] ], identifier[file_name_1] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] :
identifier[print] ( identifier[file_name] + literal[string] + identifier[selected_host] [ literal[int] ]) | def upload_files(selected_file, selected_host, only_link, file_name):
"""
Uploads selected file to the host, thanks to the fact that
every pomf.se based site has pretty much the same architecture.
"""
try:
answer = requests.post(url=selected_host[0] + 'upload.php', files={'files[]': selected_file})
file_name_1 = re.findall('"url": *"((h.+\\/){0,1}(.+?))"[,\\}]', answer.text.replace('\\', ''))[0][2]
if only_link:
return [selected_host[1] + file_name_1, '{}: {}{}'.format(file_name, selected_host[1], file_name_1)] # depends on [control=['if'], data=[]]
else:
return '{}: {}{}'.format(file_name, selected_host[1], file_name_1) # depends on [control=['try'], data=[]]
except requests.exceptions.ConnectionError:
print(file_name + " couldn't be uploaded to " + selected_host[0]) # depends on [control=['except'], data=[]] |
def returns(prices, method='simple', periods=1, fill_method='pad', limit=None, freq=None):
    """
    Compute the returns for the specified prices.

    :param prices: Series or DataFrame of prices.
    :param method: one of 'simple', 'compound', 'log'; 'compound' and
        'log' both compute log returns.
    :param periods: number of periods to shift when differencing.
    :param fill_method: forward-fill method for missing prices
        (None to leave gaps untouched).
    :param limit: maximum number of consecutive fills.
    :param freq: resampling frequency (supported for the simple method only).
    """
    if method not in ('simple', 'compound', 'log'):
        # Message previously omitted 'log' even though it is accepted above.
        raise ValueError("Invalid method type. Valid values are ('simple', 'compound', 'log')")

    if method == 'simple':
        return prices.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)

    if freq is not None:
        raise NotImplementedError("TODO: implement this logic if needed")

    if isinstance(prices, pd.Series):
        data = prices if fill_method is None else prices.fillna(method=fill_method, limit=limit)
        data = np.log(data / data.shift(periods=periods))
        # Positions missing in the input stay missing in the output, even
        # where the forward-fill produced a value.
        mask = pd.isnull(prices.values)
        np.putmask(data.values, mask, np.nan)
        return data

    # DataFrame: recurse column-by-column, preserving column order and index.
    return pd.DataFrame(
        {name: returns(col, method, periods, fill_method, limit, freq) for name, col in prices.iteritems()},
        columns=prices.columns,
        index=prices.index)
constant[
compute the returns for the specified prices.
method: [simple,compound,log], compound is log
]
if compare[name[method] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1e7af20>, <ast.Constant object at 0x7da1b1e7a8f0>, <ast.Constant object at 0x7da1b1e7a8c0>]]] begin[:]
<ast.Raise object at 0x7da1b1e7a860>
if compare[name[method] equal[==] constant[simple]] begin[:]
return[call[name[prices].pct_change, parameter[]]] | keyword[def] identifier[returns] ( identifier[prices] , identifier[method] = literal[string] , identifier[periods] = literal[int] , identifier[fill_method] = literal[string] , identifier[limit] = keyword[None] , identifier[freq] = keyword[None] ):
literal[string]
keyword[if] identifier[method] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[method] == literal[string] :
keyword[return] identifier[prices] . identifier[pct_change] ( identifier[periods] = identifier[periods] , identifier[fill_method] = identifier[fill_method] , identifier[limit] = identifier[limit] , identifier[freq] = identifier[freq] )
keyword[else] :
keyword[if] identifier[freq] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[prices] , identifier[pd] . identifier[Series] ):
keyword[if] identifier[fill_method] keyword[is] keyword[None] :
identifier[data] = identifier[prices]
keyword[else] :
identifier[data] = identifier[prices] . identifier[fillna] ( identifier[method] = identifier[fill_method] , identifier[limit] = identifier[limit] )
identifier[data] = identifier[np] . identifier[log] ( identifier[data] / identifier[data] . identifier[shift] ( identifier[periods] = identifier[periods] ))
identifier[mask] = identifier[pd] . identifier[isnull] ( identifier[prices] . identifier[values] )
identifier[np] . identifier[putmask] ( identifier[data] . identifier[values] , identifier[mask] , identifier[np] . identifier[nan] )
keyword[return] identifier[data]
keyword[else] :
keyword[return] identifier[pd] . identifier[DataFrame] (
{ identifier[name] : identifier[returns] ( identifier[col] , identifier[method] , identifier[periods] , identifier[fill_method] , identifier[limit] , identifier[freq] ) keyword[for] identifier[name] , identifier[col] keyword[in] identifier[prices] . identifier[iteritems] ()},
identifier[columns] = identifier[prices] . identifier[columns] ,
identifier[index] = identifier[prices] . identifier[index] ) | def returns(prices, method='simple', periods=1, fill_method='pad', limit=None, freq=None):
"""
compute the returns for the specified prices.
method: [simple,compound,log], compound is log
"""
if method not in ('simple', 'compound', 'log'):
raise ValueError("Invalid method type. Valid values are ('simple', 'compound')") # depends on [control=['if'], data=[]]
if method == 'simple':
return prices.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq) # depends on [control=['if'], data=[]]
else:
if freq is not None:
raise NotImplementedError('TODO: implement this logic if needed') # depends on [control=['if'], data=[]]
if isinstance(prices, pd.Series):
if fill_method is None:
data = prices # depends on [control=['if'], data=[]]
else:
data = prices.fillna(method=fill_method, limit=limit)
data = np.log(data / data.shift(periods=periods))
mask = pd.isnull(prices.values)
np.putmask(data.values, mask, np.nan)
return data # depends on [control=['if'], data=[]]
else:
return pd.DataFrame({name: returns(col, method, periods, fill_method, limit, freq) for (name, col) in prices.iteritems()}, columns=prices.columns, index=prices.index) |
def set_hash(self, algo, digest):
    """Set algorithm ID and hexadecimal digest for next operation."""
    # The two values are consumed as a pair, so record them together.
    self.algo, self.digest = algo, digest
constant[Set algorithm ID and hexadecimal digest for next operation.]
name[self].algo assign[=] name[algo]
name[self].digest assign[=] name[digest] | keyword[def] identifier[set_hash] ( identifier[self] , identifier[algo] , identifier[digest] ):
literal[string]
identifier[self] . identifier[algo] = identifier[algo]
identifier[self] . identifier[digest] = identifier[digest] | def set_hash(self, algo, digest):
"""Set algorithm ID and hexadecimal digest for next operation."""
self.algo = algo
self.digest = digest |
def add_navi_series_from_jsonp(self, data_src=None, data_name='json_data', series_type="line", **kwargs):
    """Add a Highstock navigator series whose data comes from a JSONP source."""
    # Flag that this chart pulls its data via JSONP (one-shot latch).
    if not self.jsonp_data_flag:
        self.jsonp_data_flag = True
    self.jsonp_data_url = json.dumps(data_src)
    # A bare 'data' name would collide with the option key, so prefix it.
    self.jsonp_data = 'json_' + data_name if data_name == 'data' else data_name
    self.add_navi_series(RawJavaScriptText(self.jsonp_data), series_type, **kwargs)
constant[set series for navigator option in highstocks]
if <ast.UnaryOp object at 0x7da18bc737f0> begin[:]
name[self].jsonp_data_flag assign[=] constant[True]
name[self].jsonp_data_url assign[=] call[name[json].dumps, parameter[name[data_src]]]
if compare[name[data_name] equal[==] constant[data]] begin[:]
variable[data_name] assign[=] binary_operation[constant[json_] + name[data_name]]
name[self].jsonp_data assign[=] name[data_name]
call[name[self].add_navi_series, parameter[call[name[RawJavaScriptText], parameter[name[self].jsonp_data]], name[series_type]]] | keyword[def] identifier[add_navi_series_from_jsonp] ( identifier[self] , identifier[data_src] = keyword[None] , identifier[data_name] = literal[string] , identifier[series_type] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[jsonp_data_flag] :
identifier[self] . identifier[jsonp_data_flag] = keyword[True]
identifier[self] . identifier[jsonp_data_url] = identifier[json] . identifier[dumps] ( identifier[data_src] )
keyword[if] identifier[data_name] == literal[string] :
identifier[data_name] = literal[string] + identifier[data_name]
identifier[self] . identifier[jsonp_data] = identifier[data_name]
identifier[self] . identifier[add_navi_series] ( identifier[RawJavaScriptText] ( identifier[self] . identifier[jsonp_data] ), identifier[series_type] ,** identifier[kwargs] ) | def add_navi_series_from_jsonp(self, data_src=None, data_name='json_data', series_type='line', **kwargs):
"""set series for navigator option in highstocks"""
if not self.jsonp_data_flag:
self.jsonp_data_flag = True
self.jsonp_data_url = json.dumps(data_src)
if data_name == 'data':
data_name = 'json_' + data_name # depends on [control=['if'], data=['data_name']]
self.jsonp_data = data_name # depends on [control=['if'], data=[]]
self.add_navi_series(RawJavaScriptText(self.jsonp_data), series_type, **kwargs) |
def assigned_state(instance):
    """Returns `assigned` or `unassigned` depending on the state of the
    analyses the analysisrequest contains. Return `unassigned` if the Analysis
    Request has at least one analysis in `unassigned` state.
    Otherwise, returns `assigned`
    """
    analyses = instance.getAnalyses()
    if not analyses:
        return "unassigned"
    # A single analysis without a worksheet is enough to flag the whole
    # request; any() short-circuits on the first one found.
    if any(not api.get_object(brain).getWorksheet() for brain in analyses):
        return "unassigned"
    return "assigned"
return "assigned" | def function[assigned_state, parameter[instance]]:
constant[Returns `assigned` or `unassigned` depending on the state of the
analyses the analysisrequest contains. Return `unassigned` if the Analysis
Request has at least one analysis in `unassigned` state.
Otherwise, returns `assigned`
]
variable[analyses] assign[=] call[name[instance].getAnalyses, parameter[]]
if <ast.UnaryOp object at 0x7da18eb55870> begin[:]
return[constant[unassigned]]
for taget[name[analysis]] in starred[name[analyses]] begin[:]
variable[analysis_object] assign[=] call[name[api].get_object, parameter[name[analysis]]]
if <ast.UnaryOp object at 0x7da18eb554b0> begin[:]
return[constant[unassigned]]
return[constant[assigned]] | keyword[def] identifier[assigned_state] ( identifier[instance] ):
literal[string]
identifier[analyses] = identifier[instance] . identifier[getAnalyses] ()
keyword[if] keyword[not] identifier[analyses] :
keyword[return] literal[string]
keyword[for] identifier[analysis] keyword[in] identifier[analyses] :
identifier[analysis_object] = identifier[api] . identifier[get_object] ( identifier[analysis] )
keyword[if] keyword[not] identifier[analysis_object] . identifier[getWorksheet] ():
keyword[return] literal[string]
keyword[return] literal[string] | def assigned_state(instance):
"""Returns `assigned` or `unassigned` depending on the state of the
analyses the analysisrequest contains. Return `unassigned` if the Analysis
Request has at least one analysis in `unassigned` state.
Otherwise, returns `assigned`
"""
analyses = instance.getAnalyses()
if not analyses:
return 'unassigned' # depends on [control=['if'], data=[]]
for analysis in analyses:
analysis_object = api.get_object(analysis)
if not analysis_object.getWorksheet():
return 'unassigned' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['analysis']]
return 'assigned' |
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` difines search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1')
params = [key, bit]
start is not None and params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise DataError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params) | def function[bitpos, parameter[self, key, bit, start, end]]:
constant[
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` difines search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
]
if compare[name[bit] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18dc9bac0>, <ast.Constant object at 0x7da18dc99810>]]] begin[:]
<ast.Raise object at 0x7da18dc9a5f0>
variable[params] assign[=] list[[<ast.Name object at 0x7da1b1f95f90>, <ast.Name object at 0x7da1b1f97b50>]]
<ast.BoolOp object at 0x7da1b1f94190>
if <ast.BoolOp object at 0x7da1b1f95600> begin[:]
call[name[params].append, parameter[name[end]]]
return[call[name[self].execute_command, parameter[constant[BITPOS], <ast.Starred object at 0x7da1b1f94370>]]] | keyword[def] identifier[bitpos] ( identifier[self] , identifier[key] , identifier[bit] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
keyword[if] identifier[bit] keyword[not] keyword[in] ( literal[int] , literal[int] ):
keyword[raise] identifier[DataError] ( literal[string] )
identifier[params] =[ identifier[key] , identifier[bit] ]
identifier[start] keyword[is] keyword[not] keyword[None] keyword[and] identifier[params] . identifier[append] ( identifier[start] )
keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] keyword[and] identifier[end] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[append] ( identifier[end] )
keyword[elif] identifier[start] keyword[is] keyword[None] keyword[and] identifier[end] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[DataError] ( literal[string]
literal[string] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] ,* identifier[params] ) | def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` difines search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1') # depends on [control=['if'], data=[]]
params = [key, bit]
start is not None and params.append(start)
if start is not None and end is not None:
params.append(end) # depends on [control=['if'], data=[]]
elif start is None and end is not None:
raise DataError('start argument is not set, when end is specified') # depends on [control=['if'], data=[]]
return self.execute_command('BITPOS', *params) |
def flatten_tree(tree, nested_attr='replies', depth_first=False):
"""Return a flattened version of the passed in tree.
:param nested_attr: The attribute name that contains the nested items.
Defaults to ``replies`` which is suitable for comments.
:param depth_first: When true, add to the list in a depth-first manner
rather than the default breadth-first manner.
"""
stack = deque(tree)
extend = stack.extend if depth_first else stack.extendleft
retval = []
while stack:
item = stack.popleft()
nested = getattr(item, nested_attr, None)
if nested:
extend(nested)
retval.append(item)
return retval | def function[flatten_tree, parameter[tree, nested_attr, depth_first]]:
constant[Return a flattened version of the passed in tree.
:param nested_attr: The attribute name that contains the nested items.
Defaults to ``replies`` which is suitable for comments.
:param depth_first: When true, add to the list in a depth-first manner
rather than the default breadth-first manner.
]
variable[stack] assign[=] call[name[deque], parameter[name[tree]]]
variable[extend] assign[=] <ast.IfExp object at 0x7da20e9b3a90>
variable[retval] assign[=] list[[]]
while name[stack] begin[:]
variable[item] assign[=] call[name[stack].popleft, parameter[]]
variable[nested] assign[=] call[name[getattr], parameter[name[item], name[nested_attr], constant[None]]]
if name[nested] begin[:]
call[name[extend], parameter[name[nested]]]
call[name[retval].append, parameter[name[item]]]
return[name[retval]] | keyword[def] identifier[flatten_tree] ( identifier[tree] , identifier[nested_attr] = literal[string] , identifier[depth_first] = keyword[False] ):
literal[string]
identifier[stack] = identifier[deque] ( identifier[tree] )
identifier[extend] = identifier[stack] . identifier[extend] keyword[if] identifier[depth_first] keyword[else] identifier[stack] . identifier[extendleft]
identifier[retval] =[]
keyword[while] identifier[stack] :
identifier[item] = identifier[stack] . identifier[popleft] ()
identifier[nested] = identifier[getattr] ( identifier[item] , identifier[nested_attr] , keyword[None] )
keyword[if] identifier[nested] :
identifier[extend] ( identifier[nested] )
identifier[retval] . identifier[append] ( identifier[item] )
keyword[return] identifier[retval] | def flatten_tree(tree, nested_attr='replies', depth_first=False):
"""Return a flattened version of the passed in tree.
:param nested_attr: The attribute name that contains the nested items.
Defaults to ``replies`` which is suitable for comments.
:param depth_first: When true, add to the list in a depth-first manner
rather than the default breadth-first manner.
"""
stack = deque(tree)
extend = stack.extend if depth_first else stack.extendleft
retval = []
while stack:
item = stack.popleft()
nested = getattr(item, nested_attr, None)
if nested:
extend(nested) # depends on [control=['if'], data=[]]
retval.append(item) # depends on [control=['while'], data=[]]
return retval |
def set_record(self, name, record_id, record):
"""Save a record into the cache.
Args:
name (string): The name to save the model under.
record_id (int): The record id.
record (:class:`cinder_data.model.CinderModel`): The model
"""
if name not in self._cache:
self._cache[name] = {}
self._cache[name][record_id] = record | def function[set_record, parameter[self, name, record_id, record]]:
constant[Save a record into the cache.
Args:
name (string): The name to save the model under.
record_id (int): The record id.
record (:class:`cinder_data.model.CinderModel`): The model
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._cache] begin[:]
call[name[self]._cache][name[name]] assign[=] dictionary[[], []]
call[call[name[self]._cache][name[name]]][name[record_id]] assign[=] name[record] | keyword[def] identifier[set_record] ( identifier[self] , identifier[name] , identifier[record_id] , identifier[record] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_cache] :
identifier[self] . identifier[_cache] [ identifier[name] ]={}
identifier[self] . identifier[_cache] [ identifier[name] ][ identifier[record_id] ]= identifier[record] | def set_record(self, name, record_id, record):
"""Save a record into the cache.
Args:
name (string): The name to save the model under.
record_id (int): The record id.
record (:class:`cinder_data.model.CinderModel`): The model
"""
if name not in self._cache:
self._cache[name] = {} # depends on [control=['if'], data=['name']]
self._cache[name][record_id] = record |
def get_domains(self):
"""
Connect to alignak backend and retrieve all available child endpoints of root
If connection is successful, returns a list of all the resources available in the backend:
Each resource is identified with its title and provides its endpoint relative to backend
root endpoint.::
[
{u'href': u'loghost', u'title': u'loghost'},
{u'href': u'escalation', u'title': u'escalation'},
...
]
If an error occurs a BackendException is raised.
If an exception occurs, it is raised to caller.
:return: list of available resources
:rtype: list
"""
resp = self.get('')
if "_links" in resp:
_links = resp["_links"]
if "child" in _links:
return _links["child"]
return {} | def function[get_domains, parameter[self]]:
constant[
Connect to alignak backend and retrieve all available child endpoints of root
If connection is successful, returns a list of all the resources available in the backend:
Each resource is identified with its title and provides its endpoint relative to backend
root endpoint.::
[
{u'href': u'loghost', u'title': u'loghost'},
{u'href': u'escalation', u'title': u'escalation'},
...
]
If an error occurs a BackendException is raised.
If an exception occurs, it is raised to caller.
:return: list of available resources
:rtype: list
]
variable[resp] assign[=] call[name[self].get, parameter[constant[]]]
if compare[constant[_links] in name[resp]] begin[:]
variable[_links] assign[=] call[name[resp]][constant[_links]]
if compare[constant[child] in name[_links]] begin[:]
return[call[name[_links]][constant[child]]]
return[dictionary[[], []]] | keyword[def] identifier[get_domains] ( identifier[self] ):
literal[string]
identifier[resp] = identifier[self] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[resp] :
identifier[_links] = identifier[resp] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[_links] :
keyword[return] identifier[_links] [ literal[string] ]
keyword[return] {} | def get_domains(self):
"""
Connect to alignak backend and retrieve all available child endpoints of root
If connection is successful, returns a list of all the resources available in the backend:
Each resource is identified with its title and provides its endpoint relative to backend
root endpoint.::
[
{u'href': u'loghost', u'title': u'loghost'},
{u'href': u'escalation', u'title': u'escalation'},
...
]
If an error occurs a BackendException is raised.
If an exception occurs, it is raised to caller.
:return: list of available resources
:rtype: list
"""
resp = self.get('')
if '_links' in resp:
_links = resp['_links']
if 'child' in _links:
return _links['child'] # depends on [control=['if'], data=['_links']] # depends on [control=['if'], data=['resp']]
return {} |
def expected_number_of_transactions_in_first_n_periods(self, n):
r"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
"""
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
x_counts = self.data.groupby("frequency")["weights"].sum()
x = np.asarray(x_counts.index)
p1 = binom(n, x) * exp(
betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta)
)
I = np.arange(x.min(), n)
@np.vectorize
def p2(j, x):
i = I[int(j) :]
return np.sum(
binom(i, x)
* exp(
betaln(alpha + x, beta + i - x)
- betaln(alpha, beta)
+ betaln(gamma + 1, delta + i)
- betaln(gamma, delta)
)
)
p1 += np.fromfunction(p2, (x.shape[0],), x=x)
idx = pd.Index(x, name="frequency")
return DataFrame(p1 * x_counts.sum(), index=idx, columns=["model"]) | def function[expected_number_of_transactions_in_first_n_periods, parameter[self, n]]:
constant[
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
]
variable[params] assign[=] call[name[self]._unload_params, parameter[constant[alpha], constant[beta], constant[gamma], constant[delta]]]
<ast.Tuple object at 0x7da1b1d883a0> assign[=] name[params]
variable[x_counts] assign[=] call[call[call[name[self].data.groupby, parameter[constant[frequency]]]][constant[weights]].sum, parameter[]]
variable[x] assign[=] call[name[np].asarray, parameter[name[x_counts].index]]
variable[p1] assign[=] binary_operation[call[name[binom], parameter[name[n], name[x]]] * call[name[exp], parameter[binary_operation[binary_operation[binary_operation[call[name[betaln], parameter[binary_operation[name[alpha] + name[x]], binary_operation[binary_operation[name[beta] + name[n]] - name[x]]]] - call[name[betaln], parameter[name[alpha], name[beta]]]] + call[name[betaln], parameter[name[gamma], binary_operation[name[delta] + name[n]]]]] - call[name[betaln], parameter[name[gamma], name[delta]]]]]]]
variable[I] assign[=] call[name[np].arange, parameter[call[name[x].min, parameter[]], name[n]]]
def function[p2, parameter[j, x]]:
variable[i] assign[=] call[name[I]][<ast.Slice object at 0x7da1b1d89630>]
return[call[name[np].sum, parameter[binary_operation[call[name[binom], parameter[name[i], name[x]]] * call[name[exp], parameter[binary_operation[binary_operation[binary_operation[call[name[betaln], parameter[binary_operation[name[alpha] + name[x]], binary_operation[binary_operation[name[beta] + name[i]] - name[x]]]] - call[name[betaln], parameter[name[alpha], name[beta]]]] + call[name[betaln], parameter[binary_operation[name[gamma] + constant[1]], binary_operation[name[delta] + name[i]]]]] - call[name[betaln], parameter[name[gamma], name[delta]]]]]]]]]]
<ast.AugAssign object at 0x7da1b1d89f00>
variable[idx] assign[=] call[name[pd].Index, parameter[name[x]]]
return[call[name[DataFrame], parameter[binary_operation[name[p1] * call[name[x_counts].sum, parameter[]]]]]] | keyword[def] identifier[expected_number_of_transactions_in_first_n_periods] ( identifier[self] , identifier[n] ):
literal[string]
identifier[params] = identifier[self] . identifier[_unload_params] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[alpha] , identifier[beta] , identifier[gamma] , identifier[delta] = identifier[params]
identifier[x_counts] = identifier[self] . identifier[data] . identifier[groupby] ( literal[string] )[ literal[string] ]. identifier[sum] ()
identifier[x] = identifier[np] . identifier[asarray] ( identifier[x_counts] . identifier[index] )
identifier[p1] = identifier[binom] ( identifier[n] , identifier[x] )* identifier[exp] (
identifier[betaln] ( identifier[alpha] + identifier[x] , identifier[beta] + identifier[n] - identifier[x] )- identifier[betaln] ( identifier[alpha] , identifier[beta] )+ identifier[betaln] ( identifier[gamma] , identifier[delta] + identifier[n] )- identifier[betaln] ( identifier[gamma] , identifier[delta] )
)
identifier[I] = identifier[np] . identifier[arange] ( identifier[x] . identifier[min] (), identifier[n] )
@ identifier[np] . identifier[vectorize]
keyword[def] identifier[p2] ( identifier[j] , identifier[x] ):
identifier[i] = identifier[I] [ identifier[int] ( identifier[j] ):]
keyword[return] identifier[np] . identifier[sum] (
identifier[binom] ( identifier[i] , identifier[x] )
* identifier[exp] (
identifier[betaln] ( identifier[alpha] + identifier[x] , identifier[beta] + identifier[i] - identifier[x] )
- identifier[betaln] ( identifier[alpha] , identifier[beta] )
+ identifier[betaln] ( identifier[gamma] + literal[int] , identifier[delta] + identifier[i] )
- identifier[betaln] ( identifier[gamma] , identifier[delta] )
)
)
identifier[p1] += identifier[np] . identifier[fromfunction] ( identifier[p2] ,( identifier[x] . identifier[shape] [ literal[int] ],), identifier[x] = identifier[x] )
identifier[idx] = identifier[pd] . identifier[Index] ( identifier[x] , identifier[name] = literal[string] )
keyword[return] identifier[DataFrame] ( identifier[p1] * identifier[x_counts] . identifier[sum] (), identifier[index] = identifier[idx] , identifier[columns] =[ literal[string] ]) | def expected_number_of_transactions_in_first_n_periods(self, n):
"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \\alpha, \\beta, \\gamma, \\delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
"""
params = self._unload_params('alpha', 'beta', 'gamma', 'delta')
(alpha, beta, gamma, delta) = params
x_counts = self.data.groupby('frequency')['weights'].sum()
x = np.asarray(x_counts.index)
p1 = binom(n, x) * exp(betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta))
I = np.arange(x.min(), n)
@np.vectorize
def p2(j, x):
i = I[int(j):]
return np.sum(binom(i, x) * exp(betaln(alpha + x, beta + i - x) - betaln(alpha, beta) + betaln(gamma + 1, delta + i) - betaln(gamma, delta)))
p1 += np.fromfunction(p2, (x.shape[0],), x=x)
idx = pd.Index(x, name='frequency')
return DataFrame(p1 * x_counts.sum(), index=idx, columns=['model']) |
def param_define(self,
param,
default,
unit='',
descr='',
tomatrix=True,
nonzero=False,
mandatory=False,
power=False,
voltage=False,
current=False,
z=False,
y=False,
r=False,
g=False,
dccurrent=False,
dcvoltage=False,
time=False,
event_time=False,
**kwargs):
"""
Define a parameter in the model
:param tomatrix: convert this parameter list to matrix
:param param: parameter name
:param default: parameter default value
:param unit: parameter unit
:param descr: description
:param nonzero: is non-zero
:param mandatory: is mandatory
:param power: is a power value in the `self.Sn` base
:param voltage: is a voltage value in the `self.Vn` base
:param current: is a current value in the device base
:param z: is an impedance value in the device base
:param y: is an admittance value in the device base
:param r: is a dc resistance value in the device base
:param g: is a dc conductance value in the device base
:param dccurrent: is a dc current value in the device base
:param dcvoltage: is a dc votlage value in the device base
:param time: is a time value in the device base
:param event_time: is a variable for timed event
:type param: str
:type tomatrix: bool
:type default: str, float
:type unit: str
:type descr: str
:type nonzero: bool
:type mandatory: bool
:type power: bool
:type voltage: bool
:type current: bool
:type z: bool
:type y: bool
:type r: bool
:type g: bool
:type dccurrent: bool
:type dcvoltage: bool
:type time: bool
:type event_time: bool
"""
assert param not in self._data
assert param not in self._algebs
assert param not in self._states
assert param not in self._service
self._data.update({param: default})
if unit:
self._units.update({param: unit})
if descr:
self._descr.update({param: descr})
if tomatrix:
self._params.append(param)
if nonzero:
self._zeros.append(param)
if mandatory:
self._mandatory.append(param)
if power:
self._powers.append(param)
if voltage:
self._voltages.append(param)
if current:
self._currents.append(param)
if z:
self._z.append(param)
if y:
self._y.append(param)
if r:
self._r.append(param)
if g:
self._g.append(param)
if dccurrent:
self._dccurrents.append(param)
if dcvoltage:
self._dcvoltages.append(param)
if time:
self._times.append(param)
if event_time:
self._event_times.append(param) | def function[param_define, parameter[self, param, default, unit, descr, tomatrix, nonzero, mandatory, power, voltage, current, z, y, r, g, dccurrent, dcvoltage, time, event_time]]:
constant[
Define a parameter in the model
:param tomatrix: convert this parameter list to matrix
:param param: parameter name
:param default: parameter default value
:param unit: parameter unit
:param descr: description
:param nonzero: is non-zero
:param mandatory: is mandatory
:param power: is a power value in the `self.Sn` base
:param voltage: is a voltage value in the `self.Vn` base
:param current: is a current value in the device base
:param z: is an impedance value in the device base
:param y: is an admittance value in the device base
:param r: is a dc resistance value in the device base
:param g: is a dc conductance value in the device base
:param dccurrent: is a dc current value in the device base
:param dcvoltage: is a dc votlage value in the device base
:param time: is a time value in the device base
:param event_time: is a variable for timed event
:type param: str
:type tomatrix: bool
:type default: str, float
:type unit: str
:type descr: str
:type nonzero: bool
:type mandatory: bool
:type power: bool
:type voltage: bool
:type current: bool
:type z: bool
:type y: bool
:type r: bool
:type g: bool
:type dccurrent: bool
:type dcvoltage: bool
:type time: bool
:type event_time: bool
]
assert[compare[name[param] <ast.NotIn object at 0x7da2590d7190> name[self]._data]]
assert[compare[name[param] <ast.NotIn object at 0x7da2590d7190> name[self]._algebs]]
assert[compare[name[param] <ast.NotIn object at 0x7da2590d7190> name[self]._states]]
assert[compare[name[param] <ast.NotIn object at 0x7da2590d7190> name[self]._service]]
call[name[self]._data.update, parameter[dictionary[[<ast.Name object at 0x7da2044c1750>], [<ast.Name object at 0x7da2044c3c40>]]]]
if name[unit] begin[:]
call[name[self]._units.update, parameter[dictionary[[<ast.Name object at 0x7da2044c2350>], [<ast.Name object at 0x7da2044c21d0>]]]]
if name[descr] begin[:]
call[name[self]._descr.update, parameter[dictionary[[<ast.Name object at 0x7da2044c3e80>], [<ast.Name object at 0x7da2044c0790>]]]]
if name[tomatrix] begin[:]
call[name[self]._params.append, parameter[name[param]]]
if name[nonzero] begin[:]
call[name[self]._zeros.append, parameter[name[param]]]
if name[mandatory] begin[:]
call[name[self]._mandatory.append, parameter[name[param]]]
if name[power] begin[:]
call[name[self]._powers.append, parameter[name[param]]]
if name[voltage] begin[:]
call[name[self]._voltages.append, parameter[name[param]]]
if name[current] begin[:]
call[name[self]._currents.append, parameter[name[param]]]
if name[z] begin[:]
call[name[self]._z.append, parameter[name[param]]]
if name[y] begin[:]
call[name[self]._y.append, parameter[name[param]]]
if name[r] begin[:]
call[name[self]._r.append, parameter[name[param]]]
if name[g] begin[:]
call[name[self]._g.append, parameter[name[param]]]
if name[dccurrent] begin[:]
call[name[self]._dccurrents.append, parameter[name[param]]]
if name[dcvoltage] begin[:]
call[name[self]._dcvoltages.append, parameter[name[param]]]
if name[time] begin[:]
call[name[self]._times.append, parameter[name[param]]]
if name[event_time] begin[:]
call[name[self]._event_times.append, parameter[name[param]]] | keyword[def] identifier[param_define] ( identifier[self] ,
identifier[param] ,
identifier[default] ,
identifier[unit] = literal[string] ,
identifier[descr] = literal[string] ,
identifier[tomatrix] = keyword[True] ,
identifier[nonzero] = keyword[False] ,
identifier[mandatory] = keyword[False] ,
identifier[power] = keyword[False] ,
identifier[voltage] = keyword[False] ,
identifier[current] = keyword[False] ,
identifier[z] = keyword[False] ,
identifier[y] = keyword[False] ,
identifier[r] = keyword[False] ,
identifier[g] = keyword[False] ,
identifier[dccurrent] = keyword[False] ,
identifier[dcvoltage] = keyword[False] ,
identifier[time] = keyword[False] ,
identifier[event_time] = keyword[False] ,
** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[param] keyword[not] keyword[in] identifier[self] . identifier[_data]
keyword[assert] identifier[param] keyword[not] keyword[in] identifier[self] . identifier[_algebs]
keyword[assert] identifier[param] keyword[not] keyword[in] identifier[self] . identifier[_states]
keyword[assert] identifier[param] keyword[not] keyword[in] identifier[self] . identifier[_service]
identifier[self] . identifier[_data] . identifier[update] ({ identifier[param] : identifier[default] })
keyword[if] identifier[unit] :
identifier[self] . identifier[_units] . identifier[update] ({ identifier[param] : identifier[unit] })
keyword[if] identifier[descr] :
identifier[self] . identifier[_descr] . identifier[update] ({ identifier[param] : identifier[descr] })
keyword[if] identifier[tomatrix] :
identifier[self] . identifier[_params] . identifier[append] ( identifier[param] )
keyword[if] identifier[nonzero] :
identifier[self] . identifier[_zeros] . identifier[append] ( identifier[param] )
keyword[if] identifier[mandatory] :
identifier[self] . identifier[_mandatory] . identifier[append] ( identifier[param] )
keyword[if] identifier[power] :
identifier[self] . identifier[_powers] . identifier[append] ( identifier[param] )
keyword[if] identifier[voltage] :
identifier[self] . identifier[_voltages] . identifier[append] ( identifier[param] )
keyword[if] identifier[current] :
identifier[self] . identifier[_currents] . identifier[append] ( identifier[param] )
keyword[if] identifier[z] :
identifier[self] . identifier[_z] . identifier[append] ( identifier[param] )
keyword[if] identifier[y] :
identifier[self] . identifier[_y] . identifier[append] ( identifier[param] )
keyword[if] identifier[r] :
identifier[self] . identifier[_r] . identifier[append] ( identifier[param] )
keyword[if] identifier[g] :
identifier[self] . identifier[_g] . identifier[append] ( identifier[param] )
keyword[if] identifier[dccurrent] :
identifier[self] . identifier[_dccurrents] . identifier[append] ( identifier[param] )
keyword[if] identifier[dcvoltage] :
identifier[self] . identifier[_dcvoltages] . identifier[append] ( identifier[param] )
keyword[if] identifier[time] :
identifier[self] . identifier[_times] . identifier[append] ( identifier[param] )
keyword[if] identifier[event_time] :
identifier[self] . identifier[_event_times] . identifier[append] ( identifier[param] ) | def param_define(self, param, default, unit='', descr='', tomatrix=True, nonzero=False, mandatory=False, power=False, voltage=False, current=False, z=False, y=False, r=False, g=False, dccurrent=False, dcvoltage=False, time=False, event_time=False, **kwargs):
"""
Define a parameter in the model
:param tomatrix: convert this parameter list to matrix
:param param: parameter name
:param default: parameter default value
:param unit: parameter unit
:param descr: description
:param nonzero: is non-zero
:param mandatory: is mandatory
:param power: is a power value in the `self.Sn` base
:param voltage: is a voltage value in the `self.Vn` base
:param current: is a current value in the device base
:param z: is an impedance value in the device base
:param y: is an admittance value in the device base
:param r: is a dc resistance value in the device base
:param g: is a dc conductance value in the device base
:param dccurrent: is a dc current value in the device base
:param dcvoltage: is a dc votlage value in the device base
:param time: is a time value in the device base
:param event_time: is a variable for timed event
:type param: str
:type tomatrix: bool
:type default: str, float
:type unit: str
:type descr: str
:type nonzero: bool
:type mandatory: bool
:type power: bool
:type voltage: bool
:type current: bool
:type z: bool
:type y: bool
:type r: bool
:type g: bool
:type dccurrent: bool
:type dcvoltage: bool
:type time: bool
:type event_time: bool
"""
assert param not in self._data
assert param not in self._algebs
assert param not in self._states
assert param not in self._service
self._data.update({param: default})
if unit:
self._units.update({param: unit}) # depends on [control=['if'], data=[]]
if descr:
self._descr.update({param: descr}) # depends on [control=['if'], data=[]]
if tomatrix:
self._params.append(param) # depends on [control=['if'], data=[]]
if nonzero:
self._zeros.append(param) # depends on [control=['if'], data=[]]
if mandatory:
self._mandatory.append(param) # depends on [control=['if'], data=[]]
if power:
self._powers.append(param) # depends on [control=['if'], data=[]]
if voltage:
self._voltages.append(param) # depends on [control=['if'], data=[]]
if current:
self._currents.append(param) # depends on [control=['if'], data=[]]
if z:
self._z.append(param) # depends on [control=['if'], data=[]]
if y:
self._y.append(param) # depends on [control=['if'], data=[]]
if r:
self._r.append(param) # depends on [control=['if'], data=[]]
if g:
self._g.append(param) # depends on [control=['if'], data=[]]
if dccurrent:
self._dccurrents.append(param) # depends on [control=['if'], data=[]]
if dcvoltage:
self._dcvoltages.append(param) # depends on [control=['if'], data=[]]
if time:
self._times.append(param) # depends on [control=['if'], data=[]]
if event_time:
self._event_times.append(param) # depends on [control=['if'], data=[]] |
def structs2records(structs):
"""convert one or more structs and generate dictionaries"""
try:
n = len(structs)
except TypeError:
# no array
yield struct2dict(structs)
# just 1
return
for i in range(n):
struct = structs[i]
yield struct2dict(struct) | def function[structs2records, parameter[structs]]:
constant[convert one or more structs and generate dictionaries]
<ast.Try object at 0x7da1b26a05e0>
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
variable[struct] assign[=] call[name[structs]][name[i]]
<ast.Yield object at 0x7da1b26a2ef0> | keyword[def] identifier[structs2records] ( identifier[structs] ):
literal[string]
keyword[try] :
identifier[n] = identifier[len] ( identifier[structs] )
keyword[except] identifier[TypeError] :
keyword[yield] identifier[struct2dict] ( identifier[structs] )
keyword[return]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[struct] = identifier[structs] [ identifier[i] ]
keyword[yield] identifier[struct2dict] ( identifier[struct] ) | def structs2records(structs):
"""convert one or more structs and generate dictionaries"""
try:
n = len(structs) # depends on [control=['try'], data=[]]
except TypeError:
# no array
yield struct2dict(structs)
# just 1
return # depends on [control=['except'], data=[]]
for i in range(n):
struct = structs[i]
yield struct2dict(struct) # depends on [control=['for'], data=['i']] |
def get_file_hash(file_path, block_size=1024, hasher=None):
"""
Generate hash for given file
:param file_path: Path to file
:type file_path: str
:param block_size: Size of block to be read at once (default: 1024)
:type block_size: int
:param hasher: Use specific hasher, defaults to md5 (default: None)
:type hasher: _hashlib.HASH
:return: Hash of file
:rtype: str
"""
if hasher is None:
hasher = hashlib.md5()
with open(file_path, 'rb') as f:
while True:
buffer = f.read(block_size)
if len(buffer) <= 0:
break
hasher.update(buffer)
return hasher.hexdigest() | def function[get_file_hash, parameter[file_path, block_size, hasher]]:
constant[
Generate hash for given file
:param file_path: Path to file
:type file_path: str
:param block_size: Size of block to be read at once (default: 1024)
:type block_size: int
:param hasher: Use specific hasher, defaults to md5 (default: None)
:type hasher: _hashlib.HASH
:return: Hash of file
:rtype: str
]
if compare[name[hasher] is constant[None]] begin[:]
variable[hasher] assign[=] call[name[hashlib].md5, parameter[]]
with call[name[open], parameter[name[file_path], constant[rb]]] begin[:]
while constant[True] begin[:]
variable[buffer] assign[=] call[name[f].read, parameter[name[block_size]]]
if compare[call[name[len], parameter[name[buffer]]] less_or_equal[<=] constant[0]] begin[:]
break
call[name[hasher].update, parameter[name[buffer]]]
return[call[name[hasher].hexdigest, parameter[]]] | keyword[def] identifier[get_file_hash] ( identifier[file_path] , identifier[block_size] = literal[int] , identifier[hasher] = keyword[None] ):
literal[string]
keyword[if] identifier[hasher] keyword[is] keyword[None] :
identifier[hasher] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
keyword[while] keyword[True] :
identifier[buffer] = identifier[f] . identifier[read] ( identifier[block_size] )
keyword[if] identifier[len] ( identifier[buffer] )<= literal[int] :
keyword[break]
identifier[hasher] . identifier[update] ( identifier[buffer] )
keyword[return] identifier[hasher] . identifier[hexdigest] () | def get_file_hash(file_path, block_size=1024, hasher=None):
"""
Generate hash for given file
:param file_path: Path to file
:type file_path: str
:param block_size: Size of block to be read at once (default: 1024)
:type block_size: int
:param hasher: Use specific hasher, defaults to md5 (default: None)
:type hasher: _hashlib.HASH
:return: Hash of file
:rtype: str
"""
if hasher is None:
hasher = hashlib.md5() # depends on [control=['if'], data=['hasher']]
with open(file_path, 'rb') as f:
while True:
buffer = f.read(block_size)
if len(buffer) <= 0:
break # depends on [control=['if'], data=[]]
hasher.update(buffer) # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['f']]
return hasher.hexdigest() |
def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection) | def function[ensure, parameter[self, connection, func]]:
constant[Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
]
variable[channel] assign[=] constant[None]
while constant[1] begin[:]
<ast.Try object at 0x7da204621c00>
variable[channel] assign[=] call[name[self].connect, parameter[name[connection]]] | keyword[def] identifier[ensure] ( identifier[self] , identifier[connection] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[channel] = keyword[None]
keyword[while] literal[int] :
keyword[try] :
keyword[if] identifier[channel] keyword[is] keyword[None] :
identifier[channel] = identifier[connection] . identifier[channel] ()
keyword[return] identifier[func] ( identifier[channel] ,* identifier[args] ,** identifier[kwargs] ), identifier[channel]
keyword[except] ( identifier[connection] . identifier[connection_errors] , identifier[IOError] ):
identifier[self] . identifier[_call_errback] ()
identifier[channel] = identifier[self] . identifier[connect] ( identifier[connection] ) | def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel() # depends on [control=['if'], data=['channel']]
return (func(channel, *args, **kwargs), channel) # depends on [control=['try'], data=[]]
except (connection.connection_errors, IOError):
self._call_errback() # depends on [control=['except'], data=[]]
channel = self.connect(connection) # depends on [control=['while'], data=[]] |
def get_access_key(self):
"""
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
"""
access_key = self.get_as_nullable_string("access_key")
access_key = access_key if access_key != None else self.get_as_nullable_string("access_key")
return access_key | def function[get_access_key, parameter[self]]:
constant[
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
]
variable[access_key] assign[=] call[name[self].get_as_nullable_string, parameter[constant[access_key]]]
variable[access_key] assign[=] <ast.IfExp object at 0x7da207f9a050>
return[name[access_key]] | keyword[def] identifier[get_access_key] ( identifier[self] ):
literal[string]
identifier[access_key] = identifier[self] . identifier[get_as_nullable_string] ( literal[string] )
identifier[access_key] = identifier[access_key] keyword[if] identifier[access_key] != keyword[None] keyword[else] identifier[self] . identifier[get_as_nullable_string] ( literal[string] )
keyword[return] identifier[access_key] | def get_access_key(self):
"""
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
"""
access_key = self.get_as_nullable_string('access_key')
access_key = access_key if access_key != None else self.get_as_nullable_string('access_key')
return access_key |
def train(self, record):
"""
Incrementally updates the tree with the given sample record.
"""
assert self.data.class_attribute_name in record, \
"The class attribute must be present in the record."
record = record.copy()
self.sample_count += 1
self.tree.train(record) | def function[train, parameter[self, record]]:
constant[
Incrementally updates the tree with the given sample record.
]
assert[compare[name[self].data.class_attribute_name in name[record]]]
variable[record] assign[=] call[name[record].copy, parameter[]]
<ast.AugAssign object at 0x7da1b0f59c90>
call[name[self].tree.train, parameter[name[record]]] | keyword[def] identifier[train] ( identifier[self] , identifier[record] ):
literal[string]
keyword[assert] identifier[self] . identifier[data] . identifier[class_attribute_name] keyword[in] identifier[record] , literal[string]
identifier[record] = identifier[record] . identifier[copy] ()
identifier[self] . identifier[sample_count] += literal[int]
identifier[self] . identifier[tree] . identifier[train] ( identifier[record] ) | def train(self, record):
"""
Incrementally updates the tree with the given sample record.
"""
assert self.data.class_attribute_name in record, 'The class attribute must be present in the record.'
record = record.copy()
self.sample_count += 1
self.tree.train(record) |
def _handle_ssh_callback(self, submission_id, host, port, password):
""" Handles the creation of a remote ssh server """
if host is not None: # ignore late calls (a bit hacky, but...)
obj = {
"ssh_host": host,
"ssh_port": port,
"ssh_password": password
}
self._database.submissions.update_one({"_id": submission_id}, {"$set": obj}) | def function[_handle_ssh_callback, parameter[self, submission_id, host, port, password]]:
constant[ Handles the creation of a remote ssh server ]
if compare[name[host] is_not constant[None]] begin[:]
variable[obj] assign[=] dictionary[[<ast.Constant object at 0x7da1b1736c50>, <ast.Constant object at 0x7da18bc73130>, <ast.Constant object at 0x7da18bc73910>], [<ast.Name object at 0x7da18bc70850>, <ast.Name object at 0x7da18bc72ef0>, <ast.Name object at 0x7da18bc720b0>]]
call[name[self]._database.submissions.update_one, parameter[dictionary[[<ast.Constant object at 0x7da18bc73f70>], [<ast.Name object at 0x7da18bc73010>]], dictionary[[<ast.Constant object at 0x7da18bc70ac0>], [<ast.Name object at 0x7da18bc723b0>]]]] | keyword[def] identifier[_handle_ssh_callback] ( identifier[self] , identifier[submission_id] , identifier[host] , identifier[port] , identifier[password] ):
literal[string]
keyword[if] identifier[host] keyword[is] keyword[not] keyword[None] :
identifier[obj] ={
literal[string] : identifier[host] ,
literal[string] : identifier[port] ,
literal[string] : identifier[password]
}
identifier[self] . identifier[_database] . identifier[submissions] . identifier[update_one] ({ literal[string] : identifier[submission_id] },{ literal[string] : identifier[obj] }) | def _handle_ssh_callback(self, submission_id, host, port, password):
""" Handles the creation of a remote ssh server """
if host is not None: # ignore late calls (a bit hacky, but...)
obj = {'ssh_host': host, 'ssh_port': port, 'ssh_password': password}
self._database.submissions.update_one({'_id': submission_id}, {'$set': obj}) # depends on [control=['if'], data=['host']] |
def createRepo(self):
"""
Creates the repository for all the data we've just downloaded.
"""
repo = datarepo.SqlDataRepository(self.repoPath)
repo.open("w")
repo.initialise()
referenceSet = references.HtslibReferenceSet("GRCh37-subset")
referenceSet.populateFromFile(self.fastaFilePath)
referenceSet.setDescription("Subset of GRCh37 used for demonstration")
referenceSet.setSpeciesFromJson(
'{"id": "9606",'
+ '"term": "Homo sapiens", "source_name": "NCBI"}')
for reference in referenceSet.getReferences():
reference.setSpeciesFromJson(
'{"id": "9606",'
+ '"term": "Homo sapiens", "source_name": "NCBI"}')
reference.setSourceAccessions(
self.accessions[reference.getName()] + ".subset")
repo.insertReferenceSet(referenceSet)
dataset = datasets.Dataset("1kg-p3-subset")
dataset.setDescription("Sample data from 1000 Genomes phase 3")
repo.insertDataset(dataset)
variantSet = variants.HtslibVariantSet(dataset, "mvncall")
variantSet.setReferenceSet(referenceSet)
dataUrls = [vcfFile for vcfFile, _ in self.vcfFilePaths]
indexFiles = [indexFile for _, indexFile in self.vcfFilePaths]
variantSet.populateFromFile(dataUrls, indexFiles)
variantSet.checkConsistency()
repo.insertVariantSet(variantSet)
for sample, (bamFile, indexFile) in zip(
self.samples, self.bamFilePaths):
readGroupSet = reads.HtslibReadGroupSet(dataset, sample)
readGroupSet.populateFromFile(bamFile, indexFile)
readGroupSet.setReferenceSet(referenceSet)
repo.insertReadGroupSet(readGroupSet)
repo.commit()
repo.close()
self.log("Finished creating the repository; summary:\n")
repo.open("r")
repo.printSummary() | def function[createRepo, parameter[self]]:
constant[
Creates the repository for all the data we've just downloaded.
]
variable[repo] assign[=] call[name[datarepo].SqlDataRepository, parameter[name[self].repoPath]]
call[name[repo].open, parameter[constant[w]]]
call[name[repo].initialise, parameter[]]
variable[referenceSet] assign[=] call[name[references].HtslibReferenceSet, parameter[constant[GRCh37-subset]]]
call[name[referenceSet].populateFromFile, parameter[name[self].fastaFilePath]]
call[name[referenceSet].setDescription, parameter[constant[Subset of GRCh37 used for demonstration]]]
call[name[referenceSet].setSpeciesFromJson, parameter[binary_operation[constant[{"id": "9606",] + constant["term": "Homo sapiens", "source_name": "NCBI"}]]]]
for taget[name[reference]] in starred[call[name[referenceSet].getReferences, parameter[]]] begin[:]
call[name[reference].setSpeciesFromJson, parameter[binary_operation[constant[{"id": "9606",] + constant["term": "Homo sapiens", "source_name": "NCBI"}]]]]
call[name[reference].setSourceAccessions, parameter[binary_operation[call[name[self].accessions][call[name[reference].getName, parameter[]]] + constant[.subset]]]]
call[name[repo].insertReferenceSet, parameter[name[referenceSet]]]
variable[dataset] assign[=] call[name[datasets].Dataset, parameter[constant[1kg-p3-subset]]]
call[name[dataset].setDescription, parameter[constant[Sample data from 1000 Genomes phase 3]]]
call[name[repo].insertDataset, parameter[name[dataset]]]
variable[variantSet] assign[=] call[name[variants].HtslibVariantSet, parameter[name[dataset], constant[mvncall]]]
call[name[variantSet].setReferenceSet, parameter[name[referenceSet]]]
variable[dataUrls] assign[=] <ast.ListComp object at 0x7da18bcc9a80>
variable[indexFiles] assign[=] <ast.ListComp object at 0x7da18bcca500>
call[name[variantSet].populateFromFile, parameter[name[dataUrls], name[indexFiles]]]
call[name[variantSet].checkConsistency, parameter[]]
call[name[repo].insertVariantSet, parameter[name[variantSet]]]
for taget[tuple[[<ast.Name object at 0x7da18f812cb0>, <ast.Tuple object at 0x7da18f811bd0>]]] in starred[call[name[zip], parameter[name[self].samples, name[self].bamFilePaths]]] begin[:]
variable[readGroupSet] assign[=] call[name[reads].HtslibReadGroupSet, parameter[name[dataset], name[sample]]]
call[name[readGroupSet].populateFromFile, parameter[name[bamFile], name[indexFile]]]
call[name[readGroupSet].setReferenceSet, parameter[name[referenceSet]]]
call[name[repo].insertReadGroupSet, parameter[name[readGroupSet]]]
call[name[repo].commit, parameter[]]
call[name[repo].close, parameter[]]
call[name[self].log, parameter[constant[Finished creating the repository; summary:
]]]
call[name[repo].open, parameter[constant[r]]]
call[name[repo].printSummary, parameter[]] | keyword[def] identifier[createRepo] ( identifier[self] ):
literal[string]
identifier[repo] = identifier[datarepo] . identifier[SqlDataRepository] ( identifier[self] . identifier[repoPath] )
identifier[repo] . identifier[open] ( literal[string] )
identifier[repo] . identifier[initialise] ()
identifier[referenceSet] = identifier[references] . identifier[HtslibReferenceSet] ( literal[string] )
identifier[referenceSet] . identifier[populateFromFile] ( identifier[self] . identifier[fastaFilePath] )
identifier[referenceSet] . identifier[setDescription] ( literal[string] )
identifier[referenceSet] . identifier[setSpeciesFromJson] (
literal[string]
+ literal[string] )
keyword[for] identifier[reference] keyword[in] identifier[referenceSet] . identifier[getReferences] ():
identifier[reference] . identifier[setSpeciesFromJson] (
literal[string]
+ literal[string] )
identifier[reference] . identifier[setSourceAccessions] (
identifier[self] . identifier[accessions] [ identifier[reference] . identifier[getName] ()]+ literal[string] )
identifier[repo] . identifier[insertReferenceSet] ( identifier[referenceSet] )
identifier[dataset] = identifier[datasets] . identifier[Dataset] ( literal[string] )
identifier[dataset] . identifier[setDescription] ( literal[string] )
identifier[repo] . identifier[insertDataset] ( identifier[dataset] )
identifier[variantSet] = identifier[variants] . identifier[HtslibVariantSet] ( identifier[dataset] , literal[string] )
identifier[variantSet] . identifier[setReferenceSet] ( identifier[referenceSet] )
identifier[dataUrls] =[ identifier[vcfFile] keyword[for] identifier[vcfFile] , identifier[_] keyword[in] identifier[self] . identifier[vcfFilePaths] ]
identifier[indexFiles] =[ identifier[indexFile] keyword[for] identifier[_] , identifier[indexFile] keyword[in] identifier[self] . identifier[vcfFilePaths] ]
identifier[variantSet] . identifier[populateFromFile] ( identifier[dataUrls] , identifier[indexFiles] )
identifier[variantSet] . identifier[checkConsistency] ()
identifier[repo] . identifier[insertVariantSet] ( identifier[variantSet] )
keyword[for] identifier[sample] ,( identifier[bamFile] , identifier[indexFile] ) keyword[in] identifier[zip] (
identifier[self] . identifier[samples] , identifier[self] . identifier[bamFilePaths] ):
identifier[readGroupSet] = identifier[reads] . identifier[HtslibReadGroupSet] ( identifier[dataset] , identifier[sample] )
identifier[readGroupSet] . identifier[populateFromFile] ( identifier[bamFile] , identifier[indexFile] )
identifier[readGroupSet] . identifier[setReferenceSet] ( identifier[referenceSet] )
identifier[repo] . identifier[insertReadGroupSet] ( identifier[readGroupSet] )
identifier[repo] . identifier[commit] ()
identifier[repo] . identifier[close] ()
identifier[self] . identifier[log] ( literal[string] )
identifier[repo] . identifier[open] ( literal[string] )
identifier[repo] . identifier[printSummary] () | def createRepo(self):
"""
Creates the repository for all the data we've just downloaded.
"""
repo = datarepo.SqlDataRepository(self.repoPath)
repo.open('w')
repo.initialise()
referenceSet = references.HtslibReferenceSet('GRCh37-subset')
referenceSet.populateFromFile(self.fastaFilePath)
referenceSet.setDescription('Subset of GRCh37 used for demonstration')
referenceSet.setSpeciesFromJson('{"id": "9606",' + '"term": "Homo sapiens", "source_name": "NCBI"}')
for reference in referenceSet.getReferences():
reference.setSpeciesFromJson('{"id": "9606",' + '"term": "Homo sapiens", "source_name": "NCBI"}')
reference.setSourceAccessions(self.accessions[reference.getName()] + '.subset') # depends on [control=['for'], data=['reference']]
repo.insertReferenceSet(referenceSet)
dataset = datasets.Dataset('1kg-p3-subset')
dataset.setDescription('Sample data from 1000 Genomes phase 3')
repo.insertDataset(dataset)
variantSet = variants.HtslibVariantSet(dataset, 'mvncall')
variantSet.setReferenceSet(referenceSet)
dataUrls = [vcfFile for (vcfFile, _) in self.vcfFilePaths]
indexFiles = [indexFile for (_, indexFile) in self.vcfFilePaths]
variantSet.populateFromFile(dataUrls, indexFiles)
variantSet.checkConsistency()
repo.insertVariantSet(variantSet)
for (sample, (bamFile, indexFile)) in zip(self.samples, self.bamFilePaths):
readGroupSet = reads.HtslibReadGroupSet(dataset, sample)
readGroupSet.populateFromFile(bamFile, indexFile)
readGroupSet.setReferenceSet(referenceSet)
repo.insertReadGroupSet(readGroupSet) # depends on [control=['for'], data=[]]
repo.commit()
repo.close()
self.log('Finished creating the repository; summary:\n')
repo.open('r')
repo.printSummary() |
def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Copying image file from remote filesystem failed"
" with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output)
else:
msg = ("The specified remote_host %s format invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11,
rh=kwargs['remote_host'])
else:
LOG.debug("Remote_host not specified, will copy from local")
try:
shutil.copyfile(source, target)
except Exception as err:
msg = ("Import image from local file system failed"
" with reason %s" % six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12,
err=six.text_type(err)) | def function[image_import, parameter[cls, image_name, url, target]]:
constant[Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
]
variable[source] assign[=] call[name[urlparse].urlparse, parameter[name[url]]].path
if call[name[kwargs]][constant[remote_host]] begin[:]
if compare[constant[@] in call[name[kwargs]][constant[remote_host]]] begin[:]
variable[source_path] assign[=] call[constant[:].join, parameter[list[[<ast.Subscript object at 0x7da20e749720>, <ast.Name object at 0x7da20e74b7f0>]]]]
variable[command] assign[=] call[constant[ ].join, parameter[list[[<ast.Constant object at 0x7da20e74bf10>, <ast.Constant object at 0x7da20e74bf40>, <ast.Attribute object at 0x7da20e749b40>, <ast.Constant object at 0x7da20e74beb0>, <ast.Constant object at 0x7da20e74b790>, <ast.Name object at 0x7da20e74b850>, <ast.Name object at 0x7da20e748640>]]]]
<ast.Tuple object at 0x7da20e74b370> assign[=] call[name[zvmutils].execute, parameter[name[command]]]
if name[rc] begin[:]
variable[msg] assign[=] binary_operation[constant[Copying image file from remote filesystem failed with reason: %s] <ast.Mod object at 0x7da2590d6920> name[output]]
call[name[LOG].error, parameter[name[msg]]]
<ast.Raise object at 0x7da204345ff0> | keyword[def] identifier[image_import] ( identifier[cls] , identifier[image_name] , identifier[url] , identifier[target] ,** identifier[kwargs] ):
literal[string]
identifier[source] = identifier[urlparse] . identifier[urlparse] ( identifier[url] ). identifier[path]
keyword[if] identifier[kwargs] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[kwargs] [ literal[string] ]:
identifier[source_path] = literal[string] . identifier[join] ([ identifier[kwargs] [ literal[string] ], identifier[source] ])
identifier[command] = literal[string] . identifier[join] ([ literal[string] ,
literal[string] , identifier[CONF] . identifier[zvm] . identifier[remotehost_sshd_port] ,
literal[string] ,
literal[string] , identifier[source_path] , identifier[target] ])
( identifier[rc] , identifier[output] )= identifier[zvmutils] . identifier[execute] ( identifier[command] )
keyword[if] identifier[rc] :
identifier[msg] =( literal[string]
literal[string] % identifier[output] )
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] , identifier[err] = identifier[output] )
keyword[else] :
identifier[msg] =( literal[string] %
identifier[kwargs] [ literal[string] ])
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] ,
identifier[rh] = identifier[kwargs] [ literal[string] ])
keyword[else] :
identifier[LOG] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[shutil] . identifier[copyfile] ( identifier[source] , identifier[target] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[msg] =( literal[string]
literal[string] % identifier[six] . identifier[text_type] ( identifier[err] ))
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] ,
identifier[err] = identifier[six] . identifier[text_type] ( identifier[err] )) | def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp', '-P', CONF.zvm.remotehost_sshd_port, '-o StrictHostKeyChecking=no', '-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = 'Copying image file from remote filesystem failed with reason: %s' % output
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
msg = 'The specified remote_host %s format invalid' % kwargs['remote_host']
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11, rh=kwargs['remote_host']) # depends on [control=['if'], data=[]]
else:
LOG.debug('Remote_host not specified, will copy from local')
try:
shutil.copyfile(source, target) # depends on [control=['try'], data=[]]
except Exception as err:
msg = 'Import image from local file system failed with reason %s' % six.text_type(err)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12, err=six.text_type(err)) # depends on [control=['except'], data=['err']] |
def get_diadefs(self, project, linker):
"""Get the diagrams configuration data
:param project:The pyreverse project
:type project: pyreverse.utils.Project
:param linker: The linker
:type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:returns: The list of diagram definitions
:rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
"""
# read and interpret diagram definitions (Diadefs)
diagrams = []
generator = ClassDiadefGenerator(linker, self)
for klass in self.config.classes:
diagrams.append(generator.class_diagram(project, klass))
if not diagrams:
diagrams = DefaultDiadefGenerator(linker, self).visit(project)
for diagram in diagrams:
diagram.extract_relationships()
return diagrams | def function[get_diadefs, parameter[self, project, linker]]:
constant[Get the diagrams configuration data
:param project:The pyreverse project
:type project: pyreverse.utils.Project
:param linker: The linker
:type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:returns: The list of diagram definitions
:rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
]
variable[diagrams] assign[=] list[[]]
variable[generator] assign[=] call[name[ClassDiadefGenerator], parameter[name[linker], name[self]]]
for taget[name[klass]] in starred[name[self].config.classes] begin[:]
call[name[diagrams].append, parameter[call[name[generator].class_diagram, parameter[name[project], name[klass]]]]]
if <ast.UnaryOp object at 0x7da1b03a4250> begin[:]
variable[diagrams] assign[=] call[call[name[DefaultDiadefGenerator], parameter[name[linker], name[self]]].visit, parameter[name[project]]]
for taget[name[diagram]] in starred[name[diagrams]] begin[:]
call[name[diagram].extract_relationships, parameter[]]
return[name[diagrams]] | keyword[def] identifier[get_diadefs] ( identifier[self] , identifier[project] , identifier[linker] ):
literal[string]
identifier[diagrams] =[]
identifier[generator] = identifier[ClassDiadefGenerator] ( identifier[linker] , identifier[self] )
keyword[for] identifier[klass] keyword[in] identifier[self] . identifier[config] . identifier[classes] :
identifier[diagrams] . identifier[append] ( identifier[generator] . identifier[class_diagram] ( identifier[project] , identifier[klass] ))
keyword[if] keyword[not] identifier[diagrams] :
identifier[diagrams] = identifier[DefaultDiadefGenerator] ( identifier[linker] , identifier[self] ). identifier[visit] ( identifier[project] )
keyword[for] identifier[diagram] keyword[in] identifier[diagrams] :
identifier[diagram] . identifier[extract_relationships] ()
keyword[return] identifier[diagrams] | def get_diadefs(self, project, linker):
"""Get the diagrams configuration data
:param project:The pyreverse project
:type project: pyreverse.utils.Project
:param linker: The linker
:type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:returns: The list of diagram definitions
:rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
"""
# read and interpret diagram definitions (Diadefs)
diagrams = []
generator = ClassDiadefGenerator(linker, self)
for klass in self.config.classes:
diagrams.append(generator.class_diagram(project, klass)) # depends on [control=['for'], data=['klass']]
if not diagrams:
diagrams = DefaultDiadefGenerator(linker, self).visit(project) # depends on [control=['if'], data=[]]
for diagram in diagrams:
diagram.extract_relationships() # depends on [control=['for'], data=['diagram']]
return diagrams |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.