code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def basis_function_ders_one(degree, knot_vector, span, knot, order):
""" Computes the derivative of one basis functions for a single parameter.
Implementation of Algorithm A2.5 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot_vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:param order: order of the derivative
:type order: int
:return: basis function derivatives
:rtype: list
"""
ders = [0.0 for _ in range(0, order + 1)]
# Knot is outside of span range
if (knot < knot_vector[span]) or (knot >= knot_vector[span + degree + 1]):
for k in range(0, order + 1):
ders[k] = 0.0
return ders
N = [[0.0 for _ in range(0, degree + 1)] for _ in range(0, degree + 1)]
# Initializing the zeroth degree basis functions
for j in range(0, degree + 1):
if knot_vector[span + j] <= knot < knot_vector[span + j + 1]:
N[j][0] = 1.0
# Computing all basis functions values for all degrees inside the span
for k in range(1, degree + 1):
saved = 0.0
# Detecting zeros saves computations
if N[0][k - 1] != 0.0:
saved = ((knot - knot_vector[span]) * N[0][k - 1]) / (knot_vector[span + k] - knot_vector[span])
for j in range(0, degree - k + 1):
Uleft = knot_vector[span + j + 1]
Uright = knot_vector[span + j + k + 1]
# Zero detection
if N[j + 1][k - 1] == 0.0:
N[j][k] = saved
saved = 0.0
else:
temp = N[j + 1][k - 1] / (Uright - Uleft)
N[j][k] = saved + (Uright - knot) * temp
saved = (knot - Uleft) * temp
# The basis function value is the zeroth derivative
ders[0] = N[0][degree]
# Computing the basis functions derivatives
for k in range(1, order + 1):
# Buffer for computing the kth derivative
ND = [0.0 for _ in range(0, k + 1)]
# Basis functions values used for the derivative
for j in range(0, k + 1):
ND[j] = N[j][degree - k]
# Computing derivatives used for the kth basis function derivative
# Derivative order for the k-th basis function derivative
for jj in range(1, k + 1):
if ND[0] == 0.0:
saved = 0.0
else:
saved = ND[0] / (knot_vector[span + degree - k + jj] - knot_vector[span])
# Index of the Basis function derivatives
for j in range(0, k - jj + 1):
Uleft = knot_vector[span + j + 1]
# Wrong in The NURBS Book: -k is missing.
# The right expression is the same as for saved with the added j offset
Uright = knot_vector[span + j + degree - k + jj + 1]
if ND[j + 1] == 0.0:
ND[j] = (degree - k + jj) * saved
saved = 0.0
else:
temp = ND[j + 1] / (Uright - Uleft)
ND[j] = (degree - k + jj) * (saved - temp)
saved = temp
ders[k] = ND[0]
return ders | def function[basis_function_ders_one, parameter[degree, knot_vector, span, knot, order]]:
constant[ Computes the derivative of one basis functions for a single parameter.
Implementation of Algorithm A2.5 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot_vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:param order: order of the derivative
:type order: int
:return: basis function derivatives
:rtype: list
]
variable[ders] assign[=] <ast.ListComp object at 0x7da1b17d4f70>
if <ast.BoolOp object at 0x7da1b17d5510> begin[:]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], binary_operation[name[order] + constant[1]]]]] begin[:]
call[name[ders]][name[k]] assign[=] constant[0.0]
return[name[ders]]
variable[N] assign[=] <ast.ListComp object at 0x7da1b1721120>
for taget[name[j]] in starred[call[name[range], parameter[constant[0], binary_operation[name[degree] + constant[1]]]]] begin[:]
if compare[call[name[knot_vector]][binary_operation[name[span] + name[j]]] less_or_equal[<=] name[knot]] begin[:]
call[call[name[N]][name[j]]][constant[0]] assign[=] constant[1.0]
for taget[name[k]] in starred[call[name[range], parameter[constant[1], binary_operation[name[degree] + constant[1]]]]] begin[:]
variable[saved] assign[=] constant[0.0]
if compare[call[call[name[N]][constant[0]]][binary_operation[name[k] - constant[1]]] not_equal[!=] constant[0.0]] begin[:]
variable[saved] assign[=] binary_operation[binary_operation[binary_operation[name[knot] - call[name[knot_vector]][name[span]]] * call[call[name[N]][constant[0]]][binary_operation[name[k] - constant[1]]]] / binary_operation[call[name[knot_vector]][binary_operation[name[span] + name[k]]] - call[name[knot_vector]][name[span]]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], binary_operation[binary_operation[name[degree] - name[k]] + constant[1]]]]] begin[:]
variable[Uleft] assign[=] call[name[knot_vector]][binary_operation[binary_operation[name[span] + name[j]] + constant[1]]]
variable[Uright] assign[=] call[name[knot_vector]][binary_operation[binary_operation[binary_operation[name[span] + name[j]] + name[k]] + constant[1]]]
if compare[call[call[name[N]][binary_operation[name[j] + constant[1]]]][binary_operation[name[k] - constant[1]]] equal[==] constant[0.0]] begin[:]
call[call[name[N]][name[j]]][name[k]] assign[=] name[saved]
variable[saved] assign[=] constant[0.0]
call[name[ders]][constant[0]] assign[=] call[call[name[N]][constant[0]]][name[degree]]
for taget[name[k]] in starred[call[name[range], parameter[constant[1], binary_operation[name[order] + constant[1]]]]] begin[:]
variable[ND] assign[=] <ast.ListComp object at 0x7da1b16328f0>
for taget[name[j]] in starred[call[name[range], parameter[constant[0], binary_operation[name[k] + constant[1]]]]] begin[:]
call[name[ND]][name[j]] assign[=] call[call[name[N]][name[j]]][binary_operation[name[degree] - name[k]]]
for taget[name[jj]] in starred[call[name[range], parameter[constant[1], binary_operation[name[k] + constant[1]]]]] begin[:]
if compare[call[name[ND]][constant[0]] equal[==] constant[0.0]] begin[:]
variable[saved] assign[=] constant[0.0]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], binary_operation[binary_operation[name[k] - name[jj]] + constant[1]]]]] begin[:]
variable[Uleft] assign[=] call[name[knot_vector]][binary_operation[binary_operation[name[span] + name[j]] + constant[1]]]
variable[Uright] assign[=] call[name[knot_vector]][binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[span] + name[j]] + name[degree]] - name[k]] + name[jj]] + constant[1]]]
if compare[call[name[ND]][binary_operation[name[j] + constant[1]]] equal[==] constant[0.0]] begin[:]
call[name[ND]][name[j]] assign[=] binary_operation[binary_operation[binary_operation[name[degree] - name[k]] + name[jj]] * name[saved]]
variable[saved] assign[=] constant[0.0]
call[name[ders]][name[k]] assign[=] call[name[ND]][constant[0]]
return[name[ders]] | keyword[def] identifier[basis_function_ders_one] ( identifier[degree] , identifier[knot_vector] , identifier[span] , identifier[knot] , identifier[order] ):
literal[string]
identifier[ders] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[order] + literal[int] )]
keyword[if] ( identifier[knot] < identifier[knot_vector] [ identifier[span] ]) keyword[or] ( identifier[knot] >= identifier[knot_vector] [ identifier[span] + identifier[degree] + literal[int] ]):
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[order] + literal[int] ):
identifier[ders] [ identifier[k] ]= literal[int]
keyword[return] identifier[ders]
identifier[N] =[[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[degree] + literal[int] )] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[degree] + literal[int] )]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[degree] + literal[int] ):
keyword[if] identifier[knot_vector] [ identifier[span] + identifier[j] ]<= identifier[knot] < identifier[knot_vector] [ identifier[span] + identifier[j] + literal[int] ]:
identifier[N] [ identifier[j] ][ literal[int] ]= literal[int]
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[degree] + literal[int] ):
identifier[saved] = literal[int]
keyword[if] identifier[N] [ literal[int] ][ identifier[k] - literal[int] ]!= literal[int] :
identifier[saved] =(( identifier[knot] - identifier[knot_vector] [ identifier[span] ])* identifier[N] [ literal[int] ][ identifier[k] - literal[int] ])/( identifier[knot_vector] [ identifier[span] + identifier[k] ]- identifier[knot_vector] [ identifier[span] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[degree] - identifier[k] + literal[int] ):
identifier[Uleft] = identifier[knot_vector] [ identifier[span] + identifier[j] + literal[int] ]
identifier[Uright] = identifier[knot_vector] [ identifier[span] + identifier[j] + identifier[k] + literal[int] ]
keyword[if] identifier[N] [ identifier[j] + literal[int] ][ identifier[k] - literal[int] ]== literal[int] :
identifier[N] [ identifier[j] ][ identifier[k] ]= identifier[saved]
identifier[saved] = literal[int]
keyword[else] :
identifier[temp] = identifier[N] [ identifier[j] + literal[int] ][ identifier[k] - literal[int] ]/( identifier[Uright] - identifier[Uleft] )
identifier[N] [ identifier[j] ][ identifier[k] ]= identifier[saved] +( identifier[Uright] - identifier[knot] )* identifier[temp]
identifier[saved] =( identifier[knot] - identifier[Uleft] )* identifier[temp]
identifier[ders] [ literal[int] ]= identifier[N] [ literal[int] ][ identifier[degree] ]
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[order] + literal[int] ):
identifier[ND] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[k] + literal[int] )]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[k] + literal[int] ):
identifier[ND] [ identifier[j] ]= identifier[N] [ identifier[j] ][ identifier[degree] - identifier[k] ]
keyword[for] identifier[jj] keyword[in] identifier[range] ( literal[int] , identifier[k] + literal[int] ):
keyword[if] identifier[ND] [ literal[int] ]== literal[int] :
identifier[saved] = literal[int]
keyword[else] :
identifier[saved] = identifier[ND] [ literal[int] ]/( identifier[knot_vector] [ identifier[span] + identifier[degree] - identifier[k] + identifier[jj] ]- identifier[knot_vector] [ identifier[span] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[k] - identifier[jj] + literal[int] ):
identifier[Uleft] = identifier[knot_vector] [ identifier[span] + identifier[j] + literal[int] ]
identifier[Uright] = identifier[knot_vector] [ identifier[span] + identifier[j] + identifier[degree] - identifier[k] + identifier[jj] + literal[int] ]
keyword[if] identifier[ND] [ identifier[j] + literal[int] ]== literal[int] :
identifier[ND] [ identifier[j] ]=( identifier[degree] - identifier[k] + identifier[jj] )* identifier[saved]
identifier[saved] = literal[int]
keyword[else] :
identifier[temp] = identifier[ND] [ identifier[j] + literal[int] ]/( identifier[Uright] - identifier[Uleft] )
identifier[ND] [ identifier[j] ]=( identifier[degree] - identifier[k] + identifier[jj] )*( identifier[saved] - identifier[temp] )
identifier[saved] = identifier[temp]
identifier[ders] [ identifier[k] ]= identifier[ND] [ literal[int] ]
keyword[return] identifier[ders] | def basis_function_ders_one(degree, knot_vector, span, knot, order):
""" Computes the derivative of one basis functions for a single parameter.
Implementation of Algorithm A2.5 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot_vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:param order: order of the derivative
:type order: int
:return: basis function derivatives
:rtype: list
"""
ders = [0.0 for _ in range(0, order + 1)]
# Knot is outside of span range
if knot < knot_vector[span] or knot >= knot_vector[span + degree + 1]:
for k in range(0, order + 1):
ders[k] = 0.0 # depends on [control=['for'], data=['k']]
return ders # depends on [control=['if'], data=[]]
N = [[0.0 for _ in range(0, degree + 1)] for _ in range(0, degree + 1)]
# Initializing the zeroth degree basis functions
for j in range(0, degree + 1):
if knot_vector[span + j] <= knot < knot_vector[span + j + 1]:
N[j][0] = 1.0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
# Computing all basis functions values for all degrees inside the span
for k in range(1, degree + 1):
saved = 0.0
# Detecting zeros saves computations
if N[0][k - 1] != 0.0:
saved = (knot - knot_vector[span]) * N[0][k - 1] / (knot_vector[span + k] - knot_vector[span]) # depends on [control=['if'], data=[]]
for j in range(0, degree - k + 1):
Uleft = knot_vector[span + j + 1]
Uright = knot_vector[span + j + k + 1]
# Zero detection
if N[j + 1][k - 1] == 0.0:
N[j][k] = saved
saved = 0.0 # depends on [control=['if'], data=[]]
else:
temp = N[j + 1][k - 1] / (Uright - Uleft)
N[j][k] = saved + (Uright - knot) * temp
saved = (knot - Uleft) * temp # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['k']]
# The basis function value is the zeroth derivative
ders[0] = N[0][degree]
# Computing the basis functions derivatives
for k in range(1, order + 1):
# Buffer for computing the kth derivative
ND = [0.0 for _ in range(0, k + 1)]
# Basis functions values used for the derivative
for j in range(0, k + 1):
ND[j] = N[j][degree - k] # depends on [control=['for'], data=['j']]
# Computing derivatives used for the kth basis function derivative
# Derivative order for the k-th basis function derivative
for jj in range(1, k + 1):
if ND[0] == 0.0:
saved = 0.0 # depends on [control=['if'], data=[]]
else:
saved = ND[0] / (knot_vector[span + degree - k + jj] - knot_vector[span])
# Index of the Basis function derivatives
for j in range(0, k - jj + 1):
Uleft = knot_vector[span + j + 1]
# Wrong in The NURBS Book: -k is missing.
# The right expression is the same as for saved with the added j offset
Uright = knot_vector[span + j + degree - k + jj + 1]
if ND[j + 1] == 0.0:
ND[j] = (degree - k + jj) * saved
saved = 0.0 # depends on [control=['if'], data=[]]
else:
temp = ND[j + 1] / (Uright - Uleft)
ND[j] = (degree - k + jj) * (saved - temp)
saved = temp # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['jj']]
ders[k] = ND[0] # depends on [control=['for'], data=['k']]
return ders |
def delall(self, key):
"""Delete all tags of a given kind; see getall."""
if key in self:
del(self[key])
else:
key = key + ":"
for k in self.keys():
if k.startswith(key):
del(self[k]) | def function[delall, parameter[self, key]]:
constant[Delete all tags of a given kind; see getall.]
if compare[name[key] in name[self]] begin[:]
<ast.Delete object at 0x7da1b26af0d0> | keyword[def] identifier[delall] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[self] :
keyword[del] ( identifier[self] [ identifier[key] ])
keyword[else] :
identifier[key] = identifier[key] + literal[string]
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[keys] ():
keyword[if] identifier[k] . identifier[startswith] ( identifier[key] ):
keyword[del] ( identifier[self] [ identifier[k] ]) | def delall(self, key):
"""Delete all tags of a given kind; see getall."""
if key in self:
del self[key] # depends on [control=['if'], data=['key', 'self']]
else:
key = key + ':'
for k in self.keys():
if k.startswith(key):
del self[k] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] |
def _find_stars(data, kernel, threshold_eff, min_separation=None,
mask=None, exclude_border=False):
"""
Find stars in an image.
Parameters
----------
data : 2D array_like
The 2D array of the image.
kernel : `_StarFinderKernel`
The convolution kernel.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold input to the star finder class
multiplied by the kernel relerr.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when searching for stars.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by IRAF's `DAOFIND`_ and
`starfind`_ tasks.
Returns
-------
objects : list of `_StarCutout`
A list of `_StarCutout` objects containing the image cutout for
each source.
.. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
.. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
"""
convolved_data = filter_data(data, kernel.data, mode='constant',
fill_value=0.0, check_normalization=False)
# define a local footprint for the peak finder
if min_separation is None: # daofind
footprint = kernel.mask.astype(np.bool)
else:
# define a circular footprint
idx = np.arange(-min_separation, min_separation + 1)
xx, yy = np.meshgrid(idx, idx)
footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int)
# pad the data and convolved image by the kernel x/y radius to allow
# for detections near the edges
if not exclude_border:
ypad = kernel.yradius
xpad = kernel.xradius
pad = ((ypad, ypad), (xpad, xpad))
# mode must be a string for numpy < 0.11
# (see https://github.com/numpy/numpy/issues/7112)
mode = str('constant')
data = np.pad(data, pad, mode=mode, constant_values=[0.])
if mask is not None:
mask = np.pad(mask, pad, mode=mode, constant_values=[0.])
convolved_data = np.pad(convolved_data, pad, mode=mode,
constant_values=[0.])
# find local peaks in the convolved data
with warnings.catch_warnings():
# suppress any NoDetectionsWarning from find_peaks
warnings.filterwarnings('ignore', category=NoDetectionsWarning)
tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint,
mask=mask)
if tbl is None:
return None
coords = np.transpose([tbl['y_peak'], tbl['x_peak']])
star_cutouts = []
for (ypeak, xpeak) in coords:
# now extract the object from the data, centered on the peak
# pixel in the convolved image, with the same size as the kernel
x0 = xpeak - kernel.xradius
x1 = xpeak + kernel.xradius + 1
y0 = ypeak - kernel.yradius
y1 = ypeak + kernel.yradius + 1
if x0 < 0 or x1 > data.shape[1]:
continue # pragma: no cover
if y0 < 0 or y1 > data.shape[0]:
continue # pragma: no cover
slices = (slice(y0, y1), slice(x0, x1))
data_cutout = data[slices]
convdata_cutout = convolved_data[slices]
# correct pixel values for the previous image padding
if not exclude_border:
x0 -= kernel.xradius
x1 -= kernel.xradius
y0 -= kernel.yradius
y1 -= kernel.yradius
xpeak -= kernel.xradius
ypeak -= kernel.yradius
slices = (slice(y0, y1), slice(x0, x1))
star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices,
xpeak, ypeak, kernel, threshold_eff))
return star_cutouts | def function[_find_stars, parameter[data, kernel, threshold_eff, min_separation, mask, exclude_border]]:
constant[
Find stars in an image.
Parameters
----------
data : 2D array_like
The 2D array of the image.
kernel : `_StarFinderKernel`
The convolution kernel.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold input to the star finder class
multiplied by the kernel relerr.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when searching for stars.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by IRAF's `DAOFIND`_ and
`starfind`_ tasks.
Returns
-------
objects : list of `_StarCutout`
A list of `_StarCutout` objects containing the image cutout for
each source.
.. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
.. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
]
variable[convolved_data] assign[=] call[name[filter_data], parameter[name[data], name[kernel].data]]
if compare[name[min_separation] is constant[None]] begin[:]
variable[footprint] assign[=] call[name[kernel].mask.astype, parameter[name[np].bool]]
if <ast.UnaryOp object at 0x7da1b11397e0> begin[:]
variable[ypad] assign[=] name[kernel].yradius
variable[xpad] assign[=] name[kernel].xradius
variable[pad] assign[=] tuple[[<ast.Tuple object at 0x7da1b1139720>, <ast.Tuple object at 0x7da1b1138550>]]
variable[mode] assign[=] call[name[str], parameter[constant[constant]]]
variable[data] assign[=] call[name[np].pad, parameter[name[data], name[pad]]]
if compare[name[mask] is_not constant[None]] begin[:]
variable[mask] assign[=] call[name[np].pad, parameter[name[mask], name[pad]]]
variable[convolved_data] assign[=] call[name[np].pad, parameter[name[convolved_data], name[pad]]]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].filterwarnings, parameter[constant[ignore]]]
variable[tbl] assign[=] call[name[find_peaks], parameter[name[convolved_data], name[threshold_eff]]]
if compare[name[tbl] is constant[None]] begin[:]
return[constant[None]]
variable[coords] assign[=] call[name[np].transpose, parameter[list[[<ast.Subscript object at 0x7da1b12f06d0>, <ast.Subscript object at 0x7da1b12f1f00>]]]]
variable[star_cutouts] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b12f03a0>, <ast.Name object at 0x7da1b12f38b0>]]] in starred[name[coords]] begin[:]
variable[x0] assign[=] binary_operation[name[xpeak] - name[kernel].xradius]
variable[x1] assign[=] binary_operation[binary_operation[name[xpeak] + name[kernel].xradius] + constant[1]]
variable[y0] assign[=] binary_operation[name[ypeak] - name[kernel].yradius]
variable[y1] assign[=] binary_operation[binary_operation[name[ypeak] + name[kernel].yradius] + constant[1]]
if <ast.BoolOp object at 0x7da1b11a7820> begin[:]
continue
if <ast.BoolOp object at 0x7da1b11a4070> begin[:]
continue
variable[slices] assign[=] tuple[[<ast.Call object at 0x7da1b113f0a0>, <ast.Call object at 0x7da1b113eef0>]]
variable[data_cutout] assign[=] call[name[data]][name[slices]]
variable[convdata_cutout] assign[=] call[name[convolved_data]][name[slices]]
if <ast.UnaryOp object at 0x7da1b113ddb0> begin[:]
<ast.AugAssign object at 0x7da1b113d030>
<ast.AugAssign object at 0x7da1b113c280>
<ast.AugAssign object at 0x7da1b113e890>
<ast.AugAssign object at 0x7da1b113c430>
<ast.AugAssign object at 0x7da1b113db70>
<ast.AugAssign object at 0x7da1b113ea10>
variable[slices] assign[=] tuple[[<ast.Call object at 0x7da1b113df60>, <ast.Call object at 0x7da1b113ca60>]]
call[name[star_cutouts].append, parameter[call[name[_StarCutout], parameter[name[data_cutout], name[convdata_cutout], name[slices], name[xpeak], name[ypeak], name[kernel], name[threshold_eff]]]]]
return[name[star_cutouts]] | keyword[def] identifier[_find_stars] ( identifier[data] , identifier[kernel] , identifier[threshold_eff] , identifier[min_separation] = keyword[None] ,
identifier[mask] = keyword[None] , identifier[exclude_border] = keyword[False] ):
literal[string]
identifier[convolved_data] = identifier[filter_data] ( identifier[data] , identifier[kernel] . identifier[data] , identifier[mode] = literal[string] ,
identifier[fill_value] = literal[int] , identifier[check_normalization] = keyword[False] )
keyword[if] identifier[min_separation] keyword[is] keyword[None] :
identifier[footprint] = identifier[kernel] . identifier[mask] . identifier[astype] ( identifier[np] . identifier[bool] )
keyword[else] :
identifier[idx] = identifier[np] . identifier[arange] (- identifier[min_separation] , identifier[min_separation] + literal[int] )
identifier[xx] , identifier[yy] = identifier[np] . identifier[meshgrid] ( identifier[idx] , identifier[idx] )
identifier[footprint] = identifier[np] . identifier[array] (( identifier[xx] ** literal[int] + identifier[yy] ** literal[int] )<= identifier[min_separation] ** literal[int] , identifier[dtype] = identifier[int] )
keyword[if] keyword[not] identifier[exclude_border] :
identifier[ypad] = identifier[kernel] . identifier[yradius]
identifier[xpad] = identifier[kernel] . identifier[xradius]
identifier[pad] =(( identifier[ypad] , identifier[ypad] ),( identifier[xpad] , identifier[xpad] ))
identifier[mode] = identifier[str] ( literal[string] )
identifier[data] = identifier[np] . identifier[pad] ( identifier[data] , identifier[pad] , identifier[mode] = identifier[mode] , identifier[constant_values] =[ literal[int] ])
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[mask] = identifier[np] . identifier[pad] ( identifier[mask] , identifier[pad] , identifier[mode] = identifier[mode] , identifier[constant_values] =[ literal[int] ])
identifier[convolved_data] = identifier[np] . identifier[pad] ( identifier[convolved_data] , identifier[pad] , identifier[mode] = identifier[mode] ,
identifier[constant_values] =[ literal[int] ])
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[filterwarnings] ( literal[string] , identifier[category] = identifier[NoDetectionsWarning] )
identifier[tbl] = identifier[find_peaks] ( identifier[convolved_data] , identifier[threshold_eff] , identifier[footprint] = identifier[footprint] ,
identifier[mask] = identifier[mask] )
keyword[if] identifier[tbl] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[coords] = identifier[np] . identifier[transpose] ([ identifier[tbl] [ literal[string] ], identifier[tbl] [ literal[string] ]])
identifier[star_cutouts] =[]
keyword[for] ( identifier[ypeak] , identifier[xpeak] ) keyword[in] identifier[coords] :
identifier[x0] = identifier[xpeak] - identifier[kernel] . identifier[xradius]
identifier[x1] = identifier[xpeak] + identifier[kernel] . identifier[xradius] + literal[int]
identifier[y0] = identifier[ypeak] - identifier[kernel] . identifier[yradius]
identifier[y1] = identifier[ypeak] + identifier[kernel] . identifier[yradius] + literal[int]
keyword[if] identifier[x0] < literal[int] keyword[or] identifier[x1] > identifier[data] . identifier[shape] [ literal[int] ]:
keyword[continue]
keyword[if] identifier[y0] < literal[int] keyword[or] identifier[y1] > identifier[data] . identifier[shape] [ literal[int] ]:
keyword[continue]
identifier[slices] =( identifier[slice] ( identifier[y0] , identifier[y1] ), identifier[slice] ( identifier[x0] , identifier[x1] ))
identifier[data_cutout] = identifier[data] [ identifier[slices] ]
identifier[convdata_cutout] = identifier[convolved_data] [ identifier[slices] ]
keyword[if] keyword[not] identifier[exclude_border] :
identifier[x0] -= identifier[kernel] . identifier[xradius]
identifier[x1] -= identifier[kernel] . identifier[xradius]
identifier[y0] -= identifier[kernel] . identifier[yradius]
identifier[y1] -= identifier[kernel] . identifier[yradius]
identifier[xpeak] -= identifier[kernel] . identifier[xradius]
identifier[ypeak] -= identifier[kernel] . identifier[yradius]
identifier[slices] =( identifier[slice] ( identifier[y0] , identifier[y1] ), identifier[slice] ( identifier[x0] , identifier[x1] ))
identifier[star_cutouts] . identifier[append] ( identifier[_StarCutout] ( identifier[data_cutout] , identifier[convdata_cutout] , identifier[slices] ,
identifier[xpeak] , identifier[ypeak] , identifier[kernel] , identifier[threshold_eff] ))
keyword[return] identifier[star_cutouts] | def _find_stars(data, kernel, threshold_eff, min_separation=None, mask=None, exclude_border=False):
"""
Find stars in an image.
Parameters
----------
data : 2D array_like
The 2D array of the image.
kernel : `_StarFinderKernel`
The convolution kernel.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold input to the star finder class
multiplied by the kernel relerr.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when searching for stars.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by IRAF's `DAOFIND`_ and
`starfind`_ tasks.
Returns
-------
objects : list of `_StarCutout`
A list of `_StarCutout` objects containing the image cutout for
each source.
.. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
.. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
"""
convolved_data = filter_data(data, kernel.data, mode='constant', fill_value=0.0, check_normalization=False)
# define a local footprint for the peak finder
if min_separation is None: # daofind
footprint = kernel.mask.astype(np.bool) # depends on [control=['if'], data=[]]
else:
# define a circular footprint
idx = np.arange(-min_separation, min_separation + 1)
(xx, yy) = np.meshgrid(idx, idx)
footprint = np.array(xx ** 2 + yy ** 2 <= min_separation ** 2, dtype=int)
# pad the data and convolved image by the kernel x/y radius to allow
# for detections near the edges
if not exclude_border:
ypad = kernel.yradius
xpad = kernel.xradius
pad = ((ypad, ypad), (xpad, xpad))
# mode must be a string for numpy < 0.11
# (see https://github.com/numpy/numpy/issues/7112)
mode = str('constant')
data = np.pad(data, pad, mode=mode, constant_values=[0.0])
if mask is not None:
mask = np.pad(mask, pad, mode=mode, constant_values=[0.0]) # depends on [control=['if'], data=['mask']]
convolved_data = np.pad(convolved_data, pad, mode=mode, constant_values=[0.0]) # depends on [control=['if'], data=[]]
# find local peaks in the convolved data
with warnings.catch_warnings():
# suppress any NoDetectionsWarning from find_peaks
warnings.filterwarnings('ignore', category=NoDetectionsWarning)
tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint, mask=mask) # depends on [control=['with'], data=[]]
if tbl is None:
return None # depends on [control=['if'], data=[]]
coords = np.transpose([tbl['y_peak'], tbl['x_peak']])
star_cutouts = []
for (ypeak, xpeak) in coords:
# now extract the object from the data, centered on the peak
# pixel in the convolved image, with the same size as the kernel
x0 = xpeak - kernel.xradius
x1 = xpeak + kernel.xradius + 1
y0 = ypeak - kernel.yradius
y1 = ypeak + kernel.yradius + 1
if x0 < 0 or x1 > data.shape[1]:
continue # pragma: no cover # depends on [control=['if'], data=[]]
if y0 < 0 or y1 > data.shape[0]:
continue # pragma: no cover # depends on [control=['if'], data=[]]
slices = (slice(y0, y1), slice(x0, x1))
data_cutout = data[slices]
convdata_cutout = convolved_data[slices]
# correct pixel values for the previous image padding
if not exclude_border:
x0 -= kernel.xradius
x1 -= kernel.xradius
y0 -= kernel.yradius
y1 -= kernel.yradius
xpeak -= kernel.xradius
ypeak -= kernel.yradius
slices = (slice(y0, y1), slice(x0, x1)) # depends on [control=['if'], data=[]]
star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices, xpeak, ypeak, kernel, threshold_eff)) # depends on [control=['for'], data=[]]
return star_cutouts |
def _get_heading_level(self, element):
"""
Returns the level of heading.
:param element: The heading.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The level of heading.
:rtype: int
"""
# pylint: disable=no-self-use
tag = element.get_tag_name()
if tag == 'H1':
return 1
elif tag == 'H2':
return 2
elif tag == 'H3':
return 3
elif tag == 'H4':
return 4
elif tag == 'H5':
return 5
elif tag == 'H6':
return 6
return -1 | def function[_get_heading_level, parameter[self, element]]:
constant[
Returns the level of heading.
:param element: The heading.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The level of heading.
:rtype: int
]
variable[tag] assign[=] call[name[element].get_tag_name, parameter[]]
if compare[name[tag] equal[==] constant[H1]] begin[:]
return[constant[1]]
return[<ast.UnaryOp object at 0x7da2041d92a0>] | keyword[def] identifier[_get_heading_level] ( identifier[self] , identifier[element] ):
literal[string]
identifier[tag] = identifier[element] . identifier[get_tag_name] ()
keyword[if] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[tag] == literal[string] :
keyword[return] literal[int]
keyword[return] - literal[int] | def _get_heading_level(self, element):
"""
Returns the level of heading.
:param element: The heading.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The level of heading.
:rtype: int
"""
# pylint: disable=no-self-use
tag = element.get_tag_name()
if tag == 'H1':
return 1 # depends on [control=['if'], data=[]]
elif tag == 'H2':
return 2 # depends on [control=['if'], data=[]]
elif tag == 'H3':
return 3 # depends on [control=['if'], data=[]]
elif tag == 'H4':
return 4 # depends on [control=['if'], data=[]]
elif tag == 'H5':
return 5 # depends on [control=['if'], data=[]]
elif tag == 'H6':
return 6 # depends on [control=['if'], data=[]]
return -1 |
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot | def function[dense_to_one_hot, parameter[labels_dense, num_classes]]:
constant[Convert class labels from scalars to one-hot vectors.]
variable[num_labels] assign[=] call[name[labels_dense].shape][constant[0]]
variable[index_offset] assign[=] binary_operation[call[name[np].arange, parameter[name[num_labels]]] * name[num_classes]]
variable[labels_one_hot] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da2047e8910>, <ast.Name object at 0x7da2047e8c10>]]]]
call[name[labels_one_hot].flat][binary_operation[name[index_offset] + call[name[labels_dense].ravel, parameter[]]]] assign[=] constant[1]
return[name[labels_one_hot]] | keyword[def] identifier[dense_to_one_hot] ( identifier[labels_dense] , identifier[num_classes] ):
literal[string]
identifier[num_labels] = identifier[labels_dense] . identifier[shape] [ literal[int] ]
identifier[index_offset] = identifier[np] . identifier[arange] ( identifier[num_labels] )* identifier[num_classes]
identifier[labels_one_hot] = identifier[np] . identifier[zeros] (( identifier[num_labels] , identifier[num_classes] ))
identifier[labels_one_hot] . identifier[flat] [ identifier[index_offset] + identifier[labels_dense] . identifier[ravel] ()]= literal[int]
keyword[return] identifier[labels_one_hot] | def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot |
def debit(self, amount, credit_account, description, debit_memo="", credit_memo="", datetime=None):
""" Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
"""
assert amount >= 0
return self.post(amount, credit_account, description, self_memo=debit_memo, other_memo=credit_memo, datetime=datetime) | def function[debit, parameter[self, amount, credit_account, description, debit_memo, credit_memo, datetime]]:
constant[ Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
]
assert[compare[name[amount] greater_or_equal[>=] constant[0]]]
return[call[name[self].post, parameter[name[amount], name[credit_account], name[description]]]] | keyword[def] identifier[debit] ( identifier[self] , identifier[amount] , identifier[credit_account] , identifier[description] , identifier[debit_memo] = literal[string] , identifier[credit_memo] = literal[string] , identifier[datetime] = keyword[None] ):
literal[string]
keyword[assert] identifier[amount] >= literal[int]
keyword[return] identifier[self] . identifier[post] ( identifier[amount] , identifier[credit_account] , identifier[description] , identifier[self_memo] = identifier[debit_memo] , identifier[other_memo] = identifier[credit_memo] , identifier[datetime] = identifier[datetime] ) | def debit(self, amount, credit_account, description, debit_memo='', credit_memo='', datetime=None):
""" Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
"""
assert amount >= 0
return self.post(amount, credit_account, description, self_memo=debit_memo, other_memo=credit_memo, datetime=datetime) |
async def open_pipe_connection(
path=None,
*,
loop=None,
limit=DEFAULT_LIMIT,
**kwargs
):
"""
Connect to a server using a Windows named pipe.
"""
path = path.replace('/', '\\')
loop = loop or asyncio.get_event_loop()
reader = asyncio.StreamReader(limit=limit, loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.create_pipe_connection(
lambda: protocol,
path,
**kwargs
)
writer = asyncio.StreamWriter(transport, protocol, reader, loop)
return reader, writer | <ast.AsyncFunctionDef object at 0x7da1b0a3df60> | keyword[async] keyword[def] identifier[open_pipe_connection] (
identifier[path] = keyword[None] ,
*,
identifier[loop] = keyword[None] ,
identifier[limit] = identifier[DEFAULT_LIMIT] ,
** identifier[kwargs]
):
literal[string]
identifier[path] = identifier[path] . identifier[replace] ( literal[string] , literal[string] )
identifier[loop] = identifier[loop] keyword[or] identifier[asyncio] . identifier[get_event_loop] ()
identifier[reader] = identifier[asyncio] . identifier[StreamReader] ( identifier[limit] = identifier[limit] , identifier[loop] = identifier[loop] )
identifier[protocol] = identifier[asyncio] . identifier[StreamReaderProtocol] ( identifier[reader] , identifier[loop] = identifier[loop] )
identifier[transport] , identifier[_] = keyword[await] identifier[loop] . identifier[create_pipe_connection] (
keyword[lambda] : identifier[protocol] ,
identifier[path] ,
** identifier[kwargs]
)
identifier[writer] = identifier[asyncio] . identifier[StreamWriter] ( identifier[transport] , identifier[protocol] , identifier[reader] , identifier[loop] )
keyword[return] identifier[reader] , identifier[writer] | async def open_pipe_connection(path=None, *, loop=None, limit=DEFAULT_LIMIT, **kwargs):
"""
Connect to a server using a Windows named pipe.
"""
path = path.replace('/', '\\')
loop = loop or asyncio.get_event_loop()
reader = asyncio.StreamReader(limit=limit, loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
(transport, _) = await loop.create_pipe_connection(lambda : protocol, path, **kwargs)
writer = asyncio.StreamWriter(transport, protocol, reader, loop)
return (reader, writer) |
def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
state = self._format_state(state)
if qargs is None:
if state.shape[0] != self._input_dim:
raise QiskitError(
"Operator input dimension is not equal to state dimension."
)
if state.ndim == 1:
# Return evolved statevector
return np.dot(self.data, state)
# Return evolved density matrix
return np.dot(
np.dot(self.data, state), np.transpose(np.conj(self.data)))
# Subsystem evolution
return self._evolve_subsystem(state, qargs) | def function[_evolve, parameter[self, state, qargs]]:
constant[Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
]
variable[state] assign[=] call[name[self]._format_state, parameter[name[state]]]
if compare[name[qargs] is constant[None]] begin[:]
if compare[call[name[state].shape][constant[0]] not_equal[!=] name[self]._input_dim] begin[:]
<ast.Raise object at 0x7da1b0537c10>
if compare[name[state].ndim equal[==] constant[1]] begin[:]
return[call[name[np].dot, parameter[name[self].data, name[state]]]]
return[call[name[np].dot, parameter[call[name[np].dot, parameter[name[self].data, name[state]]], call[name[np].transpose, parameter[call[name[np].conj, parameter[name[self].data]]]]]]]
return[call[name[self]._evolve_subsystem, parameter[name[state], name[qargs]]]] | keyword[def] identifier[_evolve] ( identifier[self] , identifier[state] , identifier[qargs] = keyword[None] ):
literal[string]
identifier[state] = identifier[self] . identifier[_format_state] ( identifier[state] )
keyword[if] identifier[qargs] keyword[is] keyword[None] :
keyword[if] identifier[state] . identifier[shape] [ literal[int] ]!= identifier[self] . identifier[_input_dim] :
keyword[raise] identifier[QiskitError] (
literal[string]
)
keyword[if] identifier[state] . identifier[ndim] == literal[int] :
keyword[return] identifier[np] . identifier[dot] ( identifier[self] . identifier[data] , identifier[state] )
keyword[return] identifier[np] . identifier[dot] (
identifier[np] . identifier[dot] ( identifier[self] . identifier[data] , identifier[state] ), identifier[np] . identifier[transpose] ( identifier[np] . identifier[conj] ( identifier[self] . identifier[data] )))
keyword[return] identifier[self] . identifier[_evolve_subsystem] ( identifier[state] , identifier[qargs] ) | def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
state = self._format_state(state)
if qargs is None:
if state.shape[0] != self._input_dim:
raise QiskitError('Operator input dimension is not equal to state dimension.') # depends on [control=['if'], data=[]]
if state.ndim == 1:
# Return evolved statevector
return np.dot(self.data, state) # depends on [control=['if'], data=[]]
# Return evolved density matrix
return np.dot(np.dot(self.data, state), np.transpose(np.conj(self.data))) # depends on [control=['if'], data=[]]
# Subsystem evolution
return self._evolve_subsystem(state, qargs) |
def hparams(self, defaults, unused_model_hparams):
"""Returns problem_hparams.
Args:
defaults: default hyperparameters
unused_model_hparams: model hyperparameters
"""
(super(BabiQa, self).hparams(defaults, unused_model_hparams))
p = defaults
num_classes = self._encoders["targets"].vocab_size
p.modality = {"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"targets": num_classes} | def function[hparams, parameter[self, defaults, unused_model_hparams]]:
constant[Returns problem_hparams.
Args:
defaults: default hyperparameters
unused_model_hparams: model hyperparameters
]
call[call[name[super], parameter[name[BabiQa], name[self]]].hparams, parameter[name[defaults], name[unused_model_hparams]]]
variable[p] assign[=] name[defaults]
variable[num_classes] assign[=] call[name[self]._encoders][constant[targets]].vocab_size
name[p].modality assign[=] dictionary[[<ast.Constant object at 0x7da1b20e4520>], [<ast.Attribute object at 0x7da1b20e6530>]]
name[p].vocab_size assign[=] dictionary[[<ast.Constant object at 0x7da1b20e4730>], [<ast.Name object at 0x7da1b20e6260>]] | keyword[def] identifier[hparams] ( identifier[self] , identifier[defaults] , identifier[unused_model_hparams] ):
literal[string]
( identifier[super] ( identifier[BabiQa] , identifier[self] ). identifier[hparams] ( identifier[defaults] , identifier[unused_model_hparams] ))
identifier[p] = identifier[defaults]
identifier[num_classes] = identifier[self] . identifier[_encoders] [ literal[string] ]. identifier[vocab_size]
identifier[p] . identifier[modality] ={ literal[string] : identifier[modalities] . identifier[ModalityType] . identifier[CLASS_LABEL] }
identifier[p] . identifier[vocab_size] ={ literal[string] : identifier[num_classes] } | def hparams(self, defaults, unused_model_hparams):
"""Returns problem_hparams.
Args:
defaults: default hyperparameters
unused_model_hparams: model hyperparameters
"""
super(BabiQa, self).hparams(defaults, unused_model_hparams)
p = defaults
num_classes = self._encoders['targets'].vocab_size
p.modality = {'targets': modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {'targets': num_classes} |
def extract_concepts(self, sentences=None, ids=None, filename=None,
restrict_to_sts=None, restrict_to_sources=None):
""" extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
"""
if (sentences is not None and filename is not None) or \
(sentences is None and filename is None):
raise ValueError("You must either pass a list of sentences "
"OR a filename.")
input_file = None
if sentences is not None:
input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
else:
input_file = open(filename, 'r')
# Unlike MetaMap, MetaMapLite does not take an output filename as a parameter.
# It creates a new output file at same location as "input_file" with the default file extension ".mmi".
# output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
output_file_name = None
error = None
try:
if sentences is not None:
if ids is not None:
for identifier, sentence in zip(ids, sentences):
input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
command = ["bash", os.path.join(self.metamap_filename, "metamaplite.sh")]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts]
if len(restrict_to_sts) > 0:
command.append('--restrict_to_sts')
command.append(str(','.join(restrict_to_sts)))
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources]
if len(restrict_to_sources) > 0:
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources)))
if ids is not None:
command.append('--inputformat=sldiwi')
command.append(input_file.name)
# command.append(output_file.name)
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while metamap_process.poll() is None:
stdout = str(metamap_process.stdout.readline())
if 'ERROR' in stdout:
metamap_process.terminate()
error = stdout.rstrip()
# print("input file name: {0}".format(input_file.name))
output_file_name, file_extension = os.path.splitext(input_file.name)
output_file_name += "." + "mmi"
# print("output_file_name: {0}".format(output_file_name))
with open(output_file_name) as fd:
output = fd.read()
# output = str(output_file.read())
# print("output: {0}".format(output))
finally:
if sentences is not None:
os.remove(input_file.name)
else:
input_file.close()
# os.remove(output_file.name)
os.remove(output_file_name)
concepts = CorpusLite.load(output.splitlines())
return concepts, error | def function[extract_concepts, parameter[self, sentences, ids, filename, restrict_to_sts, restrict_to_sources]]:
constant[ extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
]
if <ast.BoolOp object at 0x7da1b0552e90> begin[:]
<ast.Raise object at 0x7da1b0550310>
variable[input_file] assign[=] constant[None]
if compare[name[sentences] is_not constant[None]] begin[:]
variable[input_file] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
variable[output_file_name] assign[=] constant[None]
variable[error] assign[=] constant[None]
<ast.Try object at 0x7da1b0553af0>
variable[concepts] assign[=] call[name[CorpusLite].load, parameter[call[name[output].splitlines, parameter[]]]]
return[tuple[[<ast.Name object at 0x7da1b05c6980>, <ast.Name object at 0x7da1b05c6c20>]]] | keyword[def] identifier[extract_concepts] ( identifier[self] , identifier[sentences] = keyword[None] , identifier[ids] = keyword[None] , identifier[filename] = keyword[None] ,
identifier[restrict_to_sts] = keyword[None] , identifier[restrict_to_sources] = keyword[None] ):
literal[string]
keyword[if] ( identifier[sentences] keyword[is] keyword[not] keyword[None] keyword[and] identifier[filename] keyword[is] keyword[not] keyword[None] ) keyword[or] ( identifier[sentences] keyword[is] keyword[None] keyword[and] identifier[filename] keyword[is] keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[input_file] = keyword[None]
keyword[if] identifier[sentences] keyword[is] keyword[not] keyword[None] :
identifier[input_file] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] , identifier[delete] = keyword[False] )
keyword[else] :
identifier[input_file] = identifier[open] ( identifier[filename] , literal[string] )
identifier[output_file_name] = keyword[None]
identifier[error] = keyword[None]
keyword[try] :
keyword[if] identifier[sentences] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[ids] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[identifier] , identifier[sentence] keyword[in] identifier[zip] ( identifier[ids] , identifier[sentences] ):
identifier[input_file] . identifier[write] ( literal[string] . identifier[format] ( identifier[identifier] , identifier[sentence] ). identifier[encode] ( literal[string] ))
keyword[else] :
keyword[for] identifier[sentence] keyword[in] identifier[sentences] :
identifier[input_file] . identifier[write] ( literal[string] . identifier[format] ( identifier[sentence] ). identifier[encode] ( literal[string] ))
identifier[input_file] . identifier[flush] ()
identifier[command] =[ literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[metamap_filename] , literal[string] )]
keyword[if] identifier[restrict_to_sts] :
keyword[if] identifier[isinstance] ( identifier[restrict_to_sts] , identifier[str] ):
identifier[restrict_to_sts] =[ identifier[restrict_to_sts] ]
keyword[if] identifier[len] ( identifier[restrict_to_sts] )> literal[int] :
identifier[command] . identifier[append] ( literal[string] )
identifier[command] . identifier[append] ( identifier[str] ( literal[string] . identifier[join] ( identifier[restrict_to_sts] )))
keyword[if] identifier[restrict_to_sources] :
keyword[if] identifier[isinstance] ( identifier[restrict_to_sources] , identifier[str] ):
identifier[restrict_to_sources] =[ identifier[restrict_to_sources] ]
keyword[if] identifier[len] ( identifier[restrict_to_sources] )> literal[int] :
identifier[command] . identifier[append] ( literal[string] )
identifier[command] . identifier[append] ( identifier[str] ( literal[string] . identifier[join] ( identifier[restrict_to_sources] )))
keyword[if] identifier[ids] keyword[is] keyword[not] keyword[None] :
identifier[command] . identifier[append] ( literal[string] )
identifier[command] . identifier[append] ( identifier[input_file] . identifier[name] )
identifier[metamap_process] = identifier[subprocess] . identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] )
keyword[while] identifier[metamap_process] . identifier[poll] () keyword[is] keyword[None] :
identifier[stdout] = identifier[str] ( identifier[metamap_process] . identifier[stdout] . identifier[readline] ())
keyword[if] literal[string] keyword[in] identifier[stdout] :
identifier[metamap_process] . identifier[terminate] ()
identifier[error] = identifier[stdout] . identifier[rstrip] ()
identifier[output_file_name] , identifier[file_extension] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[input_file] . identifier[name] )
identifier[output_file_name] += literal[string] + literal[string]
keyword[with] identifier[open] ( identifier[output_file_name] ) keyword[as] identifier[fd] :
identifier[output] = identifier[fd] . identifier[read] ()
keyword[finally] :
keyword[if] identifier[sentences] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[remove] ( identifier[input_file] . identifier[name] )
keyword[else] :
identifier[input_file] . identifier[close] ()
identifier[os] . identifier[remove] ( identifier[output_file_name] )
identifier[concepts] = identifier[CorpusLite] . identifier[load] ( identifier[output] . identifier[splitlines] ())
keyword[return] identifier[concepts] , identifier[error] | def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None):
""" extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
"""
if sentences is not None and filename is not None or (sentences is None and filename is None):
raise ValueError('You must either pass a list of sentences OR a filename.') # depends on [control=['if'], data=[]]
input_file = None
if sentences is not None:
input_file = tempfile.NamedTemporaryFile(mode='wb', delete=False) # depends on [control=['if'], data=[]]
else:
input_file = open(filename, 'r')
# Unlike MetaMap, MetaMapLite does not take an output filename as a parameter.
# It creates a new output file at same location as "input_file" with the default file extension ".mmi".
# output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
output_file_name = None
error = None
try:
if sentences is not None:
if ids is not None:
for (identifier, sentence) in zip(ids, sentences):
input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['ids']]
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8')) # depends on [control=['for'], data=['sentence']]
input_file.flush() # depends on [control=['if'], data=['sentences']]
command = ['bash', os.path.join(self.metamap_filename, 'metamaplite.sh')]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts] # depends on [control=['if'], data=[]]
if len(restrict_to_sts) > 0:
command.append('--restrict_to_sts')
command.append(str(','.join(restrict_to_sts))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources] # depends on [control=['if'], data=[]]
if len(restrict_to_sources) > 0:
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if ids is not None:
command.append('--inputformat=sldiwi') # depends on [control=['if'], data=[]]
command.append(input_file.name)
# command.append(output_file.name)
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while metamap_process.poll() is None:
stdout = str(metamap_process.stdout.readline())
if 'ERROR' in stdout:
metamap_process.terminate()
error = stdout.rstrip() # depends on [control=['if'], data=['stdout']] # depends on [control=['while'], data=[]]
# print("input file name: {0}".format(input_file.name))
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += '.' + 'mmi'
# print("output_file_name: {0}".format(output_file_name))
with open(output_file_name) as fd:
output = fd.read() # depends on [control=['with'], data=['fd']] # depends on [control=['try'], data=[]]
finally:
# output = str(output_file.read())
# print("output: {0}".format(output))
if sentences is not None:
os.remove(input_file.name) # depends on [control=['if'], data=[]]
else:
input_file.close()
# os.remove(output_file.name)
os.remove(output_file_name)
concepts = CorpusLite.load(output.splitlines())
return (concepts, error) |
def _images(self, sys_output):
''' a helper method for parsing docker image output '''
import re
gap_pattern = re.compile('\t|\s{2,}')
image_list = []
output_lines = sys_output.split('\n')
column_headers = gap_pattern.split(output_lines[0])
for i in range(1,len(output_lines)):
columns = gap_pattern.split(output_lines[i])
if len(columns) == len(column_headers):
image_details = {}
for j in range(len(columns)):
image_details[column_headers[j]] = columns[j]
image_list.append(image_details)
return image_list | def function[_images, parameter[self, sys_output]]:
constant[ a helper method for parsing docker image output ]
import module[re]
variable[gap_pattern] assign[=] call[name[re].compile, parameter[constant[ |\s{2,}]]]
variable[image_list] assign[=] list[[]]
variable[output_lines] assign[=] call[name[sys_output].split, parameter[constant[
]]]
variable[column_headers] assign[=] call[name[gap_pattern].split, parameter[call[name[output_lines]][constant[0]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[output_lines]]]]]] begin[:]
variable[columns] assign[=] call[name[gap_pattern].split, parameter[call[name[output_lines]][name[i]]]]
if compare[call[name[len], parameter[name[columns]]] equal[==] call[name[len], parameter[name[column_headers]]]] begin[:]
variable[image_details] assign[=] dictionary[[], []]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[columns]]]]]] begin[:]
call[name[image_details]][call[name[column_headers]][name[j]]] assign[=] call[name[columns]][name[j]]
call[name[image_list].append, parameter[name[image_details]]]
return[name[image_list]] | keyword[def] identifier[_images] ( identifier[self] , identifier[sys_output] ):
literal[string]
keyword[import] identifier[re]
identifier[gap_pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[image_list] =[]
identifier[output_lines] = identifier[sys_output] . identifier[split] ( literal[string] )
identifier[column_headers] = identifier[gap_pattern] . identifier[split] ( identifier[output_lines] [ literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[output_lines] )):
identifier[columns] = identifier[gap_pattern] . identifier[split] ( identifier[output_lines] [ identifier[i] ])
keyword[if] identifier[len] ( identifier[columns] )== identifier[len] ( identifier[column_headers] ):
identifier[image_details] ={}
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[columns] )):
identifier[image_details] [ identifier[column_headers] [ identifier[j] ]]= identifier[columns] [ identifier[j] ]
identifier[image_list] . identifier[append] ( identifier[image_details] )
keyword[return] identifier[image_list] | def _images(self, sys_output):
""" a helper method for parsing docker image output """
import re
gap_pattern = re.compile('\t|\\s{2,}')
image_list = []
output_lines = sys_output.split('\n')
column_headers = gap_pattern.split(output_lines[0])
for i in range(1, len(output_lines)):
columns = gap_pattern.split(output_lines[i])
if len(columns) == len(column_headers):
image_details = {}
for j in range(len(columns)):
image_details[column_headers[j]] = columns[j] # depends on [control=['for'], data=['j']]
image_list.append(image_details) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return image_list |
def display(self, cutout, use_pixel_coords=False):
"""
:param cutout: source cutout object to be display
:type cutout: source.SourceCutout
"""
logging.debug("Current display list contains: {}".format(self._displayables_by_cutout.keys()))
logging.debug("Looking for {}".format(cutout))
assert isinstance(cutout, SourceCutout)
if cutout in self._displayables_by_cutout:
displayable = self._displayables_by_cutout[cutout]
else:
displayable = self._create_displayable(cutout)
self._displayables_by_cutout[cutout] = displayable
self._detach_handlers(self.current_displayable)
self.current_cutout = cutout
self.current_displayable = displayable
self._attach_handlers(self.current_displayable)
self._do_render(self.current_displayable)
self.mark_apertures(cutout, pixel=use_pixel_coords)
self.draw_uncertainty_ellipse(cutout) | def function[display, parameter[self, cutout, use_pixel_coords]]:
constant[
:param cutout: source cutout object to be display
:type cutout: source.SourceCutout
]
call[name[logging].debug, parameter[call[constant[Current display list contains: {}].format, parameter[call[name[self]._displayables_by_cutout.keys, parameter[]]]]]]
call[name[logging].debug, parameter[call[constant[Looking for {}].format, parameter[name[cutout]]]]]
assert[call[name[isinstance], parameter[name[cutout], name[SourceCutout]]]]
if compare[name[cutout] in name[self]._displayables_by_cutout] begin[:]
variable[displayable] assign[=] call[name[self]._displayables_by_cutout][name[cutout]]
call[name[self]._detach_handlers, parameter[name[self].current_displayable]]
name[self].current_cutout assign[=] name[cutout]
name[self].current_displayable assign[=] name[displayable]
call[name[self]._attach_handlers, parameter[name[self].current_displayable]]
call[name[self]._do_render, parameter[name[self].current_displayable]]
call[name[self].mark_apertures, parameter[name[cutout]]]
call[name[self].draw_uncertainty_ellipse, parameter[name[cutout]]] | keyword[def] identifier[display] ( identifier[self] , identifier[cutout] , identifier[use_pixel_coords] = keyword[False] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[_displayables_by_cutout] . identifier[keys] ()))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[cutout] ))
keyword[assert] identifier[isinstance] ( identifier[cutout] , identifier[SourceCutout] )
keyword[if] identifier[cutout] keyword[in] identifier[self] . identifier[_displayables_by_cutout] :
identifier[displayable] = identifier[self] . identifier[_displayables_by_cutout] [ identifier[cutout] ]
keyword[else] :
identifier[displayable] = identifier[self] . identifier[_create_displayable] ( identifier[cutout] )
identifier[self] . identifier[_displayables_by_cutout] [ identifier[cutout] ]= identifier[displayable]
identifier[self] . identifier[_detach_handlers] ( identifier[self] . identifier[current_displayable] )
identifier[self] . identifier[current_cutout] = identifier[cutout]
identifier[self] . identifier[current_displayable] = identifier[displayable]
identifier[self] . identifier[_attach_handlers] ( identifier[self] . identifier[current_displayable] )
identifier[self] . identifier[_do_render] ( identifier[self] . identifier[current_displayable] )
identifier[self] . identifier[mark_apertures] ( identifier[cutout] , identifier[pixel] = identifier[use_pixel_coords] )
identifier[self] . identifier[draw_uncertainty_ellipse] ( identifier[cutout] ) | def display(self, cutout, use_pixel_coords=False):
"""
:param cutout: source cutout object to be display
:type cutout: source.SourceCutout
"""
logging.debug('Current display list contains: {}'.format(self._displayables_by_cutout.keys()))
logging.debug('Looking for {}'.format(cutout))
assert isinstance(cutout, SourceCutout)
if cutout in self._displayables_by_cutout:
displayable = self._displayables_by_cutout[cutout] # depends on [control=['if'], data=['cutout']]
else:
displayable = self._create_displayable(cutout)
self._displayables_by_cutout[cutout] = displayable
self._detach_handlers(self.current_displayable)
self.current_cutout = cutout
self.current_displayable = displayable
self._attach_handlers(self.current_displayable)
self._do_render(self.current_displayable)
self.mark_apertures(cutout, pixel=use_pixel_coords)
self.draw_uncertainty_ellipse(cutout) |
def draw_graph(self, line_kwargs=None, scatter_kwargs=None, **kwargs):
"""Draws the graph.
Uses matplotlib, specifically
:class:`~matplotlib.collections.LineCollection` and
:meth:`~matplotlib.axes.Axes.scatter`. Gets the default
keyword arguments for both methods by calling
:meth:`~.QueueNetworkDiGraph.lines_scatter_args` first.
Parameters
----------
line_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:class:`~matplotlib.collections.LineCollection`
scatter_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.scatter`.
bgcolor : list (optional, keyword only)
A list with 4 floats representing a RGBA color. Defaults
to ``[1, 1, 1, 1]``.
figsize : tuple (optional, keyword only, default: ``(7, 7)``)
The width and height of the figure in inches.
kwargs :
Any keyword arguments used by
:meth:`~matplotlib.figure.Figure.savefig`.
Raises
------
ImportError :
If Matplotlib is not installed then an :exc:`ImportError`
is raised.
Notes
-----
If the ``fname`` keyword is passed, then the figure is saved
locally.
"""
if not HAS_MATPLOTLIB:
raise ImportError("Matplotlib is required to draw the graph.")
fig = plt.figure(figsize=kwargs.get('figsize', (7, 7)))
ax = fig.gca()
mpl_kwargs = {
'line_kwargs': line_kwargs,
'scatter_kwargs': scatter_kwargs,
'pos': kwargs.get('pos')
}
line_kwargs, scatter_kwargs = self.lines_scatter_args(**mpl_kwargs)
edge_collection = LineCollection(**line_kwargs)
ax.add_collection(edge_collection)
ax.scatter(**scatter_kwargs)
if hasattr(ax, 'set_facecolor'):
ax.set_facecolor(kwargs.get('bgcolor', [1, 1, 1, 1]))
else:
ax.set_axis_bgcolor(kwargs.get('bgcolor', [1, 1, 1, 1]))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if 'fname' in kwargs:
# savefig needs a positional argument for some reason
new_kwargs = {k: v for k, v in kwargs.items() if k in SAVEFIG_KWARGS}
fig.savefig(kwargs['fname'], **new_kwargs)
else:
plt.ion()
plt.show() | def function[draw_graph, parameter[self, line_kwargs, scatter_kwargs]]:
constant[Draws the graph.
Uses matplotlib, specifically
:class:`~matplotlib.collections.LineCollection` and
:meth:`~matplotlib.axes.Axes.scatter`. Gets the default
keyword arguments for both methods by calling
:meth:`~.QueueNetworkDiGraph.lines_scatter_args` first.
Parameters
----------
line_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:class:`~matplotlib.collections.LineCollection`
scatter_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.scatter`.
bgcolor : list (optional, keyword only)
A list with 4 floats representing a RGBA color. Defaults
to ``[1, 1, 1, 1]``.
figsize : tuple (optional, keyword only, default: ``(7, 7)``)
The width and height of the figure in inches.
kwargs :
Any keyword arguments used by
:meth:`~matplotlib.figure.Figure.savefig`.
Raises
------
ImportError :
If Matplotlib is not installed then an :exc:`ImportError`
is raised.
Notes
-----
If the ``fname`` keyword is passed, then the figure is saved
locally.
]
if <ast.UnaryOp object at 0x7da1b013e410> begin[:]
<ast.Raise object at 0x7da1b013c9d0>
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].gca, parameter[]]
variable[mpl_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b013fa30>, <ast.Constant object at 0x7da1b013fd60>, <ast.Constant object at 0x7da1b013f010>], [<ast.Name object at 0x7da1b013edd0>, <ast.Name object at 0x7da1b013ee30>, <ast.Call object at 0x7da1b013f250>]]
<ast.Tuple object at 0x7da1b013fb20> assign[=] call[name[self].lines_scatter_args, parameter[]]
variable[edge_collection] assign[=] call[name[LineCollection], parameter[]]
call[name[ax].add_collection, parameter[name[edge_collection]]]
call[name[ax].scatter, parameter[]]
if call[name[hasattr], parameter[name[ax], constant[set_facecolor]]] begin[:]
call[name[ax].set_facecolor, parameter[call[name[kwargs].get, parameter[constant[bgcolor], list[[<ast.Constant object at 0x7da1b013d630>, <ast.Constant object at 0x7da1b013d990>, <ast.Constant object at 0x7da1b013ecb0>, <ast.Constant object at 0x7da1b013e260>]]]]]]
call[call[name[ax].get_xaxis, parameter[]].set_visible, parameter[constant[False]]]
call[call[name[ax].get_yaxis, parameter[]].set_visible, parameter[constant[False]]]
if compare[constant[fname] in name[kwargs]] begin[:]
variable[new_kwargs] assign[=] <ast.DictComp object at 0x7da1b013e0e0>
call[name[fig].savefig, parameter[call[name[kwargs]][constant[fname]]]] | keyword[def] identifier[draw_graph] ( identifier[self] , identifier[line_kwargs] = keyword[None] , identifier[scatter_kwargs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[HAS_MATPLOTLIB] :
keyword[raise] identifier[ImportError] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[kwargs] . identifier[get] ( literal[string] ,( literal[int] , literal[int] )))
identifier[ax] = identifier[fig] . identifier[gca] ()
identifier[mpl_kwargs] ={
literal[string] : identifier[line_kwargs] ,
literal[string] : identifier[scatter_kwargs] ,
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] )
}
identifier[line_kwargs] , identifier[scatter_kwargs] = identifier[self] . identifier[lines_scatter_args] (** identifier[mpl_kwargs] )
identifier[edge_collection] = identifier[LineCollection] (** identifier[line_kwargs] )
identifier[ax] . identifier[add_collection] ( identifier[edge_collection] )
identifier[ax] . identifier[scatter] (** identifier[scatter_kwargs] )
keyword[if] identifier[hasattr] ( identifier[ax] , literal[string] ):
identifier[ax] . identifier[set_facecolor] ( identifier[kwargs] . identifier[get] ( literal[string] ,[ literal[int] , literal[int] , literal[int] , literal[int] ]))
keyword[else] :
identifier[ax] . identifier[set_axis_bgcolor] ( identifier[kwargs] . identifier[get] ( literal[string] ,[ literal[int] , literal[int] , literal[int] , literal[int] ]))
identifier[ax] . identifier[get_xaxis] (). identifier[set_visible] ( keyword[False] )
identifier[ax] . identifier[get_yaxis] (). identifier[set_visible] ( keyword[False] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[new_kwargs] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[SAVEFIG_KWARGS] }
identifier[fig] . identifier[savefig] ( identifier[kwargs] [ literal[string] ],** identifier[new_kwargs] )
keyword[else] :
identifier[plt] . identifier[ion] ()
identifier[plt] . identifier[show] () | def draw_graph(self, line_kwargs=None, scatter_kwargs=None, **kwargs):
"""Draws the graph.
Uses matplotlib, specifically
:class:`~matplotlib.collections.LineCollection` and
:meth:`~matplotlib.axes.Axes.scatter`. Gets the default
keyword arguments for both methods by calling
:meth:`~.QueueNetworkDiGraph.lines_scatter_args` first.
Parameters
----------
line_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:class:`~matplotlib.collections.LineCollection`
scatter_kwargs : dict (optional, default: ``None``)
Any keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.scatter`.
bgcolor : list (optional, keyword only)
A list with 4 floats representing a RGBA color. Defaults
to ``[1, 1, 1, 1]``.
figsize : tuple (optional, keyword only, default: ``(7, 7)``)
The width and height of the figure in inches.
kwargs :
Any keyword arguments used by
:meth:`~matplotlib.figure.Figure.savefig`.
Raises
------
ImportError :
If Matplotlib is not installed then an :exc:`ImportError`
is raised.
Notes
-----
If the ``fname`` keyword is passed, then the figure is saved
locally.
"""
if not HAS_MATPLOTLIB:
raise ImportError('Matplotlib is required to draw the graph.') # depends on [control=['if'], data=[]]
fig = plt.figure(figsize=kwargs.get('figsize', (7, 7)))
ax = fig.gca()
mpl_kwargs = {'line_kwargs': line_kwargs, 'scatter_kwargs': scatter_kwargs, 'pos': kwargs.get('pos')}
(line_kwargs, scatter_kwargs) = self.lines_scatter_args(**mpl_kwargs)
edge_collection = LineCollection(**line_kwargs)
ax.add_collection(edge_collection)
ax.scatter(**scatter_kwargs)
if hasattr(ax, 'set_facecolor'):
ax.set_facecolor(kwargs.get('bgcolor', [1, 1, 1, 1])) # depends on [control=['if'], data=[]]
else:
ax.set_axis_bgcolor(kwargs.get('bgcolor', [1, 1, 1, 1]))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if 'fname' in kwargs:
# savefig needs a positional argument for some reason
new_kwargs = {k: v for (k, v) in kwargs.items() if k in SAVEFIG_KWARGS}
fig.savefig(kwargs['fname'], **new_kwargs) # depends on [control=['if'], data=['kwargs']]
else:
plt.ion()
plt.show() |
def writeNumber(self, n):
"""
Writes a float to the stream.
@type n: C{float}
"""
self.stream.write(TYPE_NUMBER)
self.stream.write_double(n) | def function[writeNumber, parameter[self, n]]:
constant[
Writes a float to the stream.
@type n: C{float}
]
call[name[self].stream.write, parameter[name[TYPE_NUMBER]]]
call[name[self].stream.write_double, parameter[name[n]]] | keyword[def] identifier[writeNumber] ( identifier[self] , identifier[n] ):
literal[string]
identifier[self] . identifier[stream] . identifier[write] ( identifier[TYPE_NUMBER] )
identifier[self] . identifier[stream] . identifier[write_double] ( identifier[n] ) | def writeNumber(self, n):
"""
Writes a float to the stream.
@type n: C{float}
"""
self.stream.write(TYPE_NUMBER)
self.stream.write_double(n) |
def add_web_servers(self, listen_addrs, debug=False, **ssl_args):
"""Add WebSocketServer for each (host, port) in listen_addrs."""
self.servers.extend(
self.get_web_server(listen_addr, debug=debug, **ssl_args)
for listen_addr in listen_addrs
) | def function[add_web_servers, parameter[self, listen_addrs, debug]]:
constant[Add WebSocketServer for each (host, port) in listen_addrs.]
call[name[self].servers.extend, parameter[<ast.GeneratorExp object at 0x7da18f8102b0>]] | keyword[def] identifier[add_web_servers] ( identifier[self] , identifier[listen_addrs] , identifier[debug] = keyword[False] ,** identifier[ssl_args] ):
literal[string]
identifier[self] . identifier[servers] . identifier[extend] (
identifier[self] . identifier[get_web_server] ( identifier[listen_addr] , identifier[debug] = identifier[debug] ,** identifier[ssl_args] )
keyword[for] identifier[listen_addr] keyword[in] identifier[listen_addrs]
) | def add_web_servers(self, listen_addrs, debug=False, **ssl_args):
"""Add WebSocketServer for each (host, port) in listen_addrs."""
self.servers.extend((self.get_web_server(listen_addr, debug=debug, **ssl_args) for listen_addr in listen_addrs)) |
def sanitize_index(data, index, copy=False):
"""
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError('Length of values does not match length of index')
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = sanitize_array(data, index, copy=copy)
return data | def function[sanitize_index, parameter[data, index, copy]]:
constant[
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
]
if compare[name[index] is constant[None]] begin[:]
return[name[data]]
if compare[call[name[len], parameter[name[data]]] not_equal[!=] call[name[len], parameter[name[index]]]] begin[:]
<ast.Raise object at 0x7da1b1eb4a00>
if <ast.BoolOp object at 0x7da1b1eb4310> begin[:]
pass
return[name[data]] | keyword[def] identifier[sanitize_index] ( identifier[data] , identifier[index] , identifier[copy] = keyword[False] ):
literal[string]
keyword[if] identifier[index] keyword[is] keyword[None] :
keyword[return] identifier[data]
keyword[if] identifier[len] ( identifier[data] )!= identifier[len] ( identifier[index] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[ABCIndexClass] ) keyword[and] keyword[not] identifier[copy] :
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[data] ,( identifier[ABCPeriodIndex] , identifier[ABCDatetimeIndex] )):
identifier[data] = identifier[data] . identifier[_values]
keyword[if] identifier[copy] :
identifier[data] = identifier[data] . identifier[copy] ()
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[ndarray] ):
keyword[if] identifier[data] . identifier[dtype] . identifier[kind] keyword[in] [ literal[string] , literal[string] ]:
identifier[data] = identifier[sanitize_array] ( identifier[data] , identifier[index] , identifier[copy] = identifier[copy] )
keyword[return] identifier[data] | def sanitize_index(data, index, copy=False):
"""
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
"""
if index is None:
return data # depends on [control=['if'], data=[]]
if len(data) != len(index):
raise ValueError('Length of values does not match length of index') # depends on [control=['if'], data=[]]
if isinstance(data, ABCIndexClass) and (not copy):
pass # depends on [control=['if'], data=[]]
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
if copy:
data = data.copy() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = sanitize_array(data, index, copy=copy) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return data |
def when_children_replaced ( self, object, listener, remove ):
""" Sets up or removes a listener for children being replaced on a
specified object.
"""
object.on_trait_change( listener, "subgraphs", remove = remove,
dispatch = "fast_ui" )
object.on_trait_change( listener, "clusters", remove = remove,
dispatch = "fast_ui" )
object.on_trait_change( listener, "nodes", remove = remove,
dispatch = "fast_ui" )
object.on_trait_change( listener, "edges", remove = remove,
dispatch = "fast_ui" ) | def function[when_children_replaced, parameter[self, object, listener, remove]]:
constant[ Sets up or removes a listener for children being replaced on a
specified object.
]
call[name[object].on_trait_change, parameter[name[listener], constant[subgraphs]]]
call[name[object].on_trait_change, parameter[name[listener], constant[clusters]]]
call[name[object].on_trait_change, parameter[name[listener], constant[nodes]]]
call[name[object].on_trait_change, parameter[name[listener], constant[edges]]] | keyword[def] identifier[when_children_replaced] ( identifier[self] , identifier[object] , identifier[listener] , identifier[remove] ):
literal[string]
identifier[object] . identifier[on_trait_change] ( identifier[listener] , literal[string] , identifier[remove] = identifier[remove] ,
identifier[dispatch] = literal[string] )
identifier[object] . identifier[on_trait_change] ( identifier[listener] , literal[string] , identifier[remove] = identifier[remove] ,
identifier[dispatch] = literal[string] )
identifier[object] . identifier[on_trait_change] ( identifier[listener] , literal[string] , identifier[remove] = identifier[remove] ,
identifier[dispatch] = literal[string] )
identifier[object] . identifier[on_trait_change] ( identifier[listener] , literal[string] , identifier[remove] = identifier[remove] ,
identifier[dispatch] = literal[string] ) | def when_children_replaced(self, object, listener, remove):
""" Sets up or removes a listener for children being replaced on a
specified object.
"""
object.on_trait_change(listener, 'subgraphs', remove=remove, dispatch='fast_ui')
object.on_trait_change(listener, 'clusters', remove=remove, dispatch='fast_ui')
object.on_trait_change(listener, 'nodes', remove=remove, dispatch='fast_ui')
object.on_trait_change(listener, 'edges', remove=remove, dispatch='fast_ui') |
def _merge_list_fastqs(files, out_file, config):
"""merge list of fastq files into one"""
if not all(map(fastq.is_fastq, files)):
raise ValueError("Not all of the files to merge are fastq files: %s " % (files))
assert all(map(utils.file_exists, files)), ("Not all of the files to merge "
"exist: %s" % (files))
if not file_exists(out_file):
files = [_gzip_fastq(fn) for fn in files]
if len(files) == 1:
if "remove_source" in config and config["remove_source"]:
shutil.move(files[0], out_file)
else:
os.symlink(files[0], out_file)
return out_file
with file_transaction(out_file) as file_txt_out:
files_str = " ".join(list(files))
cmd = "cat {files_str} > {file_txt_out}".format(**locals())
do.run(cmd, "merge fastq files %s" % files)
return out_file | def function[_merge_list_fastqs, parameter[files, out_file, config]]:
constant[merge list of fastq files into one]
if <ast.UnaryOp object at 0x7da1b1831a50> begin[:]
<ast.Raise object at 0x7da1b1831390>
assert[call[name[all], parameter[call[name[map], parameter[name[utils].file_exists, name[files]]]]]]
if <ast.UnaryOp object at 0x7da1b1831840> begin[:]
variable[files] assign[=] <ast.ListComp object at 0x7da1b1831e70>
if compare[call[name[len], parameter[name[files]]] equal[==] constant[1]] begin[:]
if <ast.BoolOp object at 0x7da1b19854e0> begin[:]
call[name[shutil].move, parameter[call[name[files]][constant[0]], name[out_file]]]
return[name[out_file]]
with call[name[file_transaction], parameter[name[out_file]]] begin[:]
variable[files_str] assign[=] call[constant[ ].join, parameter[call[name[list], parameter[name[files]]]]]
variable[cmd] assign[=] call[constant[cat {files_str} > {file_txt_out}].format, parameter[]]
call[name[do].run, parameter[name[cmd], binary_operation[constant[merge fastq files %s] <ast.Mod object at 0x7da2590d6920> name[files]]]]
return[name[out_file]] | keyword[def] identifier[_merge_list_fastqs] ( identifier[files] , identifier[out_file] , identifier[config] ):
literal[string]
keyword[if] keyword[not] identifier[all] ( identifier[map] ( identifier[fastq] . identifier[is_fastq] , identifier[files] )):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[files] ))
keyword[assert] identifier[all] ( identifier[map] ( identifier[utils] . identifier[file_exists] , identifier[files] )),( literal[string]
literal[string] %( identifier[files] ))
keyword[if] keyword[not] identifier[file_exists] ( identifier[out_file] ):
identifier[files] =[ identifier[_gzip_fastq] ( identifier[fn] ) keyword[for] identifier[fn] keyword[in] identifier[files] ]
keyword[if] identifier[len] ( identifier[files] )== literal[int] :
keyword[if] literal[string] keyword[in] identifier[config] keyword[and] identifier[config] [ literal[string] ]:
identifier[shutil] . identifier[move] ( identifier[files] [ literal[int] ], identifier[out_file] )
keyword[else] :
identifier[os] . identifier[symlink] ( identifier[files] [ literal[int] ], identifier[out_file] )
keyword[return] identifier[out_file]
keyword[with] identifier[file_transaction] ( identifier[out_file] ) keyword[as] identifier[file_txt_out] :
identifier[files_str] = literal[string] . identifier[join] ( identifier[list] ( identifier[files] ))
identifier[cmd] = literal[string] . identifier[format] (** identifier[locals] ())
identifier[do] . identifier[run] ( identifier[cmd] , literal[string] % identifier[files] )
keyword[return] identifier[out_file] | def _merge_list_fastqs(files, out_file, config):
"""merge list of fastq files into one"""
if not all(map(fastq.is_fastq, files)):
raise ValueError('Not all of the files to merge are fastq files: %s ' % files) # depends on [control=['if'], data=[]]
assert all(map(utils.file_exists, files)), 'Not all of the files to merge exist: %s' % files
if not file_exists(out_file):
files = [_gzip_fastq(fn) for fn in files]
if len(files) == 1:
if 'remove_source' in config and config['remove_source']:
shutil.move(files[0], out_file) # depends on [control=['if'], data=[]]
else:
os.symlink(files[0], out_file)
return out_file # depends on [control=['if'], data=[]]
with file_transaction(out_file) as file_txt_out:
files_str = ' '.join(list(files))
cmd = 'cat {files_str} > {file_txt_out}'.format(**locals())
do.run(cmd, 'merge fastq files %s' % files) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return out_file |
def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete(
{self.id_field: document_id}
)
if doc2 and doc2.get("gridfs_id"):
GridFS(self.mongo[database], coll).delete(doc2["gridfs_id"])
else:
self.mongo[database][coll].delete_one({"_id": document_id}) | def function[remove, parameter[self, document_id, namespace, timestamp]]:
constant[Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
]
<ast.Tuple object at 0x7da1b1d3bdf0> assign[=] call[name[self]._db_and_collection, parameter[name[namespace]]]
variable[meta_collection] assign[=] call[name[self]._get_meta_collection, parameter[name[namespace]]]
variable[doc2] assign[=] call[call[name[self].meta_database][name[meta_collection]].find_one_and_delete, parameter[dictionary[[<ast.Attribute object at 0x7da1b1d38f70>], [<ast.Name object at 0x7da1b1d396c0>]]]]
if <ast.BoolOp object at 0x7da1b1d39750> begin[:]
call[call[name[GridFS], parameter[call[name[self].mongo][name[database]], name[coll]]].delete, parameter[call[name[doc2]][constant[gridfs_id]]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[document_id] , identifier[namespace] , identifier[timestamp] ):
literal[string]
identifier[database] , identifier[coll] = identifier[self] . identifier[_db_and_collection] ( identifier[namespace] )
identifier[meta_collection] = identifier[self] . identifier[_get_meta_collection] ( identifier[namespace] )
identifier[doc2] = identifier[self] . identifier[meta_database] [ identifier[meta_collection] ]. identifier[find_one_and_delete] (
{ identifier[self] . identifier[id_field] : identifier[document_id] }
)
keyword[if] identifier[doc2] keyword[and] identifier[doc2] . identifier[get] ( literal[string] ):
identifier[GridFS] ( identifier[self] . identifier[mongo] [ identifier[database] ], identifier[coll] ). identifier[delete] ( identifier[doc2] [ literal[string] ])
keyword[else] :
identifier[self] . identifier[mongo] [ identifier[database] ][ identifier[coll] ]. identifier[delete_one] ({ literal[string] : identifier[document_id] }) | def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
(database, coll) = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete({self.id_field: document_id})
if doc2 and doc2.get('gridfs_id'):
GridFS(self.mongo[database], coll).delete(doc2['gridfs_id']) # depends on [control=['if'], data=[]]
else:
self.mongo[database][coll].delete_one({'_id': document_id}) |
def jquery_js(version=None, migrate=False):
'''A shortcut to render a ``script`` tag for the packaged jQuery'''
version = version or settings.JQUERY_VERSION
suffix = '.min' if not settings.DEBUG else ''
libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
if _boolean(migrate):
libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix)))
return '\n'.join(libs) | def function[jquery_js, parameter[version, migrate]]:
constant[A shortcut to render a ``script`` tag for the packaged jQuery]
variable[version] assign[=] <ast.BoolOp object at 0x7da1b1120910>
variable[suffix] assign[=] <ast.IfExp object at 0x7da1b1122e90>
variable[libs] assign[=] list[[<ast.Call object at 0x7da1b1122710>]]
if call[name[_boolean], parameter[name[migrate]]] begin[:]
call[name[libs].append, parameter[call[name[js_lib], parameter[binary_operation[constant[jquery-migrate-%s%s.js] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1121900>, <ast.Name object at 0x7da1b11202e0>]]]]]]]
return[call[constant[
].join, parameter[name[libs]]]] | keyword[def] identifier[jquery_js] ( identifier[version] = keyword[None] , identifier[migrate] = keyword[False] ):
literal[string]
identifier[version] = identifier[version] keyword[or] identifier[settings] . identifier[JQUERY_VERSION]
identifier[suffix] = literal[string] keyword[if] keyword[not] identifier[settings] . identifier[DEBUG] keyword[else] literal[string]
identifier[libs] =[ identifier[js_lib] ( literal[string] %( identifier[version] , identifier[suffix] ))]
keyword[if] identifier[_boolean] ( identifier[migrate] ):
identifier[libs] . identifier[append] ( identifier[js_lib] ( literal[string] %( identifier[JQUERY_MIGRATE_VERSION] , identifier[suffix] )))
keyword[return] literal[string] . identifier[join] ( identifier[libs] ) | def jquery_js(version=None, migrate=False):
"""A shortcut to render a ``script`` tag for the packaged jQuery"""
version = version or settings.JQUERY_VERSION
suffix = '.min' if not settings.DEBUG else ''
libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
if _boolean(migrate):
libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix))) # depends on [control=['if'], data=[]]
return '\n'.join(libs) |
def get_level_nodes(self, level):
"""!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
"""
level_nodes = [];
if (level < self.__height):
level_nodes = self.__recursive_get_level_nodes(level, self.__root);
return level_nodes; | def function[get_level_nodes, parameter[self, level]]:
constant[!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
]
variable[level_nodes] assign[=] list[[]]
if compare[name[level] less[<] name[self].__height] begin[:]
variable[level_nodes] assign[=] call[name[self].__recursive_get_level_nodes, parameter[name[level], name[self].__root]]
return[name[level_nodes]] | keyword[def] identifier[get_level_nodes] ( identifier[self] , identifier[level] ):
literal[string]
identifier[level_nodes] =[];
keyword[if] ( identifier[level] < identifier[self] . identifier[__height] ):
identifier[level_nodes] = identifier[self] . identifier[__recursive_get_level_nodes] ( identifier[level] , identifier[self] . identifier[__root] );
keyword[return] identifier[level_nodes] ; | def get_level_nodes(self, level):
"""!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
"""
level_nodes = []
if level < self.__height:
level_nodes = self.__recursive_get_level_nodes(level, self.__root) # depends on [control=['if'], data=['level']]
return level_nodes |
def read_ready(self):
"""
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
"""
self._lock.acquire()
try:
if len(self._buffer) == 0:
return False
return True
finally:
self._lock.release() | def function[read_ready, parameter[self]]:
constant[
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
]
call[name[self]._lock.acquire, parameter[]]
<ast.Try object at 0x7da1b2198d60> | keyword[def] identifier[read_ready] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_lock] . identifier[acquire] ()
keyword[try] :
keyword[if] identifier[len] ( identifier[self] . identifier[_buffer] )== literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[finally] :
identifier[self] . identifier[_lock] . identifier[release] () | def read_ready(self):
"""
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
"""
self._lock.acquire()
try:
if len(self._buffer) == 0:
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['try'], data=[]]
finally:
self._lock.release() |
def is_connected(C, directed=True):
r"""Return true, if the input count matrix is completely connected.
Effectively checking if the number of connected components equals one.
Parameters
----------
C : scipy.sparse matrix or numpy ndarray
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
connected : boolean, returning true only if C is connected.
"""
nc = csgraph.connected_components(C, directed=directed, connection='strong', \
return_labels=False)
return nc == 1 | def function[is_connected, parameter[C, directed]]:
constant[Return true, if the input count matrix is completely connected.
Effectively checking if the number of connected components equals one.
Parameters
----------
C : scipy.sparse matrix or numpy ndarray
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
connected : boolean, returning true only if C is connected.
]
variable[nc] assign[=] call[name[csgraph].connected_components, parameter[name[C]]]
return[compare[name[nc] equal[==] constant[1]]] | keyword[def] identifier[is_connected] ( identifier[C] , identifier[directed] = keyword[True] ):
literal[string]
identifier[nc] = identifier[csgraph] . identifier[connected_components] ( identifier[C] , identifier[directed] = identifier[directed] , identifier[connection] = literal[string] , identifier[return_labels] = keyword[False] )
keyword[return] identifier[nc] == literal[int] | def is_connected(C, directed=True):
"""Return true, if the input count matrix is completely connected.
Effectively checking if the number of connected components equals one.
Parameters
----------
C : scipy.sparse matrix or numpy ndarray
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
connected : boolean, returning true only if C is connected.
"""
nc = csgraph.connected_components(C, directed=directed, connection='strong', return_labels=False)
return nc == 1 |
def get_data(conn_objs, providers):
    """Refresh node data using existing connection-objects."""
    # Map a base cloud name (the provider id minus any trailing digits)
    # to the module that fetches that cloud's nodes.
    svc_by_cloud = {"aws": nodes_aws,
                    "azure": nodes_az,
                    "gcp": nodes_gcp,
                    "alicloud": nodes_ali}
    sys.stdout.write("\rCollecting Info: ")
    sys.stdout.flush()
    spinner = busy_disp_on()
    # Pair each provider's fetch-module with its live connection object.
    fetch_args = [[svc_by_cloud[prov.rstrip('1234567890')], conn_objs[prov]]
                  for prov in providers]
    pool = Group()
    node_list = pool.map(get_nodes, fetch_args)
    pool.join()
    busy_disp_off(dobj=spinner)
    sys.stdout.write("\r \r")
    sys.stdout.write("\033[?25h")  # cursor back on
    sys.stdout.flush()
    return node_list
constant[Refresh node data using existing connection-objects.]
variable[cld_svc_map] assign[=] dictionary[[<ast.Constant object at 0x7da1b26771f0>, <ast.Constant object at 0x7da1b26777f0>, <ast.Constant object at 0x7da1b2677310>, <ast.Constant object at 0x7da1b2677ee0>], [<ast.Name object at 0x7da1b2676f50>, <ast.Name object at 0x7da1b2677e80>, <ast.Name object at 0x7da1b26776a0>, <ast.Name object at 0x7da1b26773a0>]]
call[name[sys].stdout.write, parameter[constant[
Collecting Info: ]]]
call[name[sys].stdout.flush, parameter[]]
variable[busy_obj] assign[=] call[name[busy_disp_on], parameter[]]
variable[collec_fn] assign[=] <ast.ListComp object at 0x7da1b26779a0>
variable[ngroup] assign[=] call[name[Group], parameter[]]
variable[node_list] assign[=] list[[]]
variable[node_list] assign[=] call[name[ngroup].map, parameter[name[get_nodes], name[collec_fn]]]
call[name[ngroup].join, parameter[]]
call[name[busy_disp_off], parameter[]]
call[name[sys].stdout.write, parameter[constant[
]]]
call[name[sys].stdout.write, parameter[constant[[?25h]]]
call[name[sys].stdout.flush, parameter[]]
return[name[node_list]] | keyword[def] identifier[get_data] ( identifier[conn_objs] , identifier[providers] ):
literal[string]
identifier[cld_svc_map] ={ literal[string] : identifier[nodes_aws] ,
literal[string] : identifier[nodes_az] ,
literal[string] : identifier[nodes_gcp] ,
literal[string] : identifier[nodes_ali] }
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[busy_obj] = identifier[busy_disp_on] ()
identifier[collec_fn] =[[ identifier[cld_svc_map] [ identifier[x] . identifier[rstrip] ( literal[string] )], identifier[conn_objs] [ identifier[x] ]]
keyword[for] identifier[x] keyword[in] identifier[providers] ]
identifier[ngroup] = identifier[Group] ()
identifier[node_list] =[]
identifier[node_list] = identifier[ngroup] . identifier[map] ( identifier[get_nodes] , identifier[collec_fn] )
identifier[ngroup] . identifier[join] ()
identifier[busy_disp_off] ( identifier[dobj] = identifier[busy_obj] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[return] identifier[node_list] | def get_data(conn_objs, providers):
"""Refresh node data using existing connection-objects."""
cld_svc_map = {'aws': nodes_aws, 'azure': nodes_az, 'gcp': nodes_gcp, 'alicloud': nodes_ali}
sys.stdout.write('\rCollecting Info: ')
sys.stdout.flush()
busy_obj = busy_disp_on()
collec_fn = [[cld_svc_map[x.rstrip('1234567890')], conn_objs[x]] for x in providers]
ngroup = Group()
node_list = []
node_list = ngroup.map(get_nodes, collec_fn)
ngroup.join()
busy_disp_off(dobj=busy_obj)
sys.stdout.write('\r \r')
sys.stdout.write('\x1b[?25h') # cursor back on
sys.stdout.flush()
return node_list |
def reset(self):
        """Resets the iterator to the beginning of the data.

        Shuffles the bucket visiting order and, within every bucket,
        jointly shuffles the parallel index/sentence/character/label
        arrays, then rebuilds the NDArray copies used to emit batches.
        """
        self.curr_idx = 0
        #shuffle data in each bucket
        random.shuffle(self.idx)
        for i, buck in enumerate(self.sentences):
            # NOTE(review): assumes `shuffle` applies one common permutation
            # to all four arrays (sklearn.utils.shuffle-style) so the
            # parallel arrays stay aligned — confirm against the import.
            self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
                                                                                            self.sentences[i],
                                                                                            self.characters[i],
                                                                                            self.label[i])
        # Fresh NDArray buffers; stale ones from the previous epoch are dropped.
        self.ndindex = []
        self.ndsent = []
        self.ndchar = []
        self.ndlabel = []
        #for each bucket of data
        for i, buck in enumerate(self.sentences):
            #append the lists with an array
            # NOTE(review): `ndarray.array` looks like mxnet.ndarray.array;
            # each bucket becomes one array of dtype self.dtype — verify.
            self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
            self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
            self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
            self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype))
constant[Resets the iterator to the beginning of the data.]
name[self].curr_idx assign[=] constant[0]
call[name[random].shuffle, parameter[name[self].idx]]
for taget[tuple[[<ast.Name object at 0x7da2054a5300>, <ast.Name object at 0x7da2054a6890>]]] in starred[call[name[enumerate], parameter[name[self].sentences]]] begin[:]
<ast.Tuple object at 0x7da2054a43d0> assign[=] call[name[shuffle], parameter[call[name[self].indices][name[i]], call[name[self].sentences][name[i]], call[name[self].characters][name[i]], call[name[self].label][name[i]]]]
name[self].ndindex assign[=] list[[]]
name[self].ndsent assign[=] list[[]]
name[self].ndchar assign[=] list[[]]
name[self].ndlabel assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b20667a0>, <ast.Name object at 0x7da1b2065b10>]]] in starred[call[name[enumerate], parameter[name[self].sentences]]] begin[:]
call[name[self].ndindex.append, parameter[call[name[ndarray].array, parameter[call[name[self].indices][name[i]]]]]]
call[name[self].ndsent.append, parameter[call[name[ndarray].array, parameter[call[name[self].sentences][name[i]]]]]]
call[name[self].ndchar.append, parameter[call[name[ndarray].array, parameter[call[name[self].characters][name[i]]]]]]
call[name[self].ndlabel.append, parameter[call[name[ndarray].array, parameter[call[name[self].label][name[i]]]]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[curr_idx] = literal[int]
identifier[random] . identifier[shuffle] ( identifier[self] . identifier[idx] )
keyword[for] identifier[i] , identifier[buck] keyword[in] identifier[enumerate] ( identifier[self] . identifier[sentences] ):
identifier[self] . identifier[indices] [ identifier[i] ], identifier[self] . identifier[sentences] [ identifier[i] ], identifier[self] . identifier[characters] [ identifier[i] ], identifier[self] . identifier[label] [ identifier[i] ]= identifier[shuffle] ( identifier[self] . identifier[indices] [ identifier[i] ],
identifier[self] . identifier[sentences] [ identifier[i] ],
identifier[self] . identifier[characters] [ identifier[i] ],
identifier[self] . identifier[label] [ identifier[i] ])
identifier[self] . identifier[ndindex] =[]
identifier[self] . identifier[ndsent] =[]
identifier[self] . identifier[ndchar] =[]
identifier[self] . identifier[ndlabel] =[]
keyword[for] identifier[i] , identifier[buck] keyword[in] identifier[enumerate] ( identifier[self] . identifier[sentences] ):
identifier[self] . identifier[ndindex] . identifier[append] ( identifier[ndarray] . identifier[array] ( identifier[self] . identifier[indices] [ identifier[i] ], identifier[dtype] = identifier[self] . identifier[dtype] ))
identifier[self] . identifier[ndsent] . identifier[append] ( identifier[ndarray] . identifier[array] ( identifier[self] . identifier[sentences] [ identifier[i] ], identifier[dtype] = identifier[self] . identifier[dtype] ))
identifier[self] . identifier[ndchar] . identifier[append] ( identifier[ndarray] . identifier[array] ( identifier[self] . identifier[characters] [ identifier[i] ], identifier[dtype] = identifier[self] . identifier[dtype] ))
identifier[self] . identifier[ndlabel] . identifier[append] ( identifier[ndarray] . identifier[array] ( identifier[self] . identifier[label] [ identifier[i] ], identifier[dtype] = identifier[self] . identifier[dtype] )) | def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
#shuffle data in each bucket
random.shuffle(self.idx)
for (i, buck) in enumerate(self.sentences):
(self.indices[i], self.sentences[i], self.characters[i], self.label[i]) = shuffle(self.indices[i], self.sentences[i], self.characters[i], self.label[i]) # depends on [control=['for'], data=[]]
self.ndindex = []
self.ndsent = []
self.ndchar = []
self.ndlabel = []
#for each bucket of data
for (i, buck) in enumerate(self.sentences):
#append the lists with an array
self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) # depends on [control=['for'], data=[]] |
def concatenate_variables(scope, variables, container):
    '''
    Allocate the operators needed to build a single float tensor by
    concatenating all input variables. Integer inputs are cast to float
    before concatenation; string tensors cannot be mixed with numeric ones.

    :param scope: graph scope used to generate unique operator/variable names
    :param variables: input variables to combine; each is assumed to have
        shape [1, C_i] (see the dimension collection below)
    :param container: model container that receives the created nodes
    :return: name of the variable holding the concatenated result
    :raises RuntimeError: if numeric and string tensors are mixed
    '''
    # Check if it's possible to concatenate those inputs.
    type_set = set(type(variable.type) for variable in variables)
    number_type_set = {FloatType, FloatTensorType, Int64Type, Int64TensorType}
    if StringType in type_set and any(number_type in type_set for number_type in number_type_set):
        raise RuntimeError('We are not able to concatenate numerical tensor(s) and string tensor(s)')
    input_names = []  # input variables' names we want to concatenate
    input_dims = []  # dimensions of the variables that is going to be concatenated
    # Collect input variable names and do cast if needed
    for variable in variables:
        if isinstance(variable.type, (Int64TensorType, Int64Type)):
            # Integers must be converted to floats before vectorizing.
            input_names.append(convert_integer_to_float(scope, variable, container))
        else:
            input_names.append(variable.full_name)
        # We assume input variables' shape are [1, C_1], ..., [1, C_n] if there are n inputs.
        input_dims.append(variable.type.shape[1])
    if len(input_names) == 1:
        # No need to concatenate tensors if there is only one input
        return input_names[0]
    else:
        # To combine all inputs, we need a FeatureVectorizer
        op_type = 'FeatureVectorizer'
        attrs = {'name': scope.get_unique_operator_name(op_type), 'inputdimensions': input_dims}
        # Create a variable name to capture feature vectorizer's output
        concatenated_name = scope.get_unique_variable_name('concatenated')
        # Set up our FeatureVectorizer
        container.add_node(op_type, input_names, concatenated_name, op_domain='ai.onnx.ml', **attrs)
        return concatenated_name
constant[
This function allocate operators to from a float tensor by concatenating all input variables. Notice that if all
integer inputs would be converted to floats before concatenation.
]
variable[type_set] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da204623820>]]
variable[number_type_set] assign[=] <ast.Set object at 0x7da204620310>
if <ast.BoolOp object at 0x7da204623010> begin[:]
<ast.Raise object at 0x7da204620850>
variable[input_names] assign[=] list[[]]
variable[input_dims] assign[=] list[[]]
for taget[name[variable]] in starred[name[variables]] begin[:]
if call[name[isinstance], parameter[name[variable].type, tuple[[<ast.Name object at 0x7da2046226e0>, <ast.Name object at 0x7da204621d80>]]]] begin[:]
call[name[input_names].append, parameter[call[name[convert_integer_to_float], parameter[name[scope], name[variable], name[container]]]]]
call[name[input_dims].append, parameter[call[name[variable].type.shape][constant[1]]]]
if compare[call[name[len], parameter[name[input_names]]] equal[==] constant[1]] begin[:]
return[call[name[input_names]][constant[0]]] | keyword[def] identifier[concatenate_variables] ( identifier[scope] , identifier[variables] , identifier[container] ):
literal[string]
identifier[type_set] = identifier[set] ( identifier[type] ( identifier[variable] . identifier[type] ) keyword[for] identifier[variable] keyword[in] identifier[variables] )
identifier[number_type_set] ={ identifier[FloatType] , identifier[FloatTensorType] , identifier[Int64Type] , identifier[Int64TensorType] }
keyword[if] identifier[StringType] keyword[in] identifier[type_set] keyword[and] identifier[any] ( identifier[number_type] keyword[in] identifier[type_set] keyword[for] identifier[number_type] keyword[in] identifier[number_type_set] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[input_names] =[]
identifier[input_dims] =[]
keyword[for] identifier[variable] keyword[in] identifier[variables] :
keyword[if] identifier[isinstance] ( identifier[variable] . identifier[type] ,( identifier[Int64TensorType] , identifier[Int64Type] )):
identifier[input_names] . identifier[append] ( identifier[convert_integer_to_float] ( identifier[scope] , identifier[variable] , identifier[container] ))
keyword[else] :
identifier[input_names] . identifier[append] ( identifier[variable] . identifier[full_name] )
identifier[input_dims] . identifier[append] ( identifier[variable] . identifier[type] . identifier[shape] [ literal[int] ])
keyword[if] identifier[len] ( identifier[input_names] )== literal[int] :
keyword[return] identifier[input_names] [ literal[int] ]
keyword[else] :
identifier[op_type] = literal[string]
identifier[attrs] ={ literal[string] : identifier[scope] . identifier[get_unique_operator_name] ( identifier[op_type] ), literal[string] : identifier[input_dims] }
identifier[concatenated_name] = identifier[scope] . identifier[get_unique_variable_name] ( literal[string] )
identifier[container] . identifier[add_node] ( identifier[op_type] , identifier[input_names] , identifier[concatenated_name] , identifier[op_domain] = literal[string] ,** identifier[attrs] )
keyword[return] identifier[concatenated_name] | def concatenate_variables(scope, variables, container):
"""
This function allocate operators to from a float tensor by concatenating all input variables. Notice that if all
integer inputs would be converted to floats before concatenation.
"""
# Check if it's possible to concatenate those inputs.
type_set = set((type(variable.type) for variable in variables))
number_type_set = {FloatType, FloatTensorType, Int64Type, Int64TensorType}
if StringType in type_set and any((number_type in type_set for number_type in number_type_set)):
raise RuntimeError('We are not able to concatenate numerical tensor(s) and string tensor(s)') # depends on [control=['if'], data=[]]
input_names = [] # input variables' names we want to concatenate
input_dims = [] # dimensions of the variables that is going to be concatenated
# Collect input variable names and do cast if needed
for variable in variables:
if isinstance(variable.type, (Int64TensorType, Int64Type)):
input_names.append(convert_integer_to_float(scope, variable, container)) # depends on [control=['if'], data=[]]
else:
input_names.append(variable.full_name)
# We assume input variables' shape are [1, C_1], ..., [1, C_n] if there are n inputs.
input_dims.append(variable.type.shape[1]) # depends on [control=['for'], data=['variable']]
if len(input_names) == 1:
# No need to concatenate tensors if there is only one input
return input_names[0] # depends on [control=['if'], data=[]]
else:
# To combine all inputs, we need a FeatureVectorizer
op_type = 'FeatureVectorizer'
attrs = {'name': scope.get_unique_operator_name(op_type), 'inputdimensions': input_dims}
# Create a variable name to capture feature vectorizer's output
concatenated_name = scope.get_unique_variable_name('concatenated')
# Set up our FeatureVectorizer
container.add_node(op_type, input_names, concatenated_name, op_domain='ai.onnx.ml', **attrs)
return concatenated_name |
def prepare_plot_data(data_file):
    """
    Return a list of Plotly elements representing the network graph.

    Reads a GML graph from ``data_file``, computes a "graphopt" layout,
    detects communities with multilevel (Louvain) clustering, and builds
    one line trace for all edges plus one marker trace per community
    (nodes colored by community, labelled on hover).

    :param data_file: path to a GML file readable by igraph
    :return: list of Scatter traces (edge trace first, then one per community)
    """
    G = ig.Graph.Read_GML(data_file)
    layout = G.layout('graphopt')
    labels = list(G.vs['label'])
    E = [e.tuple for e in G.es]
    community = G.community_multilevel().membership
    communities = len(set(community))
    color_list = community_colors(communities)
    # Fix: the original also built per-node Xn/Yn coordinate lists (and the
    # node count N) that were never used — dead O(N) work, removed.
    # Edge coordinates: each edge contributes its two endpoints followed by
    # None so Plotly renders the segments as disconnected lines.
    Xe = []
    Ye = []
    for e in E:
        Xe += [layout[e[0]][0], layout[e[1]][0], None]
        Ye += [layout[e[0]][1], layout[e[1]][1], None]
    lines = Scatter(x=Xe,
                    y=Ye,
                    mode='lines',
                    line=Line(color='rgb(210,210,210)', width=1),
                    hoverinfo='none'
                    )
    plot_data = [lines]
    # Bucket node coordinates and labels by community membership.
    node_x = [[] for i in range(communities)]
    node_y = [[] for i in range(communities)]
    node_labels = [[] for i in range(communities)]
    for j in range(len(community)):
        index = community[j]
        node_x[index].append(layout[j][0])
        node_y[index].append(layout[j][1])
        node_labels[index].append(labels[j])
    # One marker trace per community so each community gets its own color.
    for i in range(communities):
        trace = Scatter(x=node_x[i],
                        y=node_y[i],
                        mode='markers',
                        name='ntw',
                        marker=Marker(symbol='dot',
                                      size=5,
                                      color=color_list[i],
                                      line=Line(
                                          color='rgb(50,50,50)', width=0.5)
                                      ),
                        text=node_labels[i],
                        hoverinfo='text'
                        )
        plot_data.append(trace)
    return plot_data
constant[
Return a list of Plotly elements representing the network graph
]
variable[G] assign[=] call[name[ig].Graph.Read_GML, parameter[name[data_file]]]
variable[layout] assign[=] call[name[G].layout, parameter[constant[graphopt]]]
variable[labels] assign[=] call[name[list], parameter[call[name[G].vs][constant[label]]]]
variable[N] assign[=] call[name[len], parameter[name[labels]]]
variable[E] assign[=] <ast.ListComp object at 0x7da1b0aa30a0>
variable[community] assign[=] call[name[G].community_multilevel, parameter[]].membership
variable[communities] assign[=] call[name[len], parameter[call[name[set], parameter[name[community]]]]]
variable[color_list] assign[=] call[name[community_colors], parameter[name[communities]]]
variable[Xn] assign[=] <ast.ListComp object at 0x7da1b0aa0940>
variable[Yn] assign[=] <ast.ListComp object at 0x7da1b0aa0d90>
variable[Xe] assign[=] list[[]]
variable[Ye] assign[=] list[[]]
for taget[name[e]] in starred[name[E]] begin[:]
<ast.AugAssign object at 0x7da1b0aa3370>
<ast.AugAssign object at 0x7da1b0aa36a0>
variable[lines] assign[=] call[name[Scatter], parameter[]]
variable[plot_data] assign[=] list[[<ast.Name object at 0x7da1b0a808e0>]]
variable[node_x] assign[=] <ast.ListComp object at 0x7da1b0a81060>
variable[node_y] assign[=] <ast.ListComp object at 0x7da1b0a80a90>
variable[node_labels] assign[=] <ast.ListComp object at 0x7da1b0a80cd0>
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[community]]]]]] begin[:]
variable[index] assign[=] call[name[community]][name[j]]
call[call[name[node_x]][name[index]].append, parameter[call[call[name[layout]][name[j]]][constant[0]]]]
call[call[name[node_y]][name[index]].append, parameter[call[call[name[layout]][name[j]]][constant[1]]]]
call[call[name[node_labels]][name[index]].append, parameter[call[name[labels]][name[j]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[communities]]]] begin[:]
variable[trace] assign[=] call[name[Scatter], parameter[]]
call[name[plot_data].append, parameter[name[trace]]]
return[name[plot_data]] | keyword[def] identifier[prepare_plot_data] ( identifier[data_file] ):
literal[string]
identifier[G] = identifier[ig] . identifier[Graph] . identifier[Read_GML] ( identifier[data_file] )
identifier[layout] = identifier[G] . identifier[layout] ( literal[string] )
identifier[labels] = identifier[list] ( identifier[G] . identifier[vs] [ literal[string] ])
identifier[N] = identifier[len] ( identifier[labels] )
identifier[E] =[ identifier[e] . identifier[tuple] keyword[for] identifier[e] keyword[in] identifier[G] . identifier[es] ]
identifier[community] = identifier[G] . identifier[community_multilevel] (). identifier[membership]
identifier[communities] = identifier[len] ( identifier[set] ( identifier[community] ))
identifier[color_list] = identifier[community_colors] ( identifier[communities] )
identifier[Xn] =[ identifier[layout] [ identifier[k] ][ literal[int] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[N] )]
identifier[Yn] =[ identifier[layout] [ identifier[k] ][ literal[int] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[N] )]
identifier[Xe] =[]
identifier[Ye] =[]
keyword[for] identifier[e] keyword[in] identifier[E] :
identifier[Xe] +=[ identifier[layout] [ identifier[e] [ literal[int] ]][ literal[int] ], identifier[layout] [ identifier[e] [ literal[int] ]][ literal[int] ], keyword[None] ]
identifier[Ye] +=[ identifier[layout] [ identifier[e] [ literal[int] ]][ literal[int] ], identifier[layout] [ identifier[e] [ literal[int] ]][ literal[int] ], keyword[None] ]
identifier[lines] = identifier[Scatter] ( identifier[x] = identifier[Xe] ,
identifier[y] = identifier[Ye] ,
identifier[mode] = literal[string] ,
identifier[line] = identifier[Line] ( identifier[color] = literal[string] , identifier[width] = literal[int] ),
identifier[hoverinfo] = literal[string]
)
identifier[plot_data] =[ identifier[lines] ]
identifier[node_x] =[[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[communities] )]
identifier[node_y] =[[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[communities] )]
identifier[node_labels] =[[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[communities] )]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[community] )):
identifier[index] = identifier[community] [ identifier[j] ]
identifier[node_x] [ identifier[index] ]. identifier[append] ( identifier[layout] [ identifier[j] ][ literal[int] ])
identifier[node_y] [ identifier[index] ]. identifier[append] ( identifier[layout] [ identifier[j] ][ literal[int] ])
identifier[node_labels] [ identifier[index] ]. identifier[append] ( identifier[labels] [ identifier[j] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[communities] ):
identifier[trace] = identifier[Scatter] ( identifier[x] = identifier[node_x] [ identifier[i] ],
identifier[y] = identifier[node_y] [ identifier[i] ],
identifier[mode] = literal[string] ,
identifier[name] = literal[string] ,
identifier[marker] = identifier[Marker] ( identifier[symbol] = literal[string] ,
identifier[size] = literal[int] ,
identifier[color] = identifier[color_list] [ identifier[i] ],
identifier[line] = identifier[Line] (
identifier[color] = literal[string] , identifier[width] = literal[int] )
),
identifier[text] = identifier[node_labels] [ identifier[i] ],
identifier[hoverinfo] = literal[string]
)
identifier[plot_data] . identifier[append] ( identifier[trace] )
keyword[return] identifier[plot_data] | def prepare_plot_data(data_file):
"""
Return a list of Plotly elements representing the network graph
"""
G = ig.Graph.Read_GML(data_file)
layout = G.layout('graphopt')
labels = list(G.vs['label'])
N = len(labels)
E = [e.tuple for e in G.es]
community = G.community_multilevel().membership
communities = len(set(community))
color_list = community_colors(communities)
Xn = [layout[k][0] for k in range(N)]
Yn = [layout[k][1] for k in range(N)]
Xe = []
Ye = []
for e in E:
Xe += [layout[e[0]][0], layout[e[1]][0], None]
Ye += [layout[e[0]][1], layout[e[1]][1], None] # depends on [control=['for'], data=['e']]
lines = Scatter(x=Xe, y=Ye, mode='lines', line=Line(color='rgb(210,210,210)', width=1), hoverinfo='none')
plot_data = [lines]
node_x = [[] for i in range(communities)]
node_y = [[] for i in range(communities)]
node_labels = [[] for i in range(communities)]
for j in range(len(community)):
index = community[j]
node_x[index].append(layout[j][0])
node_y[index].append(layout[j][1])
node_labels[index].append(labels[j]) # depends on [control=['for'], data=['j']]
for i in range(communities):
trace = Scatter(x=node_x[i], y=node_y[i], mode='markers', name='ntw', marker=Marker(symbol='dot', size=5, color=color_list[i], line=Line(color='rgb(50,50,50)', width=0.5)), text=node_labels[i], hoverinfo='text')
plot_data.append(trace) # depends on [control=['for'], data=['i']]
return plot_data |
def find_by_workspace(self, workspace, params=None, **options):
        """Returns the compact tag records for all tags in the workspace.

        Parameters
        ----------
        workspace : {Id} The workspace or organization to find tags in.
        [params] : {Object} Parameters for the request
        """
        # Fix: use a None sentinel instead of a mutable default argument.
        # A literal `params={}` default is created once and shared across
        # calls, so any downstream mutation would leak between requests.
        if params is None:
            params = {}
        path = "/workspaces/%s/tags" % (workspace)
        return self.client.get_collection(path, params, **options)
constant[Returns the compact tag records for all tags in the workspace.
Parameters
----------
workspace : {Id} The workspace or organization to find tags in.
[params] : {Object} Parameters for the request
]
variable[path] assign[=] binary_operation[constant[/workspaces/%s/tags] <ast.Mod object at 0x7da2590d6920> name[workspace]]
return[call[name[self].client.get_collection, parameter[name[path], name[params]]]] | keyword[def] identifier[find_by_workspace] ( identifier[self] , identifier[workspace] , identifier[params] ={},** identifier[options] ):
literal[string]
identifier[path] = literal[string] %( identifier[workspace] )
keyword[return] identifier[self] . identifier[client] . identifier[get_collection] ( identifier[path] , identifier[params] ,** identifier[options] ) | def find_by_workspace(self, workspace, params={}, **options):
"""Returns the compact tag records for all tags in the workspace.
Parameters
----------
workspace : {Id} The workspace or organization to find tags in.
[params] : {Object} Parameters for the request
"""
path = '/workspaces/%s/tags' % workspace
return self.client.get_collection(path, params, **options) |
def parse_sidebar(self, anime_page):
        """Parses the DOM and returns anime attributes in the sidebar.

        :type anime_page: :class:`bs4.BeautifulSoup`
        :param anime_page: MAL anime page's DOM
        :rtype: dict
        :return: anime attributes
        :raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError`

        Each sidebar field is parsed inside its own try block; a parse
        failure for one field is swallowed (leaving that key unset) unless
        ``self.session.suppress_parse_exceptions`` is False.
        """
        # if MAL says the series doesn't exist, raise an InvalidAnimeError.
        error_tag = anime_page.find(u'div', {'class': 'badresult'})
        if error_tag:
            raise InvalidAnimeError(self.id)
        title_tag = anime_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
        if not title_tag.find(u'div'):
            # otherwise, raise a MalformedAnimePageError.
            raise MalformedAnimePageError(self.id, anime_page, message="Could not find title div")
        # Attributes common to all media types come from the base class.
        anime_info = super(Anime, self).parse_sidebar(anime_page)
        info_panel_first = anime_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
        # --- Episode count -------------------------------------------------
        try:
            episode_tag = info_panel_first.find(text=u'Episodes:').parent.parent
            utilities.extract_tags(episode_tag.find_all(u'span', {'class': 'dark_text'}))
            # "Unknown" episode counts are normalized to 0.
            anime_info[u'episodes'] = int(episode_tag.text.strip()) if episode_tag.text.strip() != 'Unknown' else 0
        except:
            # NOTE(review): bare except deliberately swallows parse errors
            # unless the session asks for them to be re-raised.
            if not self.session.suppress_parse_exceptions:
                raise
        # --- Air dates -----------------------------------------------------
        try:
            aired_tag = info_panel_first.find(text=u'Aired:').parent.parent
            utilities.extract_tags(aired_tag.find_all(u'span', {'class': 'dark_text'}))
            aired_parts = aired_tag.text.strip().split(u' to ')
            if len(aired_parts) == 1:
                # this aired once.
                try:
                    aired_date = utilities.parse_profile_date(aired_parts[0], suppress=self.session.suppress_parse_exceptions)
                except ValueError:
                    raise MalformedAnimePageError(self.id, aired_parts[0], message="Could not parse single air date")
                # Single-date airings are stored as a 1-tuple.
                anime_info[u'aired'] = (aired_date,)
            else:
                # two airing dates.
                try:
                    air_start = utilities.parse_profile_date(aired_parts[0], suppress=self.session.suppress_parse_exceptions)
                except ValueError:
                    raise MalformedAnimePageError(self.id, aired_parts[0], message="Could not parse first of two air dates")
                try:
                    air_end = utilities.parse_profile_date(aired_parts[1], suppress=self.session.suppress_parse_exceptions)
                except ValueError:
                    raise MalformedAnimePageError(self.id, aired_parts[1], message="Could not parse second of two air dates")
                anime_info[u'aired'] = (air_start, air_end)
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        # --- Producers -----------------------------------------------------
        try:
            producers_tag = info_panel_first.find(text=u'Producers:').parent.parent
            utilities.extract_tags(producers_tag.find_all(u'span', {'class': 'dark_text'}))
            utilities.extract_tags(producers_tag.find_all(u'sup'))
            anime_info[u'producers'] = []
            for producer_link in producers_tag.find_all('a'):
                if producer_link.text == u'add some':
                    # MAL is saying "None found, add some".
                    break
                link_parts = producer_link.get('href').split('p=')
                # of the form: /anime.php?p=14
                if len(link_parts) > 1:
                    anime_info[u'producers'].append(self.session.producer(int(link_parts[1])).set({'name': producer_link.text}))
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        # --- Episode duration ----------------------------------------------
        try:
            duration_tag = info_panel_first.find(text=u'Duration:').parent.parent
            utilities.extract_tags(duration_tag.find_all(u'span', {'class': 'dark_text'}))
            anime_info[u'duration'] = duration_tag.text.strip()
            duration_parts = [part.strip() for part in anime_info[u'duration'].split(u'.')]
            duration_mins = 0
            # Convert e.g. "1 hr. 24 min." into a total minute count.
            for part in duration_parts:
                part_match = re.match(u'(?P<num>[0-9]+)', part)
                if not part_match:
                    continue
                part_volume = int(part_match.group(u'num'))
                if part.endswith(u'hr'):
                    duration_mins += part_volume * 60
                elif part.endswith(u'min'):
                    duration_mins += part_volume
            anime_info[u'duration'] = datetime.timedelta(minutes=duration_mins)
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        # --- Content rating ------------------------------------------------
        try:
            rating_tag = info_panel_first.find(text=u'Rating:').parent.parent
            utilities.extract_tags(rating_tag.find_all(u'span', {'class': 'dark_text'}))
            anime_info[u'rating'] = rating_tag.text.strip()
        except:
            if not self.session.suppress_parse_exceptions:
                raise
        return anime_info
constant[Parses the DOM and returns anime attributes in the sidebar.
:type anime_page: :class:`bs4.BeautifulSoup`
:param anime_page: MAL anime page's DOM
:rtype: dict
:return: anime attributes
:raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError`
]
variable[error_tag] assign[=] call[name[anime_page].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da18fe92380>], [<ast.Constant object at 0x7da18fe90820>]]]]
if name[error_tag] begin[:]
<ast.Raise object at 0x7da18fe91840>
variable[title_tag] assign[=] call[call[name[anime_page].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da18fe911b0>], [<ast.Constant object at 0x7da18fe92c20>]]]].find, parameter[constant[h1]]]
if <ast.UnaryOp object at 0x7da18fe90910> begin[:]
<ast.Raise object at 0x7da1b2547c10>
variable[anime_info] assign[=] call[call[name[super], parameter[name[Anime], name[self]]].parse_sidebar, parameter[name[anime_page]]]
variable[info_panel_first] assign[=] call[call[call[name[anime_page].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da204621510>], [<ast.Constant object at 0x7da204621300>]]]].find, parameter[constant[table]]].find, parameter[constant[td]]]
<ast.Try object at 0x7da204622890>
<ast.Try object at 0x7da1b26779a0>
<ast.Try object at 0x7da1b2659390>
<ast.Try object at 0x7da1b265ae90>
<ast.Try object at 0x7da1b265e8c0>
return[name[anime_info]] | keyword[def] identifier[parse_sidebar] ( identifier[self] , identifier[anime_page] ):
literal[string]
identifier[error_tag] = identifier[anime_page] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[error_tag] :
keyword[raise] identifier[InvalidAnimeError] ( identifier[self] . identifier[id] )
identifier[title_tag] = identifier[anime_page] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] )
keyword[if] keyword[not] identifier[title_tag] . identifier[find] ( literal[string] ):
keyword[raise] identifier[MalformedAnimePageError] ( identifier[self] . identifier[id] , identifier[anime_page] , identifier[message] = literal[string] )
identifier[anime_info] = identifier[super] ( identifier[Anime] , identifier[self] ). identifier[parse_sidebar] ( identifier[anime_page] )
identifier[info_panel_first] = identifier[anime_page] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ). identifier[find] ( literal[string] )
keyword[try] :
identifier[episode_tag] = identifier[info_panel_first] . identifier[find] ( identifier[text] = literal[string] ). identifier[parent] . identifier[parent]
identifier[utilities] . identifier[extract_tags] ( identifier[episode_tag] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }))
identifier[anime_info] [ literal[string] ]= identifier[int] ( identifier[episode_tag] . identifier[text] . identifier[strip] ()) keyword[if] identifier[episode_tag] . identifier[text] . identifier[strip] ()!= literal[string] keyword[else] literal[int]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[aired_tag] = identifier[info_panel_first] . identifier[find] ( identifier[text] = literal[string] ). identifier[parent] . identifier[parent]
identifier[utilities] . identifier[extract_tags] ( identifier[aired_tag] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }))
identifier[aired_parts] = identifier[aired_tag] . identifier[text] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[aired_parts] )== literal[int] :
keyword[try] :
identifier[aired_date] = identifier[utilities] . identifier[parse_profile_date] ( identifier[aired_parts] [ literal[int] ], identifier[suppress] = identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[MalformedAnimePageError] ( identifier[self] . identifier[id] , identifier[aired_parts] [ literal[int] ], identifier[message] = literal[string] )
identifier[anime_info] [ literal[string] ]=( identifier[aired_date] ,)
keyword[else] :
keyword[try] :
identifier[air_start] = identifier[utilities] . identifier[parse_profile_date] ( identifier[aired_parts] [ literal[int] ], identifier[suppress] = identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[MalformedAnimePageError] ( identifier[self] . identifier[id] , identifier[aired_parts] [ literal[int] ], identifier[message] = literal[string] )
keyword[try] :
identifier[air_end] = identifier[utilities] . identifier[parse_profile_date] ( identifier[aired_parts] [ literal[int] ], identifier[suppress] = identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[MalformedAnimePageError] ( identifier[self] . identifier[id] , identifier[aired_parts] [ literal[int] ], identifier[message] = literal[string] )
identifier[anime_info] [ literal[string] ]=( identifier[air_start] , identifier[air_end] )
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[producers_tag] = identifier[info_panel_first] . identifier[find] ( identifier[text] = literal[string] ). identifier[parent] . identifier[parent]
identifier[utilities] . identifier[extract_tags] ( identifier[producers_tag] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }))
identifier[utilities] . identifier[extract_tags] ( identifier[producers_tag] . identifier[find_all] ( literal[string] ))
identifier[anime_info] [ literal[string] ]=[]
keyword[for] identifier[producer_link] keyword[in] identifier[producers_tag] . identifier[find_all] ( literal[string] ):
keyword[if] identifier[producer_link] . identifier[text] == literal[string] :
keyword[break]
identifier[link_parts] = identifier[producer_link] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[link_parts] )> literal[int] :
identifier[anime_info] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[session] . identifier[producer] ( identifier[int] ( identifier[link_parts] [ literal[int] ])). identifier[set] ({ literal[string] : identifier[producer_link] . identifier[text] }))
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[duration_tag] = identifier[info_panel_first] . identifier[find] ( identifier[text] = literal[string] ). identifier[parent] . identifier[parent]
identifier[utilities] . identifier[extract_tags] ( identifier[duration_tag] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }))
identifier[anime_info] [ literal[string] ]= identifier[duration_tag] . identifier[text] . identifier[strip] ()
identifier[duration_parts] =[ identifier[part] . identifier[strip] () keyword[for] identifier[part] keyword[in] identifier[anime_info] [ literal[string] ]. identifier[split] ( literal[string] )]
identifier[duration_mins] = literal[int]
keyword[for] identifier[part] keyword[in] identifier[duration_parts] :
identifier[part_match] = identifier[re] . identifier[match] ( literal[string] , identifier[part] )
keyword[if] keyword[not] identifier[part_match] :
keyword[continue]
identifier[part_volume] = identifier[int] ( identifier[part_match] . identifier[group] ( literal[string] ))
keyword[if] identifier[part] . identifier[endswith] ( literal[string] ):
identifier[duration_mins] += identifier[part_volume] * literal[int]
keyword[elif] identifier[part] . identifier[endswith] ( literal[string] ):
identifier[duration_mins] += identifier[part_volume]
identifier[anime_info] [ literal[string] ]= identifier[datetime] . identifier[timedelta] ( identifier[minutes] = identifier[duration_mins] )
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[rating_tag] = identifier[info_panel_first] . identifier[find] ( identifier[text] = literal[string] ). identifier[parent] . identifier[parent]
identifier[utilities] . identifier[extract_tags] ( identifier[rating_tag] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }))
identifier[anime_info] [ literal[string] ]= identifier[rating_tag] . identifier[text] . identifier[strip] ()
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[return] identifier[anime_info] | def parse_sidebar(self, anime_page):
"""Parses the DOM and returns anime attributes in the sidebar.
:type anime_page: :class:`bs4.BeautifulSoup`
:param anime_page: MAL anime page's DOM
:rtype: dict
:return: anime attributes
:raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError`
"""
# if MAL says the series doesn't exist, raise an InvalidAnimeError.
error_tag = anime_page.find(u'div', {'class': 'badresult'})
if error_tag:
raise InvalidAnimeError(self.id) # depends on [control=['if'], data=[]]
title_tag = anime_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
if not title_tag.find(u'div'):
# otherwise, raise a MalformedAnimePageError.
raise MalformedAnimePageError(self.id, anime_page, message='Could not find title div') # depends on [control=['if'], data=[]]
anime_info = super(Anime, self).parse_sidebar(anime_page)
info_panel_first = anime_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
try:
episode_tag = info_panel_first.find(text=u'Episodes:').parent.parent
utilities.extract_tags(episode_tag.find_all(u'span', {'class': 'dark_text'}))
anime_info[u'episodes'] = int(episode_tag.text.strip()) if episode_tag.text.strip() != 'Unknown' else 0 # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
aired_tag = info_panel_first.find(text=u'Aired:').parent.parent
utilities.extract_tags(aired_tag.find_all(u'span', {'class': 'dark_text'}))
aired_parts = aired_tag.text.strip().split(u' to ')
if len(aired_parts) == 1:
# this aired once.
try:
aired_date = utilities.parse_profile_date(aired_parts[0], suppress=self.session.suppress_parse_exceptions) # depends on [control=['try'], data=[]]
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[0], message='Could not parse single air date') # depends on [control=['except'], data=[]]
anime_info[u'aired'] = (aired_date,) # depends on [control=['if'], data=[]]
else:
# two airing dates.
try:
air_start = utilities.parse_profile_date(aired_parts[0], suppress=self.session.suppress_parse_exceptions) # depends on [control=['try'], data=[]]
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[0], message='Could not parse first of two air dates') # depends on [control=['except'], data=[]]
try:
air_end = utilities.parse_profile_date(aired_parts[1], suppress=self.session.suppress_parse_exceptions) # depends on [control=['try'], data=[]]
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[1], message='Could not parse second of two air dates') # depends on [control=['except'], data=[]]
anime_info[u'aired'] = (air_start, air_end) # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
producers_tag = info_panel_first.find(text=u'Producers:').parent.parent
utilities.extract_tags(producers_tag.find_all(u'span', {'class': 'dark_text'}))
utilities.extract_tags(producers_tag.find_all(u'sup'))
anime_info[u'producers'] = []
for producer_link in producers_tag.find_all('a'):
if producer_link.text == u'add some':
# MAL is saying "None found, add some".
break # depends on [control=['if'], data=[]]
link_parts = producer_link.get('href').split('p=')
# of the form: /anime.php?p=14
if len(link_parts) > 1:
anime_info[u'producers'].append(self.session.producer(int(link_parts[1])).set({'name': producer_link.text})) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['producer_link']] # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
duration_tag = info_panel_first.find(text=u'Duration:').parent.parent
utilities.extract_tags(duration_tag.find_all(u'span', {'class': 'dark_text'}))
anime_info[u'duration'] = duration_tag.text.strip()
duration_parts = [part.strip() for part in anime_info[u'duration'].split(u'.')]
duration_mins = 0
for part in duration_parts:
part_match = re.match(u'(?P<num>[0-9]+)', part)
if not part_match:
continue # depends on [control=['if'], data=[]]
part_volume = int(part_match.group(u'num'))
if part.endswith(u'hr'):
duration_mins += part_volume * 60 # depends on [control=['if'], data=[]]
elif part.endswith(u'min'):
duration_mins += part_volume # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']]
anime_info[u'duration'] = datetime.timedelta(minutes=duration_mins) # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
rating_tag = info_panel_first.find(text=u'Rating:').parent.parent
utilities.extract_tags(rating_tag.find_all(u'span', {'class': 'dark_text'}))
anime_info[u'rating'] = rating_tag.text.strip() # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
return anime_info |
def create_category(cls, category, **kwargs):
"""Create Category
Create a new Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_category(category, async=True)
>>> result = thread.get()
:param async bool
:param Category category: Attributes of category to create (required)
:return: Category
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_category_with_http_info(category, **kwargs)
else:
(data) = cls._create_category_with_http_info(category, **kwargs)
return data | def function[create_category, parameter[cls, category]]:
constant[Create Category
Create a new Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_category(category, async=True)
>>> result = thread.get()
:param async bool
:param Category category: Attributes of category to create (required)
:return: Category
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._create_category_with_http_info, parameter[name[category]]]] | keyword[def] identifier[create_category] ( identifier[cls] , identifier[category] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_create_category_with_http_info] ( identifier[category] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_create_category_with_http_info] ( identifier[category] ,** identifier[kwargs] )
keyword[return] identifier[data] | def create_category(cls, category, **kwargs):
"""Create Category
Create a new Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_category(category, async=True)
>>> result = thread.get()
:param async bool
:param Category category: Attributes of category to create (required)
:return: Category
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_category_with_http_info(category, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._create_category_with_http_info(category, **kwargs)
return data |
def aggregate_failed(self) -> List:
"""Count the number of failed jobs per category (name)."""
categories = self.session.query(
self.Job.name.label('name'),
sqa.func.count(self.Job.id).label('count')
).filter(self.Job.status != 'cancelled').group_by(self.Job.name).all()
data = [{'name': category.name, 'count': category.count} for category in categories]
return data | def function[aggregate_failed, parameter[self]]:
constant[Count the number of failed jobs per category (name).]
variable[categories] assign[=] call[call[call[call[name[self].session.query, parameter[call[name[self].Job.name.label, parameter[constant[name]]], call[call[name[sqa].func.count, parameter[name[self].Job.id]].label, parameter[constant[count]]]]].filter, parameter[compare[name[self].Job.status not_equal[!=] constant[cancelled]]]].group_by, parameter[name[self].Job.name]].all, parameter[]]
variable[data] assign[=] <ast.ListComp object at 0x7da18bcc8460>
return[name[data]] | keyword[def] identifier[aggregate_failed] ( identifier[self] )-> identifier[List] :
literal[string]
identifier[categories] = identifier[self] . identifier[session] . identifier[query] (
identifier[self] . identifier[Job] . identifier[name] . identifier[label] ( literal[string] ),
identifier[sqa] . identifier[func] . identifier[count] ( identifier[self] . identifier[Job] . identifier[id] ). identifier[label] ( literal[string] )
). identifier[filter] ( identifier[self] . identifier[Job] . identifier[status] != literal[string] ). identifier[group_by] ( identifier[self] . identifier[Job] . identifier[name] ). identifier[all] ()
identifier[data] =[{ literal[string] : identifier[category] . identifier[name] , literal[string] : identifier[category] . identifier[count] } keyword[for] identifier[category] keyword[in] identifier[categories] ]
keyword[return] identifier[data] | def aggregate_failed(self) -> List:
"""Count the number of failed jobs per category (name)."""
categories = self.session.query(self.Job.name.label('name'), sqa.func.count(self.Job.id).label('count')).filter(self.Job.status != 'cancelled').group_by(self.Job.name).all()
data = [{'name': category.name, 'count': category.count} for category in categories]
return data |
def request(self, max_width):
# type: (int) -> Optional[Tuple[int, Chunk]]
"""Requests a sub-chunk of max_width or shorter. Returns None if no chunks left."""
if max_width < 1:
raise ValueError('requires positive integer max_width')
s = self.chunk.s
length = len(s)
if self.internal_offset == len(s):
return None
width = 0
start_offset = i = self.internal_offset
replacement_char = u' '
while True:
w = wcswidth(s[i])
# If adding a character puts us over the requested width, return what we've got so far
if width + w > max_width:
self.internal_offset = i # does not include ith character
self.internal_width += width
# if not adding it us makes us short, this must have been a double-width character
if width < max_width:
assert width + 1 == max_width, 'unicode character width of more than 2!?!'
assert w == 2, 'unicode character of width other than 2?'
return (width + 1, Chunk(s[start_offset:self.internal_offset] + replacement_char,
atts=self.chunk.atts))
return (width, Chunk(s[start_offset:self.internal_offset], atts=self.chunk.atts))
# otherwise add this width
width += w
# If one more char would put us over, return whatever we've got
if i + 1 == length:
self.internal_offset = i + 1 # beware the fencepost, i is an index not an offset
self.internal_width += width
return (width, Chunk(s[start_offset:self.internal_offset], atts=self.chunk.atts))
# otherwise attempt to add the next character
i += 1 | def function[request, parameter[self, max_width]]:
constant[Requests a sub-chunk of max_width or shorter. Returns None if no chunks left.]
if compare[name[max_width] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da20e74bd90>
variable[s] assign[=] name[self].chunk.s
variable[length] assign[=] call[name[len], parameter[name[s]]]
if compare[name[self].internal_offset equal[==] call[name[len], parameter[name[s]]]] begin[:]
return[constant[None]]
variable[width] assign[=] constant[0]
variable[start_offset] assign[=] name[self].internal_offset
variable[replacement_char] assign[=] constant[ ]
while constant[True] begin[:]
variable[w] assign[=] call[name[wcswidth], parameter[call[name[s]][name[i]]]]
if compare[binary_operation[name[width] + name[w]] greater[>] name[max_width]] begin[:]
name[self].internal_offset assign[=] name[i]
<ast.AugAssign object at 0x7da20e9603a0>
if compare[name[width] less[<] name[max_width]] begin[:]
assert[compare[binary_operation[name[width] + constant[1]] equal[==] name[max_width]]]
assert[compare[name[w] equal[==] constant[2]]]
return[tuple[[<ast.BinOp object at 0x7da20e9608e0>, <ast.Call object at 0x7da20e960700>]]]
return[tuple[[<ast.Name object at 0x7da20e963760>, <ast.Call object at 0x7da20e9630d0>]]]
<ast.AugAssign object at 0x7da20e960f40>
if compare[binary_operation[name[i] + constant[1]] equal[==] name[length]] begin[:]
name[self].internal_offset assign[=] binary_operation[name[i] + constant[1]]
<ast.AugAssign object at 0x7da20e960130>
return[tuple[[<ast.Name object at 0x7da20e961ab0>, <ast.Call object at 0x7da20e960100>]]]
<ast.AugAssign object at 0x7da20e9629b0> | keyword[def] identifier[request] ( identifier[self] , identifier[max_width] ):
literal[string]
keyword[if] identifier[max_width] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[s] = identifier[self] . identifier[chunk] . identifier[s]
identifier[length] = identifier[len] ( identifier[s] )
keyword[if] identifier[self] . identifier[internal_offset] == identifier[len] ( identifier[s] ):
keyword[return] keyword[None]
identifier[width] = literal[int]
identifier[start_offset] = identifier[i] = identifier[self] . identifier[internal_offset]
identifier[replacement_char] = literal[string]
keyword[while] keyword[True] :
identifier[w] = identifier[wcswidth] ( identifier[s] [ identifier[i] ])
keyword[if] identifier[width] + identifier[w] > identifier[max_width] :
identifier[self] . identifier[internal_offset] = identifier[i]
identifier[self] . identifier[internal_width] += identifier[width]
keyword[if] identifier[width] < identifier[max_width] :
keyword[assert] identifier[width] + literal[int] == identifier[max_width] , literal[string]
keyword[assert] identifier[w] == literal[int] , literal[string]
keyword[return] ( identifier[width] + literal[int] , identifier[Chunk] ( identifier[s] [ identifier[start_offset] : identifier[self] . identifier[internal_offset] ]+ identifier[replacement_char] ,
identifier[atts] = identifier[self] . identifier[chunk] . identifier[atts] ))
keyword[return] ( identifier[width] , identifier[Chunk] ( identifier[s] [ identifier[start_offset] : identifier[self] . identifier[internal_offset] ], identifier[atts] = identifier[self] . identifier[chunk] . identifier[atts] ))
identifier[width] += identifier[w]
keyword[if] identifier[i] + literal[int] == identifier[length] :
identifier[self] . identifier[internal_offset] = identifier[i] + literal[int]
identifier[self] . identifier[internal_width] += identifier[width]
keyword[return] ( identifier[width] , identifier[Chunk] ( identifier[s] [ identifier[start_offset] : identifier[self] . identifier[internal_offset] ], identifier[atts] = identifier[self] . identifier[chunk] . identifier[atts] ))
identifier[i] += literal[int] | def request(self, max_width):
# type: (int) -> Optional[Tuple[int, Chunk]]
'Requests a sub-chunk of max_width or shorter. Returns None if no chunks left.'
if max_width < 1:
raise ValueError('requires positive integer max_width') # depends on [control=['if'], data=[]]
s = self.chunk.s
length = len(s)
if self.internal_offset == len(s):
return None # depends on [control=['if'], data=[]]
width = 0
start_offset = i = self.internal_offset
replacement_char = u' '
while True:
w = wcswidth(s[i])
# If adding a character puts us over the requested width, return what we've got so far
if width + w > max_width:
self.internal_offset = i # does not include ith character
self.internal_width += width
# if not adding it us makes us short, this must have been a double-width character
if width < max_width:
assert width + 1 == max_width, 'unicode character width of more than 2!?!'
assert w == 2, 'unicode character of width other than 2?'
return (width + 1, Chunk(s[start_offset:self.internal_offset] + replacement_char, atts=self.chunk.atts)) # depends on [control=['if'], data=['width', 'max_width']]
return (width, Chunk(s[start_offset:self.internal_offset], atts=self.chunk.atts)) # depends on [control=['if'], data=['max_width']]
# otherwise add this width
width += w
# If one more char would put us over, return whatever we've got
if i + 1 == length:
self.internal_offset = i + 1 # beware the fencepost, i is an index not an offset
self.internal_width += width
return (width, Chunk(s[start_offset:self.internal_offset], atts=self.chunk.atts)) # depends on [control=['if'], data=[]]
# otherwise attempt to add the next character
i += 1 # depends on [control=['while'], data=[]] |
def frame_times(self):
"""This getter returns the frame times, for the corresponding type of
features."""
frame_times = None
# Make sure we have already computed the features
self.features
if self.feat_type is FeatureTypes.framesync:
self._compute_framesync_times()
frame_times = self._framesync_times
elif self.feat_type is FeatureTypes.est_beatsync:
frame_times = self._est_beatsync_times
elif self.feat_type is FeatureTypes.ann_beatsync:
frame_times = self._ann_beatsync_times
return frame_times | def function[frame_times, parameter[self]]:
constant[This getter returns the frame times, for the corresponding type of
features.]
variable[frame_times] assign[=] constant[None]
name[self].features
if compare[name[self].feat_type is name[FeatureTypes].framesync] begin[:]
call[name[self]._compute_framesync_times, parameter[]]
variable[frame_times] assign[=] name[self]._framesync_times
return[name[frame_times]] | keyword[def] identifier[frame_times] ( identifier[self] ):
literal[string]
identifier[frame_times] = keyword[None]
identifier[self] . identifier[features]
keyword[if] identifier[self] . identifier[feat_type] keyword[is] identifier[FeatureTypes] . identifier[framesync] :
identifier[self] . identifier[_compute_framesync_times] ()
identifier[frame_times] = identifier[self] . identifier[_framesync_times]
keyword[elif] identifier[self] . identifier[feat_type] keyword[is] identifier[FeatureTypes] . identifier[est_beatsync] :
identifier[frame_times] = identifier[self] . identifier[_est_beatsync_times]
keyword[elif] identifier[self] . identifier[feat_type] keyword[is] identifier[FeatureTypes] . identifier[ann_beatsync] :
identifier[frame_times] = identifier[self] . identifier[_ann_beatsync_times]
keyword[return] identifier[frame_times] | def frame_times(self):
"""This getter returns the frame times, for the corresponding type of
features."""
frame_times = None
# Make sure we have already computed the features
self.features
if self.feat_type is FeatureTypes.framesync:
self._compute_framesync_times()
frame_times = self._framesync_times # depends on [control=['if'], data=[]]
elif self.feat_type is FeatureTypes.est_beatsync:
frame_times = self._est_beatsync_times # depends on [control=['if'], data=[]]
elif self.feat_type is FeatureTypes.ann_beatsync:
frame_times = self._ann_beatsync_times # depends on [control=['if'], data=[]]
return frame_times |
def _wait_for_keypress():
"""
Wait for a key press on the console and return it.
Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
"""
result = None
if os.name == "nt":
# noinspection PyUnresolvedReferences
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result | def function[_wait_for_keypress, parameter[]]:
constant[
Wait for a key press on the console and return it.
Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
]
variable[result] assign[=] constant[None]
if compare[name[os].name equal[==] constant[nt]] begin[:]
import module[msvcrt]
variable[result] assign[=] call[name[msvcrt].getch, parameter[]]
return[name[result]] | keyword[def] identifier[_wait_for_keypress] ():
literal[string]
identifier[result] = keyword[None]
keyword[if] identifier[os] . identifier[name] == literal[string] :
keyword[import] identifier[msvcrt]
identifier[result] = identifier[msvcrt] . identifier[getch] ()
keyword[else] :
keyword[import] identifier[termios]
identifier[fd] = identifier[sys] . identifier[stdin] . identifier[fileno] ()
identifier[oldterm] = identifier[termios] . identifier[tcgetattr] ( identifier[fd] )
identifier[newattr] = identifier[termios] . identifier[tcgetattr] ( identifier[fd] )
identifier[newattr] [ literal[int] ]= identifier[newattr] [ literal[int] ]&~ identifier[termios] . identifier[ICANON] &~ identifier[termios] . identifier[ECHO]
identifier[termios] . identifier[tcsetattr] ( identifier[fd] , identifier[termios] . identifier[TCSANOW] , identifier[newattr] )
keyword[try] :
identifier[result] = identifier[sys] . identifier[stdin] . identifier[read] ( literal[int] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[finally] :
identifier[termios] . identifier[tcsetattr] ( identifier[fd] , identifier[termios] . identifier[TCSAFLUSH] , identifier[oldterm] )
keyword[return] identifier[result] | def _wait_for_keypress():
"""
Wait for a key press on the console and return it.
Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
"""
result = None
if os.name == 'nt':
# noinspection PyUnresolvedReferences
import msvcrt
result = msvcrt.getch() # depends on [control=['if'], data=[]]
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result |
def as_condition(cls, obj):
"""Convert obj into :class:`Condition`"""
if isinstance(obj, cls):
return obj
else:
return cls(cmap=obj) | def function[as_condition, parameter[cls, obj]]:
constant[Convert obj into :class:`Condition`]
if call[name[isinstance], parameter[name[obj], name[cls]]] begin[:]
return[name[obj]] | keyword[def] identifier[as_condition] ( identifier[cls] , identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[cls] ):
keyword[return] identifier[obj]
keyword[else] :
keyword[return] identifier[cls] ( identifier[cmap] = identifier[obj] ) | def as_condition(cls, obj):
"""Convert obj into :class:`Condition`"""
if isinstance(obj, cls):
return obj # depends on [control=['if'], data=[]]
else:
return cls(cmap=obj) |
def _set_symlink_ownership(path, user, group, win_owner):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
if salt.utils.platform.is_windows():
try:
salt.utils.win_dacl.set_owner(path, win_owner)
except CommandExecutionError:
pass
else:
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group, win_owner) | def function[_set_symlink_ownership, parameter[path, user, group, win_owner]]:
constant[
Set the ownership of a symlink and return a boolean indicating
success/failure
]
if call[name[salt].utils.platform.is_windows, parameter[]] begin[:]
<ast.Try object at 0x7da1b20efbb0>
return[call[name[_check_symlink_ownership], parameter[name[path], name[user], name[group], name[win_owner]]]] | keyword[def] identifier[_set_symlink_ownership] ( identifier[path] , identifier[user] , identifier[group] , identifier[win_owner] ):
literal[string]
keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] ():
keyword[try] :
identifier[salt] . identifier[utils] . identifier[win_dacl] . identifier[set_owner] ( identifier[path] , identifier[win_owner] )
keyword[except] identifier[CommandExecutionError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[__salt__] [ literal[string] ]( identifier[path] , identifier[user] , identifier[group] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[return] identifier[_check_symlink_ownership] ( identifier[path] , identifier[user] , identifier[group] , identifier[win_owner] ) | def _set_symlink_ownership(path, user, group, win_owner):
"""
Set the ownership of a symlink and return a boolean indicating
success/failure
"""
if salt.utils.platform.is_windows():
try:
salt.utils.win_dacl.set_owner(path, win_owner) # depends on [control=['try'], data=[]]
except CommandExecutionError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
__salt__['file.lchown'](path, user, group) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
return _check_symlink_ownership(path, user, group, win_owner) |
def write_file(self, filename, file_format="xyz"):
    """
    Write this molecule to a file through OpenBabel.

    Args:
        filename: Filename of file to output
        file_format: String specifying any OpenBabel supported formats.

    Returns:
        Whatever ``pybel.Molecule.write`` returns for the given format.
    """
    # Wrap the underlying OBMol in a pybel Molecule and let it handle the
    # format-specific output; existing files are overwritten.
    return pb.Molecule(self._obmol).write(file_format, filename, overwrite=True)
constant[
Uses OpenBabel to output all supported formats.
Args:
filename: Filename of file to output
file_format: String specifying any OpenBabel supported formats.
]
variable[mol] assign[=] call[name[pb].Molecule, parameter[name[self]._obmol]]
return[call[name[mol].write, parameter[name[file_format], name[filename]]]] | keyword[def] identifier[write_file] ( identifier[self] , identifier[filename] , identifier[file_format] = literal[string] ):
literal[string]
identifier[mol] = identifier[pb] . identifier[Molecule] ( identifier[self] . identifier[_obmol] )
keyword[return] identifier[mol] . identifier[write] ( identifier[file_format] , identifier[filename] , identifier[overwrite] = keyword[True] ) | def write_file(self, filename, file_format='xyz'):
"""
Uses OpenBabel to output all supported formats.
Args:
filename: Filename of file to output
file_format: String specifying any OpenBabel supported formats.
"""
mol = pb.Molecule(self._obmol)
return mol.write(file_format, filename, overwrite=True) |
def get_connections_stat(
    self, instance, base_url, object_type, vhosts, limit_vhosts, custom_tags, auth=None, ssl_verify=True
):
    """
    Collect metrics on currently open connection per vhost.

    Queries the per-vhost endpoints when vhost limiting is enabled,
    otherwise (or when those queries return nothing) falls back to the
    global endpoint, then emits per-vhost and per-state gauges.
    """
    proxies = self.get_instance_proxy(instance, base_url)
    fetch_everything = not self._limit_vhosts(instance)
    conn_data = []
    if not fetch_everything:
        for vhost in vhosts:
            endpoint = "vhosts/{}/{}".format(quote_plus(vhost), object_type)
            try:
                conn_data += self._get_data(
                    urljoin(base_url, endpoint), auth=auth, ssl_verify=ssl_verify, proxies=proxies
                )
            except Exception as exc:
                # This will happen if there is no connection data to grab
                self.log.debug("Couldn't grab connection data from vhost, {}: {}".format(vhost, exc))
    # sometimes it seems to need to fall back to the global endpoint
    if fetch_everything or not conn_data:
        conn_data = self._get_data(
            urljoin(base_url, object_type), auth=auth, ssl_verify=ssl_verify, proxies=proxies
        )

    per_vhost_count = {vhost: 0 for vhost in vhosts}
    state_count = defaultdict(int)
    for conn in conn_data:
        if conn['vhost'] in vhosts:
            per_vhost_count[conn['vhost']] += 1
            # 'state' does not exist for direct type connections.
            state_count[conn.get('state', 'direct')] += 1

    for vhost, nb_conn in iteritems(per_vhost_count):
        self.gauge('rabbitmq.connections', nb_conn, tags=['{}_vhost:{}'.format(TAG_PREFIX, vhost)] + custom_tags)
    for conn_state, nb_conn in iteritems(state_count):
        self.gauge(
            'rabbitmq.connections.state',
            nb_conn,
            tags=['{}_conn_state:{}'.format(TAG_PREFIX, conn_state)] + custom_tags,
        )
constant[
Collect metrics on currently open connection per vhost.
]
variable[instance_proxy] assign[=] call[name[self].get_instance_proxy, parameter[name[instance], name[base_url]]]
variable[grab_all_data] assign[=] constant[True]
if call[name[self]._limit_vhosts, parameter[name[instance]]] begin[:]
variable[grab_all_data] assign[=] constant[False]
variable[data] assign[=] list[[]]
for taget[name[vhost]] in starred[name[vhosts]] begin[:]
variable[url] assign[=] call[constant[vhosts/{}/{}].format, parameter[call[name[quote_plus], parameter[name[vhost]]], name[object_type]]]
<ast.Try object at 0x7da18c4cceb0>
if <ast.BoolOp object at 0x7da18c4cd390> begin[:]
variable[data] assign[=] call[name[self]._get_data, parameter[call[name[urljoin], parameter[name[base_url], name[object_type]]]]]
variable[stats] assign[=] <ast.DictComp object at 0x7da18c4cf430>
variable[connection_states] assign[=] call[name[defaultdict], parameter[name[int]]]
for taget[name[conn]] in starred[name[data]] begin[:]
if compare[call[name[conn]][constant[vhost]] in name[vhosts]] begin[:]
<ast.AugAssign object at 0x7da20c6aad70>
<ast.AugAssign object at 0x7da20c6a8340>
for taget[tuple[[<ast.Name object at 0x7da20c6aa920>, <ast.Name object at 0x7da20c6aa590>]]] in starred[call[name[iteritems], parameter[name[stats]]]] begin[:]
call[name[self].gauge, parameter[constant[rabbitmq.connections], name[nb_conn]]]
for taget[tuple[[<ast.Name object at 0x7da20c6aae90>, <ast.Name object at 0x7da20c6a95d0>]]] in starred[call[name[iteritems], parameter[name[connection_states]]]] begin[:]
call[name[self].gauge, parameter[constant[rabbitmq.connections.state], name[nb_conn]]] | keyword[def] identifier[get_connections_stat] (
identifier[self] , identifier[instance] , identifier[base_url] , identifier[object_type] , identifier[vhosts] , identifier[limit_vhosts] , identifier[custom_tags] , identifier[auth] = keyword[None] , identifier[ssl_verify] = keyword[True]
):
literal[string]
identifier[instance_proxy] = identifier[self] . identifier[get_instance_proxy] ( identifier[instance] , identifier[base_url] )
identifier[grab_all_data] = keyword[True]
keyword[if] identifier[self] . identifier[_limit_vhosts] ( identifier[instance] ):
identifier[grab_all_data] = keyword[False]
identifier[data] =[]
keyword[for] identifier[vhost] keyword[in] identifier[vhosts] :
identifier[url] = literal[string] . identifier[format] ( identifier[quote_plus] ( identifier[vhost] ), identifier[object_type] )
keyword[try] :
identifier[data] += identifier[self] . identifier[_get_data] (
identifier[urljoin] ( identifier[base_url] , identifier[url] ), identifier[auth] = identifier[auth] , identifier[ssl_verify] = identifier[ssl_verify] , identifier[proxies] = identifier[instance_proxy]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[vhost] , identifier[e] ))
keyword[if] identifier[grab_all_data] keyword[or] keyword[not] identifier[len] ( identifier[data] ):
identifier[data] = identifier[self] . identifier[_get_data] (
identifier[urljoin] ( identifier[base_url] , identifier[object_type] ), identifier[auth] = identifier[auth] , identifier[ssl_verify] = identifier[ssl_verify] , identifier[proxies] = identifier[instance_proxy]
)
identifier[stats] ={ identifier[vhost] : literal[int] keyword[for] identifier[vhost] keyword[in] identifier[vhosts] }
identifier[connection_states] = identifier[defaultdict] ( identifier[int] )
keyword[for] identifier[conn] keyword[in] identifier[data] :
keyword[if] identifier[conn] [ literal[string] ] keyword[in] identifier[vhosts] :
identifier[stats] [ identifier[conn] [ literal[string] ]]+= literal[int]
identifier[connection_states] [ identifier[conn] . identifier[get] ( literal[string] , literal[string] )]+= literal[int]
keyword[for] identifier[vhost] , identifier[nb_conn] keyword[in] identifier[iteritems] ( identifier[stats] ):
identifier[self] . identifier[gauge] ( literal[string] , identifier[nb_conn] , identifier[tags] =[ literal[string] . identifier[format] ( identifier[TAG_PREFIX] , identifier[vhost] )]+ identifier[custom_tags] )
keyword[for] identifier[conn_state] , identifier[nb_conn] keyword[in] identifier[iteritems] ( identifier[connection_states] ):
identifier[self] . identifier[gauge] (
literal[string] ,
identifier[nb_conn] ,
identifier[tags] =[ literal[string] . identifier[format] ( identifier[TAG_PREFIX] , identifier[conn_state] )]+ identifier[custom_tags] ,
) | def get_connections_stat(self, instance, base_url, object_type, vhosts, limit_vhosts, custom_tags, auth=None, ssl_verify=True):
"""
Collect metrics on currently open connection per vhost.
"""
instance_proxy = self.get_instance_proxy(instance, base_url)
grab_all_data = True
if self._limit_vhosts(instance):
grab_all_data = False
data = []
for vhost in vhosts:
url = 'vhosts/{}/{}'.format(quote_plus(vhost), object_type)
try:
data += self._get_data(urljoin(base_url, url), auth=auth, ssl_verify=ssl_verify, proxies=instance_proxy) # depends on [control=['try'], data=[]]
except Exception as e:
# This will happen if there is no connection data to grab
self.log.debug("Couldn't grab connection data from vhost, {}: {}".format(vhost, e)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['vhost']] # depends on [control=['if'], data=[]]
# sometimes it seems to need to fall back to this
if grab_all_data or not len(data):
data = self._get_data(urljoin(base_url, object_type), auth=auth, ssl_verify=ssl_verify, proxies=instance_proxy) # depends on [control=['if'], data=[]]
stats = {vhost: 0 for vhost in vhosts}
connection_states = defaultdict(int)
for conn in data:
if conn['vhost'] in vhosts:
stats[conn['vhost']] += 1
# 'state' does not exist for direct type connections.
connection_states[conn.get('state', 'direct')] += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['conn']]
for (vhost, nb_conn) in iteritems(stats):
self.gauge('rabbitmq.connections', nb_conn, tags=['{}_vhost:{}'.format(TAG_PREFIX, vhost)] + custom_tags) # depends on [control=['for'], data=[]]
for (conn_state, nb_conn) in iteritems(connection_states):
self.gauge('rabbitmq.connections.state', nb_conn, tags=['{}_conn_state:{}'.format(TAG_PREFIX, conn_state)] + custom_tags) # depends on [control=['for'], data=[]] |
def store_meta_data(self, copy_path=None):
    """Save meta data of the state machine model to the file system

    Writes the state machine's meta data dictionary as a JSON file, then
    delegates to the root state model so its meta data is stored too.

    :param str copy_path: Optional, if the path is specified, it will be used
        instead of the state machine's own file system path
    """
    # Prefer the explicit copy destination over the state machine's path.
    target_dir = copy_path if copy_path else self.state_machine.file_system_path
    meta_file_json = os.path.join(target_dir, storage.FILE_NAME_META_DATA)
    storage_utils.write_dict_to_json(self.meta, meta_file_json)
    self.root_state.store_meta_data(copy_path)
constant[Save meta data of the state machine model to the file system
This method generates a dictionary of the meta data of the state machine and stores it on the filesystem.
:param str copy_path: Optional, if the path is specified, it will be used instead of the file system path
]
if name[copy_path] begin[:]
variable[meta_file_json] assign[=] call[name[os].path.join, parameter[name[copy_path], name[storage].FILE_NAME_META_DATA]]
call[name[storage_utils].write_dict_to_json, parameter[name[self].meta, name[meta_file_json]]]
call[name[self].root_state.store_meta_data, parameter[name[copy_path]]] | keyword[def] identifier[store_meta_data] ( identifier[self] , identifier[copy_path] = keyword[None] ):
literal[string]
keyword[if] identifier[copy_path] :
identifier[meta_file_json] = identifier[os] . identifier[path] . identifier[join] ( identifier[copy_path] , identifier[storage] . identifier[FILE_NAME_META_DATA] )
keyword[else] :
identifier[meta_file_json] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[state_machine] . identifier[file_system_path] , identifier[storage] . identifier[FILE_NAME_META_DATA] )
identifier[storage_utils] . identifier[write_dict_to_json] ( identifier[self] . identifier[meta] , identifier[meta_file_json] )
identifier[self] . identifier[root_state] . identifier[store_meta_data] ( identifier[copy_path] ) | def store_meta_data(self, copy_path=None):
"""Save meta data of the state machine model to the file system
This method generates a dictionary of the meta data of the state machine and stores it on the filesystem.
:param str copy_path: Optional, if the path is specified, it will be used instead of the file system path
"""
if copy_path:
meta_file_json = os.path.join(copy_path, storage.FILE_NAME_META_DATA) # depends on [control=['if'], data=[]]
else:
meta_file_json = os.path.join(self.state_machine.file_system_path, storage.FILE_NAME_META_DATA)
storage_utils.write_dict_to_json(self.meta, meta_file_json)
self.root_state.store_meta_data(copy_path) |
def Serialize(self, writer):
    """
    Serialize full object.

    Fields are written in a fixed order; that order defines the wire
    format, so it must not be changed.

    Args:
        writer (neo.IO.BinaryWriter): binary stream to serialize into.

    Raises:
        Exception: if ``self.Available`` is not a ``Fixed8`` instance.
    """
    # Serialize the base-class fields first.
    super(AssetState, self).Serialize(writer)
    writer.WriteUInt256(self.AssetId)
    writer.WriteByte(self.AssetType)
    writer.WriteVarString(self.Name)
    # NOTE(review): non-negative amounts are written unsigned, negative ones
    # signed — a negative Amount appears to act as a sentinel; confirm.
    if self.Amount.value > -1:
        writer.WriteFixed8(self.Amount, unsigned=True)
    else:
        writer.WriteFixed8(self.Amount)
    # Guard against a mis-typed Available field before writing it out.
    if type(self.Available) is not Fixed8:
        raise Exception("AVAILABLE IS NOT FIXED 8!")
    writer.WriteFixed8(self.Available, unsigned=True)
    writer.WriteByte(self.Precision)
    # Single zero byte — presumably reserved/padding in the format; confirm.
    writer.WriteByte(b'\x00')
    writer.WriteFixed8(self.Fee)
    writer.WriteUInt160(self.FeeAddress)
    self.Owner.Serialize(writer)
    writer.WriteUInt160(self.Admin)
    writer.WriteUInt160(self.Issuer)
    writer.WriteUInt32(self.Expiration)
    writer.WriteBool(self.IsFrozen)
constant[
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
]
call[call[name[super], parameter[name[AssetState], name[self]]].Serialize, parameter[name[writer]]]
call[name[writer].WriteUInt256, parameter[name[self].AssetId]]
call[name[writer].WriteByte, parameter[name[self].AssetType]]
call[name[writer].WriteVarString, parameter[name[self].Name]]
if compare[name[self].Amount.value greater[>] <ast.UnaryOp object at 0x7da18bcca590>] begin[:]
call[name[writer].WriteFixed8, parameter[name[self].Amount]]
if compare[call[name[type], parameter[name[self].Available]] is_not name[Fixed8]] begin[:]
<ast.Raise object at 0x7da18dc998d0>
call[name[writer].WriteFixed8, parameter[name[self].Available]]
call[name[writer].WriteByte, parameter[name[self].Precision]]
call[name[writer].WriteByte, parameter[constant[b'\x00']]]
call[name[writer].WriteFixed8, parameter[name[self].Fee]]
call[name[writer].WriteUInt160, parameter[name[self].FeeAddress]]
call[name[self].Owner.Serialize, parameter[name[writer]]]
call[name[writer].WriteUInt160, parameter[name[self].Admin]]
call[name[writer].WriteUInt160, parameter[name[self].Issuer]]
call[name[writer].WriteUInt32, parameter[name[self].Expiration]]
call[name[writer].WriteBool, parameter[name[self].IsFrozen]] | keyword[def] identifier[Serialize] ( identifier[self] , identifier[writer] ):
literal[string]
identifier[super] ( identifier[AssetState] , identifier[self] ). identifier[Serialize] ( identifier[writer] )
identifier[writer] . identifier[WriteUInt256] ( identifier[self] . identifier[AssetId] )
identifier[writer] . identifier[WriteByte] ( identifier[self] . identifier[AssetType] )
identifier[writer] . identifier[WriteVarString] ( identifier[self] . identifier[Name] )
keyword[if] identifier[self] . identifier[Amount] . identifier[value] >- literal[int] :
identifier[writer] . identifier[WriteFixed8] ( identifier[self] . identifier[Amount] , identifier[unsigned] = keyword[True] )
keyword[else] :
identifier[writer] . identifier[WriteFixed8] ( identifier[self] . identifier[Amount] )
keyword[if] identifier[type] ( identifier[self] . identifier[Available] ) keyword[is] keyword[not] identifier[Fixed8] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[writer] . identifier[WriteFixed8] ( identifier[self] . identifier[Available] , identifier[unsigned] = keyword[True] )
identifier[writer] . identifier[WriteByte] ( identifier[self] . identifier[Precision] )
identifier[writer] . identifier[WriteByte] ( literal[string] )
identifier[writer] . identifier[WriteFixed8] ( identifier[self] . identifier[Fee] )
identifier[writer] . identifier[WriteUInt160] ( identifier[self] . identifier[FeeAddress] )
identifier[self] . identifier[Owner] . identifier[Serialize] ( identifier[writer] )
identifier[writer] . identifier[WriteUInt160] ( identifier[self] . identifier[Admin] )
identifier[writer] . identifier[WriteUInt160] ( identifier[self] . identifier[Issuer] )
identifier[writer] . identifier[WriteUInt32] ( identifier[self] . identifier[Expiration] )
identifier[writer] . identifier[WriteBool] ( identifier[self] . identifier[IsFrozen] ) | def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
super(AssetState, self).Serialize(writer)
writer.WriteUInt256(self.AssetId)
writer.WriteByte(self.AssetType)
writer.WriteVarString(self.Name)
if self.Amount.value > -1:
writer.WriteFixed8(self.Amount, unsigned=True) # depends on [control=['if'], data=[]]
else:
writer.WriteFixed8(self.Amount)
if type(self.Available) is not Fixed8:
raise Exception('AVAILABLE IS NOT FIXED 8!') # depends on [control=['if'], data=[]]
writer.WriteFixed8(self.Available, unsigned=True)
writer.WriteByte(self.Precision)
writer.WriteByte(b'\x00')
writer.WriteFixed8(self.Fee)
writer.WriteUInt160(self.FeeAddress)
self.Owner.Serialize(writer)
writer.WriteUInt160(self.Admin)
writer.WriteUInt160(self.Issuer)
writer.WriteUInt32(self.Expiration)
writer.WriteBool(self.IsFrozen) |
def recall(y, y_pred):
    """Recall score

    recall = true_positives / (true_positives + false_negatives)

    Parameters:
    -----------
    y : vector, shape (n_samples,)
        The target labels.

    y_pred : vector, shape (n_samples,)
        The predicted labels.

    Returns:
    --------
    recall : float
        Returns 0.0 when there are no actual positive samples
        (tp + fn == 0), where the previous implementation raised
        ZeroDivisionError.
    """
    tp = true_positives(y, y_pred)
    fn = false_negatives(y, y_pred)
    denom = tp + fn
    if denom == 0:
        # Recall is undefined with no actual positives; follow the common
        # convention (e.g. scikit-learn's zero_division=0) and return 0.0.
        return 0.0
    return tp / denom
constant[Recall score
recall = true_positives / (true_positives + false_negatives)
Parameters:
-----------
y : vector, shape (n_samples,)
The target labels.
y_pred : vector, shape (n_samples,)
The predicted labels.
Returns:
--------
recall : float
]
variable[tp] assign[=] call[name[true_positives], parameter[name[y], name[y_pred]]]
variable[fn] assign[=] call[name[false_negatives], parameter[name[y], name[y_pred]]]
return[binary_operation[name[tp] / binary_operation[name[tp] + name[fn]]]] | keyword[def] identifier[recall] ( identifier[y] , identifier[y_pred] ):
literal[string]
identifier[tp] = identifier[true_positives] ( identifier[y] , identifier[y_pred] )
identifier[fn] = identifier[false_negatives] ( identifier[y] , identifier[y_pred] )
keyword[return] identifier[tp] /( identifier[tp] + identifier[fn] ) | def recall(y, y_pred):
"""Recall score
recall = true_positives / (true_positives + false_negatives)
Parameters:
-----------
y : vector, shape (n_samples,)
The target labels.
y_pred : vector, shape (n_samples,)
The predicted labels.
Returns:
--------
recall : float
"""
tp = true_positives(y, y_pred)
fn = false_negatives(y, y_pred)
return tp / (tp + fn) |
def get_remarks(txt: str) -> ([str], str):  # type: ignore
    """
    Returns the report split into components and the remarks string

    Remarks can include items like RMK and on, NOSIG and on, and BECMG and on
    """
    txt = txt.replace('?', '').strip()
    # Locate an altimeter group in the body (e.g. " A2992" / " Q1013");
    # the last matching marker in this list wins.
    alt_index = len(txt) + 1
    for marker in (' A2', ' A3', ' Q1', ' Q0', ' Q9'):
        pos = txt.find(marker)
        if -1 < pos < len(txt) - 6 and txt[pos + 2:pos + 6].isdigit():
            alt_index = pos
    # Earliest remarks 'signifier' (RMK, NOSIG, BECMG, ...)
    sig_index = find_first_in_list(txt, METAR_RMK)
    if sig_index == -1:
        sig_index = len(txt) + 1
    # Split at whichever boundary comes first in the report
    if -1 < alt_index < sig_index:
        return txt[:alt_index + 6].strip().split(' '), txt[alt_index + 7:]
    if -1 < sig_index < alt_index:
        return txt[:sig_index].strip().split(' '), txt[sig_index + 1:]
    return txt.strip().split(' '), ''
constant[
Returns the report split into components and the remarks string
Remarks can include items like RMK and on, NOSIG and on, and BECMG and on
]
variable[txt] assign[=] call[call[name[txt].replace, parameter[constant[?], constant[]]].strip, parameter[]]
variable[alt_index] assign[=] binary_operation[call[name[len], parameter[name[txt]]] + constant[1]]
for taget[name[item]] in starred[list[[<ast.Constant object at 0x7da18bc70340>, <ast.Constant object at 0x7da18bc717b0>, <ast.Constant object at 0x7da18bc73190>, <ast.Constant object at 0x7da18bc72d40>, <ast.Constant object at 0x7da18bc716f0>]]] begin[:]
variable[index] assign[=] call[name[txt].find, parameter[name[item]]]
if <ast.BoolOp object at 0x7da18bc73fd0> begin[:]
variable[alt_index] assign[=] name[index]
variable[sig_index] assign[=] call[name[find_first_in_list], parameter[name[txt], name[METAR_RMK]]]
if compare[name[sig_index] equal[==] <ast.UnaryOp object at 0x7da204962f80>] begin[:]
variable[sig_index] assign[=] binary_operation[call[name[len], parameter[name[txt]]] + constant[1]]
if compare[name[sig_index] greater[>] name[alt_index]] begin[:]
return[tuple[[<ast.Call object at 0x7da18f7212d0>, <ast.Subscript object at 0x7da18f721f30>]]]
if compare[name[alt_index] greater[>] name[sig_index]] begin[:]
return[tuple[[<ast.Call object at 0x7da207f011e0>, <ast.Subscript object at 0x7da207f02650>]]]
return[tuple[[<ast.Call object at 0x7da207f02a70>, <ast.Constant object at 0x7da207f00d00>]]] | keyword[def] identifier[get_remarks] ( identifier[txt] : identifier[str] )->([ identifier[str] ], identifier[str] ):
literal[string]
identifier[txt] = identifier[txt] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
identifier[alt_index] = identifier[len] ( identifier[txt] )+ literal[int]
keyword[for] identifier[item] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[index] = identifier[txt] . identifier[find] ( identifier[item] )
keyword[if] identifier[len] ( identifier[txt] )- literal[int] > identifier[index] >- literal[int] keyword[and] identifier[txt] [ identifier[index] + literal[int] : identifier[index] + literal[int] ]. identifier[isdigit] ():
identifier[alt_index] = identifier[index]
identifier[sig_index] = identifier[find_first_in_list] ( identifier[txt] , identifier[METAR_RMK] )
keyword[if] identifier[sig_index] ==- literal[int] :
identifier[sig_index] = identifier[len] ( identifier[txt] )+ literal[int]
keyword[if] identifier[sig_index] > identifier[alt_index] >- literal[int] :
keyword[return] identifier[txt] [: identifier[alt_index] + literal[int] ]. identifier[strip] (). identifier[split] ( literal[string] ), identifier[txt] [ identifier[alt_index] + literal[int] :]
keyword[if] identifier[alt_index] > identifier[sig_index] >- literal[int] :
keyword[return] identifier[txt] [: identifier[sig_index] ]. identifier[strip] (). identifier[split] ( literal[string] ), identifier[txt] [ identifier[sig_index] + literal[int] :]
keyword[return] identifier[txt] . identifier[strip] (). identifier[split] ( literal[string] ), literal[string] | def get_remarks(txt: str) -> ([str], str): # type: ignore
'\n Returns the report split into components and the remarks string\n\n Remarks can include items like RMK and on, NOSIG and on, and BECMG and on\n '
txt = txt.replace('?', '').strip()
# First look for Altimeter in txt
alt_index = len(txt) + 1
for item in [' A2', ' A3', ' Q1', ' Q0', ' Q9']:
index = txt.find(item)
if len(txt) - 6 > index > -1 and txt[index + 2:index + 6].isdigit():
alt_index = index # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
# Then look for earliest remarks 'signifier'
sig_index = find_first_in_list(txt, METAR_RMK)
if sig_index == -1:
sig_index = len(txt) + 1 # depends on [control=['if'], data=['sig_index']]
if sig_index > alt_index > -1:
return (txt[:alt_index + 6].strip().split(' '), txt[alt_index + 7:]) # depends on [control=['if'], data=['alt_index']]
if alt_index > sig_index > -1:
return (txt[:sig_index].strip().split(' '), txt[sig_index + 1:]) # depends on [control=['if'], data=['sig_index']]
return (txt.strip().split(' '), '') |
def is_iterable_of_float(l):
    r"""Return True when *l* is iterable and every element is a floating point type."""
    # Short-circuits to False for non-iterables before inspecting elements.
    return is_iterable(l) and all(is_float(value) for value in l)
constant[ Checks if l is iterable and contains only floating point types ]
if <ast.UnaryOp object at 0x7da18f00dab0> begin[:]
return[constant[False]]
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da18f7217e0>]]] | keyword[def] identifier[is_iterable_of_float] ( identifier[l] ):
literal[string]
keyword[if] keyword[not] identifier[is_iterable] ( identifier[l] ):
keyword[return] keyword[False]
keyword[return] identifier[all] ( identifier[is_float] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[l] ) | def is_iterable_of_float(l):
""" Checks if l is iterable and contains only floating point types """
if not is_iterable(l):
return False # depends on [control=['if'], data=[]]
return all((is_float(value) for value in l)) |
def get_media_requests(self, item, info):
    """
    Build download Requests for the static resources referenced by ``item``.

    Each request carries the item's qiniu key generator in its ``meta`` dict
    so later pipeline stages can derive the storage key.

    :param item: scraped item holding the file URLs and the key generator
    :param info: media pipeline info (unused here)
    :return: list of Request objects, one per file URL
    """
    key_gen = item.get(self.QINIU_KEY_GENERATOR_FIELD)
    requests = []
    for file_url in item.get(self.FILES_URLS_FIELD, []):
        requests.append(Request(file_url, meta={'qiniu_key_generator': key_gen}))
    return requests
constant[
根据item中的信息, 构造出需要下载的静态资源的Request对象
:param item:
:param info:
:return:
]
variable[key_generator] assign[=] call[name[item].get, parameter[name[self].QINIU_KEY_GENERATOR_FIELD]]
return[<ast.ListComp object at 0x7da2045665f0>] | keyword[def] identifier[get_media_requests] ( identifier[self] , identifier[item] , identifier[info] ):
literal[string]
identifier[key_generator] = identifier[item] . identifier[get] ( identifier[self] . identifier[QINIU_KEY_GENERATOR_FIELD] )
keyword[return] [ identifier[Request] ( identifier[x] , identifier[meta] ={ literal[string] : identifier[key_generator] }) keyword[for] identifier[x] keyword[in] identifier[item] . identifier[get] ( identifier[self] . identifier[FILES_URLS_FIELD] ,[])] | def get_media_requests(self, item, info):
"""
根据item中的信息, 构造出需要下载的静态资源的Request对象
:param item:
:param info:
:return:
"""
key_generator = item.get(self.QINIU_KEY_GENERATOR_FIELD)
return [Request(x, meta={'qiniu_key_generator': key_generator}) for x in item.get(self.FILES_URLS_FIELD, [])] |
def effective_sample_size(h):
    """
    Calculate the effective sample size for a histogram
    the same way as ROOT does.

    ess = (sum of bin values)^2 / (sum of squared bin errors)

    Parameters
    ----------
    h : histogram-like object exposing ``bins(overflow=False)``, yielding
        bins with ``value`` and ``error`` attributes.

    Returns
    -------
    float
        The effective number of entries. Raises ZeroDivisionError when
        every bin error is zero (unchanged from the original behaviour).
    """
    # Renamed locals: the original shadowed the builtins ``sum`` and ``bin``.
    total = 0.0
    err_sq_sum = 0.0
    for hist_bin in h.bins(overflow=False):
        total += hist_bin.value
        err = hist_bin.error
        err_sq_sum += err * err
    return total * total / err_sq_sum
constant[
Calculate the effective sample size for a histogram
the same way as ROOT does.
]
variable[sum] assign[=] constant[0]
variable[ew] assign[=] constant[0]
variable[w] assign[=] constant[0]
for taget[name[bin]] in starred[call[name[h].bins, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b12cdf60>
variable[ew] assign[=] name[bin].error
<ast.AugAssign object at 0x7da1b12cdfc0>
variable[esum] assign[=] binary_operation[binary_operation[name[sum] * name[sum]] / name[w]]
return[name[esum]] | keyword[def] identifier[effective_sample_size] ( identifier[h] ):
literal[string]
identifier[sum] = literal[int]
identifier[ew] = literal[int]
identifier[w] = literal[int]
keyword[for] identifier[bin] keyword[in] identifier[h] . identifier[bins] ( identifier[overflow] = keyword[False] ):
identifier[sum] += identifier[bin] . identifier[value]
identifier[ew] = identifier[bin] . identifier[error]
identifier[w] += identifier[ew] * identifier[ew]
identifier[esum] = identifier[sum] * identifier[sum] / identifier[w]
keyword[return] identifier[esum] | def effective_sample_size(h):
"""
Calculate the effective sample size for a histogram
the same way as ROOT does.
"""
sum = 0
ew = 0
w = 0
for bin in h.bins(overflow=False):
sum += bin.value
ew = bin.error
w += ew * ew # depends on [control=['for'], data=['bin']]
esum = sum * sum / w
return esum |
def _qrd_solve_full(a, b, ddiag, dtype=float):
    """Solve the equation A^T x = B, D x = 0.

    Parameters:
    a     - an n-by-m array, m >= n
    b     - an m-vector
    ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
    dtype - floating-point dtype for the computation; the builtin ``float``
            default is the same object as the removed ``np.float`` alias
            previously used here.

    Returns:
    x    - n-vector solving the equation.
    s    - the n-by-n supplementary matrix s.
    pmut - n-element permutation vector defining the permutation matrix P.

    The equations are solved in a least-squares sense if the system is
    rank-deficient.  D is a diagonal matrix and hence only its diagonal is
    in fact supplied as an argument.  The matrix s is full lower triangular
    and solves the equation

    P^T (A A^T + D D) P = S^T S (needs transposition?)

    where P is the permutation matrix defined by the vector pmut; it puts
    the rows of 'a' in order of nonincreasing rank, so that a[pmut]
    has its rows sorted that way.
    """
    # Note: ``np.float`` was a deprecated alias of the builtin ``float`` and
    # was removed in NumPy 1.24 — using the builtin keeps behaviour identical
    # while restoring importability on modern NumPy.
    a = np.asarray(a, dtype)
    b = np.asarray(b, dtype)
    ddiag = np.asarray(ddiag, dtype)

    n, m = a.shape
    assert m >= n
    assert b.shape == (m, )
    assert ddiag.shape == (n, )

    # Factor A, rotate b into Q-space, then solve the permuted triangular
    # system (also building the supplementary matrix S).
    q, r, pmut = _qr_factor_full(a)
    bqt = np.dot(b, q.T)
    x, s = _manual_qrd_solve(r[:, :n], pmut, ddiag, bqt,
                             dtype=dtype, build_s=True)

    return x, s, pmut
constant[Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way.
]
variable[a] assign[=] call[name[np].asarray, parameter[name[a], name[dtype]]]
variable[b] assign[=] call[name[np].asarray, parameter[name[b], name[dtype]]]
variable[ddiag] assign[=] call[name[np].asarray, parameter[name[ddiag], name[dtype]]]
<ast.Tuple object at 0x7da1b269c8b0> assign[=] name[a].shape
assert[compare[name[m] greater_or_equal[>=] name[n]]]
assert[compare[name[b].shape equal[==] tuple[[<ast.Name object at 0x7da1b269d180>]]]]
assert[compare[name[ddiag].shape equal[==] tuple[[<ast.Name object at 0x7da1b269d3c0>]]]]
<ast.Tuple object at 0x7da1b269d390> assign[=] call[name[_qr_factor_full], parameter[name[a]]]
variable[bqt] assign[=] call[name[np].dot, parameter[name[b], name[q].T]]
<ast.Tuple object at 0x7da1b269cdc0> assign[=] call[name[_manual_qrd_solve], parameter[call[name[r]][tuple[[<ast.Slice object at 0x7da1b269c6a0>, <ast.Slice object at 0x7da1b269cfd0>]]], name[pmut], name[ddiag], name[bqt]]]
return[tuple[[<ast.Name object at 0x7da1b269cf70>, <ast.Name object at 0x7da1b269cb50>, <ast.Name object at 0x7da1b269cf40>]]] | keyword[def] identifier[_qrd_solve_full] ( identifier[a] , identifier[b] , identifier[ddiag] , identifier[dtype] = identifier[np] . identifier[float] ):
literal[string]
identifier[a] = identifier[np] . identifier[asarray] ( identifier[a] , identifier[dtype] )
identifier[b] = identifier[np] . identifier[asarray] ( identifier[b] , identifier[dtype] )
identifier[ddiag] = identifier[np] . identifier[asarray] ( identifier[ddiag] , identifier[dtype] )
identifier[n] , identifier[m] = identifier[a] . identifier[shape]
keyword[assert] identifier[m] >= identifier[n]
keyword[assert] identifier[b] . identifier[shape] ==( identifier[m] ,)
keyword[assert] identifier[ddiag] . identifier[shape] ==( identifier[n] ,)
identifier[q] , identifier[r] , identifier[pmut] = identifier[_qr_factor_full] ( identifier[a] )
identifier[bqt] = identifier[np] . identifier[dot] ( identifier[b] , identifier[q] . identifier[T] )
identifier[x] , identifier[s] = identifier[_manual_qrd_solve] ( identifier[r] [:,: identifier[n] ], identifier[pmut] , identifier[ddiag] , identifier[bqt] ,
identifier[dtype] = identifier[dtype] , identifier[build_s] = keyword[True] )
keyword[return] identifier[x] , identifier[s] , identifier[pmut] | def _qrd_solve_full(a, b, ddiag, dtype=np.float):
"""Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way.
"""
a = np.asarray(a, dtype)
b = np.asarray(b, dtype)
ddiag = np.asarray(ddiag, dtype)
(n, m) = a.shape
assert m >= n
assert b.shape == (m,)
assert ddiag.shape == (n,)
# The computation is straightforward.
(q, r, pmut) = _qr_factor_full(a)
bqt = np.dot(b, q.T)
(x, s) = _manual_qrd_solve(r[:, :n], pmut, ddiag, bqt, dtype=dtype, build_s=True)
return (x, s, pmut) |
def num2bytes(value, size):
    """Convert an unsigned integer to MSB-first bytes with specified size."""
    # Fill a fixed-size buffer from the least-significant end backwards,
    # so the most significant byte lands at index 0 (big-endian layout).
    out = bytearray(size)
    for pos in range(size - 1, -1, -1):
        out[pos] = value & 0xFF
        value = value >> 8
    # Any bits left over mean the value did not fit in `size` bytes.
    assert value == 0
    return bytes(out)
constant[Convert an unsigned integer to MSB-first bytes with specified size.]
variable[res] assign[=] list[[]]
for taget[name[_]] in starred[call[name[range], parameter[name[size]]]] begin[:]
call[name[res].append, parameter[binary_operation[name[value] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
variable[value] assign[=] binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[8]]
assert[compare[name[value] equal[==] constant[0]]]
return[call[name[bytes], parameter[call[name[bytearray], parameter[call[name[list], parameter[call[name[reversed], parameter[name[res]]]]]]]]]] | keyword[def] identifier[num2bytes] ( identifier[value] , identifier[size] ):
literal[string]
identifier[res] =[]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[size] ):
identifier[res] . identifier[append] ( identifier[value] & literal[int] )
identifier[value] = identifier[value] >> literal[int]
keyword[assert] identifier[value] == literal[int]
keyword[return] identifier[bytes] ( identifier[bytearray] ( identifier[list] ( identifier[reversed] ( identifier[res] )))) | def num2bytes(value, size):
"""Convert an unsigned integer to MSB-first bytes with specified size."""
res = []
for _ in range(size):
res.append(value & 255)
value = value >> 8 # depends on [control=['for'], data=[]]
assert value == 0
return bytes(bytearray(list(reversed(res)))) |
def verify_jwt_in_request():
    """
    Ensure that the requester has a valid access token. This does not check
    the freshness of the access token. Raises an appropriate exception if
    there is no token or if the token is invalid.
    """
    # Exempt methods (e.g. OPTIONS preflight) skip verification entirely.
    if request.method in config.exempt_methods:
        return
    jwt_data = _decode_jwt_from_request(request_type='access')
    ctx_stack.top.jwt = jwt_data
    verify_token_claims(jwt_data)
    _load_user(jwt_data[config.identity_claim_key])
constant[
Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid.
]
if compare[name[request].method <ast.NotIn object at 0x7da2590d7190> name[config].exempt_methods] begin[:]
variable[jwt_data] assign[=] call[name[_decode_jwt_from_request], parameter[]]
name[ctx_stack].top.jwt assign[=] name[jwt_data]
call[name[verify_token_claims], parameter[name[jwt_data]]]
call[name[_load_user], parameter[call[name[jwt_data]][name[config].identity_claim_key]]] | keyword[def] identifier[verify_jwt_in_request] ():
literal[string]
keyword[if] identifier[request] . identifier[method] keyword[not] keyword[in] identifier[config] . identifier[exempt_methods] :
identifier[jwt_data] = identifier[_decode_jwt_from_request] ( identifier[request_type] = literal[string] )
identifier[ctx_stack] . identifier[top] . identifier[jwt] = identifier[jwt_data]
identifier[verify_token_claims] ( identifier[jwt_data] )
identifier[_load_user] ( identifier[jwt_data] [ identifier[config] . identifier[identity_claim_key] ]) | def verify_jwt_in_request():
"""
Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid.
"""
if request.method not in config.exempt_methods:
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
verify_token_claims(jwt_data)
_load_user(jwt_data[config.identity_claim_key]) # depends on [control=['if'], data=[]] |
def get_rotated(self, angle):
    """Return a new Point: this vector rotated anti-clockwise through
    the given angle in radians."""
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # Standard 2-D rotation matrix applied to (x, y).
    rotated_x = self.x * cos_a - self.y * sin_a
    rotated_y = self.x * sin_a + self.y * cos_a
    return Point(rotated_x, rotated_y)
constant[Rotates this vector through the given anti-clockwise angle
in radians.]
variable[ca] assign[=] call[name[math].cos, parameter[name[angle]]]
variable[sa] assign[=] call[name[math].sin, parameter[name[angle]]]
return[call[name[Point], parameter[binary_operation[binary_operation[name[self].x * name[ca]] - binary_operation[name[self].y * name[sa]]], binary_operation[binary_operation[name[self].x * name[sa]] + binary_operation[name[self].y * name[ca]]]]]] | keyword[def] identifier[get_rotated] ( identifier[self] , identifier[angle] ):
literal[string]
identifier[ca] = identifier[math] . identifier[cos] ( identifier[angle] )
identifier[sa] = identifier[math] . identifier[sin] ( identifier[angle] )
keyword[return] identifier[Point] ( identifier[self] . identifier[x] * identifier[ca] - identifier[self] . identifier[y] * identifier[sa] , identifier[self] . identifier[x] * identifier[sa] + identifier[self] . identifier[y] * identifier[ca] ) | def get_rotated(self, angle):
"""Rotates this vector through the given anti-clockwise angle
in radians."""
ca = math.cos(angle)
sa = math.sin(angle)
return Point(self.x * ca - self.y * sa, self.x * sa + self.y * ca) |
def find_types_removed_from_unions(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find types removed from unions.

    Given two schemas, returns a list containing descriptions of any breaking
    changes in the new_schema related to removing types from a union type.
    """
    removed: List[BreakingChange] = []
    for type_name, old_entry in old_schema.type_map.items():
        new_entry = new_schema.type_map.get(type_name)
        # Only pairs that are unions in *both* schemas are comparable here.
        if not (is_union_type(old_entry) and is_union_type(new_entry)):
            continue
        old_union = cast(GraphQLUnionType, old_entry)
        new_union = cast(GraphQLUnionType, new_entry)
        surviving_members = {member.name for member in new_union.types}
        for member in old_union.types:
            if member.name not in surviving_members:
                removed.append(
                    BreakingChange(
                        BreakingChangeType.TYPE_REMOVED_FROM_UNION,
                        f"{member.name} was removed from union type {type_name}.",
                    )
                )
    return removed
constant[Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type.
]
variable[old_type_map] assign[=] name[old_schema].type_map
variable[new_type_map] assign[=] name[new_schema].type_map
variable[types_removed_from_union] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2260610>, <ast.Name object at 0x7da1b2263be0>]]] in starred[call[name[old_type_map].items, parameter[]]] begin[:]
variable[new_type] assign[=] call[name[new_type_map].get, parameter[name[old_type_name]]]
if <ast.UnaryOp object at 0x7da1b2260d60> begin[:]
continue
variable[old_type] assign[=] call[name[cast], parameter[name[GraphQLUnionType], name[old_type]]]
variable[new_type] assign[=] call[name[cast], parameter[name[GraphQLUnionType], name[new_type]]]
variable[type_names_in_new_union] assign[=] <ast.SetComp object at 0x7da1b2261510>
for taget[name[type_]] in starred[name[old_type].types] begin[:]
variable[type_name] assign[=] name[type_].name
if compare[name[type_name] <ast.NotIn object at 0x7da2590d7190> name[type_names_in_new_union]] begin[:]
call[name[types_removed_from_union].append, parameter[call[name[BreakingChange], parameter[name[BreakingChangeType].TYPE_REMOVED_FROM_UNION, <ast.JoinedStr object at 0x7da1b22ebca0>]]]]
return[name[types_removed_from_union]] | keyword[def] identifier[find_types_removed_from_unions] (
identifier[old_schema] : identifier[GraphQLSchema] , identifier[new_schema] : identifier[GraphQLSchema]
)-> identifier[List] [ identifier[BreakingChange] ]:
literal[string]
identifier[old_type_map] = identifier[old_schema] . identifier[type_map]
identifier[new_type_map] = identifier[new_schema] . identifier[type_map]
identifier[types_removed_from_union] =[]
keyword[for] identifier[old_type_name] , identifier[old_type] keyword[in] identifier[old_type_map] . identifier[items] ():
identifier[new_type] = identifier[new_type_map] . identifier[get] ( identifier[old_type_name] )
keyword[if] keyword[not] ( identifier[is_union_type] ( identifier[old_type] ) keyword[and] identifier[is_union_type] ( identifier[new_type] )):
keyword[continue]
identifier[old_type] = identifier[cast] ( identifier[GraphQLUnionType] , identifier[old_type] )
identifier[new_type] = identifier[cast] ( identifier[GraphQLUnionType] , identifier[new_type] )
identifier[type_names_in_new_union] ={ identifier[type_] . identifier[name] keyword[for] identifier[type_] keyword[in] identifier[new_type] . identifier[types] }
keyword[for] identifier[type_] keyword[in] identifier[old_type] . identifier[types] :
identifier[type_name] = identifier[type_] . identifier[name]
keyword[if] identifier[type_name] keyword[not] keyword[in] identifier[type_names_in_new_union] :
identifier[types_removed_from_union] . identifier[append] (
identifier[BreakingChange] (
identifier[BreakingChangeType] . identifier[TYPE_REMOVED_FROM_UNION] ,
literal[string] ,
)
)
keyword[return] identifier[types_removed_from_union] | def find_types_removed_from_unions(old_schema: GraphQLSchema, new_schema: GraphQLSchema) -> List[BreakingChange]:
"""Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type.
"""
old_type_map = old_schema.type_map
new_type_map = new_schema.type_map
types_removed_from_union = []
for (old_type_name, old_type) in old_type_map.items():
new_type = new_type_map.get(old_type_name)
if not (is_union_type(old_type) and is_union_type(new_type)):
continue # depends on [control=['if'], data=[]]
old_type = cast(GraphQLUnionType, old_type)
new_type = cast(GraphQLUnionType, new_type)
type_names_in_new_union = {type_.name for type_ in new_type.types}
for type_ in old_type.types:
type_name = type_.name
if type_name not in type_names_in_new_union:
types_removed_from_union.append(BreakingChange(BreakingChangeType.TYPE_REMOVED_FROM_UNION, f'{type_name} was removed from union type {old_type_name}.')) # depends on [control=['if'], data=['type_name']] # depends on [control=['for'], data=['type_']] # depends on [control=['for'], data=[]]
return types_removed_from_union |
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
    """
    Returns the contained value or computes it from ``callback``.

    Args:
        callback: The default callback, invoked only for a ``NONE`` option.

    Returns:
        The contained value if the :py:class:`Option` is ``Some``,
        otherwise ``callback()``.

    Examples:
        >>> Some(0).unwrap_or_else(lambda: 111)
        0
        >>> NONE.unwrap_or_else(lambda: 'ha')
        'ha'
    """
    if self._is_some:
        return self._val
    return callback()
constant[
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
]
return[<ast.IfExp object at 0x7da18bc72560>] | keyword[def] identifier[unwrap_or_else] ( identifier[self] , identifier[callback] : identifier[Callable] [[], identifier[U] ])-> identifier[Union] [ identifier[T] , identifier[U] ]:
literal[string]
keyword[return] identifier[self] . identifier[_val] keyword[if] identifier[self] . identifier[_is_some] keyword[else] identifier[callback] () | def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback() |
def to_jsonf(self, fpath: str, encoding: str='utf8', indent: int=None, ignore_none: bool=True, ignore_empty: bool=False) -> str:
    """From instance to json file

    :param fpath: Json file path
    :param encoding: Json file encoding
    :param indent: Number of indentation
    :param ignore_none: Properties which is None are excluded if True
    :param ignore_empty: Properties which is empty are excluded if True
    :return: Json file path
    """
    # Serialize to plain values first, then hand off to the file writer.
    payload = traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty)
    return util.save_jsonf(payload, fpath, encoding, indent)
constant[From instance to json file
:param fpath: Json file path
:param encoding: Json file encoding
:param indent: Number of indentation
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Json file path
]
return[call[name[util].save_jsonf, parameter[call[name[traverse], parameter[name[self], name[ignore_none]]], name[fpath], name[encoding], name[indent]]]] | keyword[def] identifier[to_jsonf] ( identifier[self] , identifier[fpath] : identifier[str] , identifier[encoding] : identifier[str] = literal[string] , identifier[indent] : identifier[int] = keyword[None] , identifier[ignore_none] : identifier[bool] = keyword[True] , identifier[ignore_empty] : identifier[bool] = keyword[False] )-> identifier[str] :
literal[string]
keyword[return] identifier[util] . identifier[save_jsonf] ( identifier[traverse] ( identifier[self] , identifier[ignore_none] , identifier[force_value] = keyword[True] , identifier[ignore_empty] = identifier[ignore_empty] ), identifier[fpath] , identifier[encoding] , identifier[indent] ) | def to_jsonf(self, fpath: str, encoding: str='utf8', indent: int=None, ignore_none: bool=True, ignore_empty: bool=False) -> str:
"""From instance to json file
:param fpath: Json file path
:param encoding: Json file encoding
:param indent: Number of indentation
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Json file path
"""
return util.save_jsonf(traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty), fpath, encoding, indent) |
def get_rates_from_response_headers(headers):
    """
    Returns a namedtuple with values for short - and long usage and limit rates found in provided HTTP response headers

    :param headers: HTTP response headers
    :type headers: dict
    :return: namedtuple with request rates or None if no rate-limit headers present in response.
    :rtype: Optional[RequestRate]
    """
    try:
        # Each header carries "short,long" as comma-separated integers.
        usage = [int(part) for part in headers['X-RateLimit-Usage'].split(',')]
        limit = [int(part) for part in headers['X-RateLimit-Limit'].split(',')]
    except KeyError:
        # Rate-limit headers absent from the response.
        return None
    return RequestRate(short_usage=usage[0], long_usage=usage[1],
                       short_limit=limit[0], long_limit=limit[1])
constant[
Returns a namedtuple with values for short - and long usage and limit rates found in provided HTTP response headers
:param headers: HTTP response headers
:type headers: dict
:return: namedtuple with request rates or None if no rate-limit headers present in response.
:rtype: Optional[RequestRate]
]
<ast.Try object at 0x7da1b0778160> | keyword[def] identifier[get_rates_from_response_headers] ( identifier[headers] ):
literal[string]
keyword[try] :
identifier[usage_rates] =[ identifier[int] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[headers] [ literal[string] ]. identifier[split] ( literal[string] )]
identifier[limit_rates] =[ identifier[int] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[headers] [ literal[string] ]. identifier[split] ( literal[string] )]
keyword[return] identifier[RequestRate] ( identifier[short_usage] = identifier[usage_rates] [ literal[int] ], identifier[long_usage] = identifier[usage_rates] [ literal[int] ],
identifier[short_limit] = identifier[limit_rates] [ literal[int] ], identifier[long_limit] = identifier[limit_rates] [ literal[int] ])
keyword[except] identifier[KeyError] :
keyword[return] keyword[None] | def get_rates_from_response_headers(headers):
"""
Returns a namedtuple with values for short - and long usage and limit rates found in provided HTTP response headers
:param headers: HTTP response headers
:type headers: dict
:return: namedtuple with request rates or None if no rate-limit headers present in response.
:rtype: Optional[RequestRate]
"""
try:
usage_rates = [int(v) for v in headers['X-RateLimit-Usage'].split(',')]
limit_rates = [int(v) for v in headers['X-RateLimit-Limit'].split(',')]
return RequestRate(short_usage=usage_rates[0], long_usage=usage_rates[1], short_limit=limit_rates[0], long_limit=limit_rates[1]) # depends on [control=['try'], data=[]]
except KeyError:
return None # depends on [control=['except'], data=[]] |
def pull(self):
    """
    Pull this image from URL.

    :return: None
    """
    if not os.path.exists(CONU_IMAGES_STORE):
        os.makedirs(CONU_IMAGES_STORE)
    logger.debug(
        "Try to pull: {} -> {}".format(self.location, self.local_location))
    if not self._is_local():
        # Remote image: download the compressed archive, then decompress.
        compressed = self.local_location + ".xz"
        run_cmd(["curl", "-f", "-L", "-o", compressed, self.location])
        run_cmd(["xz", "-d", compressed])
    elif self.location.endswith("xz"):
        # Local compressed image: copy it in, then decompress.
        compressed = self.local_location + ".xz"
        run_cmd(["cp", self.location, compressed])
        run_cmd(["xz", "-d", compressed])
    else:
        # Local uncompressed image: a plain copy suffices.
        run_cmd(["cp", self.location, self.local_location])
constant[
Pull this image from URL.
:return: None
]
if <ast.UnaryOp object at 0x7da1b11978b0> begin[:]
call[name[os].makedirs, parameter[name[CONU_IMAGES_STORE]]]
call[name[logger].debug, parameter[call[constant[Try to pull: {} -> {}].format, parameter[name[self].location, name[self].local_location]]]]
if <ast.UnaryOp object at 0x7da1b1196fb0> begin[:]
variable[compressed_location] assign[=] binary_operation[name[self].local_location + constant[.xz]]
call[name[run_cmd], parameter[list[[<ast.Constant object at 0x7da1b1194490>, <ast.Constant object at 0x7da1b11976a0>, <ast.Constant object at 0x7da1b1195a20>, <ast.Constant object at 0x7da1b11969b0>, <ast.Name object at 0x7da1b1196b90>, <ast.Attribute object at 0x7da1b1194340>]]]]
call[name[run_cmd], parameter[list[[<ast.Constant object at 0x7da1b1194b80>, <ast.Constant object at 0x7da1b11961a0>, <ast.Name object at 0x7da1b1197670>]]]] | keyword[def] identifier[pull] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[CONU_IMAGES_STORE] ):
identifier[os] . identifier[makedirs] ( identifier[CONU_IMAGES_STORE] )
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] ( identifier[self] . identifier[location] , identifier[self] . identifier[local_location] ))
keyword[if] keyword[not] identifier[self] . identifier[_is_local] ():
identifier[compressed_location] = identifier[self] . identifier[local_location] + literal[string]
identifier[run_cmd] ([ literal[string] , literal[string] , literal[string] , literal[string] , identifier[compressed_location] , identifier[self] . identifier[location] ])
identifier[run_cmd] ([ literal[string] , literal[string] , identifier[compressed_location] ])
keyword[else] :
keyword[if] identifier[self] . identifier[location] . identifier[endswith] ( literal[string] ):
identifier[compressed_location] = identifier[self] . identifier[local_location] + literal[string]
identifier[run_cmd] ([ literal[string] , identifier[self] . identifier[location] , identifier[compressed_location] ])
identifier[run_cmd] ([ literal[string] , literal[string] , identifier[compressed_location] ])
keyword[else] :
identifier[run_cmd] ([ literal[string] , identifier[self] . identifier[location] , identifier[self] . identifier[local_location] ]) | def pull(self):
"""
Pull this image from URL.
:return: None
"""
if not os.path.exists(CONU_IMAGES_STORE):
os.makedirs(CONU_IMAGES_STORE) # depends on [control=['if'], data=[]]
logger.debug('Try to pull: {} -> {}'.format(self.location, self.local_location))
if not self._is_local():
compressed_location = self.local_location + '.xz'
run_cmd(['curl', '-f', '-L', '-o', compressed_location, self.location])
run_cmd(['xz', '-d', compressed_location]) # depends on [control=['if'], data=[]]
elif self.location.endswith('xz'):
compressed_location = self.local_location + '.xz'
run_cmd(['cp', self.location, compressed_location])
run_cmd(['xz', '-d', compressed_location]) # depends on [control=['if'], data=[]]
else:
run_cmd(['cp', self.location, self.local_location]) |
def run(self, N=1, trace_sort=False):
    '''Run the sampler, store the history of visited points into
    the member variable ``self.samples`` and the importance weights
    into ``self.weights``.

    .. seealso::
        :py:class:`pypmc.tools.History`

    :param N:
        Integer; the number of samples to be drawn.

    :param trace_sort:
        Bool; if True, return an array containing the responsible
        component of ``self.proposal`` for each sample generated
        during this run.

        .. note::
            This option only works for proposals of type
            :py:class:`pypmc.density.mixture.MixtureDensity`

        .. note::
            If True, the samples will be ordered by the components.
    '''
    # Nothing to draw; keep the historical return value of 0.
    if N == 0:
        return 0
    if trace_sort:
        drawn, origin = self._get_samples(N, trace_sort=True)
        self._calculate_weights(drawn, N)
        return origin
    drawn = self._get_samples(N, trace_sort=False)
    self._calculate_weights(drawn, N)
constant[Run the sampler, store the history of visited points into
the member variable ``self.samples`` and the importance weights
into ``self.weights``.
.. seealso::
:py:class:`pypmc.tools.History`
:param N:
Integer; the number of samples to be drawn.
:param trace_sort:
Bool; if True, return an array containing the responsible
component of ``self.proposal`` for each sample generated
during this run.
.. note::
This option only works for proposals of type
:py:class:`pypmc.density.mixture.MixtureDensity`
.. note::
If True, the samples will be ordered by the components.
]
if compare[name[N] equal[==] constant[0]] begin[:]
return[constant[0]]
if name[trace_sort] begin[:]
<ast.Tuple object at 0x7da18bcc8d30> assign[=] call[name[self]._get_samples, parameter[name[N]]]
call[name[self]._calculate_weights, parameter[name[this_samples], name[N]]]
return[name[origin]] | keyword[def] identifier[run] ( identifier[self] , identifier[N] = literal[int] , identifier[trace_sort] = keyword[False] ):
literal[string]
keyword[if] identifier[N] == literal[int] :
keyword[return] literal[int]
keyword[if] identifier[trace_sort] :
identifier[this_samples] , identifier[origin] = identifier[self] . identifier[_get_samples] ( identifier[N] , identifier[trace_sort] = keyword[True] )
identifier[self] . identifier[_calculate_weights] ( identifier[this_samples] , identifier[N] )
keyword[return] identifier[origin]
keyword[else] :
identifier[this_samples] = identifier[self] . identifier[_get_samples] ( identifier[N] , identifier[trace_sort] = keyword[False] )
identifier[self] . identifier[_calculate_weights] ( identifier[this_samples] , identifier[N] ) | def run(self, N=1, trace_sort=False):
"""Run the sampler, store the history of visited points into
the member variable ``self.samples`` and the importance weights
into ``self.weights``.
.. seealso::
:py:class:`pypmc.tools.History`
:param N:
Integer; the number of samples to be drawn.
:param trace_sort:
Bool; if True, return an array containing the responsible
component of ``self.proposal`` for each sample generated
during this run.
.. note::
This option only works for proposals of type
:py:class:`pypmc.density.mixture.MixtureDensity`
.. note::
If True, the samples will be ordered by the components.
"""
if N == 0:
return 0 # depends on [control=['if'], data=[]]
if trace_sort:
(this_samples, origin) = self._get_samples(N, trace_sort=True)
self._calculate_weights(this_samples, N)
return origin # depends on [control=['if'], data=[]]
else:
this_samples = self._get_samples(N, trace_sort=False)
self._calculate_weights(this_samples, N) |
def count_(self):
    """
    Returns the number of rows of the main dataframe
    """
    try:
        return len(self.df.index)
    except Exception as e:
        # Report through the instance's error channel; callers get None.
        self.err(e, "Can not count data")
        return
constant[
Returns the number of rows of the main dataframe
]
<ast.Try object at 0x7da18ede71c0>
return[name[num]] | keyword[def] identifier[count_] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[num] = identifier[len] ( identifier[self] . identifier[df] . identifier[index] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , literal[string] )
keyword[return]
keyword[return] identifier[num] | def count_(self):
"""
Returns the number of rows of the main dataframe
"""
try:
num = len(self.df.index) # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, 'Can not count data')
return # depends on [control=['except'], data=['e']]
return num |
def _to_rest(model, includes=None):
    """ Convert the model into a dict for serialization

    Notify schematics of the sparse fields requested while
    also forcing the resource id & resource type fields to always
    be present no matter the request. Additionally, any includes
    are implicitly added as well & automatically loaded.

    Then normalize the includes, hide private fields, & munge
    the relationships into a format the serializers are
    expecting.

    :param model: model instance to serialize
    :param includes: optional list of relationship field names to load
    :return: dict of properties with 'rid'/'rtype' keys normalized
    """
    includes = includes or []

    sparse = goldman.sess.req.fields.get(model.rtype, [])
    if sparse:
        # Build a NEW list rather than `sparse += ...`: `+=` would mutate
        # the list stored in goldman.sess.req.fields in place, leaking the
        # id/type fields & includes into later lookups of the same request.
        sparse = sparse + [model.rid_field, model.rtype_field] + includes

    props = model.to_primitive(
        load_rels=includes,
        sparse_fields=sparse,
    )

    # Normalize the model-specific field names to the generic keys the
    # serializers expect.
    props['rid'] = props.pop(model.rid_field)
    props['rtype'] = props.pop(model.rtype_field)

    _to_rest_hide(model, props)
    _to_rest_rels(model, props)

    return props
constant[ Convert the model into a dict for serialization
Notify schematics of the sparse fields requested while
also forcing the resource id & resource type fields to always
be present no matter the request. Additionally, any includes
are implicitly added as well & automatically loaded.
Then normalize the includes, hide private fields, & munge
the relationships into a format the serializers are
expecting.
]
variable[includes] assign[=] <ast.BoolOp object at 0x7da204347fd0>
variable[sparse] assign[=] call[name[goldman].sess.req.fields.get, parameter[name[model].rtype, list[[]]]]
if name[sparse] begin[:]
<ast.AugAssign object at 0x7da204345b10>
<ast.AugAssign object at 0x7da20e9562f0>
variable[props] assign[=] call[name[model].to_primitive, parameter[]]
call[name[props]][constant[rid]] assign[=] call[name[props].pop, parameter[name[model].rid_field]]
call[name[props]][constant[rtype]] assign[=] call[name[props].pop, parameter[name[model].rtype_field]]
call[name[_to_rest_hide], parameter[name[model], name[props]]]
call[name[_to_rest_rels], parameter[name[model], name[props]]]
return[name[props]] | keyword[def] identifier[_to_rest] ( identifier[model] , identifier[includes] = keyword[None] ):
literal[string]
identifier[includes] = identifier[includes] keyword[or] []
identifier[sparse] = identifier[goldman] . identifier[sess] . identifier[req] . identifier[fields] . identifier[get] ( identifier[model] . identifier[rtype] ,[])
keyword[if] identifier[sparse] :
identifier[sparse] +=[ identifier[model] . identifier[rid_field] , identifier[model] . identifier[rtype_field] ]
identifier[sparse] += identifier[includes]
identifier[props] = identifier[model] . identifier[to_primitive] (
identifier[load_rels] = identifier[includes] ,
identifier[sparse_fields] = identifier[sparse] ,
)
identifier[props] [ literal[string] ]= identifier[props] . identifier[pop] ( identifier[model] . identifier[rid_field] )
identifier[props] [ literal[string] ]= identifier[props] . identifier[pop] ( identifier[model] . identifier[rtype_field] )
identifier[_to_rest_hide] ( identifier[model] , identifier[props] )
identifier[_to_rest_rels] ( identifier[model] , identifier[props] )
keyword[return] identifier[props] | def _to_rest(model, includes=None):
""" Convert the model into a dict for serialization
Notify schematics of the sparse fields requested while
also forcing the resource id & resource type fields to always
be present no matter the request. Additionally, any includes
are implicitly added as well & automatically loaded.
Then normalize the includes, hide private fields, & munge
the relationships into a format the serializers are
expecting.
"""
includes = includes or []
sparse = goldman.sess.req.fields.get(model.rtype, [])
if sparse:
sparse += [model.rid_field, model.rtype_field]
sparse += includes # depends on [control=['if'], data=[]]
props = model.to_primitive(load_rels=includes, sparse_fields=sparse)
props['rid'] = props.pop(model.rid_field)
props['rtype'] = props.pop(model.rtype_field)
_to_rest_hide(model, props)
_to_rest_rels(model, props)
return props |
def is_valid_uid(uid):
    """
    :return: True if it is a valid DHIS2 UID, False if not
    """
    # Non-string input can never be a UID.
    if not isinstance(uid, string_types):
        return False
    # A UID is a letter followed by exactly 10 alphanumeric characters.
    return re.match(r'^[A-Za-z][A-Za-z0-9]{10}$', uid) is not None
constant[
:return: True if it is a valid DHIS2 UID, False if not
]
variable[pattern] assign[=] constant[^[A-Za-z][A-Za-z0-9]{10}$]
if <ast.UnaryOp object at 0x7da1b184a500> begin[:]
return[constant[False]]
return[call[name[bool], parameter[call[call[name[re].compile, parameter[name[pattern]]].match, parameter[name[uid]]]]]] | keyword[def] identifier[is_valid_uid] ( identifier[uid] ):
literal[string]
identifier[pattern] = literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[uid] , identifier[string_types] ):
keyword[return] keyword[False]
keyword[return] identifier[bool] ( identifier[re] . identifier[compile] ( identifier[pattern] ). identifier[match] ( identifier[uid] )) | def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = '^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False # depends on [control=['if'], data=[]]
return bool(re.compile(pattern).match(uid)) |
def _parse(self, lines, grammar, ignore_comments=False):
""" Given a type and a list, parse it using the more detailed
parse grammar.
"""
results = []
for c in lines:
if c != '' and not (ignore_comments and c[0] == '#'):
try:
results.append(grammar.parseString(c))
except pyparsing.ParseException as e:
raise ValueError('Invalid syntax. Verify line {} is '
'correct.\n{}\n\n{}'.format(e.lineno, c, e))
return results | def function[_parse, parameter[self, lines, grammar, ignore_comments]]:
constant[ Given a type and a list, parse it using the more detailed
parse grammar.
]
variable[results] assign[=] list[[]]
for taget[name[c]] in starred[name[lines]] begin[:]
if <ast.BoolOp object at 0x7da1b1ff1480> begin[:]
<ast.Try object at 0x7da1b1ff1060>
return[name[results]] | keyword[def] identifier[_parse] ( identifier[self] , identifier[lines] , identifier[grammar] , identifier[ignore_comments] = keyword[False] ):
literal[string]
identifier[results] =[]
keyword[for] identifier[c] keyword[in] identifier[lines] :
keyword[if] identifier[c] != literal[string] keyword[and] keyword[not] ( identifier[ignore_comments] keyword[and] identifier[c] [ literal[int] ]== literal[string] ):
keyword[try] :
identifier[results] . identifier[append] ( identifier[grammar] . identifier[parseString] ( identifier[c] ))
keyword[except] identifier[pyparsing] . identifier[ParseException] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[e] . identifier[lineno] , identifier[c] , identifier[e] ))
keyword[return] identifier[results] | def _parse(self, lines, grammar, ignore_comments=False):
""" Given a type and a list, parse it using the more detailed
parse grammar.
"""
results = []
for c in lines:
if c != '' and (not (ignore_comments and c[0] == '#')):
try:
results.append(grammar.parseString(c)) # depends on [control=['try'], data=[]]
except pyparsing.ParseException as e:
raise ValueError('Invalid syntax. Verify line {} is correct.\n{}\n\n{}'.format(e.lineno, c, e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return results |
def classifier(self):
""" Returns classifier from classifier.pkl """
clf = pickle.load(open(os.path.join(self.repopath, 'classifier.pkl')))
return clf | def function[classifier, parameter[self]]:
constant[ Returns classifier from classifier.pkl ]
variable[clf] assign[=] call[name[pickle].load, parameter[call[name[open], parameter[call[name[os].path.join, parameter[name[self].repopath, constant[classifier.pkl]]]]]]]
return[name[clf]] | keyword[def] identifier[classifier] ( identifier[self] ):
literal[string]
identifier[clf] = identifier[pickle] . identifier[load] ( identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[repopath] , literal[string] )))
keyword[return] identifier[clf] | def classifier(self):
""" Returns classifier from classifier.pkl """
clf = pickle.load(open(os.path.join(self.repopath, 'classifier.pkl')))
return clf |
def _write_to_log(self, output):
"""Write the output string to the log file."""
with open(self.log_file, 'a') as log_file:
log_file.write('\n')
log_file.write(output)
log_file.write('\n') | def function[_write_to_log, parameter[self, output]]:
constant[Write the output string to the log file.]
with call[name[open], parameter[name[self].log_file, constant[a]]] begin[:]
call[name[log_file].write, parameter[constant[
]]]
call[name[log_file].write, parameter[name[output]]]
call[name[log_file].write, parameter[constant[
]]] | keyword[def] identifier[_write_to_log] ( identifier[self] , identifier[output] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[log_file] , literal[string] ) keyword[as] identifier[log_file] :
identifier[log_file] . identifier[write] ( literal[string] )
identifier[log_file] . identifier[write] ( identifier[output] )
identifier[log_file] . identifier[write] ( literal[string] ) | def _write_to_log(self, output):
"""Write the output string to the log file."""
with open(self.log_file, 'a') as log_file:
log_file.write('\n')
log_file.write(output)
log_file.write('\n') # depends on [control=['with'], data=['log_file']] |
def getDisplayIdentifier(self):
"""Return the display_identifier if set, else return the claimed_id.
"""
if self.display_identifier is not None:
return self.display_identifier
if self.claimed_id is None:
return None
else:
return urllib.parse.urldefrag(self.claimed_id)[0] | def function[getDisplayIdentifier, parameter[self]]:
constant[Return the display_identifier if set, else return the claimed_id.
]
if compare[name[self].display_identifier is_not constant[None]] begin[:]
return[name[self].display_identifier]
if compare[name[self].claimed_id is constant[None]] begin[:]
return[constant[None]] | keyword[def] identifier[getDisplayIdentifier] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[display_identifier] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[display_identifier]
keyword[if] identifier[self] . identifier[claimed_id] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[urllib] . identifier[parse] . identifier[urldefrag] ( identifier[self] . identifier[claimed_id] )[ literal[int] ] | def getDisplayIdentifier(self):
"""Return the display_identifier if set, else return the claimed_id.
"""
if self.display_identifier is not None:
return self.display_identifier # depends on [control=['if'], data=[]]
if self.claimed_id is None:
return None # depends on [control=['if'], data=[]]
else:
return urllib.parse.urldefrag(self.claimed_id)[0] |
def get_directory(request):
"""Get API directory as a nested list of lists."""
def get_url(url):
return reverse(url, request=request) if url else url
def is_active_url(path, url):
return path.startswith(url) if url and path else False
path = request.path
directory_list = []
def sort_key(r):
return r[0]
# TODO(ant): support arbitrarily nested
# structure, for now it is capped at a single level
# for UX reasons
for group_name, endpoints in sorted(
six.iteritems(directory),
key=sort_key
):
endpoints_list = []
for endpoint_name, endpoint in sorted(
six.iteritems(endpoints),
key=sort_key
):
if endpoint_name[:1] == '_':
continue
endpoint_url = get_url(endpoint.get('_url', None))
active = is_active_url(path, endpoint_url)
endpoints_list.append(
(endpoint_name, endpoint_url, [], active)
)
url = get_url(endpoints.get('_url', None))
active = is_active_url(path, url)
directory_list.append(
(group_name, url, endpoints_list, active)
)
return directory_list | def function[get_directory, parameter[request]]:
constant[Get API directory as a nested list of lists.]
def function[get_url, parameter[url]]:
return[<ast.IfExp object at 0x7da2044c13f0>]
def function[is_active_url, parameter[path, url]]:
return[<ast.IfExp object at 0x7da2044c3010>]
variable[path] assign[=] name[request].path
variable[directory_list] assign[=] list[[]]
def function[sort_key, parameter[r]]:
return[call[name[r]][constant[0]]]
for taget[tuple[[<ast.Name object at 0x7da2044c0ca0>, <ast.Name object at 0x7da2044c3dc0>]]] in starred[call[name[sorted], parameter[call[name[six].iteritems, parameter[name[directory]]]]]] begin[:]
variable[endpoints_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2044c36d0>, <ast.Name object at 0x7da2044c3550>]]] in starred[call[name[sorted], parameter[call[name[six].iteritems, parameter[name[endpoints]]]]]] begin[:]
if compare[call[name[endpoint_name]][<ast.Slice object at 0x7da18eb56bf0>] equal[==] constant[_]] begin[:]
continue
variable[endpoint_url] assign[=] call[name[get_url], parameter[call[name[endpoint].get, parameter[constant[_url], constant[None]]]]]
variable[active] assign[=] call[name[is_active_url], parameter[name[path], name[endpoint_url]]]
call[name[endpoints_list].append, parameter[tuple[[<ast.Name object at 0x7da18eb549d0>, <ast.Name object at 0x7da18eb55690>, <ast.List object at 0x7da18eb55e70>, <ast.Name object at 0x7da18eb57c10>]]]]
variable[url] assign[=] call[name[get_url], parameter[call[name[endpoints].get, parameter[constant[_url], constant[None]]]]]
variable[active] assign[=] call[name[is_active_url], parameter[name[path], name[url]]]
call[name[directory_list].append, parameter[tuple[[<ast.Name object at 0x7da18eb57700>, <ast.Name object at 0x7da18eb57e50>, <ast.Name object at 0x7da18eb54f40>, <ast.Name object at 0x7da18eb547c0>]]]]
return[name[directory_list]] | keyword[def] identifier[get_directory] ( identifier[request] ):
literal[string]
keyword[def] identifier[get_url] ( identifier[url] ):
keyword[return] identifier[reverse] ( identifier[url] , identifier[request] = identifier[request] ) keyword[if] identifier[url] keyword[else] identifier[url]
keyword[def] identifier[is_active_url] ( identifier[path] , identifier[url] ):
keyword[return] identifier[path] . identifier[startswith] ( identifier[url] ) keyword[if] identifier[url] keyword[and] identifier[path] keyword[else] keyword[False]
identifier[path] = identifier[request] . identifier[path]
identifier[directory_list] =[]
keyword[def] identifier[sort_key] ( identifier[r] ):
keyword[return] identifier[r] [ literal[int] ]
keyword[for] identifier[group_name] , identifier[endpoints] keyword[in] identifier[sorted] (
identifier[six] . identifier[iteritems] ( identifier[directory] ),
identifier[key] = identifier[sort_key]
):
identifier[endpoints_list] =[]
keyword[for] identifier[endpoint_name] , identifier[endpoint] keyword[in] identifier[sorted] (
identifier[six] . identifier[iteritems] ( identifier[endpoints] ),
identifier[key] = identifier[sort_key]
):
keyword[if] identifier[endpoint_name] [: literal[int] ]== literal[string] :
keyword[continue]
identifier[endpoint_url] = identifier[get_url] ( identifier[endpoint] . identifier[get] ( literal[string] , keyword[None] ))
identifier[active] = identifier[is_active_url] ( identifier[path] , identifier[endpoint_url] )
identifier[endpoints_list] . identifier[append] (
( identifier[endpoint_name] , identifier[endpoint_url] ,[], identifier[active] )
)
identifier[url] = identifier[get_url] ( identifier[endpoints] . identifier[get] ( literal[string] , keyword[None] ))
identifier[active] = identifier[is_active_url] ( identifier[path] , identifier[url] )
identifier[directory_list] . identifier[append] (
( identifier[group_name] , identifier[url] , identifier[endpoints_list] , identifier[active] )
)
keyword[return] identifier[directory_list] | def get_directory(request):
"""Get API directory as a nested list of lists."""
def get_url(url):
return reverse(url, request=request) if url else url
def is_active_url(path, url):
return path.startswith(url) if url and path else False
path = request.path
directory_list = []
def sort_key(r):
return r[0]
# TODO(ant): support arbitrarily nested
# structure, for now it is capped at a single level
# for UX reasons
for (group_name, endpoints) in sorted(six.iteritems(directory), key=sort_key):
endpoints_list = []
for (endpoint_name, endpoint) in sorted(six.iteritems(endpoints), key=sort_key):
if endpoint_name[:1] == '_':
continue # depends on [control=['if'], data=[]]
endpoint_url = get_url(endpoint.get('_url', None))
active = is_active_url(path, endpoint_url)
endpoints_list.append((endpoint_name, endpoint_url, [], active)) # depends on [control=['for'], data=[]]
url = get_url(endpoints.get('_url', None))
active = is_active_url(path, url)
directory_list.append((group_name, url, endpoints_list, active)) # depends on [control=['for'], data=[]]
return directory_list |
def text_log_steps(self, request, project, pk=None):
"""
Gets a list of steps associated with this job
"""
try:
job = Job.objects.get(repository__name=project,
id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
textlog_steps = TextLogStep.objects.filter(job=job).order_by(
'started_line_number').prefetch_related('errors')
return Response(serializers.TextLogStepSerializer(textlog_steps,
many=True,
read_only=True).data) | def function[text_log_steps, parameter[self, request, project, pk]]:
constant[
Gets a list of steps associated with this job
]
<ast.Try object at 0x7da1b060c670>
variable[textlog_steps] assign[=] call[call[call[name[TextLogStep].objects.filter, parameter[]].order_by, parameter[constant[started_line_number]]].prefetch_related, parameter[constant[errors]]]
return[call[name[Response], parameter[call[name[serializers].TextLogStepSerializer, parameter[name[textlog_steps]]].data]]] | keyword[def] identifier[text_log_steps] ( identifier[self] , identifier[request] , identifier[project] , identifier[pk] = keyword[None] ):
literal[string]
keyword[try] :
identifier[job] = identifier[Job] . identifier[objects] . identifier[get] ( identifier[repository__name] = identifier[project] ,
identifier[id] = identifier[pk] )
keyword[except] identifier[ObjectDoesNotExist] :
keyword[return] identifier[Response] ( literal[string] . identifier[format] ( identifier[pk] ), identifier[status] = identifier[HTTP_404_NOT_FOUND] )
identifier[textlog_steps] = identifier[TextLogStep] . identifier[objects] . identifier[filter] ( identifier[job] = identifier[job] ). identifier[order_by] (
literal[string] ). identifier[prefetch_related] ( literal[string] )
keyword[return] identifier[Response] ( identifier[serializers] . identifier[TextLogStepSerializer] ( identifier[textlog_steps] ,
identifier[many] = keyword[True] ,
identifier[read_only] = keyword[True] ). identifier[data] ) | def text_log_steps(self, request, project, pk=None):
"""
Gets a list of steps associated with this job
"""
try:
job = Job.objects.get(repository__name=project, id=pk) # depends on [control=['try'], data=[]]
except ObjectDoesNotExist:
return Response('No job with id: {0}'.format(pk), status=HTTP_404_NOT_FOUND) # depends on [control=['except'], data=[]]
textlog_steps = TextLogStep.objects.filter(job=job).order_by('started_line_number').prefetch_related('errors')
return Response(serializers.TextLogStepSerializer(textlog_steps, many=True, read_only=True).data) |
def nlmsg_type(self, value):
"""Message content setter."""
self.bytearray[self._get_slicers(1)] = bytearray(c_uint16(value or 0)) | def function[nlmsg_type, parameter[self, value]]:
constant[Message content setter.]
call[name[self].bytearray][call[name[self]._get_slicers, parameter[constant[1]]]] assign[=] call[name[bytearray], parameter[call[name[c_uint16], parameter[<ast.BoolOp object at 0x7da1b26082b0>]]]] | keyword[def] identifier[nlmsg_type] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[bytearray] [ identifier[self] . identifier[_get_slicers] ( literal[int] )]= identifier[bytearray] ( identifier[c_uint16] ( identifier[value] keyword[or] literal[int] )) | def nlmsg_type(self, value):
"""Message content setter."""
self.bytearray[self._get_slicers(1)] = bytearray(c_uint16(value or 0)) |
def _allocate_segment(self, session, net_id, source):
"""Allocate segment from pool.
Return allocated db object or None.
"""
with session.begin(subtransactions=True):
hour_lapse = utils.utc_time_lapse(self.seg_timeout)
count = (session.query(self.model).filter(
self.model.delete_time < hour_lapse).update(
{"delete_time": None}))
select = (session.query(self.model).filter_by(allocated=False,
delete_time=None))
# Selected segment can be allocated before update by someone else,
# We retry until update success or DB_MAX_RETRIES retries
for attempt in range(DB_MAX_RETRIES + 1):
alloc = select.first()
if not alloc:
LOG.info("No segment resource available")
# No resource available
return
count = (session.query(self.model).
filter_by(segmentation_id=alloc.segmentation_id,
allocated=False).update({"allocated": True,
"network_id": net_id,
"source": source}))
if count:
return alloc
LOG.error("ERROR: Failed to allocate segment for net %(net)s"
" source %(src)s",
{'net': net_id, 'src': source}) | def function[_allocate_segment, parameter[self, session, net_id, source]]:
constant[Allocate segment from pool.
Return allocated db object or None.
]
with call[name[session].begin, parameter[]] begin[:]
variable[hour_lapse] assign[=] call[name[utils].utc_time_lapse, parameter[name[self].seg_timeout]]
variable[count] assign[=] call[call[call[name[session].query, parameter[name[self].model]].filter, parameter[compare[name[self].model.delete_time less[<] name[hour_lapse]]]].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1be7700>], [<ast.Constant object at 0x7da1b1be6080>]]]]
variable[select] assign[=] call[call[name[session].query, parameter[name[self].model]].filter_by, parameter[]]
for taget[name[attempt]] in starred[call[name[range], parameter[binary_operation[name[DB_MAX_RETRIES] + constant[1]]]]] begin[:]
variable[alloc] assign[=] call[name[select].first, parameter[]]
if <ast.UnaryOp object at 0x7da1b1be5090> begin[:]
call[name[LOG].info, parameter[constant[No segment resource available]]]
return[None]
variable[count] assign[=] call[call[call[name[session].query, parameter[name[self].model]].filter_by, parameter[]].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1a5dff0>, <ast.Constant object at 0x7da1b1a5cf10>, <ast.Constant object at 0x7da1b1a5de70>], [<ast.Constant object at 0x7da1b1a5d9f0>, <ast.Name object at 0x7da1b1a5e0b0>, <ast.Name object at 0x7da1b1a5f220>]]]]
if name[count] begin[:]
return[name[alloc]]
call[name[LOG].error, parameter[constant[ERROR: Failed to allocate segment for net %(net)s source %(src)s], dictionary[[<ast.Constant object at 0x7da1b1a5fc70>, <ast.Constant object at 0x7da1b1a5f0d0>], [<ast.Name object at 0x7da1b1a5c670>, <ast.Name object at 0x7da1b1a5fc10>]]]] | keyword[def] identifier[_allocate_segment] ( identifier[self] , identifier[session] , identifier[net_id] , identifier[source] ):
literal[string]
keyword[with] identifier[session] . identifier[begin] ( identifier[subtransactions] = keyword[True] ):
identifier[hour_lapse] = identifier[utils] . identifier[utc_time_lapse] ( identifier[self] . identifier[seg_timeout] )
identifier[count] =( identifier[session] . identifier[query] ( identifier[self] . identifier[model] ). identifier[filter] (
identifier[self] . identifier[model] . identifier[delete_time] < identifier[hour_lapse] ). identifier[update] (
{ literal[string] : keyword[None] }))
identifier[select] =( identifier[session] . identifier[query] ( identifier[self] . identifier[model] ). identifier[filter_by] ( identifier[allocated] = keyword[False] ,
identifier[delete_time] = keyword[None] ))
keyword[for] identifier[attempt] keyword[in] identifier[range] ( identifier[DB_MAX_RETRIES] + literal[int] ):
identifier[alloc] = identifier[select] . identifier[first] ()
keyword[if] keyword[not] identifier[alloc] :
identifier[LOG] . identifier[info] ( literal[string] )
keyword[return]
identifier[count] =( identifier[session] . identifier[query] ( identifier[self] . identifier[model] ).
identifier[filter_by] ( identifier[segmentation_id] = identifier[alloc] . identifier[segmentation_id] ,
identifier[allocated] = keyword[False] ). identifier[update] ({ literal[string] : keyword[True] ,
literal[string] : identifier[net_id] ,
literal[string] : identifier[source] }))
keyword[if] identifier[count] :
keyword[return] identifier[alloc]
identifier[LOG] . identifier[error] ( literal[string]
literal[string] ,
{ literal[string] : identifier[net_id] , literal[string] : identifier[source] }) | def _allocate_segment(self, session, net_id, source):
"""Allocate segment from pool.
Return allocated db object or None.
"""
with session.begin(subtransactions=True):
hour_lapse = utils.utc_time_lapse(self.seg_timeout)
count = session.query(self.model).filter(self.model.delete_time < hour_lapse).update({'delete_time': None})
select = session.query(self.model).filter_by(allocated=False, delete_time=None)
# Selected segment can be allocated before update by someone else,
# We retry until update success or DB_MAX_RETRIES retries
for attempt in range(DB_MAX_RETRIES + 1):
alloc = select.first()
if not alloc:
LOG.info('No segment resource available')
# No resource available
return # depends on [control=['if'], data=[]]
count = session.query(self.model).filter_by(segmentation_id=alloc.segmentation_id, allocated=False).update({'allocated': True, 'network_id': net_id, 'source': source})
if count:
return alloc # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
LOG.error('ERROR: Failed to allocate segment for net %(net)s source %(src)s', {'net': net_id, 'src': source}) |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Socket(key)
if key not in Socket._member_map_:
extend_enum(Socket, key, default)
return Socket[key] | def function[get, parameter[key, default]]:
constant[Backport support for original codes.]
if call[name[isinstance], parameter[name[key], name[int]]] begin[:]
return[call[name[Socket], parameter[name[key]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[Socket]._member_map_] begin[:]
call[name[extend_enum], parameter[name[Socket], name[key], name[default]]]
return[call[name[Socket]][name[key]]] | keyword[def] identifier[get] ( identifier[key] , identifier[default] =- literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[int] ):
keyword[return] identifier[Socket] ( identifier[key] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[Socket] . identifier[_member_map_] :
identifier[extend_enum] ( identifier[Socket] , identifier[key] , identifier[default] )
keyword[return] identifier[Socket] [ identifier[key] ] | def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Socket(key) # depends on [control=['if'], data=[]]
if key not in Socket._member_map_:
extend_enum(Socket, key, default) # depends on [control=['if'], data=['key']]
return Socket[key] |
def saturated_vapor_pressure(t_kelvin):
"""Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
if t_kelvin >= 273.15:
# Calculate saturation vapor pressure above freezing
sig = 1 - (t_kelvin / 647.096)
sig_polynomial = (-7.85951783 * sig) + (1.84408259 * sig ** 1.5) + \
(-11.7866487 * sig ** 3) + (22.6807411 * sig ** 3.5) + \
(-15.9618719 * sig ** 4) + (1.80122502 * sig ** 7.5)
crit_temp = 647.096 / t_kelvin
exponent = crit_temp * sig_polynomial
p_ws = math.exp(exponent) * 22064000
else:
# Calculate saturation vapor pressure below freezing
theta = t_kelvin / 273.15
exponent = -13.928169 * (1 - theta ** -1.5) + \
34.707823 * (1 - theta ** -1.25)
p_ws = math.exp(exponent) * 611.657
return p_ws | def function[saturated_vapor_pressure, parameter[t_kelvin]]:
constant[Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
]
if compare[name[t_kelvin] greater_or_equal[>=] constant[273.15]] begin[:]
variable[sig] assign[=] binary_operation[constant[1] - binary_operation[name[t_kelvin] / constant[647.096]]]
variable[sig_polynomial] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b12b8f10> * name[sig]] + binary_operation[constant[1.84408259] * binary_operation[name[sig] ** constant[1.5]]]] + binary_operation[<ast.UnaryOp object at 0x7da1b12b8130> * binary_operation[name[sig] ** constant[3]]]] + binary_operation[constant[22.6807411] * binary_operation[name[sig] ** constant[3.5]]]] + binary_operation[<ast.UnaryOp object at 0x7da1b12b8c70> * binary_operation[name[sig] ** constant[4]]]] + binary_operation[constant[1.80122502] * binary_operation[name[sig] ** constant[7.5]]]]
variable[crit_temp] assign[=] binary_operation[constant[647.096] / name[t_kelvin]]
variable[exponent] assign[=] binary_operation[name[crit_temp] * name[sig_polynomial]]
variable[p_ws] assign[=] binary_operation[call[name[math].exp, parameter[name[exponent]]] * constant[22064000]]
return[name[p_ws]] | keyword[def] identifier[saturated_vapor_pressure] ( identifier[t_kelvin] ):
literal[string]
keyword[if] identifier[t_kelvin] >= literal[int] :
identifier[sig] = literal[int] -( identifier[t_kelvin] / literal[int] )
identifier[sig_polynomial] =(- literal[int] * identifier[sig] )+( literal[int] * identifier[sig] ** literal[int] )+(- literal[int] * identifier[sig] ** literal[int] )+( literal[int] * identifier[sig] ** literal[int] )+(- literal[int] * identifier[sig] ** literal[int] )+( literal[int] * identifier[sig] ** literal[int] )
identifier[crit_temp] = literal[int] / identifier[t_kelvin]
identifier[exponent] = identifier[crit_temp] * identifier[sig_polynomial]
identifier[p_ws] = identifier[math] . identifier[exp] ( identifier[exponent] )* literal[int]
keyword[else] :
identifier[theta] = identifier[t_kelvin] / literal[int]
identifier[exponent] =- literal[int] *( literal[int] - identifier[theta] **- literal[int] )+ literal[int] *( literal[int] - identifier[theta] **- literal[int] )
identifier[p_ws] = identifier[math] . identifier[exp] ( identifier[exponent] )* literal[int]
keyword[return] identifier[p_ws] | def saturated_vapor_pressure(t_kelvin):
"""Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
if t_kelvin >= 273.15:
# Calculate saturation vapor pressure above freezing
sig = 1 - t_kelvin / 647.096
sig_polynomial = -7.85951783 * sig + 1.84408259 * sig ** 1.5 + -11.7866487 * sig ** 3 + 22.6807411 * sig ** 3.5 + -15.9618719 * sig ** 4 + 1.80122502 * sig ** 7.5
crit_temp = 647.096 / t_kelvin
exponent = crit_temp * sig_polynomial
p_ws = math.exp(exponent) * 22064000 # depends on [control=['if'], data=['t_kelvin']]
else:
# Calculate saturation vapor pressure below freezing
theta = t_kelvin / 273.15
exponent = -13.928169 * (1 - theta ** (-1.5)) + 34.707823 * (1 - theta ** (-1.25))
p_ws = math.exp(exponent) * 611.657
return p_ws |
def confd_state_internal_callpoints_callpoint_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
callpoints = ET.SubElement(internal, "callpoints")
callpoint = ET.SubElement(callpoints, "callpoint")
id = ET.SubElement(callpoint, "id")
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[confd_state_internal_callpoints_callpoint_id, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[confd_state] assign[=] call[name[ET].SubElement, parameter[name[config], constant[confd-state]]]
variable[internal] assign[=] call[name[ET].SubElement, parameter[name[confd_state], constant[internal]]]
variable[callpoints] assign[=] call[name[ET].SubElement, parameter[name[internal], constant[callpoints]]]
variable[callpoint] assign[=] call[name[ET].SubElement, parameter[name[callpoints], constant[callpoint]]]
variable[id] assign[=] call[name[ET].SubElement, parameter[name[callpoint], constant[id]]]
name[id].text assign[=] call[name[kwargs].pop, parameter[constant[id]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[confd_state_internal_callpoints_callpoint_id] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[confd_state] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[internal] = identifier[ET] . identifier[SubElement] ( identifier[confd_state] , literal[string] )
identifier[callpoints] = identifier[ET] . identifier[SubElement] ( identifier[internal] , literal[string] )
identifier[callpoint] = identifier[ET] . identifier[SubElement] ( identifier[callpoints] , literal[string] )
identifier[id] = identifier[ET] . identifier[SubElement] ( identifier[callpoint] , literal[string] )
identifier[id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def confd_state_internal_callpoints_callpoint_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
confd_state = ET.SubElement(config, 'confd-state', xmlns='http://tail-f.com/yang/confd-monitoring')
internal = ET.SubElement(confd_state, 'internal')
callpoints = ET.SubElement(internal, 'callpoints')
callpoint = ET.SubElement(callpoints, 'callpoint')
id = ET.SubElement(callpoint, 'id')
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valide axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels] | def function[validate_data_columns, parameter[self, data_columns, min_itemsize]]:
constant[take the input data_columns and min_itemize and create a data
columns spec
]
if <ast.UnaryOp object at 0x7da1b202ae90> begin[:]
return[list[[]]]
<ast.Tuple object at 0x7da1b2028760> assign[=] call[name[self].non_index_axes][constant[0]]
variable[info] assign[=] call[name[self].info.get, parameter[name[axis], call[name[dict], parameter[]]]]
if <ast.BoolOp object at 0x7da1b2029f60> begin[:]
<ast.Raise object at 0x7da1b2029c00>
if compare[name[data_columns] is constant[True]] begin[:]
variable[data_columns] assign[=] call[name[list], parameter[name[axis_labels]]]
if call[name[isinstance], parameter[name[min_itemsize], name[dict]]] begin[:]
variable[existing_data_columns] assign[=] call[name[set], parameter[name[data_columns]]]
call[name[data_columns].extend, parameter[<ast.ListComp object at 0x7da1b202b9d0>]]
return[<ast.ListComp object at 0x7da1b20287f0>] | keyword[def] identifier[validate_data_columns] ( identifier[self] , identifier[data_columns] , identifier[min_itemsize] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[self] . identifier[non_index_axes] ):
keyword[return] []
identifier[axis] , identifier[axis_labels] = identifier[self] . identifier[non_index_axes] [ literal[int] ]
identifier[info] = identifier[self] . identifier[info] . identifier[get] ( identifier[axis] , identifier[dict] ())
keyword[if] identifier[info] . identifier[get] ( literal[string] )== literal[string] keyword[and] identifier[data_columns] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[axis] , identifier[data_columns] ))
keyword[if] identifier[data_columns] keyword[is] keyword[True] :
identifier[data_columns] = identifier[list] ( identifier[axis_labels] )
keyword[elif] identifier[data_columns] keyword[is] keyword[None] :
identifier[data_columns] =[]
keyword[if] identifier[isinstance] ( identifier[min_itemsize] , identifier[dict] ):
identifier[existing_data_columns] = identifier[set] ( identifier[data_columns] )
identifier[data_columns] . identifier[extend] ([
identifier[k] keyword[for] identifier[k] keyword[in] identifier[min_itemsize] . identifier[keys] ()
keyword[if] identifier[k] != literal[string] keyword[and] identifier[k] keyword[not] keyword[in] identifier[existing_data_columns]
])
keyword[return] [ identifier[c] keyword[for] identifier[c] keyword[in] identifier[data_columns] keyword[if] identifier[c] keyword[in] identifier[axis_labels] ] | def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return [] # depends on [control=['if'], data=[]]
(axis, axis_labels) = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError('cannot use a multi-index on axis [{0}] with data_columns {1}'.format(axis, data_columns)) # depends on [control=['if'], data=[]]
# evaluate the passed data_columns, True == use all columns
# take only valide axis labels
if data_columns is True:
data_columns = list(axis_labels) # depends on [control=['if'], data=['data_columns']]
elif data_columns is None:
data_columns = [] # depends on [control=['if'], data=['data_columns']]
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns]) # depends on [control=['if'], data=[]]
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels] |
def open_urlview(self, data):
"""
Pipe a block of text to urlview, which displays a list of urls
contained in the text and allows the user to open them with their
web browser.
"""
urlview = os.getenv('RTV_URLVIEWER') or 'urlview'
command = shlex.split(urlview)
try:
with self.suspend():
_logger.debug('Running command: %s', command)
p = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
p.communicate(input=data.encode('utf-8'))
except KeyboardInterrupt:
p.terminate()
code = p.poll()
if code == 1:
# Clear the "No URLs found." message from stdout
sys.stdout.write("\033[F")
sys.stdout.flush()
if code == 1:
self.show_notification('No URLs found')
except OSError as e:
_logger.exception(e)
self.show_notification(
'Failed to open {0}'.format(urlview)) | def function[open_urlview, parameter[self, data]]:
constant[
Pipe a block of text to urlview, which displays a list of urls
contained in the text and allows the user to open them with their
web browser.
]
variable[urlview] assign[=] <ast.BoolOp object at 0x7da18fe901c0>
variable[command] assign[=] call[name[shlex].split, parameter[name[urlview]]]
<ast.Try object at 0x7da18fe927a0> | keyword[def] identifier[open_urlview] ( identifier[self] , identifier[data] ):
literal[string]
identifier[urlview] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[or] literal[string]
identifier[command] = identifier[shlex] . identifier[split] ( identifier[urlview] )
keyword[try] :
keyword[with] identifier[self] . identifier[suspend] ():
identifier[_logger] . identifier[debug] ( literal[string] , identifier[command] )
identifier[p] = identifier[subprocess] . identifier[Popen] ( identifier[command] , identifier[stdin] = identifier[subprocess] . identifier[PIPE] )
keyword[try] :
identifier[p] . identifier[communicate] ( identifier[input] = identifier[data] . identifier[encode] ( literal[string] ))
keyword[except] identifier[KeyboardInterrupt] :
identifier[p] . identifier[terminate] ()
identifier[code] = identifier[p] . identifier[poll] ()
keyword[if] identifier[code] == literal[int] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[if] identifier[code] == literal[int] :
identifier[self] . identifier[show_notification] ( literal[string] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[_logger] . identifier[exception] ( identifier[e] )
identifier[self] . identifier[show_notification] (
literal[string] . identifier[format] ( identifier[urlview] )) | def open_urlview(self, data):
"""
Pipe a block of text to urlview, which displays a list of urls
contained in the text and allows the user to open them with their
web browser.
"""
urlview = os.getenv('RTV_URLVIEWER') or 'urlview'
command = shlex.split(urlview)
try:
with self.suspend():
_logger.debug('Running command: %s', command)
p = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
p.communicate(input=data.encode('utf-8')) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
p.terminate() # depends on [control=['except'], data=[]]
code = p.poll()
if code == 1:
# Clear the "No URLs found." message from stdout
sys.stdout.write('\x1b[F')
sys.stdout.flush() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
if code == 1:
self.show_notification('No URLs found') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError as e:
_logger.exception(e)
self.show_notification('Failed to open {0}'.format(urlview)) # depends on [control=['except'], data=['e']] |
def run(self):
"""Starts the thread."""
resp = _wait_until(obj=self.obj, att=self.att,
desired=self.desired, callback=None,
interval=self.interval, attempts=self.attempts,
verbose=False, verbose_atts=None)
self.callback(resp) | def function[run, parameter[self]]:
constant[Starts the thread.]
variable[resp] assign[=] call[name[_wait_until], parameter[]]
call[name[self].callback, parameter[name[resp]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[resp] = identifier[_wait_until] ( identifier[obj] = identifier[self] . identifier[obj] , identifier[att] = identifier[self] . identifier[att] ,
identifier[desired] = identifier[self] . identifier[desired] , identifier[callback] = keyword[None] ,
identifier[interval] = identifier[self] . identifier[interval] , identifier[attempts] = identifier[self] . identifier[attempts] ,
identifier[verbose] = keyword[False] , identifier[verbose_atts] = keyword[None] )
identifier[self] . identifier[callback] ( identifier[resp] ) | def run(self):
"""Starts the thread."""
resp = _wait_until(obj=self.obj, att=self.att, desired=self.desired, callback=None, interval=self.interval, attempts=self.attempts, verbose=False, verbose_atts=None)
self.callback(resp) |
def list(self, **kwargs):
"""
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']] | def function[list, parameter[self]]:
constant[
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
]
variable[resp] assign[=] call[name[self].client.api.volumes, parameter[]]
if <ast.UnaryOp object at 0x7da18dc9bd00> begin[:]
return[list[[]]]
return[<ast.ListComp object at 0x7da18dc9b820>] | keyword[def] identifier[list] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[resp] = identifier[self] . identifier[client] . identifier[api] . identifier[volumes] (** identifier[kwargs] )
keyword[if] keyword[not] identifier[resp] . identifier[get] ( literal[string] ):
keyword[return] []
keyword[return] [ identifier[self] . identifier[prepare_model] ( identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[resp] [ literal[string] ]] | def list(self, **kwargs):
"""
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return [] # depends on [control=['if'], data=[]]
return [self.prepare_model(obj) for obj in resp['Volumes']] |
def group_id(self):
"""
Returns the @GROUPID.
If derived_from is set, returns that group_id.
"""
if self.derived_from is not None:
return self.derived_from.group_id()
if self.file_uuid is None:
return None
return utils.GROUP_ID_PREFIX + self.file_uuid | def function[group_id, parameter[self]]:
constant[
Returns the @GROUPID.
If derived_from is set, returns that group_id.
]
if compare[name[self].derived_from is_not constant[None]] begin[:]
return[call[name[self].derived_from.group_id, parameter[]]]
if compare[name[self].file_uuid is constant[None]] begin[:]
return[constant[None]]
return[binary_operation[name[utils].GROUP_ID_PREFIX + name[self].file_uuid]] | keyword[def] identifier[group_id] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[derived_from] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[derived_from] . identifier[group_id] ()
keyword[if] identifier[self] . identifier[file_uuid] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[utils] . identifier[GROUP_ID_PREFIX] + identifier[self] . identifier[file_uuid] | def group_id(self):
"""
Returns the @GROUPID.
If derived_from is set, returns that group_id.
"""
if self.derived_from is not None:
return self.derived_from.group_id() # depends on [control=['if'], data=[]]
if self.file_uuid is None:
return None # depends on [control=['if'], data=[]]
return utils.GROUP_ID_PREFIX + self.file_uuid |
def minify(path):
"""
Load a javascript file and minify.
Parameters
------------
path: str, path of resource
"""
if 'http' in path:
data = requests.get(path).content.decode(
'ascii', errors='ignore')
else:
with open(path, 'rb') as f:
# some of these assholes use unicode spaces -_-
data = f.read().decode('ascii',
errors='ignore')
# don't re- minify
if '.min.' in path:
return data
try:
return jsmin.jsmin(data)
except BaseException:
return data | def function[minify, parameter[path]]:
constant[
Load a javascript file and minify.
Parameters
------------
path: str, path of resource
]
if compare[constant[http] in name[path]] begin[:]
variable[data] assign[=] call[call[name[requests].get, parameter[name[path]]].content.decode, parameter[constant[ascii]]]
if compare[constant[.min.] in name[path]] begin[:]
return[name[data]]
<ast.Try object at 0x7da1b22d2590> | keyword[def] identifier[minify] ( identifier[path] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[path] :
identifier[data] = identifier[requests] . identifier[get] ( identifier[path] ). identifier[content] . identifier[decode] (
literal[string] , identifier[errors] = literal[string] )
keyword[else] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[f] . identifier[read] (). identifier[decode] ( literal[string] ,
identifier[errors] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[path] :
keyword[return] identifier[data]
keyword[try] :
keyword[return] identifier[jsmin] . identifier[jsmin] ( identifier[data] )
keyword[except] identifier[BaseException] :
keyword[return] identifier[data] | def minify(path):
"""
Load a javascript file and minify.
Parameters
------------
path: str, path of resource
"""
if 'http' in path:
data = requests.get(path).content.decode('ascii', errors='ignore') # depends on [control=['if'], data=['path']]
else:
with open(path, 'rb') as f:
# some of these assholes use unicode spaces -_-
data = f.read().decode('ascii', errors='ignore') # depends on [control=['with'], data=['f']]
# don't re- minify
if '.min.' in path:
return data # depends on [control=['if'], data=[]]
try:
return jsmin.jsmin(data) # depends on [control=['try'], data=[]]
except BaseException:
return data # depends on [control=['except'], data=[]] |
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
) | def function[_generate_version, parameter[base_version]]:
constant[Generate a version with information about the git repository]
variable[pkg_dir] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[name[__file__]]]]]]]
if <ast.BoolOp object at 0x7da1b20ba710> begin[:]
return[name[base_version]]
if <ast.BoolOp object at 0x7da1b1f8ee60> begin[:]
return[name[base_version]]
return[call[constant[{base_version}+{short_sha}{dirty}].format, parameter[]]] | keyword[def] identifier[_generate_version] ( identifier[base_version] ):
literal[string]
identifier[pkg_dir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )))
keyword[if] keyword[not] identifier[_is_git_repo] ( identifier[pkg_dir] ) keyword[or] keyword[not] identifier[_have_git] ():
keyword[return] identifier[base_version]
keyword[if] identifier[_is_release] ( identifier[pkg_dir] , identifier[base_version] ) keyword[and] keyword[not] identifier[_is_dirty] ( identifier[pkg_dir] ):
keyword[return] identifier[base_version]
keyword[return] literal[string] . identifier[format] (
identifier[base_version] = identifier[base_version] ,
identifier[short_sha] = identifier[_git_revision] ( identifier[pkg_dir] ). identifier[decode] ( literal[string] )[ literal[int] : literal[int] ],
identifier[dirty] = literal[string] keyword[if] identifier[_is_dirty] ( identifier[pkg_dir] ) keyword[else] literal[string] ,
) | def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version # depends on [control=['if'], data=[]]
if _is_release(pkg_dir, base_version) and (not _is_dirty(pkg_dir)):
return base_version # depends on [control=['if'], data=[]]
return '{base_version}+{short_sha}{dirty}'.format(base_version=base_version, short_sha=_git_revision(pkg_dir).decode('utf-8')[0:6], dirty='.mod' if _is_dirty(pkg_dir) else '') |
def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list) | def function[media, parameter[self]]:
constant[
Incooperate composite field's media.
]
variable[media_list] assign[=] list[[]]
call[name[media_list].append, parameter[call[name[super], parameter[name[SuperFormMixin], name[self]]].media]]
for taget[name[composite_name]] in starred[call[name[self].composite_fields.keys, parameter[]]] begin[:]
variable[form] assign[=] call[name[self].get_composite_field_value, parameter[name[composite_name]]]
call[name[media_list].append, parameter[name[form].media]]
return[call[name[reduce], parameter[<ast.Lambda object at 0x7da204620f10>, name[media_list]]]] | keyword[def] identifier[media] ( identifier[self] ):
literal[string]
identifier[media_list] =[]
identifier[media_list] . identifier[append] ( identifier[super] ( identifier[SuperFormMixin] , identifier[self] ). identifier[media] )
keyword[for] identifier[composite_name] keyword[in] identifier[self] . identifier[composite_fields] . identifier[keys] ():
identifier[form] = identifier[self] . identifier[get_composite_field_value] ( identifier[composite_name] )
identifier[media_list] . identifier[append] ( identifier[form] . identifier[media] )
keyword[return] identifier[reduce] ( keyword[lambda] identifier[a] , identifier[b] : identifier[a] + identifier[b] , identifier[media_list] ) | def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media) # depends on [control=['for'], data=['composite_name']]
return reduce(lambda a, b: a + b, media_list) |
def get_account_entitlement_for_user(self, user_id, determine_rights=None, create_if_not_exists=None):
"""GetAccountEntitlementForUser.
[Preview API] Get the entitlements for a user
:param str user_id: The id of the user
:param bool determine_rights:
:param bool create_if_not_exists:
:rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>`
"""
route_values = {}
if user_id is not None:
route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
query_parameters = {}
if determine_rights is not None:
query_parameters['determineRights'] = self._serialize.query('determine_rights', determine_rights, 'bool')
if create_if_not_exists is not None:
query_parameters['createIfNotExists'] = self._serialize.query('create_if_not_exists', create_if_not_exists, 'bool')
response = self._send(http_method='GET',
location_id='6490e566-b299-49a7-a4e4-28749752581f',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AccountEntitlement', response) | def function[get_account_entitlement_for_user, parameter[self, user_id, determine_rights, create_if_not_exists]]:
constant[GetAccountEntitlementForUser.
[Preview API] Get the entitlements for a user
:param str user_id: The id of the user
:param bool determine_rights:
:param bool create_if_not_exists:
:rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[user_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[userId]] assign[=] call[name[self]._serialize.url, parameter[constant[user_id], name[user_id], constant[str]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[determine_rights] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[determineRights]] assign[=] call[name[self]._serialize.query, parameter[constant[determine_rights], name[determine_rights], constant[bool]]]
if compare[name[create_if_not_exists] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[createIfNotExists]] assign[=] call[name[self]._serialize.query, parameter[constant[create_if_not_exists], name[create_if_not_exists], constant[bool]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[AccountEntitlement], name[response]]]] | keyword[def] identifier[get_account_entitlement_for_user] ( identifier[self] , identifier[user_id] , identifier[determine_rights] = keyword[None] , identifier[create_if_not_exists] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[user_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[user_id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[determine_rights] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[determine_rights] , literal[string] )
keyword[if] identifier[create_if_not_exists] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[create_if_not_exists] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def get_account_entitlement_for_user(self, user_id, determine_rights=None, create_if_not_exists=None):
"""GetAccountEntitlementForUser.
[Preview API] Get the entitlements for a user
:param str user_id: The id of the user
:param bool determine_rights:
:param bool create_if_not_exists:
:rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>`
"""
route_values = {}
if user_id is not None:
route_values['userId'] = self._serialize.url('user_id', user_id, 'str') # depends on [control=['if'], data=['user_id']]
query_parameters = {}
if determine_rights is not None:
query_parameters['determineRights'] = self._serialize.query('determine_rights', determine_rights, 'bool') # depends on [control=['if'], data=['determine_rights']]
if create_if_not_exists is not None:
query_parameters['createIfNotExists'] = self._serialize.query('create_if_not_exists', create_if_not_exists, 'bool') # depends on [control=['if'], data=['create_if_not_exists']]
response = self._send(http_method='GET', location_id='6490e566-b299-49a7-a4e4-28749752581f', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('AccountEntitlement', response) |
def field2read_only(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for a dump_only field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
if field.dump_only:
attributes["readOnly"] = True
return attributes | def function[field2read_only, parameter[self, field]]:
constant[Return the dictionary of OpenAPI field attributes for a dump_only field.
:param Field field: A marshmallow field.
:rtype: dict
]
variable[attributes] assign[=] dictionary[[], []]
if name[field].dump_only begin[:]
call[name[attributes]][constant[readOnly]] assign[=] constant[True]
return[name[attributes]] | keyword[def] identifier[field2read_only] ( identifier[self] , identifier[field] ,** identifier[kwargs] ):
literal[string]
identifier[attributes] ={}
keyword[if] identifier[field] . identifier[dump_only] :
identifier[attributes] [ literal[string] ]= keyword[True]
keyword[return] identifier[attributes] | def field2read_only(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for a dump_only field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
if field.dump_only:
attributes['readOnly'] = True # depends on [control=['if'], data=[]]
return attributes |
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map | def function[get_entry_map, parameter[self, group]]:
constant[Return the entry point map for `group`, or the full entry map]
<ast.Try object at 0x7da2041d93c0>
if compare[name[group] is_not constant[None]] begin[:]
return[call[name[ep_map].get, parameter[name[group], dictionary[[], []]]]]
return[name[ep_map]] | keyword[def] identifier[get_entry_map] ( identifier[self] , identifier[group] = keyword[None] ):
literal[string]
keyword[try] :
identifier[ep_map] = identifier[self] . identifier[_ep_map]
keyword[except] identifier[AttributeError] :
identifier[ep_map] = identifier[self] . identifier[_ep_map] = identifier[EntryPoint] . identifier[parse_map] (
identifier[self] . identifier[_get_metadata] ( literal[string] ), identifier[self]
)
keyword[if] identifier[group] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ep_map] . identifier[get] ( identifier[group] ,{})
keyword[return] identifier[ep_map] | def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map # depends on [control=['try'], data=[]]
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(self._get_metadata('entry_points.txt'), self) # depends on [control=['except'], data=[]]
if group is not None:
return ep_map.get(group, {}) # depends on [control=['if'], data=['group']]
return ep_map |
def send_object(bucket, obj, expected_chksum=None,
logger_data=None, restricted=True, as_attachment=False):
"""Send an object for a given bucket.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:params expected_chksum: Expected checksum.
:param logger_data: The python logger.
:param kwargs: Keyword arguments passed to ``Object.send_file()``
:returns: A Flask response.
"""
if not obj.is_head:
check_permission(
current_permission_factory(obj, 'object-read-version'),
hidden=False
)
if expected_chksum and obj.file.checksum != expected_chksum:
current_app.logger.warning(
'File checksum mismatch detected.', extra=logger_data)
file_downloaded.send(current_app._get_current_object(), obj=obj)
return obj.send_file(restricted=restricted,
as_attachment=as_attachment) | def function[send_object, parameter[bucket, obj, expected_chksum, logger_data, restricted, as_attachment]]:
constant[Send an object for a given bucket.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:params expected_chksum: Expected checksum.
:param logger_data: The python logger.
:param kwargs: Keyword arguments passed to ``Object.send_file()``
:returns: A Flask response.
]
if <ast.UnaryOp object at 0x7da1b1943640> begin[:]
call[name[check_permission], parameter[call[name[current_permission_factory], parameter[name[obj], constant[object-read-version]]]]]
if <ast.BoolOp object at 0x7da1b1943580> begin[:]
call[name[current_app].logger.warning, parameter[constant[File checksum mismatch detected.]]]
call[name[file_downloaded].send, parameter[call[name[current_app]._get_current_object, parameter[]]]]
return[call[name[obj].send_file, parameter[]]] | keyword[def] identifier[send_object] ( identifier[bucket] , identifier[obj] , identifier[expected_chksum] = keyword[None] ,
identifier[logger_data] = keyword[None] , identifier[restricted] = keyword[True] , identifier[as_attachment] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[obj] . identifier[is_head] :
identifier[check_permission] (
identifier[current_permission_factory] ( identifier[obj] , literal[string] ),
identifier[hidden] = keyword[False]
)
keyword[if] identifier[expected_chksum] keyword[and] identifier[obj] . identifier[file] . identifier[checksum] != identifier[expected_chksum] :
identifier[current_app] . identifier[logger] . identifier[warning] (
literal[string] , identifier[extra] = identifier[logger_data] )
identifier[file_downloaded] . identifier[send] ( identifier[current_app] . identifier[_get_current_object] (), identifier[obj] = identifier[obj] )
keyword[return] identifier[obj] . identifier[send_file] ( identifier[restricted] = identifier[restricted] ,
identifier[as_attachment] = identifier[as_attachment] ) | def send_object(bucket, obj, expected_chksum=None, logger_data=None, restricted=True, as_attachment=False):
"""Send an object for a given bucket.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:params expected_chksum: Expected checksum.
:param logger_data: The python logger.
:param kwargs: Keyword arguments passed to ``Object.send_file()``
:returns: A Flask response.
"""
if not obj.is_head:
check_permission(current_permission_factory(obj, 'object-read-version'), hidden=False) # depends on [control=['if'], data=[]]
if expected_chksum and obj.file.checksum != expected_chksum:
current_app.logger.warning('File checksum mismatch detected.', extra=logger_data) # depends on [control=['if'], data=[]]
file_downloaded.send(current_app._get_current_object(), obj=obj)
return obj.send_file(restricted=restricted, as_attachment=as_attachment) |
def resolve_parameter_refs(self, input):
"""
Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as
!GetAtt, !Sub or !Ref to non-parameters will be left untouched.
Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into
transform's output because it changes the template structure by inlining parameter values.
:param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions
:return: A copy of a dictionary with parameter references replaced by actual value.
"""
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs) | def function[resolve_parameter_refs, parameter[self, input]]:
constant[
Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as
!GetAtt, !Sub or !Ref to non-parameters will be left untouched.
Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into
transform's output because it changes the template structure by inlining parameter values.
:param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions
:return: A copy of a dictionary with parameter references replaced by actual value.
]
return[call[name[self]._traverse, parameter[name[input], name[self].parameters, name[self]._try_resolve_parameter_refs]]] | keyword[def] identifier[resolve_parameter_refs] ( identifier[self] , identifier[input] ):
literal[string]
keyword[return] identifier[self] . identifier[_traverse] ( identifier[input] , identifier[self] . identifier[parameters] , identifier[self] . identifier[_try_resolve_parameter_refs] ) | def resolve_parameter_refs(self, input):
"""
Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as
!GetAtt, !Sub or !Ref to non-parameters will be left untouched.
Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into
transform's output because it changes the template structure by inlining parameter values.
:param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions
:return: A copy of a dictionary with parameter references replaced by actual value.
"""
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs) |
def from_isodate(value, strict=False):
"""Convert an ISO formatted date into a Date object.
:param value: The ISO formatted date.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).date() | def function[from_isodate, parameter[value, strict]]:
constant[Convert an ISO formatted date into a Date object.
:param value: The ISO formatted date.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
]
if <ast.BoolOp object at 0x7da18dc98be0> begin[:]
return[call[call[name[arrow].get, parameter[name[value]]].date, parameter[]]] | keyword[def] identifier[from_isodate] ( identifier[value] , identifier[strict] = keyword[False] ):
literal[string]
keyword[if] identifier[value] keyword[or] identifier[strict] :
keyword[return] identifier[arrow] . identifier[get] ( identifier[value] ). identifier[date] () | def from_isodate(value, strict=False):
"""Convert an ISO formatted date into a Date object.
:param value: The ISO formatted date.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).date() # depends on [control=['if'], data=[]] |
def online(zpool, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0
    Ensure that the specified devices are online
    zpool : string
        name of storage pool
    vdevs : string
        one or more devices
    expand : boolean
        Expand the device to use all available space.
        .. note::
            If the device is part of a mirror or raidz then all devices must be
            expanded before the new space will become available to the pool.
    CLI Example:
    .. code-block:: bash
        salt '*' zpool.online myzpool /path/to/vdev1 [...]
    '''
    ## Configure pool
    # NOTE: the original code built ``flags``/``target`` twice; the first
    # copy was dead code, immediately overwritten by the second. Build once.
    flags = []
    target = []
    # NOTE: set extra config based on kwargs
    if kwargs.get('expand', False):
        flags.append('-e')
    # NOTE: append the pool name and specifications
    target.append(zpool)
    target.extend(vdevs)
    ## Bring online device
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='online',
            flags=flags,
            target=target,
        ),
        python_shell=False,
    )
    return __utils__['zfs.parse_command_result'](res, 'onlined')
constant[
.. versionadded:: 2015.5.0
Ensure that the specified devices are online
zpool : string
name of storage pool
vdevs : string
one or more devices
expand : boolean
Expand the device to use all available space.
.. note::
If the device is part of a mirror or raidz then all devices must be
expanded before the new space will become available to the pool.
CLI Example:
.. code-block:: bash
salt '*' zpool.online myzpool /path/to/vdev1 [...]
]
variable[flags] assign[=] list[[]]
variable[target] assign[=] list[[]]
if call[name[kwargs].get, parameter[constant[expand], constant[False]]] begin[:]
call[name[flags].append, parameter[constant[-e]]]
call[name[target].append, parameter[name[zpool]]]
if name[vdevs] begin[:]
call[name[target].extend, parameter[name[vdevs]]]
variable[flags] assign[=] list[[]]
variable[target] assign[=] list[[]]
if call[name[kwargs].get, parameter[constant[expand], constant[False]]] begin[:]
call[name[flags].append, parameter[constant[-e]]]
call[name[target].append, parameter[name[zpool]]]
call[name[target].extend, parameter[name[vdevs]]]
variable[res] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[call[name[__utils__]][constant[zfs.zpool_command]], parameter[]]]]
return[call[call[name[__utils__]][constant[zfs.parse_command_result]], parameter[name[res], constant[onlined]]]] | keyword[def] identifier[online] ( identifier[zpool] ,* identifier[vdevs] ,** identifier[kwargs] ):
literal[string]
identifier[flags] =[]
identifier[target] =[]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[flags] . identifier[append] ( literal[string] )
identifier[target] . identifier[append] ( identifier[zpool] )
keyword[if] identifier[vdevs] :
identifier[target] . identifier[extend] ( identifier[vdevs] )
identifier[flags] =[]
identifier[target] =[]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[flags] . identifier[append] ( literal[string] )
identifier[target] . identifier[append] ( identifier[zpool] )
identifier[target] . identifier[extend] ( identifier[vdevs] )
identifier[res] = identifier[__salt__] [ literal[string] ](
identifier[__utils__] [ literal[string] ](
identifier[command] = literal[string] ,
identifier[flags] = identifier[flags] ,
identifier[target] = identifier[target] ,
),
identifier[python_shell] = keyword[False] ,
)
keyword[return] identifier[__utils__] [ literal[string] ]( identifier[res] , literal[string] ) | def online(zpool, *vdevs, **kwargs):
"""
.. versionadded:: 2015.5.0
Ensure that the specified devices are online
zpool : string
name of storage pool
vdevs : string
one or more devices
expand : boolean
Expand the device to use all available space.
.. note::
If the device is part of a mirror or raidz then all devices must be
expanded before the new space will become available to the pool.
CLI Example:
.. code-block:: bash
salt '*' zpool.online myzpool /path/to/vdev1 [...]
"""
## Configure pool
# default options
flags = []
target = []
# set flags and options
if kwargs.get('expand', False):
flags.append('-e') # depends on [control=['if'], data=[]]
target.append(zpool)
if vdevs:
target.extend(vdevs) # depends on [control=['if'], data=[]]
## Configure pool
# NOTE: initialize the defaults
flags = []
target = []
# NOTE: set extra config based on kwargs
if kwargs.get('expand', False):
flags.append('-e') # depends on [control=['if'], data=[]]
# NOTE: append the pool name and specifications
target.append(zpool)
target.extend(vdevs)
## Bring online device
res = __salt__['cmd.run_all'](__utils__['zfs.zpool_command'](command='online', flags=flags, target=target), python_shell=False)
return __utils__['zfs.parse_command_result'](res, 'onlined') |
async def get_nym_role(self, target_did: str = None) -> Role:
    """
    Return the cryptonym role for input did from the ledger - note that this may exceed
    the role of least privilege for the class.
    Raise AbsentNym if current anchor has no cryptonym on the ledger, or WalletState if current DID unavailable.
    :param target_did: DID of cryptonym role to fetch (default own DID)
    :return: identifier for current cryptonym role on ledger
    """
    LOGGER.debug('BaseAnchor.get_nym_role >>> target_did: %s', target_did)
    nym_data = json.loads(await self.get_nym(target_did))
    if not nym_data:
        # Empty nym payload: the ledger has never seen this anchor.
        LOGGER.debug('BaseAnchor.get_nym_role <!< Ledger has no cryptonym for anchor %s', self.name)
        raise AbsentNym('Ledger has no cryptonym for anchor {}'.format(self.name))
    role = Role.get(nym_data['role'])
    LOGGER.debug('BaseAnchor.get_nym_role <<< %s', role)
    return role
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[target_did] )
identifier[nym] = identifier[json] . identifier[loads] ( keyword[await] identifier[self] . identifier[get_nym] ( identifier[target_did] ))
keyword[if] keyword[not] identifier[nym] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] )
keyword[raise] identifier[AbsentNym] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
identifier[rv] = identifier[Role] . identifier[get] ( identifier[nym] [ literal[string] ])
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv] )
keyword[return] identifier[rv] | async def get_nym_role(self, target_did: str=None) -> Role:
"""
Return the cryptonym role for input did from the ledger - note that this may exceed
the role of least privilege for the class.
Raise AbsentNym if current anchor has no cryptonym on the ledger, or WalletState if current DID unavailable.
:param target_did: DID of cryptonym role to fetch (default own DID)
:return: identifier for current cryptonym role on ledger
"""
LOGGER.debug('BaseAnchor.get_nym_role >>> target_did: %s', target_did)
nym = json.loads(await self.get_nym(target_did))
if not nym:
LOGGER.debug('BaseAnchor.get_nym_role <!< Ledger has no cryptonym for anchor %s', self.name)
raise AbsentNym('Ledger has no cryptonym for anchor {}'.format(self.name)) # depends on [control=['if'], data=[]]
rv = Role.get(nym['role'])
LOGGER.debug('BaseAnchor.get_nym_role <<< %s', rv)
return rv |
def get_addr_of_native_method(self, soot_method):
    """
    Get address of the implementation from a native declared Java function.
    :param soot_method: Method descriptor of a native declared function.
    :return: CLE address of the given method.
    """
    # Scan the known native symbols for one whose mangled name matches
    # the Soot method descriptor.
    for sym_name, sym in self.native_symbols.items():
        if not soot_method.matches_with_native_name(native_method=sym_name):
            continue
        l.debug("Found native symbol '%s' @ %x matching Soot method '%s'",
                sym_name, sym.rebased_addr, soot_method)
        return sym.rebased_addr
    # No match: log the failure and the candidate symbols for debugging.
    available = "\n".join(self.native_symbols.keys())
    l.warning("No native method found that matches the Soot method '%s'. "
              "Skipping statement.", soot_method.name)
    l.debug("Available symbols (prefix + encoded class path + encoded method "
            "name):\n%s", available)
    return None
constant[
Get address of the implementation from a native declared Java function.
:param soot_method: Method descriptor of a native declared function.
:return: CLE address of the given method.
]
for taget[tuple[[<ast.Name object at 0x7da20c6c5240>, <ast.Name object at 0x7da20c6c41f0>]]] in starred[call[name[self].native_symbols.items, parameter[]]] begin[:]
if call[name[soot_method].matches_with_native_name, parameter[]] begin[:]
call[name[l].debug, parameter[constant[Found native symbol '%s' @ %x matching Soot method '%s'], name[name], name[symbol].rebased_addr, name[soot_method]]]
return[name[symbol].rebased_addr]
variable[native_symbols] assign[=] call[constant[
].join, parameter[call[name[self].native_symbols.keys, parameter[]]]]
call[name[l].warning, parameter[constant[No native method found that matches the Soot method '%s'. Skipping statement.], name[soot_method].name]]
call[name[l].debug, parameter[constant[Available symbols (prefix + encoded class path + encoded method name):
%s], name[native_symbols]]]
return[constant[None]] | keyword[def] identifier[get_addr_of_native_method] ( identifier[self] , identifier[soot_method] ):
literal[string]
keyword[for] identifier[name] , identifier[symbol] keyword[in] identifier[self] . identifier[native_symbols] . identifier[items] ():
keyword[if] identifier[soot_method] . identifier[matches_with_native_name] ( identifier[native_method] = identifier[name] ):
identifier[l] . identifier[debug] ( literal[string] ,
identifier[name] , identifier[symbol] . identifier[rebased_addr] , identifier[soot_method] )
keyword[return] identifier[symbol] . identifier[rebased_addr]
identifier[native_symbols] = literal[string] . identifier[join] ( identifier[self] . identifier[native_symbols] . identifier[keys] ())
identifier[l] . identifier[warning] ( literal[string]
literal[string] , identifier[soot_method] . identifier[name] )
identifier[l] . identifier[debug] ( literal[string]
literal[string] , identifier[native_symbols] )
keyword[return] keyword[None] | def get_addr_of_native_method(self, soot_method):
"""
Get address of the implementation from a native declared Java function.
:param soot_method: Method descriptor of a native declared function.
:return: CLE address of the given method.
"""
for (name, symbol) in self.native_symbols.items():
if soot_method.matches_with_native_name(native_method=name):
l.debug("Found native symbol '%s' @ %x matching Soot method '%s'", name, symbol.rebased_addr, soot_method)
return symbol.rebased_addr # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
native_symbols = '\n'.join(self.native_symbols.keys())
l.warning("No native method found that matches the Soot method '%s'. Skipping statement.", soot_method.name)
l.debug('Available symbols (prefix + encoded class path + encoded method name):\n%s', native_symbols)
return None |
def set_throttle(self, name):
    """ Assign to throttle group.

    :param name: throttle group name; "null" (any case) is normalized to
        "NULL" and "none" to the empty string (i.e. no throttle group).
    :raises error.UserError: if neither the upload nor the download
        direction of the named throttle exists on the client.
    """
    # Normalize the two special pseudo-names.
    if name.lower() == "null":
        name = "NULL"
    if name.lower() == "none":
        name = ''
    if name not in self._engine.known_throttle_names:
        # Probe the client via XMLRPC: a max of -1 in BOTH directions
        # means the throttle group is unknown there.
        if self._engine._rpc.throttle.up.max(xmlrpc.NOHASH, name) == -1:
            if self._engine._rpc.throttle.down.max(xmlrpc.NOHASH, name) == -1:
                raise error.UserError("Unknown throttle name '{}'".format(name))
        # Cache the validated name so later calls skip the RPC probe.
        self._engine.known_throttle_names.add(name)
    if (name or "NONE") == self.throttle:
        # Already in the requested group; nothing to change.
        self._engine.LOG.debug("Keeping throttle %r on torrent #%s" % (self.throttle, self._fields["hash"]))
        return
    # Stop an active torrent while switching throttles, restart afterwards.
    active = self.is_active
    if active:
        self._engine.LOG.debug("Torrent #%s stopped for throttling" % (self._fields["hash"],))
        self.stop()
    self._make_it_so("setting throttle %r on" % (name,), ["throttle_name.set"], name)
    if active:
        self._engine.LOG.debug("Torrent #%s restarted after throttling" % (self._fields["hash"],))
        self.start()
constant[ Assign to throttle group.
]
if compare[call[name[name].lower, parameter[]] equal[==] constant[null]] begin[:]
variable[name] assign[=] constant[NULL]
if compare[call[name[name].lower, parameter[]] equal[==] constant[none]] begin[:]
variable[name] assign[=] constant[]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._engine.known_throttle_names] begin[:]
if compare[call[name[self]._engine._rpc.throttle.up.max, parameter[name[xmlrpc].NOHASH, name[name]]] equal[==] <ast.UnaryOp object at 0x7da1b26adde0>] begin[:]
if compare[call[name[self]._engine._rpc.throttle.down.max, parameter[name[xmlrpc].NOHASH, name[name]]] equal[==] <ast.UnaryOp object at 0x7da1b26ae230>] begin[:]
<ast.Raise object at 0x7da1b26af340>
call[name[self]._engine.known_throttle_names.add, parameter[name[name]]]
if compare[<ast.BoolOp object at 0x7da1b26ada20> equal[==] name[self].throttle] begin[:]
call[name[self]._engine.LOG.debug, parameter[binary_operation[constant[Keeping throttle %r on torrent #%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b26ac4c0>, <ast.Subscript object at 0x7da1b26ad360>]]]]]
return[None]
variable[active] assign[=] name[self].is_active
if name[active] begin[:]
call[name[self]._engine.LOG.debug, parameter[binary_operation[constant[Torrent #%s stopped for throttling] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c76dcf0>]]]]]
call[name[self].stop, parameter[]]
call[name[self]._make_it_so, parameter[binary_operation[constant[setting throttle %r on] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c76d660>]]], list[[<ast.Constant object at 0x7da20c76e3b0>]], name[name]]]
if name[active] begin[:]
call[name[self]._engine.LOG.debug, parameter[binary_operation[constant[Torrent #%s restarted after throttling] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c76ecb0>]]]]]
call[name[self].start, parameter[]] | keyword[def] identifier[set_throttle] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] . identifier[lower] ()== literal[string] :
identifier[name] = literal[string]
keyword[if] identifier[name] . identifier[lower] ()== literal[string] :
identifier[name] = literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_engine] . identifier[known_throttle_names] :
keyword[if] identifier[self] . identifier[_engine] . identifier[_rpc] . identifier[throttle] . identifier[up] . identifier[max] ( identifier[xmlrpc] . identifier[NOHASH] , identifier[name] )==- literal[int] :
keyword[if] identifier[self] . identifier[_engine] . identifier[_rpc] . identifier[throttle] . identifier[down] . identifier[max] ( identifier[xmlrpc] . identifier[NOHASH] , identifier[name] )==- literal[int] :
keyword[raise] identifier[error] . identifier[UserError] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[self] . identifier[_engine] . identifier[known_throttle_names] . identifier[add] ( identifier[name] )
keyword[if] ( identifier[name] keyword[or] literal[string] )== identifier[self] . identifier[throttle] :
identifier[self] . identifier[_engine] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[self] . identifier[throttle] , identifier[self] . identifier[_fields] [ literal[string] ]))
keyword[return]
identifier[active] = identifier[self] . identifier[is_active]
keyword[if] identifier[active] :
identifier[self] . identifier[_engine] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[self] . identifier[_fields] [ literal[string] ],))
identifier[self] . identifier[stop] ()
identifier[self] . identifier[_make_it_so] ( literal[string] %( identifier[name] ,),[ literal[string] ], identifier[name] )
keyword[if] identifier[active] :
identifier[self] . identifier[_engine] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[self] . identifier[_fields] [ literal[string] ],))
identifier[self] . identifier[start] () | def set_throttle(self, name):
""" Assign to throttle group.
"""
if name.lower() == 'null':
name = 'NULL' # depends on [control=['if'], data=[]]
if name.lower() == 'none':
name = '' # depends on [control=['if'], data=[]]
if name not in self._engine.known_throttle_names:
if self._engine._rpc.throttle.up.max(xmlrpc.NOHASH, name) == -1:
if self._engine._rpc.throttle.down.max(xmlrpc.NOHASH, name) == -1:
raise error.UserError("Unknown throttle name '{}'".format(name)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._engine.known_throttle_names.add(name) # depends on [control=['if'], data=['name']]
if (name or 'NONE') == self.throttle:
self._engine.LOG.debug('Keeping throttle %r on torrent #%s' % (self.throttle, self._fields['hash']))
return # depends on [control=['if'], data=[]]
active = self.is_active
if active:
self._engine.LOG.debug('Torrent #%s stopped for throttling' % (self._fields['hash'],))
self.stop() # depends on [control=['if'], data=[]]
self._make_it_so('setting throttle %r on' % (name,), ['throttle_name.set'], name)
if active:
self._engine.LOG.debug('Torrent #%s restarted after throttling' % (self._fields['hash'],))
self.start() # depends on [control=['if'], data=[]] |
def _log_images(self):
    """Creates database logs for all the image file names in the global
    variable :data:`thumb_uuid`.
    """
    from time import time
    from acorn.logging.database import record
    log_entry = {
        "m": "plot",
        "a": None,
        "s": time(),
        "r": thumb_uuid,
    }
    # Try to match the executed cell's code up with one that we
    # intercepted in the past.
    cell_code = self.shell.user_ns.get("i_{0:d}".format(self.cellid))
    matched_id = self._find_cellid(cell_code)
    if matched_id is None:
        matched_id = self.cellid
    # Store the contents of the cell so they are up to date for next time.
    self.cellids[matched_id] = cell_code
    from acorn import msg
    record("__main__.{}".format(matched_id), log_entry)
    msg.info(log_entry, 1)
constant[Creates database logs for all the image file names in the global
variable :data:`thumb_uuid`.
]
from relative_module[time] import module[time]
from relative_module[acorn.logging.database] import module[record]
variable[entry] assign[=] dictionary[[<ast.Constant object at 0x7da1b14d30d0>, <ast.Constant object at 0x7da1b14d2950>, <ast.Constant object at 0x7da1b14d08b0>, <ast.Constant object at 0x7da1b14d0940>], [<ast.Constant object at 0x7da1b14d1a80>, <ast.Constant object at 0x7da1b14d1270>, <ast.Call object at 0x7da1b14d0a90>, <ast.Name object at 0x7da1b14d0970>]]
variable[code] assign[=] call[name[self].shell.user_ns.get, parameter[call[constant[i_{0:d}].format, parameter[name[self].cellid]]]]
variable[cellid] assign[=] call[name[self]._find_cellid, parameter[name[code]]]
if compare[name[cellid] is constant[None]] begin[:]
variable[cellid] assign[=] name[self].cellid
call[name[self].cellids][name[cellid]] assign[=] name[code]
from relative_module[acorn] import module[msg]
call[name[record], parameter[call[constant[__main__.{}].format, parameter[name[cellid]]], name[entry]]]
call[name[msg].info, parameter[name[entry], constant[1]]] | keyword[def] identifier[_log_images] ( identifier[self] ):
literal[string]
keyword[from] identifier[time] keyword[import] identifier[time]
keyword[from] identifier[acorn] . identifier[logging] . identifier[database] keyword[import] identifier[record]
identifier[entry] ={
literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : identifier[time] (),
literal[string] : identifier[thumb_uuid] ,
}
identifier[code] = identifier[self] . identifier[shell] . identifier[user_ns] . identifier[get] ( literal[string] . identifier[format] ( identifier[self] . identifier[cellid] ))
identifier[cellid] = identifier[self] . identifier[_find_cellid] ( identifier[code] )
keyword[if] identifier[cellid] keyword[is] keyword[None] :
identifier[cellid] = identifier[self] . identifier[cellid]
identifier[self] . identifier[cellids] [ identifier[cellid] ]= identifier[code]
keyword[from] identifier[acorn] keyword[import] identifier[msg]
identifier[record] ( literal[string] . identifier[format] ( identifier[cellid] ), identifier[entry] )
identifier[msg] . identifier[info] ( identifier[entry] , literal[int] ) | def _log_images(self):
"""Creates database logs for all the image file names in the global
variable :data:`thumb_uuid`.
"""
from time import time
from acorn.logging.database import record
entry = {'m': 'plot', 'a': None, 's': time(), 'r': thumb_uuid}
#See if we can match the executed cell's code up with one that we
#intercepted in the past..
code = self.shell.user_ns.get('i_{0:d}'.format(self.cellid))
cellid = self._find_cellid(code)
if cellid is None:
cellid = self.cellid # depends on [control=['if'], data=['cellid']]
#Store the contents of the cell so they are up to date for next time.
self.cellids[cellid] = code
from acorn import msg
record('__main__.{}'.format(cellid), entry)
msg.info(entry, 1) |
def commit(self, message, author=None):
    """
    Commit changes to tracked files in the working tree.
    :param message: The commit message (a string).
    :param author: Override :attr:`author` (refer to
                   :func:`coerce_author()` for details
                   on argument handling).
    """
    # A commit needs an existing repository with a working tree.
    self.ensure_exists()
    self.ensure_working_tree()
    logger.info("Committing changes in %s: %s", format_path(self.local), message)
    if author:
        commit_author = coerce_author(author)
    else:
        commit_author = self.author
    self.context.execute(*self.get_commit_command(message, commit_author))
constant[
Commit changes to tracked files in the working tree.
:param message: The commit message (a string).
:param author: Override :attr:`author` (refer to
:func:`coerce_author()` for details
on argument handling).
]
call[name[self].ensure_exists, parameter[]]
call[name[self].ensure_working_tree, parameter[]]
call[name[logger].info, parameter[constant[Committing changes in %s: %s], call[name[format_path], parameter[name[self].local]], name[message]]]
variable[author] assign[=] <ast.IfExp object at 0x7da1b0a05300>
call[name[self].context.execute, parameter[<ast.Starred object at 0x7da1b0a05a20>]] | keyword[def] identifier[commit] ( identifier[self] , identifier[message] , identifier[author] = keyword[None] ):
literal[string]
identifier[self] . identifier[ensure_exists] ()
identifier[self] . identifier[ensure_working_tree] ()
identifier[logger] . identifier[info] ( literal[string] , identifier[format_path] ( identifier[self] . identifier[local] ), identifier[message] )
identifier[author] = identifier[coerce_author] ( identifier[author] ) keyword[if] identifier[author] keyword[else] identifier[self] . identifier[author]
identifier[self] . identifier[context] . identifier[execute] (* identifier[self] . identifier[get_commit_command] ( identifier[message] , identifier[author] )) | def commit(self, message, author=None):
"""
Commit changes to tracked files in the working tree.
:param message: The commit message (a string).
:param author: Override :attr:`author` (refer to
:func:`coerce_author()` for details
on argument handling).
"""
# Make sure the local repository exists and supports a working tree.
self.ensure_exists()
self.ensure_working_tree()
logger.info('Committing changes in %s: %s', format_path(self.local), message)
author = coerce_author(author) if author else self.author
self.context.execute(*self.get_commit_command(message, author)) |
def xopen(filename, mode='r', compresslevel=6, threads=None):
    """
    A replacement for the "open" function that can also open files that have
    been compressed with gzip, bzip2 or xz. If the filename is '-', standard
    output (mode 'w') or input (mode 'r') is returned.
    The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2 and .xz is
    xz/lzma.
    When writing a gzip-compressed file, the following methods are tried in order to get the
    best speed 1) using a pigz (parallel gzip) subprocess; 2) using a gzip subprocess;
    3) gzip.open. A single gzip subprocess can be faster than gzip.open because it runs in a
    separate process.
    Uncompressed files are opened with the regular open().
    mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. Also, the 't' can be omitted,
    so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used.
    In Python 2, the 't' and 'b' characters are ignored.
    Append mode ('a', 'at', 'ab') is unavailable with BZ2 compression and
    will raise an error.
    compresslevel is the gzip compression level. It is not used for bz2 and xz.
    threads is the number of threads for pigz. If None, then the pigz default is used.
    """
    # Expand the shorthand text modes ('r' -> 'rt', etc.), then validate.
    if mode in ('r', 'w', 'a'):
        mode += 't'
    if mode not in ('rt', 'rb', 'wt', 'wb', 'at', 'ab'):
        raise ValueError("mode '{0}' not supported".format(mode))
    if not _PY3:
        # Python 2 ignores the 't'/'b' qualifier.
        mode = mode[0]
    filename = fspath(filename)
    if not isinstance(filename, basestring):
        raise ValueError("the filename must be a string")
    if compresslevel not in range(1, 10):
        raise ValueError("compresslevel must be between 1 and 9")
    # Dispatch on the filename: stdin/stdout marker first, then the
    # compression suffixes, falling through to a plain open().
    if filename == '-':
        return _open_stdin_or_out(mode)
    if filename.endswith('.bz2'):
        return _open_bz2(filename, mode)
    if filename.endswith('.xz'):
        return _open_xz(filename, mode)
    if filename.endswith('.gz'):
        return _open_gz(filename, mode, compresslevel, threads)
    # Python 2.6 and 2.7 have io.open, which we could use to make the returned
    # object consistent with the one returned in Python 3, but reading a file
    # with io.open() is 100 times slower (!) on Python 2.6, and still about
    # three times slower on Python 2.7 (tested with "for _ in io.open(path): pass")
    return open(filename, mode)
constant[
A replacement for the "open" function that can also open files that have
been compressed with gzip, bzip2 or xz. If the filename is '-', standard
output (mode 'w') or input (mode 'r') is returned.
The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2 and .xz is
xz/lzma.
When writing a gzip-compressed file, the following methods are tried in order to get the
best speed 1) using a pigz (parallel gzip) subprocess; 2) using a gzip subprocess;
3) gzip.open. A single gzip subprocess can be faster than gzip.open because it runs in a
separate process.
Uncompressed files are opened with the regular open().
mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. Also, the 't' can be omitted,
so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used.
In Python 2, the 't' and 'b' characters are ignored.
Append mode ('a', 'at', 'ab') is unavailable with BZ2 compression and
will raise an error.
compresslevel is the gzip compression level. It is not used for bz2 and xz.
threads is the number of threads for pigz. If None, then the pigz default is used.
]
if compare[name[mode] in tuple[[<ast.Constant object at 0x7da1b0401750>, <ast.Constant object at 0x7da1b0401b70>, <ast.Constant object at 0x7da1b0401930>]]] begin[:]
<ast.AugAssign object at 0x7da1b0403010>
if compare[name[mode] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0403ee0>, <ast.Constant object at 0x7da1b0402260>, <ast.Constant object at 0x7da1b04018a0>, <ast.Constant object at 0x7da1b0401120>, <ast.Constant object at 0x7da1b0400df0>, <ast.Constant object at 0x7da1b0403580>]]] begin[:]
<ast.Raise object at 0x7da1b0400310>
if <ast.UnaryOp object at 0x7da1b04010f0> begin[:]
variable[mode] assign[=] call[name[mode]][constant[0]]
variable[filename] assign[=] call[name[fspath], parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da1b0400ee0> begin[:]
<ast.Raise object at 0x7da1b0400520>
if compare[name[compresslevel] <ast.NotIn object at 0x7da2590d7190> call[name[range], parameter[constant[1], constant[10]]]] begin[:]
<ast.Raise object at 0x7da1b0401840>
if compare[name[filename] equal[==] constant[-]] begin[:]
return[call[name[_open_stdin_or_out], parameter[name[mode]]]] | keyword[def] identifier[xopen] ( identifier[filename] , identifier[mode] = literal[string] , identifier[compresslevel] = literal[int] , identifier[threads] = keyword[None] ):
literal[string]
keyword[if] identifier[mode] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[mode] += literal[string]
keyword[if] identifier[mode] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[if] keyword[not] identifier[_PY3] :
identifier[mode] = identifier[mode] [ literal[int] ]
identifier[filename] = identifier[fspath] ( identifier[filename] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[filename] , identifier[basestring] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[compresslevel] keyword[not] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[filename] == literal[string] :
keyword[return] identifier[_open_stdin_or_out] ( identifier[mode] )
keyword[elif] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[_open_bz2] ( identifier[filename] , identifier[mode] )
keyword[elif] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[_open_xz] ( identifier[filename] , identifier[mode] )
keyword[elif] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[_open_gz] ( identifier[filename] , identifier[mode] , identifier[compresslevel] , identifier[threads] )
keyword[else] :
keyword[return] identifier[open] ( identifier[filename] , identifier[mode] ) | def xopen(filename, mode='r', compresslevel=6, threads=None):
"""
A replacement for the "open" function that can also open files that have
been compressed with gzip, bzip2 or xz. If the filename is '-', standard
output (mode 'w') or input (mode 'r') is returned.
The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2 and .xz is
xz/lzma.
When writing a gzip-compressed file, the following methods are tried in order to get the
best speed 1) using a pigz (parallel gzip) subprocess; 2) using a gzip subprocess;
3) gzip.open. A single gzip subprocess can be faster than gzip.open because it runs in a
separate process.
Uncompressed files are opened with the regular open().
mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. Also, the 't' can be omitted,
so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used.
In Python 2, the 't' and 'b' characters are ignored.
Append mode ('a', 'at', 'ab') is unavailable with BZ2 compression and
will raise an error.
compresslevel is the gzip compression level. It is not used for bz2 and xz.
threads is the number of threads for pigz. If None, then the pigz default is used.
"""
if mode in ('r', 'w', 'a'):
mode += 't' # depends on [control=['if'], data=['mode']]
if mode not in ('rt', 'rb', 'wt', 'wb', 'at', 'ab'):
raise ValueError("mode '{0}' not supported".format(mode)) # depends on [control=['if'], data=['mode']]
if not _PY3:
mode = mode[0] # depends on [control=['if'], data=[]]
filename = fspath(filename)
if not isinstance(filename, basestring):
raise ValueError('the filename must be a string') # depends on [control=['if'], data=[]]
if compresslevel not in range(1, 10):
raise ValueError('compresslevel must be between 1 and 9') # depends on [control=['if'], data=[]]
if filename == '-':
return _open_stdin_or_out(mode) # depends on [control=['if'], data=[]]
elif filename.endswith('.bz2'):
return _open_bz2(filename, mode) # depends on [control=['if'], data=[]]
elif filename.endswith('.xz'):
return _open_xz(filename, mode) # depends on [control=['if'], data=[]]
elif filename.endswith('.gz'):
return _open_gz(filename, mode, compresslevel, threads) # depends on [control=['if'], data=[]]
else:
# Python 2.6 and 2.7 have io.open, which we could use to make the returned
# object consistent with the one returned in Python 3, but reading a file
# with io.open() is 100 times slower (!) on Python 2.6, and still about
# three times slower on Python 2.7 (tested with "for _ in io.open(path): pass")
return open(filename, mode) |
def _mixup( self, ps ):
"""Private method to mix up a list of values in-place using a Fisher-Yates
shuffle (see https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).
:param ps: the array
:returns: the array, shuffled in-place"""
for i in range(len(ps) - 1, 0, -1):
j = int(numpy.random.random() * i)
temp = ps[i]
ps[i] = ps[j]
ps[j] = temp
return ps | def function[_mixup, parameter[self, ps]]:
constant[Private method to mix up a list of values in-place using a Fisher-Yates
shuffle (see https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).
:param ps: the array
:returns: the array, shuffled in-place]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[ps]]] - constant[1]], constant[0], <ast.UnaryOp object at 0x7da1b0a831c0>]]] begin[:]
variable[j] assign[=] call[name[int], parameter[binary_operation[call[name[numpy].random.random, parameter[]] * name[i]]]]
variable[temp] assign[=] call[name[ps]][name[i]]
call[name[ps]][name[i]] assign[=] call[name[ps]][name[j]]
call[name[ps]][name[j]] assign[=] name[temp]
return[name[ps]] | keyword[def] identifier[_mixup] ( identifier[self] , identifier[ps] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[ps] )- literal[int] , literal[int] ,- literal[int] ):
identifier[j] = identifier[int] ( identifier[numpy] . identifier[random] . identifier[random] ()* identifier[i] )
identifier[temp] = identifier[ps] [ identifier[i] ]
identifier[ps] [ identifier[i] ]= identifier[ps] [ identifier[j] ]
identifier[ps] [ identifier[j] ]= identifier[temp]
keyword[return] identifier[ps] | def _mixup(self, ps):
"""Private method to mix up a list of values in-place using a Fisher-Yates
shuffle (see https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).
:param ps: the array
:returns: the array, shuffled in-place"""
for i in range(len(ps) - 1, 0, -1):
j = int(numpy.random.random() * i)
temp = ps[i]
ps[i] = ps[j]
ps[j] = temp # depends on [control=['for'], data=['i']]
return ps |
def from_dict(self, description):
    """Configures the task store to be the task_store described
    in description"""
    # The description must belong to this very store.
    assert self.ident == description['ident']
    self.indices = description['indices']
    self.partitions = description['partitions']
constant[Configures the task store to be the task_store described
in description]
assert[compare[name[self].ident equal[==] call[name[description]][constant[ident]]]]
name[self].partitions assign[=] call[name[description]][constant[partitions]]
name[self].indices assign[=] call[name[description]][constant[indices]] | keyword[def] identifier[from_dict] ( identifier[self] , identifier[description] ):
literal[string]
keyword[assert] ( identifier[self] . identifier[ident] == identifier[description] [ literal[string] ])
identifier[self] . identifier[partitions] = identifier[description] [ literal[string] ]
identifier[self] . identifier[indices] = identifier[description] [ literal[string] ] | def from_dict(self, description):
"""Configures the task store to be the task_store described
in description"""
assert self.ident == description['ident']
self.partitions = description['partitions']
self.indices = description['indices'] |
def print_help(self, prog_name, subcommand):
    """
    Print the help message for this command, derived from
    ``self.usage()``.
    """
    # Build the argument parser for this subcommand and let it render help.
    self.create_parser(prog_name, subcommand).print_help()
constant[
Print the help message for this command, derived from
``self.usage()``.
]
variable[parser] assign[=] call[name[self].create_parser, parameter[name[prog_name], name[subcommand]]]
call[name[parser].print_help, parameter[]] | keyword[def] identifier[print_help] ( identifier[self] , identifier[prog_name] , identifier[subcommand] ):
literal[string]
identifier[parser] = identifier[self] . identifier[create_parser] ( identifier[prog_name] , identifier[subcommand] )
identifier[parser] . identifier[print_help] () | def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help() |
def solve_gcp(V, E):
    """solve_gcp -- solve the graph coloring problem with bisection and fixed-k model
    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph
    Returns tuple with number of colors used, and dictionary mapping colors to vertices
    """
    lower, upper = 0, len(V)
    color = {}
    # Bisect on the number of colors K until the two bounds are adjacent.
    while upper - lower > 1:
        K = int((upper + lower) / 2)
        model = gcp_fixed_k(V, E, K)
        # Stop as soon as a zero-conflict coloring is proven feasible.
        model.setObjlimit(0.1)
        model.optimize()
        if model.getStatus() == "optimal":
            x, z = model.data
            # A K-coloring exists: record each vertex's assigned color.
            for node in V:
                for k in range(K):
                    if model.getVal(x[node, k]) > 0.5:
                        color[node] = k
                        break
            upper = K
        else:
            # K colors are not enough; raise the lower bound.
            lower = K
    return upper, color
constant[solve_gcp -- solve the graph coloring problem with bisection and fixed-k model
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns tuple with number of colors used, and dictionary mapping colors to vertices
]
variable[LB] assign[=] constant[0]
variable[UB] assign[=] call[name[len], parameter[name[V]]]
variable[color] assign[=] dictionary[[], []]
while compare[binary_operation[name[UB] - name[LB]] greater[>] constant[1]] begin[:]
variable[K] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[UB] + name[LB]] / constant[2]]]]
variable[gcp] assign[=] call[name[gcp_fixed_k], parameter[name[V], name[E], name[K]]]
call[name[gcp].setObjlimit, parameter[constant[0.1]]]
call[name[gcp].optimize, parameter[]]
variable[status] assign[=] call[name[gcp].getStatus, parameter[]]
if compare[name[status] equal[==] constant[optimal]] begin[:]
<ast.Tuple object at 0x7da1b17f7370> assign[=] name[gcp].data
for taget[name[i]] in starred[name[V]] begin[:]
for taget[name[k]] in starred[call[name[range], parameter[name[K]]]] begin[:]
if compare[call[name[gcp].getVal, parameter[call[name[x]][tuple[[<ast.Name object at 0x7da1b172e680>, <ast.Name object at 0x7da1b172e620>]]]]] greater[>] constant[0.5]] begin[:]
call[name[color]][name[i]] assign[=] name[k]
break
variable[UB] assign[=] name[K]
return[tuple[[<ast.Name object at 0x7da1b172d4b0>, <ast.Name object at 0x7da1b172e6b0>]]] | keyword[def] identifier[solve_gcp] ( identifier[V] , identifier[E] ):
literal[string]
identifier[LB] = literal[int]
identifier[UB] = identifier[len] ( identifier[V] )
identifier[color] ={}
keyword[while] identifier[UB] - identifier[LB] > literal[int] :
identifier[K] = identifier[int] (( identifier[UB] + identifier[LB] )/ literal[int] )
identifier[gcp] = identifier[gcp_fixed_k] ( identifier[V] , identifier[E] , identifier[K] )
identifier[gcp] . identifier[setObjlimit] ( literal[int] )
identifier[gcp] . identifier[optimize] ()
identifier[status] = identifier[gcp] . identifier[getStatus] ()
keyword[if] identifier[status] == literal[string] :
identifier[x] , identifier[z] = identifier[gcp] . identifier[data]
keyword[for] identifier[i] keyword[in] identifier[V] :
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] ):
keyword[if] identifier[gcp] . identifier[getVal] ( identifier[x] [ identifier[i] , identifier[k] ])> literal[int] :
identifier[color] [ identifier[i] ]= identifier[k]
keyword[break]
identifier[UB] = identifier[K]
keyword[else] :
identifier[LB] = identifier[K]
keyword[return] identifier[UB] , identifier[color] | def solve_gcp(V, E):
"""solve_gcp -- solve the graph coloring problem with bisection and fixed-k model
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns tuple with number of colors used, and dictionary mapping colors to vertices
"""
LB = 0
UB = len(V)
color = {}
while UB - LB > 1:
K = int((UB + LB) / 2)
gcp = gcp_fixed_k(V, E, K)
# gcp.Params.OutputFlag = 0 # silent mode
#gcp.Params.Cutoff = .1
gcp.setObjlimit(0.1)
gcp.optimize()
status = gcp.getStatus()
if status == 'optimal':
(x, z) = gcp.data
for i in V:
for k in range(K):
if gcp.getVal(x[i, k]) > 0.5:
color[i] = k
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=['i']]
# else:
# raise "undefined color for", i
UB = K # depends on [control=['if'], data=[]]
else:
LB = K # depends on [control=['while'], data=[]]
return (UB, color) |
def consume(self, cwd=None):
    """ Converts the lexer tokens into valid statements. This process
    also checks command syntax.

    :param cwd: unused in this method -- TODO confirm whether callers rely on it
    :returns: command templates built by ``ctf.get_command_templates``
    :raises ValueError: if the source contained no command lines
    """
    # First pass: parse the whole source, then normalise section names to
    # lowercase for case-insensitive lookup.
    # NOTE: .iteritems() means this code targets Python 2.
    first_pass = Grammar.overall.parseString(self.string)
    lowered = { key.lower(): val for key, val in first_pass.iteritems() }
    # Extract each section of the parsed input (missing sections yield []).
    self.commands = ['\n'.join(self._get('commands', lowered))]
    self.job_options = self._get('job_options', lowered)
    self.global_options = self._get('options', lowered)
    self.files = self._get('files', lowered)
    self.paths = self._get('paths', lowered)
    # Second pass: re-parse each section with its dedicated grammar rule.
    self.files = self._parse(self.files, Grammar.file, True)
    self.paths = self._parse(self.paths, Grammar.path, True)
    self.job_options = self._parse(self.job_options, Grammar.line)
    try:
        command_lines = self._parse(self.commands, Grammar.command_lines)[0]
    except IndexError:
        # An empty parse result means no commands were written at all.
        raise ValueError('Did you write any commands?')
    self.commands = []
    for command_line in command_lines:
        comments, command = command_line
        # Keep each command's comments paired with its fully parsed form.
        self.commands.append([comments.asList(),
                              self._parse([''.join(command)], Grammar.command)])
    self.job_options = [opt.asList() for opt in self.job_options]
    self.paths = ctf.get_paths(self.paths)
    self.files = ctf.get_files(self.files)
    # Reversed before template construction -- presumably so later
    # definitions take precedence; TODO confirm against
    # ctf.get_command_templates.
    self.paths.reverse()
    self.files.reverse()
    self.commands.reverse()
    return ctf.get_command_templates(self.commands, self.files[:],
                                     self.paths[:], self.job_options)
constant[ Converts the lexer tokens into valid statements. This process
also checks command syntax.
]
variable[first_pass] assign[=] call[name[Grammar].overall.parseString, parameter[name[self].string]]
variable[lowered] assign[=] <ast.DictComp object at 0x7da1b1ff0160>
name[self].commands assign[=] list[[<ast.Call object at 0x7da1b1ff0b50>]]
name[self].job_options assign[=] call[name[self]._get, parameter[constant[job_options], name[lowered]]]
name[self].global_options assign[=] call[name[self]._get, parameter[constant[options], name[lowered]]]
name[self].files assign[=] call[name[self]._get, parameter[constant[files], name[lowered]]]
name[self].paths assign[=] call[name[self]._get, parameter[constant[paths], name[lowered]]]
name[self].files assign[=] call[name[self]._parse, parameter[name[self].files, name[Grammar].file, constant[True]]]
name[self].paths assign[=] call[name[self]._parse, parameter[name[self].paths, name[Grammar].path, constant[True]]]
name[self].job_options assign[=] call[name[self]._parse, parameter[name[self].job_options, name[Grammar].line]]
<ast.Try object at 0x7da1b1ff0af0>
name[self].commands assign[=] list[[]]
for taget[name[command_line]] in starred[name[command_lines]] begin[:]
<ast.Tuple object at 0x7da1b1ff1bd0> assign[=] name[command_line]
call[name[self].commands.append, parameter[list[[<ast.Call object at 0x7da1b1f9cd00>, <ast.Call object at 0x7da1b1f9fbe0>]]]]
name[self].job_options assign[=] <ast.ListComp object at 0x7da1b1f9d000>
name[self].paths assign[=] call[name[ctf].get_paths, parameter[name[self].paths]]
name[self].files assign[=] call[name[ctf].get_files, parameter[name[self].files]]
call[name[self].paths.reverse, parameter[]]
call[name[self].files.reverse, parameter[]]
call[name[self].commands.reverse, parameter[]]
return[call[name[ctf].get_command_templates, parameter[name[self].commands, call[name[self].files][<ast.Slice object at 0x7da1b1e5bac0>], call[name[self].paths][<ast.Slice object at 0x7da1b1e5be20>], name[self].job_options]]] | keyword[def] identifier[consume] ( identifier[self] , identifier[cwd] = keyword[None] ):
literal[string]
identifier[first_pass] = identifier[Grammar] . identifier[overall] . identifier[parseString] ( identifier[self] . identifier[string] )
identifier[lowered] ={ identifier[key] . identifier[lower] (): identifier[val] keyword[for] identifier[key] , identifier[val] keyword[in] identifier[first_pass] . identifier[iteritems] ()}
identifier[self] . identifier[commands] =[ literal[string] . identifier[join] ( identifier[self] . identifier[_get] ( literal[string] , identifier[lowered] ))]
identifier[self] . identifier[job_options] = identifier[self] . identifier[_get] ( literal[string] , identifier[lowered] )
identifier[self] . identifier[global_options] = identifier[self] . identifier[_get] ( literal[string] , identifier[lowered] )
identifier[self] . identifier[files] = identifier[self] . identifier[_get] ( literal[string] , identifier[lowered] )
identifier[self] . identifier[paths] = identifier[self] . identifier[_get] ( literal[string] , identifier[lowered] )
identifier[self] . identifier[files] = identifier[self] . identifier[_parse] ( identifier[self] . identifier[files] , identifier[Grammar] . identifier[file] , keyword[True] )
identifier[self] . identifier[paths] = identifier[self] . identifier[_parse] ( identifier[self] . identifier[paths] , identifier[Grammar] . identifier[path] , keyword[True] )
identifier[self] . identifier[job_options] = identifier[self] . identifier[_parse] ( identifier[self] . identifier[job_options] , identifier[Grammar] . identifier[line] )
keyword[try] :
identifier[command_lines] = identifier[self] . identifier[_parse] ( identifier[self] . identifier[commands] , identifier[Grammar] . identifier[command_lines] )[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[commands] =[]
keyword[for] identifier[command_line] keyword[in] identifier[command_lines] :
identifier[comments] , identifier[command] = identifier[command_line]
identifier[self] . identifier[commands] . identifier[append] ([ identifier[comments] . identifier[asList] (),
identifier[self] . identifier[_parse] ([ literal[string] . identifier[join] ( identifier[command] )], identifier[Grammar] . identifier[command] )])
identifier[self] . identifier[job_options] =[ identifier[opt] . identifier[asList] () keyword[for] identifier[opt] keyword[in] identifier[self] . identifier[job_options] ]
identifier[self] . identifier[paths] = identifier[ctf] . identifier[get_paths] ( identifier[self] . identifier[paths] )
identifier[self] . identifier[files] = identifier[ctf] . identifier[get_files] ( identifier[self] . identifier[files] )
identifier[self] . identifier[paths] . identifier[reverse] ()
identifier[self] . identifier[files] . identifier[reverse] ()
identifier[self] . identifier[commands] . identifier[reverse] ()
keyword[return] identifier[ctf] . identifier[get_command_templates] ( identifier[self] . identifier[commands] , identifier[self] . identifier[files] [:],
identifier[self] . identifier[paths] [:], identifier[self] . identifier[job_options] ) | def consume(self, cwd=None):
""" Converts the lexer tokens into valid statements. This process
also checks command syntax.
"""
first_pass = Grammar.overall.parseString(self.string)
lowered = {key.lower(): val for (key, val) in first_pass.iteritems()}
self.commands = ['\n'.join(self._get('commands', lowered))]
self.job_options = self._get('job_options', lowered)
self.global_options = self._get('options', lowered)
self.files = self._get('files', lowered)
self.paths = self._get('paths', lowered)
self.files = self._parse(self.files, Grammar.file, True)
self.paths = self._parse(self.paths, Grammar.path, True)
self.job_options = self._parse(self.job_options, Grammar.line)
try:
command_lines = self._parse(self.commands, Grammar.command_lines)[0] # depends on [control=['try'], data=[]]
except IndexError:
raise ValueError('Did you write any commands?') # depends on [control=['except'], data=[]]
self.commands = []
for command_line in command_lines:
(comments, command) = command_line
self.commands.append([comments.asList(), self._parse([''.join(command)], Grammar.command)]) # depends on [control=['for'], data=['command_line']]
self.job_options = [opt.asList() for opt in self.job_options]
self.paths = ctf.get_paths(self.paths)
self.files = ctf.get_files(self.files)
self.paths.reverse()
self.files.reverse()
self.commands.reverse()
return ctf.get_command_templates(self.commands, self.files[:], self.paths[:], self.job_options) |
def hydrate(self, iterator):
    """
    Pass in an iterator of tweet ids and get back an iterator for the
    decoded JSON for each corresponding tweet.
    """
    url = "https://api.twitter.com/1.1/statuses/lookup.json"

    def lookup(batch):
        # Fetch one batch (max 100 ids) and return the tweets sorted by
        # id_str. The previous version only sorted full batches and left
        # the final partial batch unsorted -- now both paths agree.
        log.info("hydrating %s ids", len(batch))
        resp = self.post(url, data={
            "id": ','.join(batch),
            "include_ext_alt_text": 'true'
        })
        tweets = resp.json()
        tweets.sort(key=lambda t: t['id_str'])
        return tweets

    ids = []
    # The lookup endpoint accepts at most 100 tweet ids per request.
    for tweet_id in iterator:
        # str() accepts int ids; strip() drops a trailing newline when ids
        # come from a file.
        ids.append(str(tweet_id).strip())
        if len(ids) == 100:
            for tweet in lookup(ids):
                yield tweet
            ids = []
    # Hydrate any remaining ids in a final, smaller batch.
    if ids:
        for tweet in lookup(ids):
            yield tweet
constant[
Pass in an iterator of tweet ids and get back an iterator for the
decoded JSON for each corresponding tweet.
]
variable[ids] assign[=] list[[]]
variable[url] assign[=] constant[https://api.twitter.com/1.1/statuses/lookup.json]
for taget[name[tweet_id]] in starred[name[iterator]] begin[:]
variable[tweet_id] assign[=] call[name[str], parameter[name[tweet_id]]]
variable[tweet_id] assign[=] call[name[tweet_id].strip, parameter[]]
call[name[ids].append, parameter[name[tweet_id]]]
if compare[call[name[len], parameter[name[ids]]] equal[==] constant[100]] begin[:]
call[name[log].info, parameter[constant[hydrating %s ids], call[name[len], parameter[name[ids]]]]]
variable[resp] assign[=] call[name[self].post, parameter[name[url]]]
variable[tweets] assign[=] call[name[resp].json, parameter[]]
call[name[tweets].sort, parameter[]]
for taget[name[tweet]] in starred[name[tweets]] begin[:]
<ast.Yield object at 0x7da20c6a94e0>
variable[ids] assign[=] list[[]]
if compare[call[name[len], parameter[name[ids]]] greater[>] constant[0]] begin[:]
call[name[log].info, parameter[constant[hydrating %s], name[ids]]]
variable[resp] assign[=] call[name[self].post, parameter[name[url]]]
for taget[name[tweet]] in starred[call[name[resp].json, parameter[]]] begin[:]
<ast.Yield object at 0x7da20c6aac80> | keyword[def] identifier[hydrate] ( identifier[self] , identifier[iterator] ):
literal[string]
identifier[ids] =[]
identifier[url] = literal[string]
keyword[for] identifier[tweet_id] keyword[in] identifier[iterator] :
identifier[tweet_id] = identifier[str] ( identifier[tweet_id] )
identifier[tweet_id] = identifier[tweet_id] . identifier[strip] ()
identifier[ids] . identifier[append] ( identifier[tweet_id] )
keyword[if] identifier[len] ( identifier[ids] )== literal[int] :
identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[ids] ))
identifier[resp] = identifier[self] . identifier[post] ( identifier[url] , identifier[data] ={
literal[string] : literal[string] . identifier[join] ( identifier[ids] ),
literal[string] : literal[string]
})
identifier[tweets] = identifier[resp] . identifier[json] ()
identifier[tweets] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[string] ])
keyword[for] identifier[tweet] keyword[in] identifier[tweets] :
keyword[yield] identifier[tweet]
identifier[ids] =[]
keyword[if] identifier[len] ( identifier[ids] )> literal[int] :
identifier[log] . identifier[info] ( literal[string] , identifier[ids] )
identifier[resp] = identifier[self] . identifier[post] ( identifier[url] , identifier[data] ={
literal[string] : literal[string] . identifier[join] ( identifier[ids] ),
literal[string] : literal[string]
})
keyword[for] identifier[tweet] keyword[in] identifier[resp] . identifier[json] ():
keyword[yield] identifier[tweet] | def hydrate(self, iterator):
"""
Pass in an iterator of tweet ids and get back an iterator for the
decoded JSON for each corresponding tweet.
"""
ids = []
url = 'https://api.twitter.com/1.1/statuses/lookup.json'
# lookup 100 tweets at a time
for tweet_id in iterator:
tweet_id = str(tweet_id)
tweet_id = tweet_id.strip() # remove new line if present
ids.append(tweet_id)
if len(ids) == 100:
log.info('hydrating %s ids', len(ids))
resp = self.post(url, data={'id': ','.join(ids), 'include_ext_alt_text': 'true'})
tweets = resp.json()
tweets.sort(key=lambda t: t['id_str'])
for tweet in tweets:
yield tweet # depends on [control=['for'], data=['tweet']]
ids = [] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tweet_id']]
# hydrate any remaining ones
if len(ids) > 0:
log.info('hydrating %s', ids)
resp = self.post(url, data={'id': ','.join(ids), 'include_ext_alt_text': 'true'})
for tweet in resp.json():
yield tweet # depends on [control=['for'], data=['tweet']] # depends on [control=['if'], data=[]] |
def usermacro_delete(macroids, **kwargs):
    '''
    Delete host usermacros.
    :param macroids: macroids of the host usermacros
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    return: IDs of the deleted host usermacro.
    CLI Example:
    .. code-block:: bash
        salt '*' zabbix.usermacro_delete 21
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'usermacro.delete'
            # The API expects a list of macro ids; wrap a single id.
            if isinstance(macroids, list):
                params = macroids
            else:
                params = [macroids]
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['hostmacroids']
        else:
            # No connection args: jump to the KeyError handler below and
            # return the empty dict (this module's convention).
            raise KeyError
    except KeyError:
        # Reached either via the explicit raise above or when the API
        # response lacks 'result'/'hostmacroids'; return whatever ret holds.
        return ret
constant[
Delete host usermacros.
:param macroids: macroids of the host usermacros
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: IDs of the deleted host usermacro.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_delete 21
]
variable[conn_args] assign[=] call[name[_login], parameter[]]
variable[ret] assign[=] dictionary[[], []]
<ast.Try object at 0x7da204566470> | keyword[def] identifier[usermacro_delete] ( identifier[macroids] ,** identifier[kwargs] ):
literal[string]
identifier[conn_args] = identifier[_login] (** identifier[kwargs] )
identifier[ret] ={}
keyword[try] :
keyword[if] identifier[conn_args] :
identifier[method] = literal[string]
keyword[if] identifier[isinstance] ( identifier[macroids] , identifier[list] ):
identifier[params] = identifier[macroids]
keyword[else] :
identifier[params] =[ identifier[macroids] ]
identifier[ret] = identifier[_query] ( identifier[method] , identifier[params] , identifier[conn_args] [ literal[string] ], identifier[conn_args] [ literal[string] ])
keyword[return] identifier[ret] [ literal[string] ][ literal[string] ]
keyword[else] :
keyword[raise] identifier[KeyError]
keyword[except] identifier[KeyError] :
keyword[return] identifier[ret] | def usermacro_delete(macroids, **kwargs):
"""
Delete host usermacros.
:param macroids: macroids of the host usermacros
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: IDs of the deleted host usermacro.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_delete 21
"""
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'usermacro.delete'
if isinstance(macroids, list):
params = macroids # depends on [control=['if'], data=[]]
else:
params = [macroids]
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['hostmacroids'] # depends on [control=['if'], data=[]]
else:
raise KeyError # depends on [control=['try'], data=[]]
except KeyError:
return ret # depends on [control=['except'], data=[]] |
def checkout_branch(self, branch_name):
    """ git checkout -b <branch_name> """
    target = self.get_cherry_pick_branch(branch_name)
    cmd = ["git", "checkout", "-b", target, f"{self.upstream}/{branch_name}"]
    try:
        self.run_cmd(cmd)
    except subprocess.CalledProcessError as err:
        # Surface the git failure to the user, then re-raise as a
        # domain-specific exception for callers to handle.
        message = f"Error checking out the branch {target}."
        click.echo(message)
        click.echo(err.output)
        raise BranchCheckoutException(message)
constant[ git checkout -b <branch_name> ]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b230a4a0>, <ast.Constant object at 0x7da1b230a440>, <ast.Constant object at 0x7da1b2308d00>, <ast.Call object at 0x7da1b2308cd0>, <ast.JoinedStr object at 0x7da1b230b5e0>]]
<ast.Try object at 0x7da1b230ae30> | keyword[def] identifier[checkout_branch] ( identifier[self] , identifier[branch_name] ):
literal[string]
identifier[cmd] =[
literal[string] ,
literal[string] ,
literal[string] ,
identifier[self] . identifier[get_cherry_pick_branch] ( identifier[branch_name] ),
literal[string] ,
]
keyword[try] :
identifier[self] . identifier[run_cmd] ( identifier[cmd] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[err] :
identifier[click] . identifier[echo] (
literal[string]
)
identifier[click] . identifier[echo] ( identifier[err] . identifier[output] )
keyword[raise] identifier[BranchCheckoutException] (
literal[string]
) | def checkout_branch(self, branch_name):
""" git checkout -b <branch_name> """
cmd = ['git', 'checkout', '-b', self.get_cherry_pick_branch(branch_name), f'{self.upstream}/{branch_name}']
try:
self.run_cmd(cmd) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as err:
click.echo(f'Error checking out the branch {self.get_cherry_pick_branch(branch_name)}.')
click.echo(err.output)
raise BranchCheckoutException(f'Error checking out the branch {self.get_cherry_pick_branch(branch_name)}.') # depends on [control=['except'], data=['err']] |
def get(self, key):
    "Find the first value within the tree which has the key."
    # Membership test on the mapping itself -- `key in self.keys()` built
    # an unnecessary view and is the non-idiomatic form of the same check.
    if key in self:
        return self[key]
    # Depth-first search through any nested mapping-like values.
    for v in self.values():
        if hasattr(v, 'get'):
            res = v.get(key)
            # NOTE: a stored value of None is indistinguishable from "not
            # found"; use get_path(s) when that ambiguity matters.
            if res is not None:
                return res
    return None
constant[Find the first value within the tree which has the key.]
if compare[name[key] in call[name[self].keys, parameter[]]] begin[:]
return[call[name[self]][name[key]]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[keys] ():
keyword[return] identifier[self] [ identifier[key] ]
keyword[else] :
identifier[res] = keyword[None]
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[values] ():
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[res] = identifier[v] . identifier[get] ( identifier[key] )
keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] :
keyword[break]
keyword[return] identifier[res] | def get(self, key):
"""Find the first value within the tree which has the key."""
if key in self.keys():
return self[key] # depends on [control=['if'], data=['key']]
else:
res = None
for v in self.values():
# This could get weird if the actual expected returned value
# is None, especially in teh case of overlap. Any ambiguity
# would be resolved by get_path(s).
if hasattr(v, 'get'):
res = v.get(key) # depends on [control=['if'], data=[]]
if res is not None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
return res |
def _encode_chunk(self, data, index):
'''
gets a chunk from the input data, converts it to a number and
encodes that number
'''
chunk = self._get_chunk(data, index)
return self._encode_long(self._chunk_to_long(chunk)) | def function[_encode_chunk, parameter[self, data, index]]:
constant[
gets a chunk from the input data, converts it to a number and
encodes that number
]
variable[chunk] assign[=] call[name[self]._get_chunk, parameter[name[data], name[index]]]
return[call[name[self]._encode_long, parameter[call[name[self]._chunk_to_long, parameter[name[chunk]]]]]] | keyword[def] identifier[_encode_chunk] ( identifier[self] , identifier[data] , identifier[index] ):
literal[string]
identifier[chunk] = identifier[self] . identifier[_get_chunk] ( identifier[data] , identifier[index] )
keyword[return] identifier[self] . identifier[_encode_long] ( identifier[self] . identifier[_chunk_to_long] ( identifier[chunk] )) | def _encode_chunk(self, data, index):
"""
gets a chunk from the input data, converts it to a number and
encodes that number
"""
chunk = self._get_chunk(data, index)
return self._encode_long(self._chunk_to_long(chunk)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.