code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _set_stream_parameters(self, **kwargs):
    """
    Sets the stream parameters which are expected to be declared
    constant.

    :param kwargs: parameter-name/value pairs forwarded verbatim to
        ``self.param.set_param``.
    """
    # Constant parameters normally reject assignment; util.disable_constant
    # presumably lifts that flag for the duration of the block so the
    # batched update can go through -- confirm against util's definition.
    with util.disable_constant(self):
        self.param.set_param(**kwargs)
constant[
Sets the stream parameters which are expected to be declared
constant.
]
with call[name[util].disable_constant, parameter[name[self]]] begin[:]
call[name[self].param.set_param, parameter[]] | keyword[def] identifier[_set_stream_parameters] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[util] . identifier[disable_constant] ( identifier[self] ):
identifier[self] . identifier[param] . identifier[set_param] (** identifier[kwargs] ) | def _set_stream_parameters(self, **kwargs):
"""
Sets the stream parameters which are expected to be declared
constant.
"""
with util.disable_constant(self):
self.param.set_param(**kwargs) # depends on [control=['with'], data=[]] |
def child_task(self):
    '''child process - this holds all the GUI elements'''
    # Close the parent-side pipe handles in this process (per the
    # docstring, this runs in the forked GUI child), leaving only the
    # child-side ends open here.
    self.parent_pipe_send.close()
    self.parent_pipe_recv.close()
    # Imports are deliberately deferred until after the fork so wx is
    # only initialised inside the GUI child process.
    import wx_processguard
    from wx_loader import wx
    from wxconsole_ui import ConsoleFrame
    app = wx.App(False)
    app.frame = ConsoleFrame(state=self, title=self.title)
    app.frame.Show()
    # Blocks until the GUI window is closed.
    app.MainLoop()
constant[child process - this holds all the GUI elements]
call[name[self].parent_pipe_send.close, parameter[]]
call[name[self].parent_pipe_recv.close, parameter[]]
import module[wx_processguard]
from relative_module[wx_loader] import module[wx]
from relative_module[wxconsole_ui] import module[ConsoleFrame]
variable[app] assign[=] call[name[wx].App, parameter[constant[False]]]
name[app].frame assign[=] call[name[ConsoleFrame], parameter[]]
call[name[app].frame.Show, parameter[]]
call[name[app].MainLoop, parameter[]] | keyword[def] identifier[child_task] ( identifier[self] ):
literal[string]
identifier[self] . identifier[parent_pipe_send] . identifier[close] ()
identifier[self] . identifier[parent_pipe_recv] . identifier[close] ()
keyword[import] identifier[wx_processguard]
keyword[from] identifier[wx_loader] keyword[import] identifier[wx]
keyword[from] identifier[wxconsole_ui] keyword[import] identifier[ConsoleFrame]
identifier[app] = identifier[wx] . identifier[App] ( keyword[False] )
identifier[app] . identifier[frame] = identifier[ConsoleFrame] ( identifier[state] = identifier[self] , identifier[title] = identifier[self] . identifier[title] )
identifier[app] . identifier[frame] . identifier[Show] ()
identifier[app] . identifier[MainLoop] () | def child_task(self):
"""child process - this holds all the GUI elements"""
self.parent_pipe_send.close()
self.parent_pipe_recv.close()
import wx_processguard
from wx_loader import wx
from wxconsole_ui import ConsoleFrame
app = wx.App(False)
app.frame = ConsoleFrame(state=self, title=self.title)
app.frame.Show()
app.MainLoop() |
def group_and_sort_nodes(self):
    """
    Groups and then sorts the nodes according to the criteria passed into
    the Plot constructor.

    Depending on which of ``node_grouping`` / ``node_order`` are set, the
    node list is sorted alphabetically by the grouping attribute, by the
    ordering attribute, or by both; with ``group_order == "default"`` the
    groups keep their order of first appearance in the graph. The result
    is stored on ``self.nodes``.
    """
    if self.node_grouping and not self.node_order:
        if self.group_order == "alphabetically":
            # Single sort on the grouping attribute.
            self.nodes = [
                n
                for n, d in sorted(
                    self.graph.nodes(data=True),
                    key=lambda x: x[1][self.node_grouping],
                )
            ]
        elif self.group_order == "default":
            # Groups in order of first appearance; nodes keep graph order.
            self.nodes = self._nodes_in_group_appearance_order(sort_key=None)
    elif self.node_order and not self.node_grouping:
        # No grouping: a single sort on the ordering attribute.
        self.nodes = [
            n
            for n, _ in sorted(
                self.graph.nodes(data=True),
                key=lambda x: x[1][self.node_order],
            )
        ]
    elif self.node_grouping and self.node_order:
        if self.group_order == "alphabetically":
            # Sort by group first, then by the ordering attribute within
            # each group.
            self.nodes = [
                n
                for n, d in sorted(
                    self.graph.nodes(data=True),
                    key=lambda x: (
                        x[1][self.node_grouping],
                        x[1][self.node_order],
                    ),
                )
            ]
        elif self.group_order == "default":
            # Groups in order of first appearance; nodes sorted by the
            # ordering attribute within each group.
            self.nodes = self._nodes_in_group_appearance_order(
                sort_key=lambda x: x[1][self.node_order]
            )

def _nodes_in_group_appearance_order(self, sort_key=None):
    """Return nodes bucketed by first appearance of their group value.

    :param sort_key: optional ``key`` callable applied to the
        ``(node, data)`` pairs before bucketing; ``None`` keeps graph
        order.
    :returns: flat list of node identifiers, group by group.
    """
    node_data = list(self.graph.nodes(data=True))
    # Group labels in order of first appearance in the graph.
    grp_name = list(
        unique_everseen(d[self.node_grouping] for _, d in node_data)
    )
    if sort_key is not None:
        node_data = sorted(node_data, key=sort_key)
    nodes = []
    for key in grp_name:
        # BUG FIX: compare against the grouping attribute itself. The
        # previous ``key in d.values()`` test could wrongly match a node
        # whose *unrelated* attribute happened to equal the group label.
        nodes.extend(
            [n for n, d in node_data if d[self.node_grouping] == key]
        )
    return nodes
constant[
Groups and then sorts the nodes according to the criteria passed into
the Plot constructor.
]
if <ast.BoolOp object at 0x7da1b1eea3b0> begin[:]
if compare[name[self].group_order equal[==] constant[alphabetically]] begin[:]
name[self].nodes assign[=] <ast.ListComp object at 0x7da1b1ee92a0> | keyword[def] identifier[group_and_sort_nodes] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[node_grouping] keyword[and] keyword[not] identifier[self] . identifier[node_order] :
keyword[if] identifier[self] . identifier[group_order] == literal[string] :
identifier[self] . identifier[nodes] =[
identifier[n]
keyword[for] identifier[n] , identifier[d] keyword[in] identifier[sorted] (
identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] ),
identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ][ identifier[self] . identifier[node_grouping] ],
)
]
keyword[elif] identifier[self] . identifier[group_order] == literal[string] :
identifier[grp] =[
identifier[d] [ identifier[self] . identifier[node_grouping] ]
keyword[for] identifier[_] , identifier[d] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] )
]
identifier[grp_name] = identifier[list] ( identifier[unique_everseen] ( identifier[grp] ))
identifier[nodes] =[]
keyword[for] identifier[key] keyword[in] identifier[grp_name] :
identifier[nodes] . identifier[extend] (
[
identifier[n]
keyword[for] identifier[n] , identifier[d] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] )
keyword[if] identifier[key] keyword[in] identifier[d] . identifier[values] ()
]
)
identifier[self] . identifier[nodes] = identifier[nodes]
keyword[elif] identifier[self] . identifier[node_order] keyword[and] keyword[not] identifier[self] . identifier[node_grouping] :
identifier[self] . identifier[nodes] =[
identifier[n]
keyword[for] identifier[n] , identifier[_] keyword[in] identifier[sorted] (
identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] ),
identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ][ identifier[self] . identifier[node_order] ],
)
]
keyword[elif] identifier[self] . identifier[node_grouping] keyword[and] identifier[self] . identifier[node_order] :
keyword[if] identifier[self] . identifier[group_order] == literal[string] :
identifier[self] . identifier[nodes] =[
identifier[n]
keyword[for] identifier[n] , identifier[d] keyword[in] identifier[sorted] (
identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] ),
identifier[key] = keyword[lambda] identifier[x] :(
identifier[x] [ literal[int] ][ identifier[self] . identifier[node_grouping] ],
identifier[x] [ literal[int] ][ identifier[self] . identifier[node_order] ],
),
)
]
keyword[elif] identifier[self] . identifier[group_order] == literal[string] :
identifier[grp] =[
identifier[d] [ identifier[self] . identifier[node_grouping] ]
keyword[for] identifier[_] , identifier[d] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] )
]
identifier[grp_name] = identifier[list] ( identifier[unique_everseen] ( identifier[grp] ))
identifier[nodes] =[]
keyword[for] identifier[key] keyword[in] identifier[grp_name] :
identifier[nodes] . identifier[extend] (
[
identifier[n]
keyword[for] identifier[n] , identifier[d] keyword[in] identifier[sorted] (
identifier[self] . identifier[graph] . identifier[nodes] ( identifier[data] = keyword[True] ),
identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ][ identifier[self] . identifier[node_order] ],
)
keyword[if] identifier[key] keyword[in] identifier[d] . identifier[values] ()
]
)
identifier[self] . identifier[nodes] = identifier[nodes] | def group_and_sort_nodes(self):
"""
Groups and then sorts the nodes according to the criteria passed into
the Plot constructor.
"""
if self.node_grouping and (not self.node_order):
if self.group_order == 'alphabetically':
self.nodes = [n for (n, d) in sorted(self.graph.nodes(data=True), key=lambda x: x[1][self.node_grouping])] # depends on [control=['if'], data=[]]
elif self.group_order == 'default':
grp = [d[self.node_grouping] for (_, d) in self.graph.nodes(data=True)]
grp_name = list(unique_everseen(grp))
nodes = []
for key in grp_name:
nodes.extend([n for (n, d) in self.graph.nodes(data=True) if key in d.values()]) # depends on [control=['for'], data=['key']]
self.nodes = nodes # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.node_order and (not self.node_grouping):
self.nodes = [n for (n, _) in sorted(self.graph.nodes(data=True), key=lambda x: x[1][self.node_order])] # depends on [control=['if'], data=[]]
elif self.node_grouping and self.node_order:
if self.group_order == 'alphabetically':
self.nodes = [n for (n, d) in sorted(self.graph.nodes(data=True), key=lambda x: (x[1][self.node_grouping], x[1][self.node_order]))] # depends on [control=['if'], data=[]]
elif self.group_order == 'default':
grp = [d[self.node_grouping] for (_, d) in self.graph.nodes(data=True)]
grp_name = list(unique_everseen(grp))
nodes = []
for key in grp_name:
nodes.extend([n for (n, d) in sorted(self.graph.nodes(data=True), key=lambda x: x[1][self.node_order]) if key in d.values()]) # depends on [control=['for'], data=['key']]
self.nodes = nodes # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to.
    :rtype: WizardStep instance or None
    """
    parent = self.parent
    # Raster layers carry no attribute table, so every field-selection
    # step is skipped and we jump straight to the source step.
    if parent.get_layer_geometry_key() == layer_geometry_raster['key']:
        return parent.step_kw_source
    layer_purpose = parent.step_kw_purpose.selected_purpose()
    if layer_purpose['key'] == layer_purpose_aggregation['key']:
        # Aggregation layers have no subcategory.
        subcategory = {'key': None}
    else:
        subcategory = parent.step_kw_subcategory.selected_subcategory()
    # Offer the InaSAFE fields step when non-compulsory fields apply to
    # this layer.
    non_compulsory_fields = get_non_compulsory_fields(
        layer_purpose['key'], subcategory['key'])
    if not skip_inasafe_field(parent.layer, non_compulsory_fields):
        return parent.step_kw_inasafe_fields
    # Otherwise offer the default-value step when any default InaSAFE
    # fields are defined for this purpose/subcategory.
    if get_fields(
            layer_purpose['key'],
            subcategory['key'],
            replace_null=True,
            in_group=False):
        return parent.step_kw_default_inasafe_fields
    # Any other case.
    return parent.step_kw_source
constant[Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
]
if compare[call[name[self].parent.get_layer_geometry_key, parameter[]] equal[==] call[name[layer_geometry_raster]][constant[key]]] begin[:]
return[name[self].parent.step_kw_source]
variable[layer_purpose] assign[=] call[name[self].parent.step_kw_purpose.selected_purpose, parameter[]]
if compare[call[name[layer_purpose]][constant[key]] not_equal[!=] call[name[layer_purpose_aggregation]][constant[key]]] begin[:]
variable[subcategory] assign[=] call[name[self].parent.step_kw_subcategory.selected_subcategory, parameter[]]
variable[non_compulsory_fields] assign[=] call[name[get_non_compulsory_fields], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
if <ast.UnaryOp object at 0x7da18ede4af0> begin[:]
return[name[self].parent.step_kw_inasafe_fields]
variable[default_inasafe_fields] assign[=] call[name[get_fields], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
if name[default_inasafe_fields] begin[:]
return[name[self].parent.step_kw_default_inasafe_fields]
return[name[self].parent.step_kw_source] | keyword[def] identifier[get_next_step] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] . identifier[get_layer_geometry_key] ()== identifier[layer_geometry_raster] [ literal[string] ]:
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_source]
identifier[layer_purpose] = identifier[self] . identifier[parent] . identifier[step_kw_purpose] . identifier[selected_purpose] ()
keyword[if] identifier[layer_purpose] [ literal[string] ]!= identifier[layer_purpose_aggregation] [ literal[string] ]:
identifier[subcategory] = identifier[self] . identifier[parent] . identifier[step_kw_subcategory] . identifier[selected_subcategory] ()
keyword[else] :
identifier[subcategory] ={ literal[string] : keyword[None] }
identifier[non_compulsory_fields] = identifier[get_non_compulsory_fields] (
identifier[layer_purpose] [ literal[string] ], identifier[subcategory] [ literal[string] ])
keyword[if] keyword[not] identifier[skip_inasafe_field] ( identifier[self] . identifier[parent] . identifier[layer] , identifier[non_compulsory_fields] ):
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_inasafe_fields]
identifier[default_inasafe_fields] = identifier[get_fields] (
identifier[layer_purpose] [ literal[string] ],
identifier[subcategory] [ literal[string] ],
identifier[replace_null] = keyword[True] ,
identifier[in_group] = keyword[False]
)
keyword[if] identifier[default_inasafe_fields] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_default_inasafe_fields]
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_source] | def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
"""
if self.parent.get_layer_geometry_key() == layer_geometry_raster['key']:
return self.parent.step_kw_source # depends on [control=['if'], data=[]]
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose['key'] != layer_purpose_aggregation['key']:
subcategory = self.parent.step_kw_subcategory.selected_subcategory() # depends on [control=['if'], data=[]]
else:
subcategory = {'key': None}
# Check if it can go to inasafe field step
non_compulsory_fields = get_non_compulsory_fields(layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, non_compulsory_fields):
return self.parent.step_kw_inasafe_fields # depends on [control=['if'], data=[]]
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(layer_purpose['key'], subcategory['key'], replace_null=True, in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields # depends on [control=['if'], data=[]]
# Any other case
return self.parent.step_kw_source |
def _draw(self, prev_angle = None, prev_length = None):
    """
    Draws a new length- and angle-difference pair and calculates
    length and angle absolutes matching the last saccade drawn.

    Parameters:
        prev_angle : float, optional
            The last angle that was drawn in the current trajectory
        prev_length : float, optional
            The last length that was drawn in the current trajectory

    Note: Either both prev_angle and prev_length have to be given
    or none; if only one parameter is given, it will be neglected.
    """
    if (prev_angle is None) or (prev_length is None):
        # First saccade of a trajectory: draw (length, angle) jointly from
        # the cumulative first-saccade histogram, unravelling the flat draw
        # index back into 2-D histogram coordinates.
        (length, angle)= np.unravel_index(self.drawFrom('self.firstLenAng_cumsum', self.getrand('self.firstLenAng_cumsum')),
                        self.firstLenAng_shape)
        # Re-centre the angle bin index around 0 (the histogram's middle
        # column corresponds to an angle difference of 0).
        angle = angle-((self.firstLenAng_shape[1]-1)/2)
        # Shift both by half a bin so values refer to bin centres, then
        # convert length from degrees to pixels.
        angle += 0.5
        length += 0.5
        length *= self.fm.pixels_per_degree
    else:
        # Subsequent saccade: condition the draw on the previous saccade
        # length, bucketed into degree-sized bins.
        ind = int(floor(prev_length/self.fm.pixels_per_degree))
        # Clamp the bin index into range, then walk down to the nearest
        # bin that actually has probability mass.
        # NOTE(review): if every lower bin were empty this would wrap to
        # negative indices (Python indexes from the end) -- presumably the
        # data guarantees a non-empty bin; confirm.
        while ind >= len(self.probability_cumsum):
            ind -= 1
        while not(self.probability_cumsum[ind]).any():
            ind -= 1
        # Joint draw of (length bin J, angle bin I) from the conditional
        # histogram for this length bin.
        J, I = np.unravel_index(self.drawFrom('self.probability_cumsum '+repr(ind),self.getrand('self.probability_cumsum '+repr(ind))),
                        self.full_H1[ind].shape)
        # Turn the angle-difference bin into an absolute angle relative to
        # the previous one; reshift presumably wraps the result into the
        # valid angular range -- confirm against its definition.
        angle = reshift((I-self.full_H1[ind].shape[1]/2) + prev_angle)
        angle += 0.5
        length = J+0.5
        length *= self.fm.pixels_per_degree
    return angle, length
constant[
Draws a new length- and angle-difference pair and calculates
length and angle absolutes matching the last saccade drawn.
Parameters:
prev_angle : float, optional
The last angle that was drawn in the current trajectory
prev_length : float, optional
The last length that was drawn in the current trajectory
Note: Either both prev_angle and prev_length have to be given
or none; if only one parameter is given, it will be neglected.
]
if <ast.BoolOp object at 0x7da1b11ed210> begin[:]
<ast.Tuple object at 0x7da1b11ed690> assign[=] call[name[np].unravel_index, parameter[call[name[self].drawFrom, parameter[constant[self.firstLenAng_cumsum], call[name[self].getrand, parameter[constant[self.firstLenAng_cumsum]]]]], name[self].firstLenAng_shape]]
variable[angle] assign[=] binary_operation[name[angle] - binary_operation[binary_operation[call[name[self].firstLenAng_shape][constant[1]] - constant[1]] / constant[2]]]
<ast.AugAssign object at 0x7da1b11ec9a0>
<ast.AugAssign object at 0x7da1b11ef6a0>
<ast.AugAssign object at 0x7da18f09fee0>
return[tuple[[<ast.Name object at 0x7da20c9906d0>, <ast.Name object at 0x7da20c991cf0>]]] | keyword[def] identifier[_draw] ( identifier[self] , identifier[prev_angle] = keyword[None] , identifier[prev_length] = keyword[None] ):
literal[string]
keyword[if] ( identifier[prev_angle] keyword[is] keyword[None] ) keyword[or] ( identifier[prev_length] keyword[is] keyword[None] ):
( identifier[length] , identifier[angle] )= identifier[np] . identifier[unravel_index] ( identifier[self] . identifier[drawFrom] ( literal[string] , identifier[self] . identifier[getrand] ( literal[string] )),
identifier[self] . identifier[firstLenAng_shape] )
identifier[angle] = identifier[angle] -(( identifier[self] . identifier[firstLenAng_shape] [ literal[int] ]- literal[int] )/ literal[int] )
identifier[angle] += literal[int]
identifier[length] += literal[int]
identifier[length] *= identifier[self] . identifier[fm] . identifier[pixels_per_degree]
keyword[else] :
identifier[ind] = identifier[int] ( identifier[floor] ( identifier[prev_length] / identifier[self] . identifier[fm] . identifier[pixels_per_degree] ))
keyword[while] identifier[ind] >= identifier[len] ( identifier[self] . identifier[probability_cumsum] ):
identifier[ind] -= literal[int]
keyword[while] keyword[not] ( identifier[self] . identifier[probability_cumsum] [ identifier[ind] ]). identifier[any] ():
identifier[ind] -= literal[int]
identifier[J] , identifier[I] = identifier[np] . identifier[unravel_index] ( identifier[self] . identifier[drawFrom] ( literal[string] + identifier[repr] ( identifier[ind] ), identifier[self] . identifier[getrand] ( literal[string] + identifier[repr] ( identifier[ind] ))),
identifier[self] . identifier[full_H1] [ identifier[ind] ]. identifier[shape] )
identifier[angle] = identifier[reshift] (( identifier[I] - identifier[self] . identifier[full_H1] [ identifier[ind] ]. identifier[shape] [ literal[int] ]/ literal[int] )+ identifier[prev_angle] )
identifier[angle] += literal[int]
identifier[length] = identifier[J] + literal[int]
identifier[length] *= identifier[self] . identifier[fm] . identifier[pixels_per_degree]
keyword[return] identifier[angle] , identifier[length] | def _draw(self, prev_angle=None, prev_length=None):
"""
Draws a new length- and angle-difference pair and calculates
length and angle absolutes matching the last saccade drawn.
Parameters:
prev_angle : float, optional
The last angle that was drawn in the current trajectory
prev_length : float, optional
The last length that was drawn in the current trajectory
Note: Either both prev_angle and prev_length have to be given
or none; if only one parameter is given, it will be neglected.
"""
if prev_angle is None or prev_length is None:
(length, angle) = np.unravel_index(self.drawFrom('self.firstLenAng_cumsum', self.getrand('self.firstLenAng_cumsum')), self.firstLenAng_shape)
angle = angle - (self.firstLenAng_shape[1] - 1) / 2
angle += 0.5
length += 0.5
length *= self.fm.pixels_per_degree # depends on [control=['if'], data=[]]
else:
ind = int(floor(prev_length / self.fm.pixels_per_degree))
while ind >= len(self.probability_cumsum):
ind -= 1 # depends on [control=['while'], data=['ind']]
while not self.probability_cumsum[ind].any():
ind -= 1 # depends on [control=['while'], data=[]]
(J, I) = np.unravel_index(self.drawFrom('self.probability_cumsum ' + repr(ind), self.getrand('self.probability_cumsum ' + repr(ind))), self.full_H1[ind].shape)
angle = reshift(I - self.full_H1[ind].shape[1] / 2 + prev_angle)
angle += 0.5
length = J + 0.5
length *= self.fm.pixels_per_degree
return (angle, length) |
def os_info():
    """Returns os data.

    :returns: dict with the platform ``uname`` fields, the ``PATH``
        entries split into a list, and the login shell (``/bin/sh``
        when ``SHELL`` is unset).
    """
    uname_fields = dict(platform.uname()._asdict())
    path_entries = os.environ.get('PATH', '').split(':')
    login_shell = os.environ.get('SHELL', '/bin/sh')
    return {
        'uname': uname_fields,
        'path': path_entries,
        'shell': login_shell,
    }
constant[Returns os data.
]
return[dictionary[[<ast.Constant object at 0x7da20cabf430>, <ast.Constant object at 0x7da20cabf910>, <ast.Constant object at 0x7da1b144c490>], [<ast.Call object at 0x7da1b144de40>, <ast.Call object at 0x7da1b144c040>, <ast.Call object at 0x7da1b144e470>]]] | keyword[def] identifier[os_info] ():
literal[string]
keyword[return] {
literal[string] : identifier[dict] ( identifier[platform] . identifier[uname] (). identifier[_asdict] ()),
literal[string] : identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] ),
literal[string] : identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ),
} | def os_info():
"""Returns os data.
"""
return {'uname': dict(platform.uname()._asdict()), 'path': os.environ.get('PATH', '').split(':'), 'shell': os.environ.get('SHELL', '/bin/sh')} |
def _execute_cmd(plugin, args='', run_type='cmd.retcode'):
    '''
    Execute nagios plugin if it's in the directory with salt command specified in run_type
    '''
    result = {}
    # Only run plugins that are actually present in the plugin directory;
    # unknown names fall through and return an empty dict.
    if plugin in list_plugins():
        command_line = '{0}{1} {2}'.format(PLUGINDIR, plugin, args)
        result = __salt__[run_type](command_line, python_shell=False)
    return result
constant[
Execute nagios plugin if it's in the directory with salt command specified in run_type
]
variable[data] assign[=] dictionary[[], []]
variable[all_plugins] assign[=] call[name[list_plugins], parameter[]]
if compare[name[plugin] in name[all_plugins]] begin[:]
variable[data] assign[=] call[call[name[__salt__]][name[run_type]], parameter[call[constant[{0}{1} {2}].format, parameter[name[PLUGINDIR], name[plugin], name[args]]]]]
return[name[data]] | keyword[def] identifier[_execute_cmd] ( identifier[plugin] , identifier[args] = literal[string] , identifier[run_type] = literal[string] ):
literal[string]
identifier[data] ={}
identifier[all_plugins] = identifier[list_plugins] ()
keyword[if] identifier[plugin] keyword[in] identifier[all_plugins] :
identifier[data] = identifier[__salt__] [ identifier[run_type] ](
literal[string] . identifier[format] ( identifier[PLUGINDIR] , identifier[plugin] , identifier[args] ),
identifier[python_shell] = keyword[False] )
keyword[return] identifier[data] | def _execute_cmd(plugin, args='', run_type='cmd.retcode'):
"""
Execute nagios plugin if it's in the directory with salt command specified in run_type
"""
data = {}
all_plugins = list_plugins()
if plugin in all_plugins:
data = __salt__[run_type]('{0}{1} {2}'.format(PLUGINDIR, plugin, args), python_shell=False) # depends on [control=['if'], data=['plugin']]
return data |
def extend_slice(s, shape, pad=1):
    r"""
    Adjust slice indices to include additional voxels around the slice.

    Bounds checking is performed so the returned indices never extend
    outside the image.

    Parameters
    ----------
    s : list of slice objects
        A list (or tuple) of N slice objects, where N is the number of
        dimensions in the image.
    shape : array_like
        The shape of the image the slices apply to; used to clamp the
        expanded indices to the image boundaries.
    pad : int
        The number of voxels to expand in each direction.

    Returns
    -------
    slices : tuple of slice objects
        Slices with ``start`` decremented and ``stop`` incremented by
        ``pad``, clamped to ``[0, dim]`` per axis.
    """
    pad = int(pad)
    extended = []
    for axis_slice, axis_len in zip(s, shape):
        # Clamp the widened bounds to the image extent on this axis.
        lo = max(axis_slice.start - pad, 0)
        hi = min(axis_slice.stop + pad, axis_len)
        extended.append(slice(lo, hi, None))
    return tuple(extended)
constant[
Adjust slice indices to include additional voxles around the slice.
This function does bounds checking to ensure the indices don't extend
outside the image.
Parameters
----------
s : list of slice objects
A list (or tuple) of N slice objects, where N is the number of
dimensions in the image.
shape : array_like
The shape of the image into which the slice objects apply. This is
used to check the bounds to prevent indexing beyond the image.
pad : int
The number of voxels to expand in each direction.
Returns
-------
slices : list of slice objects
A list slice of objects with the start and stop attributes respectively
incremented and decremented by 1, without extending beyond the image
boundaries.
Examples
--------
>>> from scipy.ndimage import label, find_objects
>>> from porespy.tools import extend_slice
>>> im = sp.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]])
>>> labels = label(im)[0]
>>> s = find_objects(labels)
Using the slices returned by ``find_objects``, set the first label to 3
>>> labels[s[0]] = 3
>>> print(labels)
[[3 0 0]
[3 0 0]
[0 0 2]]
Next extend the slice, and use it to set the values to 4
>>> s_ext = extend_slice(s[0], shape=im.shape, pad=1)
>>> labels[s_ext] = 4
>>> print(labels)
[[4 4 0]
[4 4 0]
[4 4 2]]
As can be seen by the location of the 4s, the slice was extended by 1, and
also handled the extension beyond the boundary correctly.
]
variable[pad] assign[=] call[name[int], parameter[name[pad]]]
variable[a] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b064fc40>, <ast.Name object at 0x7da1b064f250>]]] in starred[call[name[zip], parameter[name[s], name[shape]]]] begin[:]
variable[start] assign[=] constant[0]
variable[stop] assign[=] name[dim]
if compare[binary_operation[name[i].start - name[pad]] greater_or_equal[>=] constant[0]] begin[:]
variable[start] assign[=] binary_operation[name[i].start - name[pad]]
if compare[binary_operation[name[i].stop + name[pad]] less[<] name[dim]] begin[:]
variable[stop] assign[=] binary_operation[name[i].stop + name[pad]]
call[name[a].append, parameter[call[name[slice], parameter[name[start], name[stop], constant[None]]]]]
return[call[name[tuple], parameter[name[a]]]] | keyword[def] identifier[extend_slice] ( identifier[s] , identifier[shape] , identifier[pad] = literal[int] ):
literal[string]
identifier[pad] = identifier[int] ( identifier[pad] )
identifier[a] =[]
keyword[for] identifier[i] , identifier[dim] keyword[in] identifier[zip] ( identifier[s] , identifier[shape] ):
identifier[start] = literal[int]
identifier[stop] = identifier[dim]
keyword[if] identifier[i] . identifier[start] - identifier[pad] >= literal[int] :
identifier[start] = identifier[i] . identifier[start] - identifier[pad]
keyword[if] identifier[i] . identifier[stop] + identifier[pad] < identifier[dim] :
identifier[stop] = identifier[i] . identifier[stop] + identifier[pad]
identifier[a] . identifier[append] ( identifier[slice] ( identifier[start] , identifier[stop] , keyword[None] ))
keyword[return] identifier[tuple] ( identifier[a] ) | def extend_slice(s, shape, pad=1):
"""
Adjust slice indices to include additional voxles around the slice.
This function does bounds checking to ensure the indices don't extend
outside the image.
Parameters
----------
s : list of slice objects
A list (or tuple) of N slice objects, where N is the number of
dimensions in the image.
shape : array_like
The shape of the image into which the slice objects apply. This is
used to check the bounds to prevent indexing beyond the image.
pad : int
The number of voxels to expand in each direction.
Returns
-------
slices : list of slice objects
A list slice of objects with the start and stop attributes respectively
incremented and decremented by 1, without extending beyond the image
boundaries.
Examples
--------
>>> from scipy.ndimage import label, find_objects
>>> from porespy.tools import extend_slice
>>> im = sp.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]])
>>> labels = label(im)[0]
>>> s = find_objects(labels)
Using the slices returned by ``find_objects``, set the first label to 3
>>> labels[s[0]] = 3
>>> print(labels)
[[3 0 0]
[3 0 0]
[0 0 2]]
Next extend the slice, and use it to set the values to 4
>>> s_ext = extend_slice(s[0], shape=im.shape, pad=1)
>>> labels[s_ext] = 4
>>> print(labels)
[[4 4 0]
[4 4 0]
[4 4 2]]
As can be seen by the location of the 4s, the slice was extended by 1, and
also handled the extension beyond the boundary correctly.
"""
pad = int(pad)
a = []
for (i, dim) in zip(s, shape):
start = 0
stop = dim
if i.start - pad >= 0:
start = i.start - pad # depends on [control=['if'], data=[]]
if i.stop + pad < dim:
stop = i.stop + pad # depends on [control=['if'], data=[]]
a.append(slice(start, stop, None)) # depends on [control=['for'], data=[]]
return tuple(a) |
def log(self):
    """logs stats about the progression, without printing anything on screen"""
    # Append the current snapshot of each tracked statistic to its series.
    for stat_name, stat_value in (
        ('epochDuration', self.lastEpochDuration),
        ('avg', self.avg),
        ('runtime', self.runtime),
        ('remtime', self.remtime),
    ):
        self.logs[stat_name].append(stat_value)
constant[logs stats about the progression, without printing anything on screen]
call[call[name[self].logs][constant[epochDuration]].append, parameter[name[self].lastEpochDuration]]
call[call[name[self].logs][constant[avg]].append, parameter[name[self].avg]]
call[call[name[self].logs][constant[runtime]].append, parameter[name[self].runtime]]
call[call[name[self].logs][constant[remtime]].append, parameter[name[self].remtime]] | keyword[def] identifier[log] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logs] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[lastEpochDuration] )
identifier[self] . identifier[logs] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[avg] )
identifier[self] . identifier[logs] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[runtime] )
identifier[self] . identifier[logs] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[remtime] ) | def log(self):
"""logs stats about the progression, without printing anything on screen"""
self.logs['epochDuration'].append(self.lastEpochDuration)
self.logs['avg'].append(self.avg)
self.logs['runtime'].append(self.runtime)
self.logs['remtime'].append(self.remtime) |
def delete_pb_devices():
    """Delete PB devices from the Tango database.

    Reads the number of devices to remove from the command line and
    deletes devices named ``sip_sdp/pb/00000`` .. ``sip_sdp/pb/<N-1>``
    from the Tango database.
    """
    # Fix: the argparse description and help previously said "register",
    # although this command deletes devices.
    parser = argparse.ArgumentParser(description='Delete PB devices.')
    parser.add_argument('num_pb', type=int,
                        help='Number of PB devices to delete.')
    args = parser.parse_args()
    log = logging.getLogger('sip.tango_control.subarray')
    tango_db = Database()
    log.info("Deleting PB devices:")
    for index in range(args.num_pb):
        name = 'sip_sdp/pb/{:05d}'.format(index)
        log.info("\t%s", name)
        tango_db.delete_device(name)
constant[Delete PBs devices from the Tango database.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[num_pb]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
variable[log] assign[=] call[name[logging].getLogger, parameter[constant[sip.tango_control.subarray]]]
variable[tango_db] assign[=] call[name[Database], parameter[]]
call[name[log].info, parameter[constant[Deleting PB devices:]]]
for taget[name[index]] in starred[call[name[range], parameter[name[args].num_pb]]] begin[:]
variable[name] assign[=] call[constant[sip_sdp/pb/{:05d}].format, parameter[name[index]]]
call[name[log].info, parameter[constant[ %s], name[name]]]
call[name[tango_db].delete_device, parameter[name[name]]] | keyword[def] identifier[delete_pb_devices] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] ,
identifier[help] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[tango_db] = identifier[Database] ()
identifier[log] . identifier[info] ( literal[string] )
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[args] . identifier[num_pb] ):
identifier[name] = literal[string] . identifier[format] ( identifier[index] )
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
identifier[tango_db] . identifier[delete_device] ( identifier[name] ) | def delete_pb_devices():
"""Delete PBs devices from the Tango database."""
parser = argparse.ArgumentParser(description='Register PB devices.')
parser.add_argument('num_pb', type=int, help='Number of PBs devices to register.')
args = parser.parse_args()
log = logging.getLogger('sip.tango_control.subarray')
tango_db = Database()
log.info('Deleting PB devices:')
for index in range(args.num_pb):
name = 'sip_sdp/pb/{:05d}'.format(index)
log.info('\t%s', name)
tango_db.delete_device(name) # depends on [control=['for'], data=['index']] |
def parse_rules(self):
    """
    Build the list of rules for this application.

    Reads the ``[rules]`` section of the app configuration and creates one
    AppRule per option (or one per active filter group), dividing between
    filtering and non-filtering rule sets.

    :return: list of AppRule instances
    :raises LogRaptorConfigError: if the app config has no ``[rules]`` section
    """
    # Load patterns: an app is removed when has no defined patterns.
    try:
        rule_options = self.config.items('rules')
    except configparser.NoSectionError:
        raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)
    rules = []
    for option, value in rule_options:
        pattern = value.replace('\n', '')  # Strip newlines for multi-line declarations
        if not self.args.filters:
            # No filters case: substitute the filter fields with the corresponding patterns.
            pattern = string.Template(pattern).safe_substitute(self.fields)
            rules.append(AppRule(option, pattern, self.args))
            continue
        # Filters active: specialize the pattern once per filter group.
        for filter_group in self.args.filters:
            _pattern, filter_keys = exact_sub(pattern, filter_group)
            _pattern = string.Template(_pattern).safe_substitute(self.fields)
            if len(filter_keys) >= len(filter_group):
                # Every filter in the group matched a field in the pattern:
                # this becomes a filtering rule carrying its filter keys.
                rules.append(AppRule(option, _pattern, self.args, filter_keys))
            elif self._thread:
                # Partial match: in thread mode keep an unfiltered rule,
                # presumably so related log lines can still be correlated
                # by thread -- TODO confirm against matcher logic.
                rules.append(AppRule(option, _pattern, self.args))
    return rules
constant[
Add a set of rules to the app, dividing between filter and other rule set
]
<ast.Try object at 0x7da20c7943d0>
variable[rules] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c5180>, <ast.Name object at 0x7da20c6c65c0>]]] in starred[name[rule_options]] begin[:]
variable[pattern] assign[=] call[name[value].replace, parameter[constant[
], constant[]]]
if <ast.UnaryOp object at 0x7da20c6c4ac0> begin[:]
variable[pattern] assign[=] call[call[name[string].Template, parameter[name[pattern]]].safe_substitute, parameter[name[self].fields]]
call[name[rules].append, parameter[call[name[AppRule], parameter[name[option], name[pattern], name[self].args]]]]
continue
for taget[name[filter_group]] in starred[name[self].args.filters] begin[:]
<ast.Tuple object at 0x7da20c6c4070> assign[=] call[name[exact_sub], parameter[name[pattern], name[filter_group]]]
variable[_pattern] assign[=] call[call[name[string].Template, parameter[name[_pattern]]].safe_substitute, parameter[name[self].fields]]
if compare[call[name[len], parameter[name[filter_keys]]] greater_or_equal[>=] call[name[len], parameter[name[filter_group]]]] begin[:]
call[name[rules].append, parameter[call[name[AppRule], parameter[name[option], name[_pattern], name[self].args, name[filter_keys]]]]]
return[name[rules]] | keyword[def] identifier[parse_rules] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[rule_options] = identifier[self] . identifier[config] . identifier[items] ( literal[string] )
keyword[except] identifier[configparser] . identifier[NoSectionError] :
keyword[raise] identifier[LogRaptorConfigError] ( literal[string] % identifier[self] . identifier[name] )
identifier[rules] =[]
keyword[for] identifier[option] , identifier[value] keyword[in] identifier[rule_options] :
identifier[pattern] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[args] . identifier[filters] :
identifier[pattern] = identifier[string] . identifier[Template] ( identifier[pattern] ). identifier[safe_substitute] ( identifier[self] . identifier[fields] )
identifier[rules] . identifier[append] ( identifier[AppRule] ( identifier[option] , identifier[pattern] , identifier[self] . identifier[args] ))
keyword[continue]
keyword[for] identifier[filter_group] keyword[in] identifier[self] . identifier[args] . identifier[filters] :
identifier[_pattern] , identifier[filter_keys] = identifier[exact_sub] ( identifier[pattern] , identifier[filter_group] )
identifier[_pattern] = identifier[string] . identifier[Template] ( identifier[_pattern] ). identifier[safe_substitute] ( identifier[self] . identifier[fields] )
keyword[if] identifier[len] ( identifier[filter_keys] )>= identifier[len] ( identifier[filter_group] ):
identifier[rules] . identifier[append] ( identifier[AppRule] ( identifier[option] , identifier[_pattern] , identifier[self] . identifier[args] , identifier[filter_keys] ))
keyword[elif] identifier[self] . identifier[_thread] :
identifier[rules] . identifier[append] ( identifier[AppRule] ( identifier[option] , identifier[_pattern] , identifier[self] . identifier[args] ))
keyword[return] identifier[rules] | def parse_rules(self):
"""
Add a set of rules to the app, dividing between filter and other rule set
""" # Load patterns: an app is removed when has no defined patterns.
try:
rule_options = self.config.items('rules') # depends on [control=['try'], data=[]]
except configparser.NoSectionError:
raise LogRaptorConfigError('the app %r has no defined rules!' % self.name) # depends on [control=['except'], data=[]]
rules = []
for (option, value) in rule_options:
pattern = value.replace('\n', '') # Strip newlines for multi-line declarations
if not self.args.filters: # No filters case: substitute the filter fields with the corresponding patterns.
pattern = string.Template(pattern).safe_substitute(self.fields)
rules.append(AppRule(option, pattern, self.args))
continue # depends on [control=['if'], data=[]]
for filter_group in self.args.filters:
(_pattern, filter_keys) = exact_sub(pattern, filter_group)
_pattern = string.Template(_pattern).safe_substitute(self.fields)
if len(filter_keys) >= len(filter_group):
rules.append(AppRule(option, _pattern, self.args, filter_keys)) # depends on [control=['if'], data=[]]
elif self._thread:
rules.append(AppRule(option, _pattern, self.args)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filter_group']] # depends on [control=['for'], data=[]]
return rules |
def cli(obj):
    """Display alerts like unix "top" command."""
    # Pull connection details out of the shared CLI context, then hand
    # control to the interactive screen loop.
    screen = Screen(obj['client'], obj['timezone'])
    screen.run()
constant[Display alerts like unix "top" command.]
variable[client] assign[=] call[name[obj]][constant[client]]
variable[timezone] assign[=] call[name[obj]][constant[timezone]]
variable[screen] assign[=] call[name[Screen], parameter[name[client], name[timezone]]]
call[name[screen].run, parameter[]] | keyword[def] identifier[cli] ( identifier[obj] ):
literal[string]
identifier[client] = identifier[obj] [ literal[string] ]
identifier[timezone] = identifier[obj] [ literal[string] ]
identifier[screen] = identifier[Screen] ( identifier[client] , identifier[timezone] )
identifier[screen] . identifier[run] () | def cli(obj):
"""Display alerts like unix "top" command."""
client = obj['client']
timezone = obj['timezone']
screen = Screen(client, timezone)
screen.run() |
def draw_round_rect(setter, x, y, w, h, r, color=None, aa=False):
    """Draw rectangle with top-left corner at x,y, width w, height h,
    and corner radius r.
    """
    diameter = 2 * r
    right = x + w - 1
    bottom = y + h - 1
    # Straight edges, shortened by one radius at each end.
    _draw_fast_hline(setter, x + r, y, w - diameter, color, aa)       # Top
    _draw_fast_hline(setter, x + r, bottom, w - diameter, color, aa)  # Bottom
    _draw_fast_vline(setter, x, y + r, h - diameter, color, aa)       # Left
    _draw_fast_vline(setter, right, y + r, h - diameter, color, aa)   # Right
    # Rounded corners: one quarter-circle per corner (quadrant codes 1/2/4/8).
    _draw_circle_helper(setter, x + r, y + r, r, 1, color, aa)
    _draw_circle_helper(setter, right - r, y + r, r, 2, color, aa)
    _draw_circle_helper(setter, right - r, bottom - r, r, 4, color, aa)
    _draw_circle_helper(setter, x + r, bottom - r, r, 8, color, aa)
constant[Draw rectangle with top-left corner at x,y, width w, height h,
and corner radius r.
]
call[name[_draw_fast_hline], parameter[name[setter], binary_operation[name[x] + name[r]], name[y], binary_operation[name[w] - binary_operation[constant[2] * name[r]]], name[color], name[aa]]]
call[name[_draw_fast_hline], parameter[name[setter], binary_operation[name[x] + name[r]], binary_operation[binary_operation[name[y] + name[h]] - constant[1]], binary_operation[name[w] - binary_operation[constant[2] * name[r]]], name[color], name[aa]]]
call[name[_draw_fast_vline], parameter[name[setter], name[x], binary_operation[name[y] + name[r]], binary_operation[name[h] - binary_operation[constant[2] * name[r]]], name[color], name[aa]]]
call[name[_draw_fast_vline], parameter[name[setter], binary_operation[binary_operation[name[x] + name[w]] - constant[1]], binary_operation[name[y] + name[r]], binary_operation[name[h] - binary_operation[constant[2] * name[r]]], name[color], name[aa]]]
call[name[_draw_circle_helper], parameter[name[setter], binary_operation[name[x] + name[r]], binary_operation[name[y] + name[r]], name[r], constant[1], name[color], name[aa]]]
call[name[_draw_circle_helper], parameter[name[setter], binary_operation[binary_operation[binary_operation[name[x] + name[w]] - name[r]] - constant[1]], binary_operation[name[y] + name[r]], name[r], constant[2], name[color], name[aa]]]
call[name[_draw_circle_helper], parameter[name[setter], binary_operation[binary_operation[binary_operation[name[x] + name[w]] - name[r]] - constant[1]], binary_operation[binary_operation[binary_operation[name[y] + name[h]] - name[r]] - constant[1]], name[r], constant[4], name[color], name[aa]]]
call[name[_draw_circle_helper], parameter[name[setter], binary_operation[name[x] + name[r]], binary_operation[binary_operation[binary_operation[name[y] + name[h]] - name[r]] - constant[1]], name[r], constant[8], name[color], name[aa]]] | keyword[def] identifier[draw_round_rect] ( identifier[setter] , identifier[x] , identifier[y] , identifier[w] , identifier[h] , identifier[r] , identifier[color] = keyword[None] , identifier[aa] = keyword[False] ):
literal[string]
identifier[_draw_fast_hline] ( identifier[setter] , identifier[x] + identifier[r] , identifier[y] , identifier[w] - literal[int] * identifier[r] , identifier[color] , identifier[aa] )
identifier[_draw_fast_hline] ( identifier[setter] , identifier[x] + identifier[r] , identifier[y] + identifier[h] - literal[int] , identifier[w] - literal[int] * identifier[r] , identifier[color] , identifier[aa] )
identifier[_draw_fast_vline] ( identifier[setter] , identifier[x] , identifier[y] + identifier[r] , identifier[h] - literal[int] * identifier[r] , identifier[color] , identifier[aa] )
identifier[_draw_fast_vline] ( identifier[setter] , identifier[x] + identifier[w] - literal[int] , identifier[y] + identifier[r] , identifier[h] - literal[int] * identifier[r] , identifier[color] , identifier[aa] )
identifier[_draw_circle_helper] ( identifier[setter] , identifier[x] + identifier[r] , identifier[y] + identifier[r] , identifier[r] , literal[int] , identifier[color] , identifier[aa] )
identifier[_draw_circle_helper] ( identifier[setter] , identifier[x] + identifier[w] - identifier[r] - literal[int] , identifier[y] + identifier[r] , identifier[r] , literal[int] , identifier[color] , identifier[aa] )
identifier[_draw_circle_helper] ( identifier[setter] , identifier[x] + identifier[w] - identifier[r] - literal[int] , identifier[y] + identifier[h] - identifier[r] - literal[int] , identifier[r] , literal[int] , identifier[color] , identifier[aa] )
identifier[_draw_circle_helper] ( identifier[setter] , identifier[x] + identifier[r] , identifier[y] + identifier[h] - identifier[r] - literal[int] , identifier[r] , literal[int] , identifier[color] , identifier[aa] ) | def draw_round_rect(setter, x, y, w, h, r, color=None, aa=False):
"""Draw rectangle with top-left corner at x,y, width w, height h,
and corner radius r.
"""
_draw_fast_hline(setter, x + r, y, w - 2 * r, color, aa) # Top
_draw_fast_hline(setter, x + r, y + h - 1, w - 2 * r, color, aa) # Bottom
_draw_fast_vline(setter, x, y + r, h - 2 * r, color, aa) # Left
_draw_fast_vline(setter, x + w - 1, y + r, h - 2 * r, color, aa) # Right
# draw four corners
_draw_circle_helper(setter, x + r, y + r, r, 1, color, aa)
_draw_circle_helper(setter, x + w - r - 1, y + r, r, 2, color, aa)
_draw_circle_helper(setter, x + w - r - 1, y + h - r - 1, r, 4, color, aa)
_draw_circle_helper(setter, x + r, y + h - r - 1, r, 8, color, aa) |
def _slug_strip(self, value):
"""
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value) | def function[_slug_strip, parameter[self, value]]:
constant[
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
]
variable[re_sep] assign[=] binary_operation[constant[(?:-|%s)] <ast.Mod object at 0x7da2590d6920> call[name[re].escape, parameter[name[self].separator]]]
variable[value] assign[=] call[name[re].sub, parameter[binary_operation[constant[%s+] <ast.Mod object at 0x7da2590d6920> name[re_sep]], name[self].separator, name[value]]]
return[call[name[re].sub, parameter[binary_operation[constant[^%s+|%s+$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b17d4fd0>, <ast.Name object at 0x7da1b17d5180>]]], constant[], name[value]]]] | keyword[def] identifier[_slug_strip] ( identifier[self] , identifier[value] ):
literal[string]
identifier[re_sep] = literal[string] % identifier[re] . identifier[escape] ( identifier[self] . identifier[separator] )
identifier[value] = identifier[re] . identifier[sub] ( literal[string] % identifier[re_sep] , identifier[self] . identifier[separator] , identifier[value] )
keyword[return] identifier[re] . identifier[sub] ( literal[string] %( identifier[re_sep] , identifier[re_sep] ), literal[string] , identifier[value] ) | def _slug_strip(self, value):
"""
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub('^%s+|%s+$' % (re_sep, re_sep), '', value) |
def get_access_token(self, http=None):
    """Return the access token and its expiration information.

    If the token does not exist, get one.
    If the token expired, refresh it.

    Args:
        http: optional http object used for the refresh request; a default
            transport object is created when none is given.

    Returns:
        AccessTokenInfo with ``access_token`` and ``expires_in`` fields.
    """
    if not self.access_token or self.access_token_expired:
        # Build a transport lazily, only when a refresh is actually needed.
        self.refresh(http or transport.get_http_object())
    return AccessTokenInfo(access_token=self.access_token,
                           expires_in=self._expires_in())
constant[Return the access token and its expiration information.
If the token does not exist, get one.
If the token expired, refresh it.
]
if <ast.BoolOp object at 0x7da1b01bac50> begin[:]
if <ast.UnaryOp object at 0x7da1b014f7c0> begin[:]
variable[http] assign[=] call[name[transport].get_http_object, parameter[]]
call[name[self].refresh, parameter[name[http]]]
return[call[name[AccessTokenInfo], parameter[]]] | keyword[def] identifier[get_access_token] ( identifier[self] , identifier[http] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[access_token] keyword[or] identifier[self] . identifier[access_token_expired] :
keyword[if] keyword[not] identifier[http] :
identifier[http] = identifier[transport] . identifier[get_http_object] ()
identifier[self] . identifier[refresh] ( identifier[http] )
keyword[return] identifier[AccessTokenInfo] ( identifier[access_token] = identifier[self] . identifier[access_token] ,
identifier[expires_in] = identifier[self] . identifier[_expires_in] ()) | def get_access_token(self, http=None):
"""Return the access token and its expiration information.
If the token does not exist, get one.
If the token expired, refresh it.
"""
if not self.access_token or self.access_token_expired:
if not http:
http = transport.get_http_object() # depends on [control=['if'], data=[]]
self.refresh(http) # depends on [control=['if'], data=[]]
return AccessTokenInfo(access_token=self.access_token, expires_in=self._expires_in()) |
def run_dot(self, args, name, parts=0, urls={},
            graph_options={}, node_options={}, edge_options={}):
    """
    Run graphviz 'dot' over this graph, returning whatever 'dot'
    writes to stdout.

    *args* will be passed along as commandline arguments.
    *name* is the name of the graph
    *urls* is a dictionary mapping class names to http urls

    Raises DotException for any of the many os and
    installation-related errors that may occur.
    """
    # NOTE(review): the dict defaults are shared mutable objects; they look
    # read-only downstream -- TODO confirm generate_dot never mutates them.
    try:
        dot = subprocess.Popen(['dot'] + list(args),
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               close_fds=True)
    except OSError:
        raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
    except ValueError:
        raise DotException("'dot' called with invalid arguments")
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
        # no longer swallowed and converted into a DotException.
        raise DotException("Unexpected error calling 'dot'")
    self.generate_dot(dot.stdin, name, parts, urls, graph_options,
                      node_options, edge_options)
    dot.stdin.close()
    result = dot.stdout.read()
    returncode = dot.wait()
    if returncode != 0:
        raise DotException("'dot' returned the errorcode %d" % returncode)
    return result
constant[
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
]
<ast.Try object at 0x7da207f9a710>
call[name[self].generate_dot, parameter[name[dot].stdin, name[name], name[parts], name[urls], name[graph_options], name[node_options], name[edge_options]]]
call[name[dot].stdin.close, parameter[]]
variable[result] assign[=] call[name[dot].stdout.read, parameter[]]
variable[returncode] assign[=] call[name[dot].wait, parameter[]]
if compare[name[returncode] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18c4cf400>
return[name[result]] | keyword[def] identifier[run_dot] ( identifier[self] , identifier[args] , identifier[name] , identifier[parts] = literal[int] , identifier[urls] ={},
identifier[graph_options] ={}, identifier[node_options] ={}, identifier[edge_options] ={}):
literal[string]
keyword[try] :
identifier[dot] = identifier[subprocess] . identifier[Popen] ([ literal[string] ]+ identifier[list] ( identifier[args] ),
identifier[stdin] = identifier[subprocess] . identifier[PIPE] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[close_fds] = keyword[True] )
keyword[except] identifier[OSError] :
keyword[raise] identifier[DotException] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[DotException] ( literal[string] )
keyword[except] :
keyword[raise] identifier[DotException] ( literal[string] )
identifier[self] . identifier[generate_dot] ( identifier[dot] . identifier[stdin] , identifier[name] , identifier[parts] , identifier[urls] , identifier[graph_options] ,
identifier[node_options] , identifier[edge_options] )
identifier[dot] . identifier[stdin] . identifier[close] ()
identifier[result] = identifier[dot] . identifier[stdout] . identifier[read] ()
identifier[returncode] = identifier[dot] . identifier[wait] ()
keyword[if] identifier[returncode] != literal[int] :
keyword[raise] identifier[DotException] ( literal[string] % identifier[returncode] )
keyword[return] identifier[result] | def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) # depends on [control=['try'], data=[]]
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") # depends on [control=['except'], data=[]]
except ValueError:
raise DotException("'dot' called with invalid arguments") # depends on [control=['except'], data=[]]
except:
raise DotException("Unexpected error calling 'dot'") # depends on [control=['except'], data=[]]
self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode) # depends on [control=['if'], data=['returncode']]
return result |
def add_feature(self, kind, component=None, **kwargs):
    """
    Add a new feature (spot, etc) to a component in the system. If not
    provided, 'feature' (the name of the new feature) will be created
    for you and can be accessed by the 'feature' attribute of the returned
    ParameterSet

    >>> b.add_feature(feature.spot, component='mystar')

    or

    >>> b.add_feature('spot', 'mystar', colat=90)

    Available kinds include:
        * :func:`phoebe.parameters.feature.spot`

    :parameter kind: function to call that returns a
        ParameterSet or list of parameters. This must either be
        a callable function that accepts nothing but default values,
        or the name of a function (as a string) that can be found in the
        :mod:`phoebe.parameters.feature` module (ie. 'spot')
    :type kind: str or callable
    :parameter str component: name of the component to attach the feature
    :parameter str feature: (optional) name of the newly-created feature
    :parameter **kwargs: default value for any of the newly-created
        parameters
    :return: :class:`phoebe.parameters.parameters.ParameterSet` of
        all parameters that have been added
    :raises NotImplementedError: if required constraint is not implemented
    """
    # Resolve `kind` (string or callable) to the feature-creation function.
    func = _get_add_func(_feature, kind)
    if kwargs.get('feature', False) is None:
        # then we want to apply the default below, so let's pop for now
        _ = kwargs.pop('feature')
    # Auto-generate a unique feature label when the caller did not supply one.
    kwargs.setdefault('feature',
                      self._default_label(func.func_name,
                                          **{'context': 'feature',
                                             'kind': func.func_name}))
    # Raises if the label is invalid or already taken.
    self._check_label(kwargs['feature'])
    if component is None:
        # No component given: only acceptable when the hierarchy has exactly
        # one meshable star to attach the feature to.
        stars = self.hierarchy.get_meshables()
        if len(stars) == 1:
            component = stars[0]
        else:
            raise ValueError("must provide component")
    if component not in self.components:
        raise ValueError('component not recognized')
    # Check that this feature kind supports the kind of the target component.
    component_kind = self.filter(component=component, context='component').kind
    if not _feature._component_allowed_for_feature(func.func_name, component_kind):
        raise ValueError("{} does not support component with kind {}".format(func.func_name, component_kind))
    # The feature function returns the new parameters plus any constraints
    # that must be registered for them.
    params, constraints = func(**kwargs)
    metawargs = {'context': 'feature',
                 'component': component,
                 'feature': kwargs['feature'],
                 'kind': func.func_name}
    self._attach_params(params, **metawargs)
    # Record undo/redo information; deepcopy so later kwarg mutation cannot
    # corrupt the history entry.
    redo_kwargs = deepcopy(kwargs)
    redo_kwargs['func'] = func.func_name
    self._add_history(redo_func='add_feature',
                      redo_kwargs=redo_kwargs,
                      undo_func='remove_feature',
                      undo_kwargs={'feature': kwargs['feature']})
    for constraint in constraints:
        self.add_constraint(*constraint)
    #return params
    # NOTE: we need to call get_ in order to make sure all metawargs are applied
    return self.get_feature(**metawargs)
constant[
Add a new feature (spot, etc) to a component in the system. If not
provided, 'feature' (the name of the new feature) will be created
for you and can be accessed by the 'feature' attribute of the returned
ParameterSet
>>> b.add_feature(feature.spot, component='mystar')
or
>>> b.add_feature('spot', 'mystar', colat=90)
Available kinds include:
* :func:`phoebe.parameters.feature.spot`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default values,
or the name of a function (as a string) that can be found in the
:mod:`phoebe.parameters.feature` module (ie. 'spot')
:type kind: str or callable
:parameter str component: name of the component to attach the feature
:parameter str feature: (optional) name of the newly-created feature
:parameter **kwargs: default value for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
]
variable[func] assign[=] call[name[_get_add_func], parameter[name[_feature], name[kind]]]
if compare[call[name[kwargs].get, parameter[constant[feature], constant[False]]] is constant[None]] begin[:]
variable[_] assign[=] call[name[kwargs].pop, parameter[constant[feature]]]
call[name[kwargs].setdefault, parameter[constant[feature], call[name[self]._default_label, parameter[name[func].func_name]]]]
call[name[self]._check_label, parameter[call[name[kwargs]][constant[feature]]]]
if compare[name[component] is constant[None]] begin[:]
variable[stars] assign[=] call[name[self].hierarchy.get_meshables, parameter[]]
if compare[call[name[len], parameter[name[stars]]] equal[==] constant[1]] begin[:]
variable[component] assign[=] call[name[stars]][constant[0]]
if compare[name[component] <ast.NotIn object at 0x7da2590d7190> name[self].components] begin[:]
<ast.Raise object at 0x7da2041d8100>
variable[component_kind] assign[=] call[name[self].filter, parameter[]].kind
if <ast.UnaryOp object at 0x7da2041d8190> begin[:]
<ast.Raise object at 0x7da2041d8a90>
<ast.Tuple object at 0x7da2041d9510> assign[=] call[name[func], parameter[]]
variable[metawargs] assign[=] dictionary[[<ast.Constant object at 0x7da2041db4f0>, <ast.Constant object at 0x7da2041d9de0>, <ast.Constant object at 0x7da2041db2b0>, <ast.Constant object at 0x7da2041d9360>], [<ast.Constant object at 0x7da2041d8520>, <ast.Name object at 0x7da2041d9630>, <ast.Subscript object at 0x7da2041d94b0>, <ast.Attribute object at 0x7da2041d8130>]]
call[name[self]._attach_params, parameter[name[params]]]
variable[redo_kwargs] assign[=] call[name[deepcopy], parameter[name[kwargs]]]
call[name[redo_kwargs]][constant[func]] assign[=] name[func].func_name
call[name[self]._add_history, parameter[]]
for taget[name[constraint]] in starred[name[constraints]] begin[:]
call[name[self].add_constraint, parameter[<ast.Starred object at 0x7da2041d8c40>]]
return[call[name[self].get_feature, parameter[]]] | keyword[def] identifier[add_feature] ( identifier[self] , identifier[kind] , identifier[component] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[func] = identifier[_get_add_func] ( identifier[_feature] , identifier[kind] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) keyword[is] keyword[None] :
identifier[_] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[kwargs] . identifier[setdefault] ( literal[string] ,
identifier[self] . identifier[_default_label] ( identifier[func] . identifier[func_name] ,
**{ literal[string] : literal[string] ,
literal[string] : identifier[func] . identifier[func_name] }))
identifier[self] . identifier[_check_label] ( identifier[kwargs] [ literal[string] ])
keyword[if] identifier[component] keyword[is] keyword[None] :
identifier[stars] = identifier[self] . identifier[hierarchy] . identifier[get_meshables] ()
keyword[if] identifier[len] ( identifier[stars] )== literal[int] :
identifier[component] = identifier[stars] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[component] keyword[not] keyword[in] identifier[self] . identifier[components] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[component_kind] = identifier[self] . identifier[filter] ( identifier[component] = identifier[component] , identifier[context] = literal[string] ). identifier[kind]
keyword[if] keyword[not] identifier[_feature] . identifier[_component_allowed_for_feature] ( identifier[func] . identifier[func_name] , identifier[component_kind] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[func] . identifier[func_name] , identifier[component_kind] ))
identifier[params] , identifier[constraints] = identifier[func] (** identifier[kwargs] )
identifier[metawargs] ={ literal[string] : literal[string] ,
literal[string] : identifier[component] ,
literal[string] : identifier[kwargs] [ literal[string] ],
literal[string] : identifier[func] . identifier[func_name] }
identifier[self] . identifier[_attach_params] ( identifier[params] ,** identifier[metawargs] )
identifier[redo_kwargs] = identifier[deepcopy] ( identifier[kwargs] )
identifier[redo_kwargs] [ literal[string] ]= identifier[func] . identifier[func_name]
identifier[self] . identifier[_add_history] ( identifier[redo_func] = literal[string] ,
identifier[redo_kwargs] = identifier[redo_kwargs] ,
identifier[undo_func] = literal[string] ,
identifier[undo_kwargs] ={ literal[string] : identifier[kwargs] [ literal[string] ]})
keyword[for] identifier[constraint] keyword[in] identifier[constraints] :
identifier[self] . identifier[add_constraint] (* identifier[constraint] )
keyword[return] identifier[self] . identifier[get_feature] (** identifier[metawargs] ) | def add_feature(self, kind, component=None, **kwargs):
"""
Add a new feature (spot, etc) to a component in the system. If not
provided, 'feature' (the name of the new feature) will be created
for you and can be accessed by the 'feature' attribute of the returned
ParameterSet
>>> b.add_feature(feature.spot, component='mystar')
or
>>> b.add_feature('spot', 'mystar', colat=90)
Available kinds include:
* :func:`phoebe.parameters.feature.spot`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default values,
or the name of a function (as a string) that can be found in the
:mod:`phoebe.parameters.feature` module (ie. 'spot')
:type kind: str or callable
:parameter str component: name of the component to attach the feature
:parameter str feature: (optional) name of the newly-created feature
:parameter **kwargs: default value for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
"""
func = _get_add_func(_feature, kind)
if kwargs.get('feature', False) is None:
# then we want to apply the default below, so let's pop for now
_ = kwargs.pop('feature') # depends on [control=['if'], data=[]]
kwargs.setdefault('feature', self._default_label(func.func_name, **{'context': 'feature', 'kind': func.func_name}))
self._check_label(kwargs['feature'])
if component is None:
stars = self.hierarchy.get_meshables()
if len(stars) == 1:
component = stars[0] # depends on [control=['if'], data=[]]
else:
raise ValueError('must provide component') # depends on [control=['if'], data=['component']]
if component not in self.components:
raise ValueError('component not recognized') # depends on [control=['if'], data=[]]
component_kind = self.filter(component=component, context='component').kind
if not _feature._component_allowed_for_feature(func.func_name, component_kind):
raise ValueError('{} does not support component with kind {}'.format(func.func_name, component_kind)) # depends on [control=['if'], data=[]]
(params, constraints) = func(**kwargs)
metawargs = {'context': 'feature', 'component': component, 'feature': kwargs['feature'], 'kind': func.func_name}
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['func'] = func.func_name
self._add_history(redo_func='add_feature', redo_kwargs=redo_kwargs, undo_func='remove_feature', undo_kwargs={'feature': kwargs['feature']})
for constraint in constraints:
self.add_constraint(*constraint) # depends on [control=['for'], data=['constraint']]
#return params
# NOTE: we need to call get_ in order to make sure all metawargs are applied
return self.get_feature(**metawargs) |
def commit_changeset(self, changeset_id: uuid.UUID) -> Dict[bytes, Union[bytes, DeletedEntry]]:
"""
Collapses all changes for the given changeset into the previous
changesets if it exists.
"""
does_clear = self.has_clear(changeset_id)
changeset_data = self.pop_changeset(changeset_id)
if not self.is_empty():
# we only have to assign changeset data into the latest changeset if
# there is one.
if does_clear:
# if there was a clear and more changesets underneath then clear the latest
# changeset, and replace with a new clear changeset
self.latest = {}
self._clears_at.add(self.latest_id)
self.record_changeset()
self.latest = changeset_data
else:
# otherwise, merge in all the current data
self.latest = merge(
self.latest,
changeset_data,
)
return changeset_data | def function[commit_changeset, parameter[self, changeset_id]]:
constant[
Collapses all changes for the given changeset into the previous
changesets if it exists.
]
variable[does_clear] assign[=] call[name[self].has_clear, parameter[name[changeset_id]]]
variable[changeset_data] assign[=] call[name[self].pop_changeset, parameter[name[changeset_id]]]
if <ast.UnaryOp object at 0x7da1b17f8400> begin[:]
if name[does_clear] begin[:]
name[self].latest assign[=] dictionary[[], []]
call[name[self]._clears_at.add, parameter[name[self].latest_id]]
call[name[self].record_changeset, parameter[]]
name[self].latest assign[=] name[changeset_data]
return[name[changeset_data]] | keyword[def] identifier[commit_changeset] ( identifier[self] , identifier[changeset_id] : identifier[uuid] . identifier[UUID] )-> identifier[Dict] [ identifier[bytes] , identifier[Union] [ identifier[bytes] , identifier[DeletedEntry] ]]:
literal[string]
identifier[does_clear] = identifier[self] . identifier[has_clear] ( identifier[changeset_id] )
identifier[changeset_data] = identifier[self] . identifier[pop_changeset] ( identifier[changeset_id] )
keyword[if] keyword[not] identifier[self] . identifier[is_empty] ():
keyword[if] identifier[does_clear] :
identifier[self] . identifier[latest] ={}
identifier[self] . identifier[_clears_at] . identifier[add] ( identifier[self] . identifier[latest_id] )
identifier[self] . identifier[record_changeset] ()
identifier[self] . identifier[latest] = identifier[changeset_data]
keyword[else] :
identifier[self] . identifier[latest] = identifier[merge] (
identifier[self] . identifier[latest] ,
identifier[changeset_data] ,
)
keyword[return] identifier[changeset_data] | def commit_changeset(self, changeset_id: uuid.UUID) -> Dict[bytes, Union[bytes, DeletedEntry]]:
"""
Collapses all changes for the given changeset into the previous
changesets if it exists.
"""
does_clear = self.has_clear(changeset_id)
changeset_data = self.pop_changeset(changeset_id)
if not self.is_empty():
# we only have to assign changeset data into the latest changeset if
# there is one.
if does_clear:
# if there was a clear and more changesets underneath then clear the latest
# changeset, and replace with a new clear changeset
self.latest = {}
self._clears_at.add(self.latest_id)
self.record_changeset()
self.latest = changeset_data # depends on [control=['if'], data=[]]
else:
# otherwise, merge in all the current data
self.latest = merge(self.latest, changeset_data) # depends on [control=['if'], data=[]]
return changeset_data |
def get_object_collection(self, object_name=''):
"""
If the remote end exposes a collection of objects under the supplied object name (empty
for top level), this method returns a dictionary of these objects stored under their
names on the server.
This function performs n + 1 calls to the server, where n is the number of objects.
:param object_name: Object name on the server. This is required if the object collection
is not the top level object.
"""
object_names = self.get_object(object_name).get_objects()
return {obj: self.get_object(obj) for obj in object_names} | def function[get_object_collection, parameter[self, object_name]]:
constant[
If the remote end exposes a collection of objects under the supplied object name (empty
for top level), this method returns a dictionary of these objects stored under their
names on the server.
This function performs n + 1 calls to the server, where n is the number of objects.
:param object_name: Object name on the server. This is required if the object collection
is not the top level object.
]
variable[object_names] assign[=] call[call[name[self].get_object, parameter[name[object_name]]].get_objects, parameter[]]
return[<ast.DictComp object at 0x7da1b13a9630>] | keyword[def] identifier[get_object_collection] ( identifier[self] , identifier[object_name] = literal[string] ):
literal[string]
identifier[object_names] = identifier[self] . identifier[get_object] ( identifier[object_name] ). identifier[get_objects] ()
keyword[return] { identifier[obj] : identifier[self] . identifier[get_object] ( identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[object_names] } | def get_object_collection(self, object_name=''):
"""
If the remote end exposes a collection of objects under the supplied object name (empty
for top level), this method returns a dictionary of these objects stored under their
names on the server.
This function performs n + 1 calls to the server, where n is the number of objects.
:param object_name: Object name on the server. This is required if the object collection
is not the top level object.
"""
object_names = self.get_object(object_name).get_objects()
return {obj: self.get_object(obj) for obj in object_names} |
def bound_elems(elems):
"""
Finds the minimal bbox that contains all given elems
"""
group_x0 = min(map(lambda l: l.x0, elems))
group_y0 = min(map(lambda l: l.y0, elems))
group_x1 = max(map(lambda l: l.x1, elems))
group_y1 = max(map(lambda l: l.y1, elems))
return (group_x0, group_y0, group_x1, group_y1) | def function[bound_elems, parameter[elems]]:
constant[
Finds the minimal bbox that contains all given elems
]
variable[group_x0] assign[=] call[name[min], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b135a4a0>, name[elems]]]]]
variable[group_y0] assign[=] call[name[min], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b135b520>, name[elems]]]]]
variable[group_x1] assign[=] call[name[max], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b13447c0>, name[elems]]]]]
variable[group_y1] assign[=] call[name[max], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b1345990>, name[elems]]]]]
return[tuple[[<ast.Name object at 0x7da1b1344250>, <ast.Name object at 0x7da1b13450f0>, <ast.Name object at 0x7da1b1344d60>, <ast.Name object at 0x7da1b1345d50>]]] | keyword[def] identifier[bound_elems] ( identifier[elems] ):
literal[string]
identifier[group_x0] = identifier[min] ( identifier[map] ( keyword[lambda] identifier[l] : identifier[l] . identifier[x0] , identifier[elems] ))
identifier[group_y0] = identifier[min] ( identifier[map] ( keyword[lambda] identifier[l] : identifier[l] . identifier[y0] , identifier[elems] ))
identifier[group_x1] = identifier[max] ( identifier[map] ( keyword[lambda] identifier[l] : identifier[l] . identifier[x1] , identifier[elems] ))
identifier[group_y1] = identifier[max] ( identifier[map] ( keyword[lambda] identifier[l] : identifier[l] . identifier[y1] , identifier[elems] ))
keyword[return] ( identifier[group_x0] , identifier[group_y0] , identifier[group_x1] , identifier[group_y1] ) | def bound_elems(elems):
"""
Finds the minimal bbox that contains all given elems
"""
group_x0 = min(map(lambda l: l.x0, elems))
group_y0 = min(map(lambda l: l.y0, elems))
group_x1 = max(map(lambda l: l.x1, elems))
group_y1 = max(map(lambda l: l.y1, elems))
return (group_x0, group_y0, group_x1, group_y1) |
def remove_this_clink(self,clink_id):
"""
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
"""
for clink in self.get_clinks():
if clink.get_id() == clink_id:
self.node.remove(clink.get_node())
break | def function[remove_this_clink, parameter[self, clink_id]]:
constant[
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
]
for taget[name[clink]] in starred[call[name[self].get_clinks, parameter[]]] begin[:]
if compare[call[name[clink].get_id, parameter[]] equal[==] name[clink_id]] begin[:]
call[name[self].node.remove, parameter[call[name[clink].get_node, parameter[]]]]
break | keyword[def] identifier[remove_this_clink] ( identifier[self] , identifier[clink_id] ):
literal[string]
keyword[for] identifier[clink] keyword[in] identifier[self] . identifier[get_clinks] ():
keyword[if] identifier[clink] . identifier[get_id] ()== identifier[clink_id] :
identifier[self] . identifier[node] . identifier[remove] ( identifier[clink] . identifier[get_node] ())
keyword[break] | def remove_this_clink(self, clink_id):
"""
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
"""
for clink in self.get_clinks():
if clink.get_id() == clink_id:
self.node.remove(clink.get_node())
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['clink']] |
def RowWith(self, column, value):
"""Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
"""
for row in self._table[1:]:
if row[column] == value:
return row
return None | def function[RowWith, parameter[self, column, value]]:
constant[Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
]
for taget[name[row]] in starred[call[name[self]._table][<ast.Slice object at 0x7da1b1fde470>]] begin[:]
if compare[call[name[row]][name[column]] equal[==] name[value]] begin[:]
return[name[row]]
return[constant[None]] | keyword[def] identifier[RowWith] ( identifier[self] , identifier[column] , identifier[value] ):
literal[string]
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[_table] [ literal[int] :]:
keyword[if] identifier[row] [ identifier[column] ]== identifier[value] :
keyword[return] identifier[row]
keyword[return] keyword[None] | def RowWith(self, column, value):
"""Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
"""
for row in self._table[1:]:
if row[column] == value:
return row # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
return None |
def _finish_fragment(self):
""" Creates fragment
"""
if self.fragment:
self.fragment.finish()
if self.fragment.headers:
# Regardless of what's been seen to this point, if we encounter a headers fragment,
# all the previous fragments should be marked hidden and found_visible set to False.
self.found_visible = False
for f in self.fragments:
f.hidden = True
if not self.found_visible:
if self.fragment.quoted \
or self.fragment.headers \
or self.fragment.signature \
or (len(self.fragment.content.strip()) == 0):
self.fragment.hidden = True
else:
self.found_visible = True
self.fragments.append(self.fragment)
self.fragment = None | def function[_finish_fragment, parameter[self]]:
constant[ Creates fragment
]
if name[self].fragment begin[:]
call[name[self].fragment.finish, parameter[]]
if name[self].fragment.headers begin[:]
name[self].found_visible assign[=] constant[False]
for taget[name[f]] in starred[name[self].fragments] begin[:]
name[f].hidden assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18f00f3a0> begin[:]
if <ast.BoolOp object at 0x7da20c76f910> begin[:]
name[self].fragment.hidden assign[=] constant[True]
call[name[self].fragments.append, parameter[name[self].fragment]]
name[self].fragment assign[=] constant[None] | keyword[def] identifier[_finish_fragment] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[fragment] :
identifier[self] . identifier[fragment] . identifier[finish] ()
keyword[if] identifier[self] . identifier[fragment] . identifier[headers] :
identifier[self] . identifier[found_visible] = keyword[False]
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[fragments] :
identifier[f] . identifier[hidden] = keyword[True]
keyword[if] keyword[not] identifier[self] . identifier[found_visible] :
keyword[if] identifier[self] . identifier[fragment] . identifier[quoted] keyword[or] identifier[self] . identifier[fragment] . identifier[headers] keyword[or] identifier[self] . identifier[fragment] . identifier[signature] keyword[or] ( identifier[len] ( identifier[self] . identifier[fragment] . identifier[content] . identifier[strip] ())== literal[int] ):
identifier[self] . identifier[fragment] . identifier[hidden] = keyword[True]
keyword[else] :
identifier[self] . identifier[found_visible] = keyword[True]
identifier[self] . identifier[fragments] . identifier[append] ( identifier[self] . identifier[fragment] )
identifier[self] . identifier[fragment] = keyword[None] | def _finish_fragment(self):
""" Creates fragment
"""
if self.fragment:
self.fragment.finish()
if self.fragment.headers:
# Regardless of what's been seen to this point, if we encounter a headers fragment,
# all the previous fragments should be marked hidden and found_visible set to False.
self.found_visible = False
for f in self.fragments:
f.hidden = True # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=[]]
if not self.found_visible:
if self.fragment.quoted or self.fragment.headers or self.fragment.signature or (len(self.fragment.content.strip()) == 0):
self.fragment.hidden = True # depends on [control=['if'], data=[]]
else:
self.found_visible = True # depends on [control=['if'], data=[]]
self.fragments.append(self.fragment) # depends on [control=['if'], data=[]]
self.fragment = None |
def learning_curve(train_scores, test_scores, train_sizes, ax=None):
"""Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
"""
if ax is None:
ax = plt.gca()
ax.grid()
ax.set_title("Learning Curve")
ax.set_xlabel("Training examples")
ax.set_ylabel("Score mean")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.legend(loc="best")
ax.margins(0.05)
return ax | def function[learning_curve, parameter[train_scores, test_scores, train_sizes, ax]]:
constant[Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
]
if compare[name[ax] is constant[None]] begin[:]
variable[ax] assign[=] call[name[plt].gca, parameter[]]
call[name[ax].grid, parameter[]]
call[name[ax].set_title, parameter[constant[Learning Curve]]]
call[name[ax].set_xlabel, parameter[constant[Training examples]]]
call[name[ax].set_ylabel, parameter[constant[Score mean]]]
variable[train_scores_mean] assign[=] call[name[np].mean, parameter[name[train_scores]]]
variable[train_scores_std] assign[=] call[name[np].std, parameter[name[train_scores]]]
variable[test_scores_mean] assign[=] call[name[np].mean, parameter[name[test_scores]]]
variable[test_scores_std] assign[=] call[name[np].std, parameter[name[test_scores]]]
call[name[ax].fill_between, parameter[name[train_sizes], binary_operation[name[train_scores_mean] - name[train_scores_std]], binary_operation[name[train_scores_mean] + name[train_scores_std]]]]
call[name[ax].fill_between, parameter[name[train_sizes], binary_operation[name[test_scores_mean] - name[test_scores_std]], binary_operation[name[test_scores_mean] + name[test_scores_std]]]]
call[name[ax].plot, parameter[name[train_sizes], name[train_scores_mean], constant[o-]]]
call[name[ax].plot, parameter[name[train_sizes], name[test_scores_mean], constant[o-]]]
call[name[ax].legend, parameter[]]
call[name[ax].margins, parameter[constant[0.05]]]
return[name[ax]] | keyword[def] identifier[learning_curve] ( identifier[train_scores] , identifier[test_scores] , identifier[train_sizes] , identifier[ax] = keyword[None] ):
literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[ax] . identifier[grid] ()
identifier[ax] . identifier[set_title] ( literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[train_scores_mean] = identifier[np] . identifier[mean] ( identifier[train_scores] , identifier[axis] = literal[int] )
identifier[train_scores_std] = identifier[np] . identifier[std] ( identifier[train_scores] , identifier[axis] = literal[int] )
identifier[test_scores_mean] = identifier[np] . identifier[mean] ( identifier[test_scores] , identifier[axis] = literal[int] )
identifier[test_scores_std] = identifier[np] . identifier[std] ( identifier[test_scores] , identifier[axis] = literal[int] )
identifier[ax] . identifier[fill_between] ( identifier[train_sizes] , identifier[train_scores_mean] - identifier[train_scores_std] ,
identifier[train_scores_mean] + identifier[train_scores_std] , identifier[alpha] = literal[int] ,
identifier[color] = literal[string] )
identifier[ax] . identifier[fill_between] ( identifier[train_sizes] , identifier[test_scores_mean] - identifier[test_scores_std] ,
identifier[test_scores_mean] + identifier[test_scores_std] , identifier[alpha] = literal[int] , identifier[color] = literal[string] )
identifier[ax] . identifier[plot] ( identifier[train_sizes] , identifier[train_scores_mean] , literal[string] , identifier[color] = literal[string] ,
identifier[label] = literal[string] )
identifier[ax] . identifier[plot] ( identifier[train_sizes] , identifier[test_scores_mean] , literal[string] , identifier[color] = literal[string] ,
identifier[label] = literal[string] )
identifier[ax] . identifier[legend] ( identifier[loc] = literal[string] )
identifier[ax] . identifier[margins] ( literal[int] )
keyword[return] identifier[ax] | def learning_curve(train_scores, test_scores, train_sizes, ax=None):
"""Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
"""
if ax is None:
ax = plt.gca() # depends on [control=['if'], data=['ax']]
ax.grid()
ax.set_title('Learning Curve')
ax.set_xlabel('Training examples')
ax.set_ylabel('Score mean')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
ax.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
ax.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
ax.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')
ax.legend(loc='best')
ax.margins(0.05)
return ax |
def makedir(self, dir_name, mode=PERM_DEF):
"""Create a leaf Fake directory.
Args:
dir_name: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per :py:meth:`add_object`.
"""
dir_name = make_string_path(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self._path_without_trailing_separators(dir_name)
if not dir_name:
self.raise_os_error(errno.ENOENT, '')
if self.is_windows_fs:
dir_name = self.absnormpath(dir_name)
parent_dir, _ = self.splitpath(dir_name)
if parent_dir:
base_dir = self.normpath(parent_dir)
ellipsis = self._matching_string(
parent_dir, self.path_separator + '..')
if parent_dir.endswith(ellipsis) and not self.is_windows_fs:
base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)
if not self.exists(base_dir):
self.raise_os_error(errno.ENOENT, base_dir)
dir_name = self.absnormpath(dir_name)
if self.exists(dir_name, check_link=True):
if self.is_windows_fs and dir_name == self.path_separator:
error_nr = errno.EACCES
else:
error_nr = errno.EEXIST
if ends_with_sep and self.is_macos and not self.exists(dir_name):
# to avoid EEXIST exception, remove the link
self.remove_object(dir_name)
else:
self.raise_os_error(error_nr, dir_name)
head, tail = self.splitpath(dir_name)
self.add_object(
head, FakeDirectory(tail, mode & ~self.umask, filesystem=self)) | def function[makedir, parameter[self, dir_name, mode]]:
constant[Create a leaf Fake directory.
Args:
dir_name: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per :py:meth:`add_object`.
]
variable[dir_name] assign[=] call[name[make_string_path], parameter[name[dir_name]]]
variable[ends_with_sep] assign[=] call[name[self].ends_with_path_separator, parameter[name[dir_name]]]
variable[dir_name] assign[=] call[name[self]._path_without_trailing_separators, parameter[name[dir_name]]]
if <ast.UnaryOp object at 0x7da204345d50> begin[:]
call[name[self].raise_os_error, parameter[name[errno].ENOENT, constant[]]]
if name[self].is_windows_fs begin[:]
variable[dir_name] assign[=] call[name[self].absnormpath, parameter[name[dir_name]]]
<ast.Tuple object at 0x7da204346b00> assign[=] call[name[self].splitpath, parameter[name[dir_name]]]
if name[parent_dir] begin[:]
variable[base_dir] assign[=] call[name[self].normpath, parameter[name[parent_dir]]]
variable[ellipsis] assign[=] call[name[self]._matching_string, parameter[name[parent_dir], binary_operation[name[self].path_separator + constant[..]]]]
if <ast.BoolOp object at 0x7da204344be0> begin[:]
<ast.Tuple object at 0x7da204345240> assign[=] call[name[parent_dir].partition, parameter[name[ellipsis]]]
if <ast.UnaryOp object at 0x7da204344940> begin[:]
call[name[self].raise_os_error, parameter[name[errno].ENOENT, name[base_dir]]]
variable[dir_name] assign[=] call[name[self].absnormpath, parameter[name[dir_name]]]
if call[name[self].exists, parameter[name[dir_name]]] begin[:]
if <ast.BoolOp object at 0x7da204345720> begin[:]
variable[error_nr] assign[=] name[errno].EACCES
if <ast.BoolOp object at 0x7da20c6e5420> begin[:]
call[name[self].remove_object, parameter[name[dir_name]]]
<ast.Tuple object at 0x7da20c6e7e50> assign[=] call[name[self].splitpath, parameter[name[dir_name]]]
call[name[self].add_object, parameter[name[head], call[name[FakeDirectory], parameter[name[tail], binary_operation[name[mode] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da20c6e74c0>]]]]] | keyword[def] identifier[makedir] ( identifier[self] , identifier[dir_name] , identifier[mode] = identifier[PERM_DEF] ):
literal[string]
identifier[dir_name] = identifier[make_string_path] ( identifier[dir_name] )
identifier[ends_with_sep] = identifier[self] . identifier[ends_with_path_separator] ( identifier[dir_name] )
identifier[dir_name] = identifier[self] . identifier[_path_without_trailing_separators] ( identifier[dir_name] )
keyword[if] keyword[not] identifier[dir_name] :
identifier[self] . identifier[raise_os_error] ( identifier[errno] . identifier[ENOENT] , literal[string] )
keyword[if] identifier[self] . identifier[is_windows_fs] :
identifier[dir_name] = identifier[self] . identifier[absnormpath] ( identifier[dir_name] )
identifier[parent_dir] , identifier[_] = identifier[self] . identifier[splitpath] ( identifier[dir_name] )
keyword[if] identifier[parent_dir] :
identifier[base_dir] = identifier[self] . identifier[normpath] ( identifier[parent_dir] )
identifier[ellipsis] = identifier[self] . identifier[_matching_string] (
identifier[parent_dir] , identifier[self] . identifier[path_separator] + literal[string] )
keyword[if] identifier[parent_dir] . identifier[endswith] ( identifier[ellipsis] ) keyword[and] keyword[not] identifier[self] . identifier[is_windows_fs] :
identifier[base_dir] , identifier[dummy_dotdot] , identifier[_] = identifier[parent_dir] . identifier[partition] ( identifier[ellipsis] )
keyword[if] keyword[not] identifier[self] . identifier[exists] ( identifier[base_dir] ):
identifier[self] . identifier[raise_os_error] ( identifier[errno] . identifier[ENOENT] , identifier[base_dir] )
identifier[dir_name] = identifier[self] . identifier[absnormpath] ( identifier[dir_name] )
keyword[if] identifier[self] . identifier[exists] ( identifier[dir_name] , identifier[check_link] = keyword[True] ):
keyword[if] identifier[self] . identifier[is_windows_fs] keyword[and] identifier[dir_name] == identifier[self] . identifier[path_separator] :
identifier[error_nr] = identifier[errno] . identifier[EACCES]
keyword[else] :
identifier[error_nr] = identifier[errno] . identifier[EEXIST]
keyword[if] identifier[ends_with_sep] keyword[and] identifier[self] . identifier[is_macos] keyword[and] keyword[not] identifier[self] . identifier[exists] ( identifier[dir_name] ):
identifier[self] . identifier[remove_object] ( identifier[dir_name] )
keyword[else] :
identifier[self] . identifier[raise_os_error] ( identifier[error_nr] , identifier[dir_name] )
identifier[head] , identifier[tail] = identifier[self] . identifier[splitpath] ( identifier[dir_name] )
identifier[self] . identifier[add_object] (
identifier[head] , identifier[FakeDirectory] ( identifier[tail] , identifier[mode] &~ identifier[self] . identifier[umask] , identifier[filesystem] = identifier[self] )) | def makedir(self, dir_name, mode=PERM_DEF):
"""Create a leaf Fake directory.
Args:
dir_name: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per :py:meth:`add_object`.
"""
dir_name = make_string_path(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self._path_without_trailing_separators(dir_name)
if not dir_name:
self.raise_os_error(errno.ENOENT, '') # depends on [control=['if'], data=[]]
if self.is_windows_fs:
dir_name = self.absnormpath(dir_name) # depends on [control=['if'], data=[]]
(parent_dir, _) = self.splitpath(dir_name)
if parent_dir:
base_dir = self.normpath(parent_dir)
ellipsis = self._matching_string(parent_dir, self.path_separator + '..')
if parent_dir.endswith(ellipsis) and (not self.is_windows_fs):
(base_dir, dummy_dotdot, _) = parent_dir.partition(ellipsis) # depends on [control=['if'], data=[]]
if not self.exists(base_dir):
self.raise_os_error(errno.ENOENT, base_dir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
dir_name = self.absnormpath(dir_name)
if self.exists(dir_name, check_link=True):
if self.is_windows_fs and dir_name == self.path_separator:
error_nr = errno.EACCES # depends on [control=['if'], data=[]]
else:
error_nr = errno.EEXIST
if ends_with_sep and self.is_macos and (not self.exists(dir_name)):
# to avoid EEXIST exception, remove the link
self.remove_object(dir_name) # depends on [control=['if'], data=[]]
else:
self.raise_os_error(error_nr, dir_name) # depends on [control=['if'], data=[]]
(head, tail) = self.splitpath(dir_name)
self.add_object(head, FakeDirectory(tail, mode & ~self.umask, filesystem=self)) |
def get_rule_option(self, rule_name_or_id, option_name):
""" Returns the value of a given option for a given rule. LintConfigErrors will be raised if the
rule or option don't exist. """
option = self._get_option(rule_name_or_id, option_name)
return option.value | def function[get_rule_option, parameter[self, rule_name_or_id, option_name]]:
constant[ Returns the value of a given option for a given rule. LintConfigErrors will be raised if the
rule or option don't exist. ]
variable[option] assign[=] call[name[self]._get_option, parameter[name[rule_name_or_id], name[option_name]]]
return[name[option].value] | keyword[def] identifier[get_rule_option] ( identifier[self] , identifier[rule_name_or_id] , identifier[option_name] ):
literal[string]
identifier[option] = identifier[self] . identifier[_get_option] ( identifier[rule_name_or_id] , identifier[option_name] )
keyword[return] identifier[option] . identifier[value] | def get_rule_option(self, rule_name_or_id, option_name):
""" Returns the value of a given option for a given rule. LintConfigErrors will be raised if the
rule or option don't exist. """
option = self._get_option(rule_name_or_id, option_name)
return option.value |
def as_representer(resource, content_type):
    """
    Adapts the given resource and content type to a representer.

    :param resource: resource to adapt.
    :param str content_type: content (MIME) type to obtain a representer for.
    """
    registry = get_current_registry()
    representer_registry = registry.queryUtility(IRepresenterRegistry)
    resource_class = type(resource)
    return representer_registry.create(resource_class, content_type)
constant[
Adapts the given resource and content type to a representer.
:param resource: resource to adapt.
:param str content_type: content (MIME) type to obtain a representer for.
]
variable[reg] assign[=] call[name[get_current_registry], parameter[]]
variable[rpr_reg] assign[=] call[name[reg].queryUtility, parameter[name[IRepresenterRegistry]]]
return[call[name[rpr_reg].create, parameter[call[name[type], parameter[name[resource]]], name[content_type]]]] | keyword[def] identifier[as_representer] ( identifier[resource] , identifier[content_type] ):
literal[string]
identifier[reg] = identifier[get_current_registry] ()
identifier[rpr_reg] = identifier[reg] . identifier[queryUtility] ( identifier[IRepresenterRegistry] )
keyword[return] identifier[rpr_reg] . identifier[create] ( identifier[type] ( identifier[resource] ), identifier[content_type] ) | def as_representer(resource, content_type):
"""
Adapts the given resource and content type to a representer.
:param resource: resource to adapt.
:param str content_type: content (MIME) type to obtain a representer for.
"""
reg = get_current_registry()
rpr_reg = reg.queryUtility(IRepresenterRegistry)
return rpr_reg.create(type(resource), content_type) |
def _caching(self, disk_or_memory, cache_directory=None):
    """
    Decorator that allows caching the outputs of the BaseClient get methods.
    Cache can be either disk- or memory-based.
    Disk-based cache is reloaded automatically between runs if the same
    cache directory is specified.
    Cache is kept per each unique uid.
    ex:
    >> client.get_pokemon(1) -> output gets cached
    >> client.get_pokemon(uid=1) -> output already cached
    >> client.get_pokemon(2) -> output gets cached

    Parameters
    ----------
    disk_or_memory: str
        Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
    cache_directory: str
        Specify the directory for the disk-based cache.
        Optional, will chose an appropriate and platform-specific directory
        if not specified. Ignored if memory-based cache is selected.
    """
    if disk_or_memory not in ('disk', 'memory'):
        raise ValueError('Accepted values are "disk" or "memory"')
    # Because of how BaseClient get methods are generated, they don't get a proper __name__.
    # As such, it is hard to generate a specific cache directory name for each get method.
    # Therefore, I decided to just generate a number for each folder, starting at zero.
    # The same get methods get the same number every time because their order doesn't change.
    # Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
    get_methods_id = [0]

    def memoize(func):
        # Build the per-method cache backend: a FileCache on disk or a
        # plain dict in memory.
        _global_cache_dir = ''
        if disk_or_memory == 'disk':
            if cache_directory:
                # Python 2 workaround
                if sys.version_info[0] == 2 and not isinstance(cache_directory, str):
                    raise TypeError('expected str')
                _global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
                cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
            else:
                # No directory given: fall back to the platform cache dir.
                _global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False,
                                                           opinion=False)
                cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
            cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
            get_methods_id[0] += 1
        else:  # 'memory'
            cache = {}
            _global_cache_dir = 'ram'
        # global cache directory
        # should only be set when setting the first get method
        if not self._cache_location_global:
            self._cache_location_global = _global_cache_dir
        # Per-method hit/miss counters; single-element lists so the inner
        # closures below can mutate them (pre-3.0 `nonlocal` workaround).
        hits = [0]
        misses = [0]

        def cache_info():
            # Snapshot of this method's cache statistics.
            return self._cache_info_(hits[0], misses[0], len(cache))

        def cache_clear():
            # Subtract this method's stats from the client-wide totals
            # before resetting the local counters and emptying the cache.
            # global cache info
            self._cache_hits_global -= hits[0]
            self._cache_misses_global -= misses[0]
            self._cache_len_global -= len(cache)
            # local cache info
            hits[0] = 0
            misses[0] = 0
            cache.clear()  # for disk-based cache, files are deleted but not the directories
            if disk_or_memory == 'disk':
                cache.create()  # recreate cache file handles

        def cache_location():
            # Human-readable location of the backing store.
            return 'ram' if disk_or_memory == 'memory' else cache.cache_dir

        @functools.wraps(func)
        def memoizer(*args, **kwargs):
            # arguments to the get methods can be a value or uid=value
            key = str(args[1]) if len(args) > 1 else str(kwargs.get("uid"))
            if key not in cache:
                # local and global cache info
                misses[0] += 1
                self._cache_misses_global += 1
                cache[key] = func(*args, **kwargs)
                self._cache_len_global += 1
            else:
                self._cache_hits_global += 1  # global cache info
                hits[0] += 1  # local cache info
            return cache[key]

        # Expose the cache-management helpers as attributes of the
        # wrapped method (mirrors functools.lru_cache's interface).
        memoizer.cache_info = cache_info
        memoizer.cache_clear = cache_clear
        memoizer.cache_location = cache_location
        return memoizer
    return memoize
constant[
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
]
if compare[name[disk_or_memory] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c993160>, <ast.Constant object at 0x7da20c9926e0>]]] begin[:]
<ast.Raise object at 0x7da20c992260>
variable[get_methods_id] assign[=] list[[<ast.Constant object at 0x7da20c993280>]]
def function[memoize, parameter[func]]:
variable[_global_cache_dir] assign[=] constant[]
if compare[name[disk_or_memory] equal[==] constant[disk]] begin[:]
if name[cache_directory] begin[:]
if <ast.BoolOp object at 0x7da20c992e60> begin[:]
<ast.Raise object at 0x7da20c993af0>
variable[_global_cache_dir] assign[=] call[name[os].path.join, parameter[name[cache_directory], constant[pokepy_cache]]]
variable[cache_dir] assign[=] call[name[os].path.join, parameter[name[_global_cache_dir], call[name[str], parameter[call[name[get_methods_id]][constant[0]]]]]]
variable[cache] assign[=] call[name[FileCache], parameter[constant[pokepy]]]
<ast.AugAssign object at 0x7da20c991c90>
if <ast.UnaryOp object at 0x7da20c9909a0> begin[:]
name[self]._cache_location_global assign[=] name[_global_cache_dir]
variable[hits] assign[=] list[[<ast.Constant object at 0x7da18c4cfbe0>]]
variable[misses] assign[=] list[[<ast.Constant object at 0x7da18c4cf370>]]
def function[cache_info, parameter[]]:
return[call[name[self]._cache_info_, parameter[call[name[hits]][constant[0]], call[name[misses]][constant[0]], call[name[len], parameter[name[cache]]]]]]
def function[cache_clear, parameter[]]:
<ast.AugAssign object at 0x7da18c4ce680>
<ast.AugAssign object at 0x7da18c4ce530>
<ast.AugAssign object at 0x7da18c4cd420>
call[name[hits]][constant[0]] assign[=] constant[0]
call[name[misses]][constant[0]] assign[=] constant[0]
call[name[cache].clear, parameter[]]
if compare[name[disk_or_memory] equal[==] constant[disk]] begin[:]
call[name[cache].create, parameter[]]
def function[cache_location, parameter[]]:
return[<ast.IfExp object at 0x7da18c4cf9a0>]
def function[memoizer, parameter[]]:
variable[key] assign[=] <ast.IfExp object at 0x7da18c4cd8d0>
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[cache]] begin[:]
<ast.AugAssign object at 0x7da18eb55f00>
<ast.AugAssign object at 0x7da18eb57ee0>
call[name[cache]][name[key]] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da18eb55c60>]]
<ast.AugAssign object at 0x7da18eb57910>
return[call[name[cache]][name[key]]]
name[memoizer].cache_info assign[=] name[cache_info]
name[memoizer].cache_clear assign[=] name[cache_clear]
name[memoizer].cache_location assign[=] name[cache_location]
return[name[memoizer]]
return[name[memoize]] | keyword[def] identifier[_caching] ( identifier[self] , identifier[disk_or_memory] , identifier[cache_directory] = keyword[None] ):
literal[string]
keyword[if] identifier[disk_or_memory] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[get_methods_id] =[ literal[int] ]
keyword[def] identifier[memoize] ( identifier[func] ):
identifier[_global_cache_dir] = literal[string]
keyword[if] identifier[disk_or_memory] == literal[string] :
keyword[if] identifier[cache_directory] :
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[cache_directory] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[_global_cache_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[cache_directory] , literal[string] )
identifier[cache_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[_global_cache_dir] , identifier[str] ( identifier[get_methods_id] [ literal[int] ]))
keyword[else] :
identifier[_global_cache_dir] = identifier[appdirs] . identifier[user_cache_dir] ( literal[string] , keyword[False] ,
identifier[opinion] = keyword[False] )
identifier[cache_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[_global_cache_dir] , identifier[str] ( identifier[get_methods_id] [ literal[int] ]))
identifier[cache] = identifier[FileCache] ( literal[string] , identifier[flag] = literal[string] , identifier[app_cache_dir] = identifier[cache_dir] )
identifier[get_methods_id] [ literal[int] ]+= literal[int]
keyword[else] :
identifier[cache] ={}
identifier[_global_cache_dir] = literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_cache_location_global] :
identifier[self] . identifier[_cache_location_global] = identifier[_global_cache_dir]
identifier[hits] =[ literal[int] ]
identifier[misses] =[ literal[int] ]
keyword[def] identifier[cache_info] ():
keyword[return] identifier[self] . identifier[_cache_info_] ( identifier[hits] [ literal[int] ], identifier[misses] [ literal[int] ], identifier[len] ( identifier[cache] ))
keyword[def] identifier[cache_clear] ():
identifier[self] . identifier[_cache_hits_global] -= identifier[hits] [ literal[int] ]
identifier[self] . identifier[_cache_misses_global] -= identifier[misses] [ literal[int] ]
identifier[self] . identifier[_cache_len_global] -= identifier[len] ( identifier[cache] )
identifier[hits] [ literal[int] ]= literal[int]
identifier[misses] [ literal[int] ]= literal[int]
identifier[cache] . identifier[clear] ()
keyword[if] identifier[disk_or_memory] == literal[string] :
identifier[cache] . identifier[create] ()
keyword[def] identifier[cache_location] ():
keyword[return] literal[string] keyword[if] identifier[disk_or_memory] == literal[string] keyword[else] identifier[cache] . identifier[cache_dir]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[memoizer] (* identifier[args] ,** identifier[kwargs] ):
identifier[key] = identifier[str] ( identifier[args] [ literal[int] ]) keyword[if] identifier[len] ( identifier[args] )> literal[int] keyword[else] identifier[str] ( identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[if] identifier[key] keyword[not] keyword[in] identifier[cache] :
identifier[misses] [ literal[int] ]+= literal[int]
identifier[self] . identifier[_cache_misses_global] += literal[int]
identifier[cache] [ identifier[key] ]= identifier[func] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[_cache_len_global] += literal[int]
keyword[else] :
identifier[self] . identifier[_cache_hits_global] += literal[int]
identifier[hits] [ literal[int] ]+= literal[int]
keyword[return] identifier[cache] [ identifier[key] ]
identifier[memoizer] . identifier[cache_info] = identifier[cache_info]
identifier[memoizer] . identifier[cache_clear] = identifier[cache_clear]
identifier[memoizer] . identifier[cache_location] = identifier[cache_location]
keyword[return] identifier[memoizer]
keyword[return] identifier[memoize] | def _caching(self, disk_or_memory, cache_directory=None):
"""
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
"""
if disk_or_memory not in ('disk', 'memory'):
raise ValueError('Accepted values are "disk" or "memory"') # depends on [control=['if'], data=[]]
# Because of how BaseClient get methods are generated, they don't get a proper __name__.
# As such, it is hard to generate a specific cache directory name for each get method.
# Therefore, I decided to just generate a number for each folder, starting at zero.
# The same get methods get the same number every time because their order doesn't change.
# Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
get_methods_id = [0]
def memoize(func):
_global_cache_dir = ''
if disk_or_memory == 'disk':
if cache_directory:
# Python 2 workaround
if sys.version_info[0] == 2 and (not isinstance(cache_directory, str)):
raise TypeError('expected str') # depends on [control=['if'], data=[]]
_global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0])) # depends on [control=['if'], data=[]]
else:
_global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False, opinion=False)
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
get_methods_id[0] += 1 # depends on [control=['if'], data=[]]
else: # 'memory'
cache = {}
_global_cache_dir = 'ram'
# global cache directory
# should only be set when setting the first get method
if not self._cache_location_global:
self._cache_location_global = _global_cache_dir # depends on [control=['if'], data=[]]
hits = [0]
misses = [0]
def cache_info():
return self._cache_info_(hits[0], misses[0], len(cache))
def cache_clear():
# global cache info
self._cache_hits_global -= hits[0]
self._cache_misses_global -= misses[0]
self._cache_len_global -= len(cache)
# local cache info
hits[0] = 0
misses[0] = 0
cache.clear() # for disk-based cache, files are deleted but not the directories
if disk_or_memory == 'disk':
cache.create() # recreate cache file handles # depends on [control=['if'], data=[]]
def cache_location():
return 'ram' if disk_or_memory == 'memory' else cache.cache_dir
@functools.wraps(func)
def memoizer(*args, **kwargs):
# arguments to the get methods can be a value or uid=value
key = str(args[1]) if len(args) > 1 else str(kwargs.get('uid'))
if key not in cache:
# local and global cache info
misses[0] += 1
self._cache_misses_global += 1
cache[key] = func(*args, **kwargs)
self._cache_len_global += 1 # depends on [control=['if'], data=['key', 'cache']]
else:
self._cache_hits_global += 1 # global cache info
hits[0] += 1 # local cache info
return cache[key]
memoizer.cache_info = cache_info
memoizer.cache_clear = cache_clear
memoizer.cache_location = cache_location
return memoizer
return memoize |
def buffer(self, byte_offset=0):
    """Get a copy of the map buffer.

    :param byte_offset: offset in bytes from the start of the buffer at
        which the copied region begins.
    :return: a copy of the buffer contents from ``byte_offset`` to the
        end, or ``None`` if the requested region is empty.
    """
    contents = self.ptr.contents
    # Address of the first requested byte of the underlying C buffer.
    ptr = addressof(contents.buffer.contents) + byte_offset
    # contents.length appears to count 4-byte units (hence * 4) --
    # TODO(review): confirm the element width against the C struct.
    length = contents.length * 4 - byte_offset
    # NOTE(review): `buffer(...)` here resolves to the Python 2 builtin
    # (it is not defined locally), so this method is Python-2-only.
    return buffer((c_char * length).from_address(ptr).raw) \
        if length else None
constant[Get a copy of the map buffer]
variable[contents] assign[=] name[self].ptr.contents
variable[ptr] assign[=] binary_operation[call[name[addressof], parameter[name[contents].buffer.contents]] + name[byte_offset]]
variable[length] assign[=] binary_operation[binary_operation[name[contents].length * constant[4]] - name[byte_offset]]
return[<ast.IfExp object at 0x7da1b246b760>] | keyword[def] identifier[buffer] ( identifier[self] , identifier[byte_offset] = literal[int] ):
literal[string]
identifier[contents] = identifier[self] . identifier[ptr] . identifier[contents]
identifier[ptr] = identifier[addressof] ( identifier[contents] . identifier[buffer] . identifier[contents] )+ identifier[byte_offset]
identifier[length] = identifier[contents] . identifier[length] * literal[int] - identifier[byte_offset]
keyword[return] identifier[buffer] (( identifier[c_char] * identifier[length] ). identifier[from_address] ( identifier[ptr] ). identifier[raw] ) keyword[if] identifier[length] keyword[else] keyword[None] | def buffer(self, byte_offset=0):
"""Get a copy of the map buffer"""
contents = self.ptr.contents
ptr = addressof(contents.buffer.contents) + byte_offset
length = contents.length * 4 - byte_offset
return buffer((c_char * length).from_address(ptr).raw) if length else None |
def set_option(key, value):
    """Set the value of the specified option.

    Available options:

        parse.full_codestream
        print.xml
        print.codestream
        print.short

    Parameters
    ----------
    key : str
        Name of a single option.
    value :
        New value of option.

    Raises
    ------
    KeyError
        If ``key`` is not a recognized option name.

    Option Descriptions
    -------------------
    parse.full_codestream : bool
        When False, only the codestream header is parsed for metadata. This
        can results in faster JP2/JPX parsing. When True, the entire
        codestream is parsed. [default: False]
    print.codestream : bool
        When False, the codestream segments are not printed. Otherwise the
        segments are printed depending on the value of the
        parse.full_codestream option. [default: True]
    print.short : bool
        When True, only the box ID, offset, and length are displayed. Useful
        for displaying only the basic structure or skeleton of a JPEG 2000
        file. [default: False]
    print.xml : bool
        When False, printing of the XML contents of any XML boxes or UUID XMP
        boxes is suppressed. [default: True]

    See also
    --------
    get_option
    """
    # Membership test on the dict directly -- `key not in _options.keys()`
    # was a redundant view construction.
    if key not in _options:
        raise KeyError('{key} not valid.'.format(key=key))
    _options[key] = value
constant[Set the value of the specified option.
Available options:
parse.full_codestream
print.xml
print.codestream
print.short
Parameters
----------
key : str
Name of a single option.
value :
New value of option.
Option Descriptions
-------------------
parse.full_codestream : bool
When False, only the codestream header is parsed for metadata. This
can results in faster JP2/JPX parsing. When True, the entire
codestream is parsed. [default: False]
print.codestream : bool
When False, the codestream segments are not printed. Otherwise the
segments are printed depending on the value of the
parse.full_codestream option. [default: True]
print.short : bool
When True, only the box ID, offset, and length are displayed. Useful
for displaying only the basic structure or skeleton of a JPEG 2000
file. [default: False]
print.xml : bool
When False, printing of the XML contents of any XML boxes or UUID XMP
boxes is suppressed. [default: True]
See also
--------
get_option
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> call[name[_options].keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da18f58e710>
call[name[_options]][name[key]] assign[=] name[value] | keyword[def] identifier[set_option] ( identifier[key] , identifier[value] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[_options] . identifier[keys] ():
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[key] = identifier[key] ))
identifier[_options] [ identifier[key] ]= identifier[value] | def set_option(key, value):
"""Set the value of the specified option.
Available options:
parse.full_codestream
print.xml
print.codestream
print.short
Parameters
----------
key : str
Name of a single option.
value :
New value of option.
Option Descriptions
-------------------
parse.full_codestream : bool
When False, only the codestream header is parsed for metadata. This
can results in faster JP2/JPX parsing. When True, the entire
codestream is parsed. [default: False]
print.codestream : bool
When False, the codestream segments are not printed. Otherwise the
segments are printed depending on the value of the
parse.full_codestream option. [default: True]
print.short : bool
When True, only the box ID, offset, and length are displayed. Useful
for displaying only the basic structure or skeleton of a JPEG 2000
file. [default: False]
print.xml : bool
When False, printing of the XML contents of any XML boxes or UUID XMP
boxes is suppressed. [default: True]
See also
--------
get_option
"""
if key not in _options.keys():
raise KeyError('{key} not valid.'.format(key=key)) # depends on [control=['if'], data=['key']]
_options[key] = value |
def strace_configure(self, port_width):
    """Configures the trace port width for tracing.

    Note that configuration cannot occur while STRACE is running.

    Args:
      self (JLink): the ``JLink`` instance
      port_width (int): the trace port width to use.

    Returns:
      ``None``

    Raises:
      ValueError: if ``port_width`` is not ``1``, ``2``, or ``4``.
      JLinkException: on error.
    """
    if port_width not in (1, 2, 4):
        raise ValueError('Invalid port width: %s' % str(port_width))

    config = 'PortWidth=%d' % port_width
    if self._dll.JLINK_STRACE_Config(config.encode()) < 0:
        raise errors.JLinkException('Failed to configure STRACE port')
    return None
constant[Configures the trace port width for tracing.
Note that configuration cannot occur while STRACE is running.
Args:
self (JLink): the ``JLink`` instance
port_width (int): the trace port width to use.
Returns:
``None``
Raises:
ValueError: if ``port_width`` is not ``1``, ``2``, or ``4``.
JLinkException: on error.
]
if compare[name[port_width] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b16a5060>, <ast.Constant object at 0x7da1b16a7250>, <ast.Constant object at 0x7da1b16a55a0>]]] begin[:]
<ast.Raise object at 0x7da1b16a7a30>
variable[config_string] assign[=] binary_operation[constant[PortWidth=%d] <ast.Mod object at 0x7da2590d6920> name[port_width]]
variable[res] assign[=] call[name[self]._dll.JLINK_STRACE_Config, parameter[call[name[config_string].encode, parameter[]]]]
if compare[name[res] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b16a66e0>
return[constant[None]] | keyword[def] identifier[strace_configure] ( identifier[self] , identifier[port_width] ):
literal[string]
keyword[if] identifier[port_width] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] % identifier[str] ( identifier[port_width] ))
identifier[config_string] = literal[string] % identifier[port_width]
identifier[res] = identifier[self] . identifier[_dll] . identifier[JLINK_STRACE_Config] ( identifier[config_string] . identifier[encode] ())
keyword[if] identifier[res] < literal[int] :
keyword[raise] identifier[errors] . identifier[JLinkException] ( literal[string] )
keyword[return] keyword[None] | def strace_configure(self, port_width):
"""Configures the trace port width for tracing.
Note that configuration cannot occur while STRACE is running.
Args:
self (JLink): the ``JLink`` instance
port_width (int): the trace port width to use.
Returns:
``None``
Raises:
ValueError: if ``port_width`` is not ``1``, ``2``, or ``4``.
JLinkException: on error.
"""
if port_width not in [1, 2, 4]:
raise ValueError('Invalid port width: %s' % str(port_width)) # depends on [control=['if'], data=['port_width']]
config_string = 'PortWidth=%d' % port_width
res = self._dll.JLINK_STRACE_Config(config_string.encode())
if res < 0:
raise errors.JLinkException('Failed to configure STRACE port') # depends on [control=['if'], data=[]]
return None |
def build_mxnet(app):
    """Build mxnet .so lib.

    Runs ``make`` in the repository root (the parent of the Sphinx source
    directory); if no ``config.mk`` exists yet, the default
    ``make/config.mk`` is copied into place first.
    """
    src_root = app.builder.srcdir
    make_step = 'make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 '
    has_config = os.path.exists(os.path.join(src_root, '..', 'config.mk'))
    if has_config:
        _run_cmd('cd %s/.. && ' % src_root + make_step)
    else:
        _run_cmd('cd %s/.. && cp make/config.mk config.mk && ' % src_root + make_step)
constant[Build mxnet .so lib]
if <ast.UnaryOp object at 0x7da2054a6620> begin[:]
call[name[_run_cmd], parameter[binary_operation[constant[cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 ] <ast.Mod object at 0x7da2590d6920> name[app].builder.srcdir]]] | keyword[def] identifier[build_mxnet] ( identifier[app] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[app] . identifier[builder] . identifier[srcdir] , literal[string] , literal[string] )):
identifier[_run_cmd] ( literal[string] %
identifier[app] . identifier[builder] . identifier[srcdir] )
keyword[else] :
identifier[_run_cmd] ( literal[string] %
identifier[app] . identifier[builder] . identifier[srcdir] ) | def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd('cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 ' % app.builder.srcdir) # depends on [control=['if'], data=[]]
else:
_run_cmd('cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 ' % app.builder.srcdir) |
def prepare_uuid(data, schema):
    """Convert a uuid.UUID instance to its canonical string form
    xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.

    Values that are not UUID instances pass through unchanged; ``schema``
    is accepted for interface compatibility and not used here.
    """
    return str(data) if isinstance(data, uuid.UUID) else data
constant[Converts uuid.UUID to
string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
]
if call[name[isinstance], parameter[name[data], name[uuid].UUID]] begin[:]
return[call[name[str], parameter[name[data]]]] | keyword[def] identifier[prepare_uuid] ( identifier[data] , identifier[schema] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[uuid] . identifier[UUID] ):
keyword[return] identifier[str] ( identifier[data] )
keyword[else] :
keyword[return] identifier[data] | def prepare_uuid(data, schema):
"""Converts uuid.UUID to
string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
"""
if isinstance(data, uuid.UUID):
return str(data) # depends on [control=['if'], data=[]]
else:
return data |
def decode(cls, value):
    """
    take a utf-8 encoded byte-string from redis and
    turn it back into a list
    :param value: bytes
    :return: list
    """
    if value is None:
        return None
    try:
        return list(json.loads(value.decode(cls._encoding)))
    except (TypeError, AttributeError):
        # Not a decodable byte-string -- fall back to a plain list copy.
        return list(value)
constant[
take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list
]
<ast.Try object at 0x7da1b0a214e0> | keyword[def] identifier[decode] ( identifier[cls] , identifier[value] ):
literal[string]
keyword[try] :
keyword[return] keyword[None] keyword[if] identifier[value] keyword[is] keyword[None] keyword[else] identifier[list] ( identifier[json] . identifier[loads] ( identifier[value] . identifier[decode] ( identifier[cls] . identifier[_encoding] )))
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
keyword[return] identifier[list] ( identifier[value] ) | def decode(cls, value):
"""
take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list
"""
try:
return None if value is None else list(json.loads(value.decode(cls._encoding))) # depends on [control=['try'], data=[]]
except (TypeError, AttributeError):
return list(value) # depends on [control=['except'], data=[]] |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Read the data encoding the KeyWrappingSpecification struct and decode
        it into its constituent parts.
        Args:
            input_stream (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.0.
        Raises:
            ValueError: Raised if the encoding is missing the required
                wrapping method field.
        """
        # Decode the shared tag/type/length header first; this populates
        # self.length with the size of this struct's payload.
        super(KeyWrappingSpecification, self).read(
            input_stream,
            kmip_version=kmip_version
        )
        # Bound all further reads to this struct's own payload so an
        # over-read cannot consume bytes belonging to sibling structures.
        local_stream = BytearrayStream(input_stream.read(self.length))
        # Wrapping method is the only mandatory field of this struct.
        if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):
            self._wrapping_method = primitives.Enumeration(
                enum=enums.WrappingMethod,
                tag=enums.Tags.WRAPPING_METHOD
            )
            self._wrapping_method.read(
                local_stream,
                kmip_version=kmip_version
            )
        else:
            raise ValueError(
                "Invalid struct missing the wrapping method attribute."
            )
        # Optional: encryption key information substructure.
        if self.is_tag_next(
                enums.Tags.ENCRYPTION_KEY_INFORMATION,
                local_stream
        ):
            self._encryption_key_information = EncryptionKeyInformation()
            self._encryption_key_information.read(
                local_stream,
                kmip_version=kmip_version
            )
        # Optional: MAC/signature key information substructure.
        if self.is_tag_next(
                enums.Tags.MAC_SIGNATURE_KEY_INFORMATION,
                local_stream
        ):
            self._mac_signature_key_information = MACSignatureKeyInformation()
            self._mac_signature_key_information.read(
                local_stream,
                kmip_version=kmip_version
            )
        # Zero or more attribute-name text strings may follow; collect
        # them in encoding order.
        attribute_names = []
        while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_stream):
            attribute_name = primitives.TextString(
                tag=enums.Tags.ATTRIBUTE_NAME
            )
            attribute_name.read(local_stream, kmip_version=kmip_version)
            attribute_names.append(attribute_name)
        self._attribute_names = attribute_names
        # Optional: encoding option enumeration.
        if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):
            self._encoding_option = primitives.Enumeration(
                enum=enums.EncodingOption,
                tag=enums.Tags.ENCODING_OPTION
            )
            self._encoding_option.read(
                local_stream,
                kmip_version=kmip_version
            )
        # NOTE(review): checks for unread bytes left in the payload —
        # presumably raises on leftovers; confirm in the base class.
        self.is_oversized(local_stream)
constant[
Read the data encoding the KeyWrappingSpecification struct and decode
it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
]
call[call[name[super], parameter[name[KeyWrappingSpecification], name[self]]].read, parameter[name[input_stream]]]
variable[local_stream] assign[=] call[name[BytearrayStream], parameter[call[name[input_stream].read, parameter[name[self].length]]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.WRAPPING_METHOD, name[local_stream]]] begin[:]
name[self]._wrapping_method assign[=] call[name[primitives].Enumeration, parameter[]]
call[name[self]._wrapping_method.read, parameter[name[local_stream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.ENCRYPTION_KEY_INFORMATION, name[local_stream]]] begin[:]
name[self]._encryption_key_information assign[=] call[name[EncryptionKeyInformation], parameter[]]
call[name[self]._encryption_key_information.read, parameter[name[local_stream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.MAC_SIGNATURE_KEY_INFORMATION, name[local_stream]]] begin[:]
name[self]._mac_signature_key_information assign[=] call[name[MACSignatureKeyInformation], parameter[]]
call[name[self]._mac_signature_key_information.read, parameter[name[local_stream]]]
variable[attribute_names] assign[=] list[[]]
while call[name[self].is_tag_next, parameter[name[enums].Tags.ATTRIBUTE_NAME, name[local_stream]]] begin[:]
variable[attribute_name] assign[=] call[name[primitives].TextString, parameter[]]
call[name[attribute_name].read, parameter[name[local_stream]]]
call[name[attribute_names].append, parameter[name[attribute_name]]]
name[self]._attribute_names assign[=] name[attribute_names]
if call[name[self].is_tag_next, parameter[name[enums].Tags.ENCODING_OPTION, name[local_stream]]] begin[:]
name[self]._encoding_option assign[=] call[name[primitives].Enumeration, parameter[]]
call[name[self]._encoding_option.read, parameter[name[local_stream]]]
call[name[self].is_oversized, parameter[name[local_stream]]] | keyword[def] identifier[read] ( identifier[self] , identifier[input_stream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ):
literal[string]
identifier[super] ( identifier[KeyWrappingSpecification] , identifier[self] ). identifier[read] (
identifier[input_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[local_stream] = identifier[BytearrayStream] ( identifier[input_stream] . identifier[read] ( identifier[self] . identifier[length] ))
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[WRAPPING_METHOD] , identifier[local_stream] ):
identifier[self] . identifier[_wrapping_method] = identifier[primitives] . identifier[Enumeration] (
identifier[enum] = identifier[enums] . identifier[WrappingMethod] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[WRAPPING_METHOD]
)
identifier[self] . identifier[_wrapping_method] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[ENCRYPTION_KEY_INFORMATION] ,
identifier[local_stream]
):
identifier[self] . identifier[_encryption_key_information] = identifier[EncryptionKeyInformation] ()
identifier[self] . identifier[_encryption_key_information] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[MAC_SIGNATURE_KEY_INFORMATION] ,
identifier[local_stream]
):
identifier[self] . identifier[_mac_signature_key_information] = identifier[MACSignatureKeyInformation] ()
identifier[self] . identifier[_mac_signature_key_information] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[attribute_names] =[]
keyword[while] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[ATTRIBUTE_NAME] , identifier[local_stream] ):
identifier[attribute_name] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[ATTRIBUTE_NAME]
)
identifier[attribute_name] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] )
identifier[attribute_names] . identifier[append] ( identifier[attribute_name] )
identifier[self] . identifier[_attribute_names] = identifier[attribute_names]
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[ENCODING_OPTION] , identifier[local_stream] ):
identifier[self] . identifier[_encoding_option] = identifier[primitives] . identifier[Enumeration] (
identifier[enum] = identifier[enums] . identifier[EncodingOption] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[ENCODING_OPTION]
)
identifier[self] . identifier[_encoding_option] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[is_oversized] ( identifier[local_stream] ) | def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the KeyWrappingSpecification struct and decode
it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(KeyWrappingSpecification, self).read(input_stream, kmip_version=kmip_version)
local_stream = BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):
self._wrapping_method = primitives.Enumeration(enum=enums.WrappingMethod, tag=enums.Tags.WRAPPING_METHOD)
self._wrapping_method.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid struct missing the wrapping method attribute.')
if self.is_tag_next(enums.Tags.ENCRYPTION_KEY_INFORMATION, local_stream):
self._encryption_key_information = EncryptionKeyInformation()
self._encryption_key_information.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.MAC_SIGNATURE_KEY_INFORMATION, local_stream):
self._mac_signature_key_information = MACSignatureKeyInformation()
self._mac_signature_key_information.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
attribute_names = []
while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_stream):
attribute_name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)
attribute_name.read(local_stream, kmip_version=kmip_version)
attribute_names.append(attribute_name) # depends on [control=['while'], data=[]]
self._attribute_names = attribute_names
if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):
self._encoding_option = primitives.Enumeration(enum=enums.EncodingOption, tag=enums.Tags.ENCODING_OPTION)
self._encoding_option.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
self.is_oversized(local_stream) |
def _set_variant_id(self, variant_id=None):
"""Set the variant id for this variant"""
if not variant_id:
variant_id = '_'.join([
self.CHROM,
str(self.POS),
self.REF,
self.ALT
])
logger.debug("Updating variant id to {0}".format(
variant_id))
self['variant_id'] = variant_id | def function[_set_variant_id, parameter[self, variant_id]]:
constant[Set the variant id for this variant]
if <ast.UnaryOp object at 0x7da18bc701f0> begin[:]
variable[variant_id] assign[=] call[constant[_].join, parameter[list[[<ast.Attribute object at 0x7da18bc70dc0>, <ast.Call object at 0x7da18bc70df0>, <ast.Attribute object at 0x7da18f58d960>, <ast.Attribute object at 0x7da18f58e560>]]]]
call[name[logger].debug, parameter[call[constant[Updating variant id to {0}].format, parameter[name[variant_id]]]]]
call[name[self]][constant[variant_id]] assign[=] name[variant_id] | keyword[def] identifier[_set_variant_id] ( identifier[self] , identifier[variant_id] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[variant_id] :
identifier[variant_id] = literal[string] . identifier[join] ([
identifier[self] . identifier[CHROM] ,
identifier[str] ( identifier[self] . identifier[POS] ),
identifier[self] . identifier[REF] ,
identifier[self] . identifier[ALT]
])
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[variant_id] ))
identifier[self] [ literal[string] ]= identifier[variant_id] | def _set_variant_id(self, variant_id=None):
"""Set the variant id for this variant"""
if not variant_id:
variant_id = '_'.join([self.CHROM, str(self.POS), self.REF, self.ALT]) # depends on [control=['if'], data=[]]
logger.debug('Updating variant id to {0}'.format(variant_id))
self['variant_id'] = variant_id |
def convertDict2Attrs(self, *args, **kwargs):
        """The trick for iterable Mambu Objects comes here:
        You iterate over each element of the responded List from Mambu,
        and create a Mambu Centre object for each one, initializing them
        one at a time, and changing the attrs attribute (which just
        holds a list of plain dictionaries) with a MambuCentre just
        created.
        .. todo:: pass a valid (perhaps default) urlfunc, and its
            corresponding id to entid to each MambuCentre, telling
            MambuStruct not to connect() by default. It's desirable to
            connect at any other further moment to refresh some element in
            the list.
        """
        # self.params is optional on the instance; resolve it once
        # instead of on every iteration (it cannot change mid-loop).
        try:
            params = self.params
        except AttributeError:
            params = {}
        kwargs.update(params)
        # Default the element class once. The previous per-iteration
        # try/except also masked AttributeErrors raised *inside* a valid
        # constructor; this only handles the missing-attribute case.
        if not hasattr(self, 'mambucentreclass'):
            self.mambucentreclass = MambuCentre
        for n, b in enumerate(self.attrs):
            # Replacing items in-place while enumerating is safe here:
            # the list is never resized, only its elements are swapped.
            centre = self.mambucentreclass(urlfunc=None, entid=None,
                                           *args, **kwargs)
            centre.init(b, *args, **kwargs)
            self.attrs[n] = centre
constant[The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Centre object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuCentre just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuCentre, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
]
for taget[tuple[[<ast.Name object at 0x7da1b26acbb0>, <ast.Name object at 0x7da1b26aecb0>]]] in starred[call[name[enumerate], parameter[name[self].attrs]]] begin[:]
<ast.Try object at 0x7da1b26ac160>
call[name[kwargs].update, parameter[name[params]]]
<ast.Try object at 0x7da1b26ad4b0>
call[name[centre].init, parameter[name[b], <ast.Starred object at 0x7da1b26ac5e0>]]
call[name[self].attrs][name[n]] assign[=] name[centre] | keyword[def] identifier[convertDict2Attrs] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[n] , identifier[b] keyword[in] identifier[enumerate] ( identifier[self] . identifier[attrs] ):
keyword[try] :
identifier[params] = identifier[self] . identifier[params]
keyword[except] identifier[AttributeError] keyword[as] identifier[aerr] :
identifier[params] ={}
identifier[kwargs] . identifier[update] ( identifier[params] )
keyword[try] :
identifier[centre] = identifier[self] . identifier[mambucentreclass] ( identifier[urlfunc] = keyword[None] , identifier[entid] = keyword[None] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[AttributeError] keyword[as] identifier[ae] :
identifier[self] . identifier[mambucentreclass] = identifier[MambuCentre]
identifier[centre] = identifier[self] . identifier[mambucentreclass] ( identifier[urlfunc] = keyword[None] , identifier[entid] = keyword[None] ,* identifier[args] ,** identifier[kwargs] )
identifier[centre] . identifier[init] ( identifier[b] ,* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[attrs] [ identifier[n] ]= identifier[centre] | def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Centre object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuCentre just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuCentre, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
"""
for (n, b) in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params # depends on [control=['try'], data=[]]
except AttributeError as aerr:
params = {} # depends on [control=['except'], data=[]]
kwargs.update(params)
try:
centre = self.mambucentreclass(*args, urlfunc=None, entid=None, **kwargs) # depends on [control=['try'], data=[]]
except AttributeError as ae:
self.mambucentreclass = MambuCentre
centre = self.mambucentreclass(*args, urlfunc=None, entid=None, **kwargs) # depends on [control=['except'], data=[]]
centre.init(b, *args, **kwargs)
self.attrs[n] = centre # depends on [control=['for'], data=[]] |
def get_key(key, key_container, num_results=1, best=True,
            resolution=None, calibration=None, polarization=None,
            level=None, modifiers=None):
    """Get the fully-specified key best matching the provided key.
    Only the best match is returned if `best` is `True` (default). See
    `get_best_dataset_key` for more information on how this is determined.
    The `resolution` and other identifier keywords are provided as a
    convenience to filter by multiple parameters at once without having
    to filter by multiple `key` inputs.
    Args:
        key (DatasetID): DatasetID of query parameters to use for
                         searching. Any parameter that is `None`
                         is considered a wild card and any match is
                         accepted.
        key_container (dict or set): Container of DatasetID objects that
                                     uses hashing to quickly access items.
        num_results (int): Number of results to return. Use `0` for all
                           matching results. If `1` then the single matching
                           key is returned instead of a list of length 1.
                           (default: 1)
        best (bool): Sort results to get "best" result first
                     (default: True). See `get_best_dataset_key` for details.
        resolution (float, int, or list): Resolution of the dataset in
                                          dataset units (typically
                                          meters). This can also be a
                                          list of these numbers.
        calibration (str or list): Dataset calibration
                                   (ex.'reflectance'). This can also be a
                                   list of these strings.
        polarization (str or list): Dataset polarization
                                    (ex.'V'). This can also be a
                                    list of these strings.
        level (number or list): Dataset level (ex. 100). This can also be a
                                list of these numbers.
        modifiers (list): Modifiers applied to the dataset. Unlike
                          resolution and calibration this is the exact
                          desired list of modifiers for one dataset, not
                          a list of possible modifiers.
    Returns (list or DatasetID): Matching key(s)
    Raises: KeyError if no matching results or if more than one result is
            found when `num_results` is `1`.
    """
    # Normalize the query to a DatasetID: numbers query by wavelength,
    # strings by name. modifiers=None means "any number of modifiers".
    if isinstance(key, numbers.Number):
        key = DatasetID(wavelength=key, modifiers=None)
    elif isinstance(key, (str, six.text_type)):
        key = DatasetID(name=key, modifiers=None)
    elif not isinstance(key, DatasetID):
        raise ValueError("Expected 'DatasetID', str, or number dict key, "
                         "not {}".format(str(type(key))))
    res = filter_keys_by_dataset_id(key, key_container)
    # The four scalar-or-list membership filters share identical logic;
    # apply them in one data-driven loop instead of four copied blocks.
    for attr, allowed in (("resolution", resolution),
                          ("polarization", polarization),
                          ("calibration", calibration),
                          ("level", level)):
        if allowed is None:
            continue
        if not isinstance(allowed, (list, tuple)):
            allowed = (allowed, )
        res = [k for k in res
               if getattr(k, attr) is not None
               and getattr(k, attr) in allowed]
    # Unlike the filters above, modifiers is an exact-match comparison.
    if modifiers is not None:
        res = [k for k in res
               if k.modifiers is not None and k.modifiers == modifiers]
    if best:
        res = get_best_dataset_key(key, res)
    if num_results == 1:
        # Exactly one result required: unwrap it or raise.
        if not res:
            raise KeyError("No dataset matching '{}' found".format(str(key)))
        if len(res) != 1:
            raise TooManyResults(
                "No unique dataset matching {}".format(str(key)))
        return res[0]
    if num_results == 0:
        return res
    return res[:num_results]
constant[Get the fully-specified key best matching the provided key.
Only the best match is returned if `best` is `True` (default). See
`get_best_dataset_key` for more information on how this is determined.
The `resolution` and other identifier keywords are provided as a
convenience to filter by multiple parameters at once without having
to filter by multiple `key` inputs.
Args:
key (DatasetID): DatasetID of query parameters to use for
searching. Any parameter that is `None`
is considered a wild card and any match is
accepted.
key_container (dict or set): Container of DatasetID objects that
uses hashing to quickly access items.
num_results (int): Number of results to return. Use `0` for all
matching results. If `1` then the single matching
key is returned instead of a list of length 1.
(default: 1)
best (bool): Sort results to get "best" result first
(default: True). See `get_best_dataset_key` for details.
resolution (float, int, or list): Resolution of the dataset in
dataset units (typically
meters). This can also be a
list of these numbers.
calibration (str or list): Dataset calibration
(ex.'reflectance'). This can also be a
list of these strings.
polarization (str or list): Dataset polarization
(ex.'V'). This can also be a
list of these strings.
level (number or list): Dataset level (ex. 100). This can also be a
list of these numbers.
modifiers (list): Modifiers applied to the dataset. Unlike
resolution and calibration this is the exact
desired list of modifiers for one dataset, not
a list of possible modifiers.
Returns (list or DatasetID): Matching key(s)
Raises: KeyError if no matching results or if more than one result is
found when `num_results` is `1`.
]
if call[name[isinstance], parameter[name[key], name[numbers].Number]] begin[:]
variable[key] assign[=] call[name[DatasetID], parameter[]]
variable[res] assign[=] call[name[filter_keys_by_dataset_id], parameter[name[key], name[key_container]]]
if compare[name[resolution] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b22ad420> begin[:]
variable[resolution] assign[=] tuple[[<ast.Name object at 0x7da1b22ad240>]]
variable[res] assign[=] <ast.ListComp object at 0x7da1b22ad180>
if compare[name[polarization] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b22ae560> begin[:]
variable[polarization] assign[=] tuple[[<ast.Name object at 0x7da1b22ae350>]]
variable[res] assign[=] <ast.ListComp object at 0x7da1b22ae3e0>
if compare[name[calibration] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b22adf00> begin[:]
variable[calibration] assign[=] tuple[[<ast.Name object at 0x7da1b22ae0e0>]]
variable[res] assign[=] <ast.ListComp object at 0x7da1b22ae2c0>
if compare[name[level] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b22ad690> begin[:]
variable[level] assign[=] tuple[[<ast.Name object at 0x7da1b22ad780>]]
variable[res] assign[=] <ast.ListComp object at 0x7da1b22ad8d0>
if compare[name[modifiers] is_not constant[None]] begin[:]
variable[res] assign[=] <ast.ListComp object at 0x7da1b22acdc0>
if name[best] begin[:]
variable[res] assign[=] call[name[get_best_dataset_key], parameter[name[key], name[res]]]
if <ast.BoolOp object at 0x7da1b22aed70> begin[:]
<ast.Raise object at 0x7da1b22aebc0> | keyword[def] identifier[get_key] ( identifier[key] , identifier[key_container] , identifier[num_results] = literal[int] , identifier[best] = keyword[True] ,
identifier[resolution] = keyword[None] , identifier[calibration] = keyword[None] , identifier[polarization] = keyword[None] ,
identifier[level] = keyword[None] , identifier[modifiers] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[numbers] . identifier[Number] ):
identifier[key] = identifier[DatasetID] ( identifier[wavelength] = identifier[key] , identifier[modifiers] = keyword[None] )
keyword[elif] identifier[isinstance] ( identifier[key] ,( identifier[str] , identifier[six] . identifier[text_type] )):
identifier[key] = identifier[DatasetID] ( identifier[name] = identifier[key] , identifier[modifiers] = keyword[None] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[key] , identifier[DatasetID] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[str] ( identifier[type] ( identifier[key] ))))
identifier[res] = identifier[filter_keys_by_dataset_id] ( identifier[key] , identifier[key_container] )
keyword[if] identifier[resolution] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[resolution] ,( identifier[list] , identifier[tuple] )):
identifier[resolution] =( identifier[resolution] ,)
identifier[res] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[res]
keyword[if] identifier[k] . identifier[resolution] keyword[is] keyword[not] keyword[None] keyword[and] identifier[k] . identifier[resolution] keyword[in] identifier[resolution] ]
keyword[if] identifier[polarization] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[polarization] ,( identifier[list] , identifier[tuple] )):
identifier[polarization] =( identifier[polarization] ,)
identifier[res] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[res]
keyword[if] identifier[k] . identifier[polarization] keyword[is] keyword[not] keyword[None] keyword[and] identifier[k] . identifier[polarization] keyword[in]
identifier[polarization] ]
keyword[if] identifier[calibration] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[calibration] ,( identifier[list] , identifier[tuple] )):
identifier[calibration] =( identifier[calibration] ,)
identifier[res] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[res]
keyword[if] identifier[k] . identifier[calibration] keyword[is] keyword[not] keyword[None] keyword[and] identifier[k] . identifier[calibration] keyword[in] identifier[calibration] ]
keyword[if] identifier[level] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[level] ,( identifier[list] , identifier[tuple] )):
identifier[level] =( identifier[level] ,)
identifier[res] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[res]
keyword[if] identifier[k] . identifier[level] keyword[is] keyword[not] keyword[None] keyword[and] identifier[k] . identifier[level] keyword[in] identifier[level] ]
keyword[if] identifier[modifiers] keyword[is] keyword[not] keyword[None] :
identifier[res] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[res]
keyword[if] identifier[k] . identifier[modifiers] keyword[is] keyword[not] keyword[None] keyword[and] identifier[k] . identifier[modifiers] == identifier[modifiers] ]
keyword[if] identifier[best] :
identifier[res] = identifier[get_best_dataset_key] ( identifier[key] , identifier[res] )
keyword[if] identifier[num_results] == literal[int] keyword[and] keyword[not] identifier[res] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[key] )))
keyword[elif] identifier[num_results] == literal[int] keyword[and] identifier[len] ( identifier[res] )!= literal[int] :
keyword[raise] identifier[TooManyResults] ( literal[string] . identifier[format] ( identifier[str] ( identifier[key] )))
keyword[elif] identifier[num_results] == literal[int] :
keyword[return] identifier[res] [ literal[int] ]
keyword[elif] identifier[num_results] == literal[int] :
keyword[return] identifier[res]
keyword[else] :
keyword[return] identifier[res] [: identifier[num_results] ] | def get_key(key, key_container, num_results=1, best=True, resolution=None, calibration=None, polarization=None, level=None, modifiers=None):
"""Get the fully-specified key best matching the provided key.
Only the best match is returned if `best` is `True` (default). See
`get_best_dataset_key` for more information on how this is determined.
The `resolution` and other identifier keywords are provided as a
convenience to filter by multiple parameters at once without having
to filter by multiple `key` inputs.
Args:
key (DatasetID): DatasetID of query parameters to use for
searching. Any parameter that is `None`
is considered a wild card and any match is
accepted.
key_container (dict or set): Container of DatasetID objects that
uses hashing to quickly access items.
num_results (int): Number of results to return. Use `0` for all
matching results. If `1` then the single matching
key is returned instead of a list of length 1.
(default: 1)
best (bool): Sort results to get "best" result first
(default: True). See `get_best_dataset_key` for details.
resolution (float, int, or list): Resolution of the dataset in
dataset units (typically
meters). This can also be a
list of these numbers.
calibration (str or list): Dataset calibration
(ex.'reflectance'). This can also be a
list of these strings.
polarization (str or list): Dataset polarization
(ex.'V'). This can also be a
list of these strings.
level (number or list): Dataset level (ex. 100). This can also be a
list of these numbers.
modifiers (list): Modifiers applied to the dataset. Unlike
resolution and calibration this is the exact
desired list of modifiers for one dataset, not
a list of possible modifiers.
Returns (list or DatasetID): Matching key(s)
Raises: KeyError if no matching results or if more than one result is
found when `num_results` is `1`.
"""
if isinstance(key, numbers.Number):
# we want this ID to act as a query so we set modifiers to None
# meaning "we don't care how many modifiers it has".
key = DatasetID(wavelength=key, modifiers=None) # depends on [control=['if'], data=[]]
elif isinstance(key, (str, six.text_type)):
# ID should act as a query (see wl comment above)
key = DatasetID(name=key, modifiers=None) # depends on [control=['if'], data=[]]
elif not isinstance(key, DatasetID):
raise ValueError("Expected 'DatasetID', str, or number dict key, not {}".format(str(type(key)))) # depends on [control=['if'], data=[]]
res = filter_keys_by_dataset_id(key, key_container)
# further filter by other parameters
if resolution is not None:
if not isinstance(resolution, (list, tuple)):
resolution = (resolution,) # depends on [control=['if'], data=[]]
res = [k for k in res if k.resolution is not None and k.resolution in resolution] # depends on [control=['if'], data=['resolution']]
if polarization is not None:
if not isinstance(polarization, (list, tuple)):
polarization = (polarization,) # depends on [control=['if'], data=[]]
res = [k for k in res if k.polarization is not None and k.polarization in polarization] # depends on [control=['if'], data=['polarization']]
if calibration is not None:
if not isinstance(calibration, (list, tuple)):
calibration = (calibration,) # depends on [control=['if'], data=[]]
res = [k for k in res if k.calibration is not None and k.calibration in calibration] # depends on [control=['if'], data=['calibration']]
if level is not None:
if not isinstance(level, (list, tuple)):
level = (level,) # depends on [control=['if'], data=[]]
res = [k for k in res if k.level is not None and k.level in level] # depends on [control=['if'], data=['level']]
if modifiers is not None:
res = [k for k in res if k.modifiers is not None and k.modifiers == modifiers] # depends on [control=['if'], data=['modifiers']]
if best:
res = get_best_dataset_key(key, res) # depends on [control=['if'], data=[]]
if num_results == 1 and (not res):
raise KeyError("No dataset matching '{}' found".format(str(key))) # depends on [control=['if'], data=[]]
elif num_results == 1 and len(res) != 1:
raise TooManyResults('No unique dataset matching {}'.format(str(key))) # depends on [control=['if'], data=[]]
elif num_results == 1:
return res[0] # depends on [control=['if'], data=[]]
elif num_results == 0:
return res # depends on [control=['if'], data=[]]
else:
return res[:num_results] |
def resolving_deps(self):
    """Resolve and return the dependencies of the queued packages.

    Resolution only runs when the config enables it ("on"/"ON") and the
    ``--resolve-off`` flag was not given; otherwise an empty result is
    returned.
    """
    requires = []
    resolve_enabled = (self.meta.rsl_deps in ("on", "ON") and
                       self.flag != "--resolve-off")
    if resolve_enabled:
        self.msg.resolving()
        for pkg in self.packages:
            status(0.05)
            # Flatten the nested dependency tree reported for this package.
            flat_deps = Utils().dimensional_list(Dependencies(
                self.repo, self.blacklist).binary(pkg, self.flag))
            requires += self._fix_deps_repos(flat_deps)
            # NOTE(review): "requires" accumulates across packages, so each
            # deps_dict entry also contains the deps of earlier packages —
            # this mirrors the accumulation behavior of the original code.
            self.deps_dict[pkg] = Utils().remove_dbs(requires)
    return Utils().remove_dbs(requires)
constant[Return package dependencies
]
variable[requires] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b283b220> begin[:]
call[name[self].msg.resolving, parameter[]]
for taget[name[dep]] in starred[name[self].packages] begin[:]
call[name[status], parameter[constant[0.05]]]
variable[dependencies] assign[=] list[[]]
variable[dependencies] assign[=] call[call[name[Utils], parameter[]].dimensional_list, parameter[call[call[name[Dependencies], parameter[name[self].repo, name[self].blacklist]].binary, parameter[name[dep], name[self].flag]]]]
<ast.AugAssign object at 0x7da1b28391e0>
call[name[self].deps_dict][name[dep]] assign[=] call[call[name[Utils], parameter[]].remove_dbs, parameter[name[requires]]]
return[call[call[name[Utils], parameter[]].remove_dbs, parameter[name[requires]]]] | keyword[def] identifier[resolving_deps] ( identifier[self] ):
literal[string]
identifier[requires] =[]
keyword[if] ( identifier[self] . identifier[meta] . identifier[rsl_deps] keyword[in] [ literal[string] , literal[string] ] keyword[and]
identifier[self] . identifier[flag] != literal[string] ):
identifier[self] . identifier[msg] . identifier[resolving] ()
keyword[for] identifier[dep] keyword[in] identifier[self] . identifier[packages] :
identifier[status] ( literal[int] )
identifier[dependencies] =[]
identifier[dependencies] = identifier[Utils] (). identifier[dimensional_list] ( identifier[Dependencies] (
identifier[self] . identifier[repo] , identifier[self] . identifier[blacklist] ). identifier[binary] ( identifier[dep] , identifier[self] . identifier[flag] ))
identifier[requires] += identifier[self] . identifier[_fix_deps_repos] ( identifier[dependencies] )
identifier[self] . identifier[deps_dict] [ identifier[dep] ]= identifier[Utils] (). identifier[remove_dbs] ( identifier[requires] )
keyword[return] identifier[Utils] (). identifier[remove_dbs] ( identifier[requires] ) | def resolving_deps(self):
"""Return package dependencies
"""
requires = []
if self.meta.rsl_deps in ['on', 'ON'] and self.flag != '--resolve-off':
self.msg.resolving() # depends on [control=['if'], data=[]]
for dep in self.packages:
status(0.05)
dependencies = []
dependencies = Utils().dimensional_list(Dependencies(self.repo, self.blacklist).binary(dep, self.flag))
requires += self._fix_deps_repos(dependencies)
self.deps_dict[dep] = Utils().remove_dbs(requires) # depends on [control=['for'], data=['dep']]
return Utils().remove_dbs(requires) |
def origin_canada(origin):
    """\
    Return whether the given origin city is in Canada.
    `origin`
        The origin to check (upper-case city name).
    """
    canadian_cities = frozenset((
        u'CALGARY', u'HALIFAX', u'MONTREAL', u'OTTAWA', u'QUEBEC',
        u'TORONTO', u'VANCOUVER'))
    return origin in canadian_cities
constant[ Returns if the origin is Canada.
`origin`
The origin to check.
]
return[compare[name[origin] in tuple[[<ast.Constant object at 0x7da204346d40>, <ast.Constant object at 0x7da204344f10>, <ast.Constant object at 0x7da204346590>, <ast.Constant object at 0x7da204344d00>, <ast.Constant object at 0x7da2043479d0>, <ast.Constant object at 0x7da2043441f0>, <ast.Constant object at 0x7da2043450c0>]]]] | keyword[def] identifier[origin_canada] ( identifier[origin] ):
literal[string]
keyword[return] identifier[origin] keyword[in] (
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ) | def origin_canada(origin):
""" Returns if the origin is Canada.
`origin`
The origin to check.
"""
return origin in (u'CALGARY', u'HALIFAX', u'MONTREAL', u'QUEBEC', u'OTTAWA', u'TORONTO', u'VANCOUVER') |
def add_equad(psr, equad, flagid=None, flags=None, seed=None):
    """Add quadrature noise of rms `equad` [s].
    Optionally take a pseudorandom-number-generator seed."""
    if seed is not None:
        N.random.seed(seed)

    # Per-TOA EQUAD amplitudes; remain zero unless filled in below
    # (e.g. when flags are supplied but lengths mismatch, no noise is added).
    equadvec = N.zeros(psr.nobs)

    if flags is None:
        # Without per-backend flags, only a single scalar equad makes sense.
        if not N.isscalar(equad):
            raise ValueError('ERROR: If flags is None, equad must be a scalar')
        equadvec = N.ones(psr.nobs) * equad
    elif flagid is not None and not N.isscalar(equad):
        # One equad value per flag value: assign each to its matching TOAs.
        if len(equad) == len(flags):
            for idx, flagval in enumerate(flags):
                selected = flagval == N.array(psr.flagvals(flagid))
                equadvec[selected] = equad[idx]

    psr.stoas[:] += (equadvec / day) * N.random.randn(psr.nobs)
constant[Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed.]
if compare[name[seed] is_not constant[None]] begin[:]
call[name[N].random.seed, parameter[name[seed]]]
variable[equadvec] assign[=] call[name[N].zeros, parameter[name[psr].nobs]]
if compare[name[flags] is constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b04da2f0> begin[:]
<ast.Raise object at 0x7da1b04d9bd0>
if <ast.BoolOp object at 0x7da1b04d9870> begin[:]
if compare[call[name[len], parameter[name[equad]]] equal[==] call[name[len], parameter[name[flags]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b04d8490>, <ast.Name object at 0x7da1b04dae60>]]] in starred[call[name[enumerate], parameter[name[flags]]]] begin[:]
variable[ind] assign[=] compare[name[flag] equal[==] call[name[N].array, parameter[call[name[psr].flagvals, parameter[name[flagid]]]]]]
call[name[equadvec]][name[ind]] assign[=] call[name[equad]][name[ct]]
<ast.AugAssign object at 0x7da1b0416140> | keyword[def] identifier[add_equad] ( identifier[psr] , identifier[equad] , identifier[flagid] = keyword[None] , identifier[flags] = keyword[None] , identifier[seed] = keyword[None] ):
literal[string]
keyword[if] identifier[seed] keyword[is] keyword[not] keyword[None] :
identifier[N] . identifier[random] . identifier[seed] ( identifier[seed] )
identifier[equadvec] = identifier[N] . identifier[zeros] ( identifier[psr] . identifier[nobs] )
keyword[if] identifier[flags] keyword[is] keyword[None] :
keyword[if] keyword[not] identifier[N] . identifier[isscalar] ( identifier[equad] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[equadvec] = identifier[N] . identifier[ones] ( identifier[psr] . identifier[nobs] )* identifier[equad]
keyword[if] identifier[flags] keyword[is] keyword[not] keyword[None] keyword[and] identifier[flagid] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[N] . identifier[isscalar] ( identifier[equad] ):
keyword[if] identifier[len] ( identifier[equad] )== identifier[len] ( identifier[flags] ):
keyword[for] identifier[ct] , identifier[flag] keyword[in] identifier[enumerate] ( identifier[flags] ):
identifier[ind] = identifier[flag] == identifier[N] . identifier[array] ( identifier[psr] . identifier[flagvals] ( identifier[flagid] ))
identifier[equadvec] [ identifier[ind] ]= identifier[equad] [ identifier[ct] ]
identifier[psr] . identifier[stoas] [:]+=( identifier[equadvec] / identifier[day] )* identifier[N] . identifier[random] . identifier[randn] ( identifier[psr] . identifier[nobs] ) | def add_equad(psr, equad, flagid=None, flags=None, seed=None):
"""Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed."""
if seed is not None:
N.random.seed(seed) # depends on [control=['if'], data=['seed']]
# default equadvec
equadvec = N.zeros(psr.nobs)
# check that equad is scalar if flags is None
if flags is None:
if not N.isscalar(equad):
raise ValueError('ERROR: If flags is None, equad must be a scalar') # depends on [control=['if'], data=[]]
else:
equadvec = N.ones(psr.nobs) * equad # depends on [control=['if'], data=[]]
if flags is not None and flagid is not None and (not N.isscalar(equad)):
if len(equad) == len(flags):
for (ct, flag) in enumerate(flags):
ind = flag == N.array(psr.flagvals(flagid))
equadvec[ind] = equad[ct] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
psr.stoas[:] += equadvec / day * N.random.randn(psr.nobs) |
def _density_par(self,dangle,tdisrupt=None,approx=True,
                 higherorder=None):
    """The raw (un-normalized) density as a function of parallel angle.

    dangle: parallel angle at which to evaluate the density.
    tdisrupt: disruption time; defaults to the instance's _tdisrupt.
    approx: if True, use the faster method that directly integrates the
        spline representation of the track.
    higherorder: whether to include higher-order terms in the approximate
        integration; defaults to the instance's _higherorderTrack.
    """
    if higherorder is None: higherorder= self._higherorderTrack
    if tdisrupt is None: tdisrupt= self._tdisrupt
    if approx:
        return self._density_par_approx(dangle,tdisrupt,
                                        higherorder=higherorder)
    else:
        # Direct quadrature of pOparapar over parallel frequency. The
        # substitution Omega = T/(1-T^2)*sqrt(sig) + meandO maps the infinite
        # frequency range onto T in (-1, 1); (1+T^2)/(1-T^2)^2 is the
        # Jacobian of that substitution.
        return integrate.quad(lambda T: numpy.sqrt(self._sortedSigOEig[2])\
                              *(1+T*T)/(1-T*T)**2.\
                              *self.pOparapar(T/(1-T*T)\
                              *numpy.sqrt(self._sortedSigOEig[2])\
                              +self._meandO,dangle),
                              -1.,1.)[0]
constant[The raw density as a function of parallel angle,
approx= use faster method that directly integrates the spline
representation]
if compare[name[higherorder] is constant[None]] begin[:]
variable[higherorder] assign[=] name[self]._higherorderTrack
if compare[name[tdisrupt] is constant[None]] begin[:]
variable[tdisrupt] assign[=] name[self]._tdisrupt
if name[approx] begin[:]
return[call[name[self]._density_par_approx, parameter[name[dangle], name[tdisrupt]]]] | keyword[def] identifier[_density_par] ( identifier[self] , identifier[dangle] , identifier[tdisrupt] = keyword[None] , identifier[approx] = keyword[True] ,
identifier[higherorder] = keyword[None] ):
literal[string]
keyword[if] identifier[higherorder] keyword[is] keyword[None] : identifier[higherorder] = identifier[self] . identifier[_higherorderTrack]
keyword[if] identifier[tdisrupt] keyword[is] keyword[None] : identifier[tdisrupt] = identifier[self] . identifier[_tdisrupt]
keyword[if] identifier[approx] :
keyword[return] identifier[self] . identifier[_density_par_approx] ( identifier[dangle] , identifier[tdisrupt] ,
identifier[higherorder] = identifier[higherorder] )
keyword[else] :
keyword[return] identifier[integrate] . identifier[quad] ( keyword[lambda] identifier[T] : identifier[numpy] . identifier[sqrt] ( identifier[self] . identifier[_sortedSigOEig] [ literal[int] ])*( literal[int] + identifier[T] * identifier[T] )/( literal[int] - identifier[T] * identifier[T] )** literal[int] * identifier[self] . identifier[pOparapar] ( identifier[T] /( literal[int] - identifier[T] * identifier[T] )* identifier[numpy] . identifier[sqrt] ( identifier[self] . identifier[_sortedSigOEig] [ literal[int] ])+ identifier[self] . identifier[_meandO] , identifier[dangle] ),
- literal[int] , literal[int] )[ literal[int] ] | def _density_par(self, dangle, tdisrupt=None, approx=True, higherorder=None):
"""The raw density as a function of parallel angle,
approx= use faster method that directly integrates the spline
representation"""
if higherorder is None:
higherorder = self._higherorderTrack # depends on [control=['if'], data=['higherorder']]
if tdisrupt is None:
tdisrupt = self._tdisrupt # depends on [control=['if'], data=['tdisrupt']]
if approx:
return self._density_par_approx(dangle, tdisrupt, higherorder=higherorder) # depends on [control=['if'], data=[]]
else:
return integrate.quad(lambda T: numpy.sqrt(self._sortedSigOEig[2]) * (1 + T * T) / (1 - T * T) ** 2.0 * self.pOparapar(T / (1 - T * T) * numpy.sqrt(self._sortedSigOEig[2]) + self._meandO, dangle), -1.0, 1.0)[0] |
def check_alert(step, text):
    """
    Assert that the browser alert shows the expected text.
    """
    try:
        popup = Alert(world.browser)
        assert_equals(popup.text, text)
    except WebDriverException:
        # PhantomJS is kinda poor
        pass
constant[
Check the alert text
]
<ast.Try object at 0x7da1b253b700> | keyword[def] identifier[check_alert] ( identifier[step] , identifier[text] ):
literal[string]
keyword[try] :
identifier[alert] = identifier[Alert] ( identifier[world] . identifier[browser] )
identifier[assert_equals] ( identifier[alert] . identifier[text] , identifier[text] )
keyword[except] identifier[WebDriverException] :
keyword[pass] | def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text) # depends on [control=['try'], data=[]]
except WebDriverException:
# PhantomJS is kinda poor
pass # depends on [control=['except'], data=[]] |
def delete_by_id(
        self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the managed application definition.
    :param application_definition_id: The fully qualified ID of the
     managed application definition, including the managed application name
     and the managed application definition resource type. Use the format,
     /subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
    :type application_definition_id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
    """
    # Kick off the long-running delete; always request the raw response so
    # the poller can track operation status headers.
    raw_result = self._delete_by_id_initial(
        application_definition_id=application_definition_id,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The operation has no body; only surface the raw response when
        # the caller asked for it.
        if raw:
            return ClientRawResponse(None, response)

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True:
        polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
constant[Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
]
variable[raw_result] assign[=] call[name[self]._delete_by_id_initial, parameter[]]
def function[get_long_running_output, parameter[response]]:
if name[raw] begin[:]
variable[client_raw_response] assign[=] call[name[ClientRawResponse], parameter[constant[None], name[response]]]
return[name[client_raw_response]]
variable[lro_delay] assign[=] call[name[operation_config].get, parameter[constant[long_running_operation_timeout], name[self].config.long_running_operation_timeout]]
if compare[name[polling] is constant[True]] begin[:]
variable[polling_method] assign[=] call[name[ARMPolling], parameter[name[lro_delay]]]
return[call[name[LROPoller], parameter[name[self]._client, name[raw_result], name[get_long_running_output], name[polling_method]]]] | keyword[def] identifier[delete_by_id] (
identifier[self] , identifier[application_definition_id] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] , identifier[polling] = keyword[True] ,** identifier[operation_config] ):
literal[string]
identifier[raw_result] = identifier[self] . identifier[_delete_by_id_initial] (
identifier[application_definition_id] = identifier[application_definition_id] ,
identifier[custom_headers] = identifier[custom_headers] ,
identifier[raw] = keyword[True] ,
** identifier[operation_config]
)
keyword[def] identifier[get_long_running_output] ( identifier[response] ):
keyword[if] identifier[raw] :
identifier[client_raw_response] = identifier[ClientRawResponse] ( keyword[None] , identifier[response] )
keyword[return] identifier[client_raw_response]
identifier[lro_delay] = identifier[operation_config] . identifier[get] (
literal[string] ,
identifier[self] . identifier[config] . identifier[long_running_operation_timeout] )
keyword[if] identifier[polling] keyword[is] keyword[True] : identifier[polling_method] = identifier[ARMPolling] ( identifier[lro_delay] ,** identifier[operation_config] )
keyword[elif] identifier[polling] keyword[is] keyword[False] : identifier[polling_method] = identifier[NoPolling] ()
keyword[else] : identifier[polling_method] = identifier[polling]
keyword[return] identifier[LROPoller] ( identifier[self] . identifier[_client] , identifier[raw_result] , identifier[get_long_running_output] , identifier[polling_method] ) | def delete_by_id(self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
"""
raw_result = self._delete_by_id_initial(application_definition_id=application_definition_id, custom_headers=custom_headers, raw=True, **operation_config)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response # depends on [control=['if'], data=[]]
lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout)
if polling is True:
polling_method = ARMPolling(lro_delay, **operation_config) # depends on [control=['if'], data=[]]
elif polling is False:
polling_method = NoPolling() # depends on [control=['if'], data=[]]
else:
polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
def getCollectorPath(self):
    """
    Return the collector component of the metric path.

    For "servers.host.cpu.total.idle" this returns "cpu".
    """
    if self.host is None:
        # No host known: assume the collector is the third path component.
        return self.path.split('.')[2]
    # Otherwise the collector is the component right after the host name.
    start = self.path.index(self.host) + len(self.host) + 1
    end = self.path.index('.', start)
    return self.path[start:end]
constant[
Returns collector path
servers.host.cpu.total.idle
return "cpu"
]
if compare[name[self].host is constant[None]] begin[:]
return[call[call[name[self].path.split, parameter[constant[.]]]][constant[2]]]
variable[offset] assign[=] call[name[self].path.index, parameter[name[self].host]]
<ast.AugAssign object at 0x7da1b1982ad0>
variable[endoffset] assign[=] call[name[self].path.index, parameter[constant[.], name[offset]]]
return[call[name[self].path][<ast.Slice object at 0x7da1b19802b0>]] | keyword[def] identifier[getCollectorPath] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[host] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[path] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[offset] = identifier[self] . identifier[path] . identifier[index] ( identifier[self] . identifier[host] )
identifier[offset] += identifier[len] ( identifier[self] . identifier[host] )+ literal[int]
identifier[endoffset] = identifier[self] . identifier[path] . identifier[index] ( literal[string] , identifier[offset] )
keyword[return] identifier[self] . identifier[path] [ identifier[offset] : identifier[endoffset] ] | def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2] # depends on [control=['if'], data=[]]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset] |
def get_file_for_id(_id, language=DEFAULT_LANG):
    """
    Given _id, search the DB for the file which contains the data
    :param _id: The id to search (int)
    :param language: The user's language (en, es, etc.)
    :return: The full path of the matching file
    :raises NotFoundException: when no file for the id exists
    """
    prefix = '%s-' % _id
    json_path = DBVuln.get_json_path(language=language)
    # First file (in directory order) whose name starts with "<id>-" wins.
    matches = [f for f in os.listdir(json_path) if f.startswith(prefix)]
    if matches:
        return os.path.join(json_path, matches[0])
    raise NotFoundException('No data for ID %s' % _id)
raise NotFoundException('No data for ID %s' % _id) | def function[get_file_for_id, parameter[_id, language]]:
constant[
Given _id, search the DB for the file which contains the data
:param _id: The id to search (int)
:param language: The user's language (en, es, etc.)
:return: The filename
]
variable[file_start] assign[=] binary_operation[constant[%s-] <ast.Mod object at 0x7da2590d6920> name[_id]]
variable[json_path] assign[=] call[name[DBVuln].get_json_path, parameter[]]
for taget[name[_file]] in starred[call[name[os].listdir, parameter[name[json_path]]]] begin[:]
if call[name[_file].startswith, parameter[name[file_start]]] begin[:]
return[call[name[os].path.join, parameter[name[json_path], name[_file]]]]
<ast.Raise object at 0x7da1b26aefb0> | keyword[def] identifier[get_file_for_id] ( identifier[_id] , identifier[language] = identifier[DEFAULT_LANG] ):
literal[string]
identifier[file_start] = literal[string] % identifier[_id]
identifier[json_path] = identifier[DBVuln] . identifier[get_json_path] ( identifier[language] = identifier[language] )
keyword[for] identifier[_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[json_path] ):
keyword[if] identifier[_file] . identifier[startswith] ( identifier[file_start] ):
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[json_path] , identifier[_file] )
keyword[raise] identifier[NotFoundException] ( literal[string] % identifier[_id] ) | def get_file_for_id(_id, language=DEFAULT_LANG):
"""
Given _id, search the DB for the file which contains the data
:param _id: The id to search (int)
:param language: The user's language (en, es, etc.)
:return: The filename
"""
file_start = '%s-' % _id
json_path = DBVuln.get_json_path(language=language)
for _file in os.listdir(json_path):
if _file.startswith(file_start):
return os.path.join(json_path, _file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_file']]
raise NotFoundException('No data for ID %s' % _id) |
def WriteScanContext(self, scan_context, scan_step=None):
    """Writes the source scanner context to stdout.

    Args:
      scan_context (SourceScannerContext): the source scanner context.
      scan_step (Optional[int]): the scan step, where None represents no step.
    """
    if scan_step is not None:
        print('Scan step: {0:d}'.format(scan_step))

    print('Source type\t\t: {0:s}'.format(scan_context.source_type))
    print('')

    # Recursively print the scan-node tree starting at the root.
    self.WriteScanNode(scan_context, scan_context.GetRootScanNode())
    print('')
constant[Writes the source scanner context to stdout.
Args:
scan_context (SourceScannerContext): the source scanner context.
scan_step (Optional[int]): the scan step, where None represents no step.
]
if compare[name[scan_step] is_not constant[None]] begin[:]
call[name[print], parameter[call[constant[Scan step: {0:d}].format, parameter[name[scan_step]]]]]
call[name[print], parameter[call[constant[Source type : {0:s}].format, parameter[name[scan_context].source_type]]]]
call[name[print], parameter[constant[]]]
variable[scan_node] assign[=] call[name[scan_context].GetRootScanNode, parameter[]]
call[name[self].WriteScanNode, parameter[name[scan_context], name[scan_node]]]
call[name[print], parameter[constant[]]] | keyword[def] identifier[WriteScanContext] ( identifier[self] , identifier[scan_context] , identifier[scan_step] = keyword[None] ):
literal[string]
keyword[if] identifier[scan_step] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] . identifier[format] ( identifier[scan_step] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[scan_context] . identifier[source_type] ))
identifier[print] ( literal[string] )
identifier[scan_node] = identifier[scan_context] . identifier[GetRootScanNode] ()
identifier[self] . identifier[WriteScanNode] ( identifier[scan_context] , identifier[scan_node] )
identifier[print] ( literal[string] ) | def WriteScanContext(self, scan_context, scan_step=None):
"""Writes the source scanner context to stdout.
Args:
scan_context (SourceScannerContext): the source scanner context.
scan_step (Optional[int]): the scan step, where None represents no step.
"""
if scan_step is not None:
print('Scan step: {0:d}'.format(scan_step)) # depends on [control=['if'], data=['scan_step']]
print('Source type\t\t: {0:s}'.format(scan_context.source_type))
print('')
scan_node = scan_context.GetRootScanNode()
self.WriteScanNode(scan_context, scan_node)
print('') |
def _get_value_for_serialization(self, data, name, spec):
    """ Return the entity's value for the mapped field name (or ``None``). """
    # Translate the serialized field name to the entity attribute name first.
    return getattr(data, self.property_name_map[name], None)
constant[ Return the value of the field in entity (or ``None``). ]
variable[name] assign[=] call[name[self].property_name_map][name[name]]
return[call[name[getattr], parameter[name[data], name[name], constant[None]]]] | keyword[def] identifier[_get_value_for_serialization] ( identifier[self] , identifier[data] , identifier[name] , identifier[spec] ):
literal[string]
identifier[name] = identifier[self] . identifier[property_name_map] [ identifier[name] ]
keyword[return] identifier[getattr] ( identifier[data] , identifier[name] , keyword[None] ) | def _get_value_for_serialization(self, data, name, spec):
""" Return the value of the field in entity (or ``None``). """
name = self.property_name_map[name]
return getattr(data, name, None) |
def __parse_args(self, accept_unrecognized_args=False):
    """ Invoke the argument parser. """
    main_module = sys.modules['__main__']
    # Description precedence: explicit description, then the main module's
    # doc string, then a placeholder naming the file missing one.
    if self.description:
        self.argparser.description = self.description
    else:
        main_doc = getattr(main_module, '__doc__', None)
        if main_doc:
            self.argparser.description = main_doc
        else:
            self.argparser.description = (
                'No documentation defined. Please add a doc string to %s'
                % main_module.__file__)
    self.argparser.epilog = self.epilog
    # Apply configured defaults only when no command line arguments were given.
    if len(sys.argv) == 1 and self.argument_defaults:
        self.argparser.set_defaults(**self.argument_defaults)
    if accept_unrecognized_args:
        self.args, self.unrecognized_args = self.argparser.parse_known_args()
    else:
        self.args = self.argparser.parse_args()
constant[ Invoke the argument parser. ]
if name[self].description begin[:]
name[self].argparser.description assign[=] name[self].description
name[self].argparser.epilog assign[=] name[self].epilog
if <ast.BoolOp object at 0x7da1b1416140> begin[:]
call[name[self].argparser.set_defaults, parameter[]]
if name[accept_unrecognized_args] begin[:]
<ast.Tuple object at 0x7da1b1435cf0> assign[=] call[name[self].argparser.parse_known_args, parameter[]] | keyword[def] identifier[__parse_args] ( identifier[self] , identifier[accept_unrecognized_args] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[description] :
identifier[self] . identifier[argparser] . identifier[description] = identifier[self] . identifier[description]
keyword[elif] identifier[getattr] ( identifier[sys] . identifier[modules] [ literal[string] ], literal[string] , keyword[None] ):
identifier[self] . identifier[argparser] . identifier[description] = identifier[getattr] ( identifier[sys] . identifier[modules] [ literal[string] ], literal[string] )
keyword[else] :
identifier[self] . identifier[argparser] . identifier[description] = literal[string] % identifier[sys] . identifier[modules] [ literal[string] ]. identifier[__file__]
identifier[self] . identifier[argparser] . identifier[epilog] = identifier[self] . identifier[epilog]
keyword[if] identifier[len] ( identifier[sys] . identifier[argv] )== literal[int] keyword[and] identifier[self] . identifier[argument_defaults] :
identifier[self] . identifier[argparser] . identifier[set_defaults] (** identifier[self] . identifier[argument_defaults] )
keyword[if] identifier[accept_unrecognized_args] :
identifier[self] . identifier[args] , identifier[self] . identifier[unrecognized_args] = identifier[self] . identifier[argparser] . identifier[parse_known_args] ()
keyword[else] :
identifier[self] . identifier[args] = identifier[self] . identifier[argparser] . identifier[parse_args] () | def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description # depends on [control=['if'], data=[]]
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__') # depends on [control=['if'], data=[]]
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults) # depends on [control=['if'], data=[]]
if accept_unrecognized_args:
(self.args, self.unrecognized_args) = self.argparser.parse_known_args() # depends on [control=['if'], data=[]]
else:
self.args = self.argparser.parse_args() |
def expect_optional_token(lexer: Lexer, kind: TokenKind) -> Optional[Token]:
    """Return the next token if it is of the given kind, else None.

    On a match, the lexer is advanced past the token and the token is
    returned; otherwise the parser state is left untouched.
    """
    current = lexer.token
    if current.kind != kind:
        return None
    lexer.advance()
    return current
return None | def function[expect_optional_token, parameter[lexer, kind]]:
constant[Expect the next token optionally to be of the given kind.
If the next token is of the given kind, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None.
]
variable[token] assign[=] name[lexer].token
if compare[name[token].kind equal[==] name[kind]] begin[:]
call[name[lexer].advance, parameter[]]
return[name[token]]
return[constant[None]] | keyword[def] identifier[expect_optional_token] ( identifier[lexer] : identifier[Lexer] , identifier[kind] : identifier[TokenKind] )-> identifier[Optional] [ identifier[Token] ]:
literal[string]
identifier[token] = identifier[lexer] . identifier[token]
keyword[if] identifier[token] . identifier[kind] == identifier[kind] :
identifier[lexer] . identifier[advance] ()
keyword[return] identifier[token]
keyword[return] keyword[None] | def expect_optional_token(lexer: Lexer, kind: TokenKind) -> Optional[Token]:
"""Expect the next token optionally to be of the given kind.
If the next token is of the given kind, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None.
"""
token = lexer.token
if token.kind == kind:
lexer.advance()
return token # depends on [control=['if'], data=[]]
return None |
def list(self, enabled=values.unset, date_created_after=values.unset,
         date_created_before=values.unset, friendly_name=values.unset,
         limit=None, page_size=None):
    """
    Lists CompositionHookInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records into
    memory before returning.
    :param bool enabled: Only show Composition Hooks enabled or disabled.
    :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
    :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
    :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, list() will attempt to read the limit
                          with the most efficient page size, i.e. min(limit, 1000)
    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
    """
    # Materialize the lazy stream() generator eagerly into a list.
    record_stream = self.stream(
        enabled=enabled,
        date_created_after=date_created_after,
        date_created_before=date_created_before,
        friendly_name=friendly_name,
        limit=limit,
        page_size=page_size,
    )
    return [record for record in record_stream]
constant[
Lists CompositionHookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
]
return[call[name[list], parameter[call[name[self].stream, parameter[]]]]] | keyword[def] identifier[list] ( identifier[self] , identifier[enabled] = identifier[values] . identifier[unset] , identifier[date_created_after] = identifier[values] . identifier[unset] ,
identifier[date_created_before] = identifier[values] . identifier[unset] , identifier[friendly_name] = identifier[values] . identifier[unset] ,
identifier[limit] = keyword[None] , identifier[page_size] = keyword[None] ):
literal[string]
keyword[return] identifier[list] ( identifier[self] . identifier[stream] (
identifier[enabled] = identifier[enabled] ,
identifier[date_created_after] = identifier[date_created_after] ,
identifier[date_created_before] = identifier[date_created_before] ,
identifier[friendly_name] = identifier[friendly_name] ,
identifier[limit] = identifier[limit] ,
identifier[page_size] = identifier[page_size] ,
)) | def list(self, enabled=values.unset, date_created_after=values.unset, date_created_before=values.unset, friendly_name=values.unset, limit=None, page_size=None):
"""
Lists CompositionHookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
"""
return list(self.stream(enabled=enabled, date_created_after=date_created_after, date_created_before=date_created_before, friendly_name=friendly_name, limit=limit, page_size=page_size)) |
def classify_file(f):
    """Examine the column names to determine which type of file
    this is. Return a tuple:
    retvalue[0] = "file is non-parameterized"
    retvalue[1] = "file contains error column"
    """
    columns = f[1].columns
    n_cols = len(columns)
    if n_cols == 2:
        # A two-column file is the simple, error-free case; do not touch
        # `columns.names` here (the original short-circuits the same way).
        return (True, False)
    has_error = 'ERROR' in columns.names
    if n_cols == 3 and has_error:
        return (True, True)
    if n_cols > 2 and not has_error:
        return (True, False)
    return (False, True)
constant[Examine the column names to determine which type of file
this is. Return a tuple:
retvalue[0] = "file is non-parameterized"
retvalue[1] = "file contains error column"
]
variable[cols] assign[=] call[name[f]][constant[1]].columns
if compare[call[name[len], parameter[name[cols]]] equal[==] constant[2]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18f720460>, <ast.Constant object at 0x7da18f722680>]]] | keyword[def] identifier[classify_file] ( identifier[f] ):
literal[string]
identifier[cols] = identifier[f] [ literal[int] ]. identifier[columns]
keyword[if] identifier[len] ( identifier[cols] )== literal[int] :
keyword[return] ( keyword[True] , keyword[False] )
keyword[elif] identifier[len] ( identifier[cols] )== literal[int] keyword[and] ( literal[string] keyword[in] identifier[cols] . identifier[names] ):
keyword[return] ( keyword[True] , keyword[True] )
keyword[elif] identifier[len] ( identifier[cols] )> literal[int] keyword[and] ( literal[string] keyword[not] keyword[in] identifier[cols] . identifier[names] ):
keyword[return] ( keyword[True] , keyword[False] )
keyword[else] :
keyword[return] ( keyword[False] , keyword[True] ) | def classify_file(f):
"""Examine the column names to determine which type of file
this is. Return a tuple:
retvalue[0] = "file is non-parameterized"
retvalue[1] = "file contains error column"
"""
cols = f[1].columns
if len(cols) == 2:
#Then we must have a simple file
return (True, False) # depends on [control=['if'], data=[]]
elif len(cols) == 3 and 'ERROR' in cols.names:
return (True, True) # depends on [control=['if'], data=[]]
elif len(cols) > 2 and 'ERROR' not in cols.names:
return (True, False) # depends on [control=['if'], data=[]]
else:
return (False, True) |
def _get_branches(self):
    """Get branches from org/repo."""
    if self.offline:
        repo_dir = Path(LOCAL_PATH).expanduser() / self.org / self.repo
        cmd = f"git -C {shlex.quote(str(repo_dir))} show-ref --heads"
    else:
        cmd = f"git ls-remote --heads https://github.com/{self.org}/{self.repo}"
    try:
        # The git call happens here; the genexp below only reformats lines.
        output = _run(cmd, timeout=3)
    except Error:
        return []
    # Parse the output for the actual branch names.
    return (ref.split()[1].replace("refs/heads/", "") for ref in output.split("\n"))
constant[Get branches from org/repo.]
if name[self].offline begin[:]
variable[local_path] assign[=] binary_operation[binary_operation[call[call[name[Path], parameter[name[LOCAL_PATH]]].expanduser, parameter[]] / name[self].org] / name[self].repo]
variable[get_refs] assign[=] <ast.JoinedStr object at 0x7da207f99900>
<ast.Try object at 0x7da207f9b700> | keyword[def] identifier[_get_branches] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[offline] :
identifier[local_path] = identifier[Path] ( identifier[LOCAL_PATH] ). identifier[expanduser] ()/ identifier[self] . identifier[org] / identifier[self] . identifier[repo]
identifier[get_refs] = literal[string]
keyword[else] :
identifier[get_refs] = literal[string]
keyword[try] :
keyword[return] ( identifier[line] . identifier[split] ()[ literal[int] ]. identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[line] keyword[in] identifier[_run] ( identifier[get_refs] , identifier[timeout] = literal[int] ). identifier[split] ( literal[string] ))
keyword[except] identifier[Error] :
keyword[return] [] | def _get_branches(self):
"""Get branches from org/repo."""
if self.offline:
local_path = Path(LOCAL_PATH).expanduser() / self.org / self.repo
get_refs = f'git -C {shlex.quote(str(local_path))} show-ref --heads' # depends on [control=['if'], data=[]]
else:
get_refs = f'git ls-remote --heads https://github.com/{self.org}/{self.repo}'
try:
# Parse get_refs output for the actual branch names
return (line.split()[1].replace('refs/heads/', '') for line in _run(get_refs, timeout=3).split('\n')) # depends on [control=['try'], data=[]]
except Error:
return [] # depends on [control=['except'], data=[]] |
def generate_vcpu(self, vcpu_num):
    """
    Generate <vcpu> domain XML child
    Args:
        vcpu_num(str): number of virtual cpus
    Returns:
        lxml.etree.Element: vcpu XML element
    """
    element = ET.Element('vcpu')
    element.text = str(vcpu_num)
    return element
constant[
Generate <vcpu> domain XML child
Args:
vcpu_num(str): number of virtual cpus
Returns:
lxml.etree.Element: vcpu XML element
]
variable[vcpu] assign[=] call[name[ET].Element, parameter[constant[vcpu]]]
name[vcpu].text assign[=] call[name[str], parameter[name[vcpu_num]]]
return[name[vcpu]] | keyword[def] identifier[generate_vcpu] ( identifier[self] , identifier[vcpu_num] ):
literal[string]
identifier[vcpu] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[vcpu] . identifier[text] = identifier[str] ( identifier[vcpu_num] )
keyword[return] identifier[vcpu] | def generate_vcpu(self, vcpu_num):
"""
Generate <vcpu> domain XML child
Args:
vcpu_num(str): number of virtual cpus
Returns:
lxml.etree.Element: vcpu XML element
"""
vcpu = ET.Element('vcpu')
vcpu.text = str(vcpu_num)
return vcpu |
def ParseFileObject(self, parser_mediator, file_object):
  """Parses an utmp file-like object.
  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): a file-like object.
  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  def _produce_event(timestamp, event_data):
    # Shared by the first-entry path and the loop: wrap the POSIX
    # microsecond timestamp and hand the resulting event to the mediator.
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_START)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  file_offset = 0
  try:
    timestamp, event_data = self._ReadEntry(
        parser_mediator, file_object, file_offset)
  except errors.ParseError as exception:
    raise errors.UnableToParseFile(
        'Unable to parse first utmp entry with error: {0!s}'.format(
            exception))
  # The first entry doubles as a format check: it must carry both a
  # username and a timestamp, otherwise this is not a usable utmp file.
  if not event_data.username:
    raise errors.UnableToParseFile(
        'Unable to parse first utmp entry with error: missing username')
  if not timestamp:
    raise errors.UnableToParseFile(
        'Unable to parse first utmp entry with error: missing timestamp')
  _produce_event(timestamp, event_data)

  file_offset = file_object.tell()
  file_size = file_object.get_size()
  while file_offset < file_size:
    if parser_mediator.abort:
      break
    try:
      timestamp, event_data = self._ReadEntry(
          parser_mediator, file_object, file_offset)
    except errors.ParseError:
      # Note that the utmp file can contain trailing data.
      break
    _produce_event(timestamp, event_data)
    file_offset = file_object.tell()
constant[Parses an utmp file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
]
variable[file_offset] assign[=] constant[0]
<ast.Try object at 0x7da18dc99c00>
if <ast.UnaryOp object at 0x7da18dc988b0> begin[:]
<ast.Raise object at 0x7da18dc9a230>
if <ast.UnaryOp object at 0x7da18dc997e0> begin[:]
<ast.Raise object at 0x7da18dc9a980>
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTimeInMicroseconds, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_START]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[file_offset] assign[=] call[name[file_object].tell, parameter[]]
variable[file_size] assign[=] call[name[file_object].get_size, parameter[]]
while compare[name[file_offset] less[<] name[file_size]] begin[:]
if name[parser_mediator].abort begin[:]
break
<ast.Try object at 0x7da20cabda50>
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTimeInMicroseconds, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_START]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[file_offset] assign[=] call[name[file_object].tell, parameter[]] | keyword[def] identifier[ParseFileObject] ( identifier[self] , identifier[parser_mediator] , identifier[file_object] ):
literal[string]
identifier[file_offset] = literal[int]
keyword[try] :
identifier[timestamp] , identifier[event_data] = identifier[self] . identifier[_ReadEntry] (
identifier[parser_mediator] , identifier[file_object] , identifier[file_offset] )
keyword[except] identifier[errors] . identifier[ParseError] keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[UnableToParseFile] (
literal[string] . identifier[format] (
identifier[exception] ))
keyword[if] keyword[not] identifier[event_data] . identifier[username] :
keyword[raise] identifier[errors] . identifier[UnableToParseFile] (
literal[string] )
keyword[if] keyword[not] identifier[timestamp] :
keyword[raise] identifier[errors] . identifier[UnableToParseFile] (
literal[string] )
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTimeInMicroseconds] (
identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_START] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[file_offset] = identifier[file_object] . identifier[tell] ()
identifier[file_size] = identifier[file_object] . identifier[get_size] ()
keyword[while] identifier[file_offset] < identifier[file_size] :
keyword[if] identifier[parser_mediator] . identifier[abort] :
keyword[break]
keyword[try] :
identifier[timestamp] , identifier[event_data] = identifier[self] . identifier[_ReadEntry] (
identifier[parser_mediator] , identifier[file_object] , identifier[file_offset] )
keyword[except] identifier[errors] . identifier[ParseError] :
keyword[break]
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTimeInMicroseconds] (
identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_START] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[file_offset] = identifier[file_object] . identifier[tell] () | def ParseFileObject(self, parser_mediator, file_object):
"""Parses an utmp file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
try:
(timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset) # depends on [control=['try'], data=[]]
except errors.ParseError as exception:
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
if not event_data.username:
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing username') # depends on [control=['if'], data=[]]
if not timestamp:
raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing timestamp') # depends on [control=['if'], data=[]]
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell()
file_size = file_object.get_size()
while file_offset < file_size:
if parser_mediator.abort:
break # depends on [control=['if'], data=[]]
try:
(timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset) # depends on [control=['try'], data=[]]
except errors.ParseError:
# Note that the utmp file can contain trailing data.
break # depends on [control=['except'], data=[]]
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset = file_object.tell() # depends on [control=['while'], data=['file_offset']] |
def Negation(expr: Expression) -> Expression:
    """Return expression which is the negation of `expr`."""
    negated = Expression(_negate(expr.body))
    return ast.fix_missing_locations(negated)
constant[Return expression which is the negation of `expr`.]
variable[expr] assign[=] call[name[Expression], parameter[call[name[_negate], parameter[name[expr].body]]]]
return[call[name[ast].fix_missing_locations, parameter[name[expr]]]] | keyword[def] identifier[Negation] ( identifier[expr] : identifier[Expression] )-> identifier[Expression] :
literal[string]
identifier[expr] = identifier[Expression] ( identifier[_negate] ( identifier[expr] . identifier[body] ))
keyword[return] identifier[ast] . identifier[fix_missing_locations] ( identifier[expr] ) | def Negation(expr: Expression) -> Expression:
"""Return expression which is the negation of `expr`."""
expr = Expression(_negate(expr.body))
return ast.fix_missing_locations(expr) |
def function(self,p):
    """
    Return a square-wave grating (alternating black and white bars).
    """
    # duty-cycle term is constant over the pattern; the wave term varies
    # along pattern_y.  Multiplication order matches the original exactly
    # so the floating-point results are bit-identical.
    duty_term = 0.5*np.sin(pi*(p.duty_cycle-0.5))
    wave_term = 0.5*np.sin(p.frequency*2*pi*self.pattern_y + p.phase)
    return np.around(0.5 + duty_term + wave_term)
constant[
Return a square-wave grating (alternating black and white bars).
]
return[call[name[np].around, parameter[binary_operation[binary_operation[constant[0.5] + binary_operation[constant[0.5] * call[name[np].sin, parameter[binary_operation[name[pi] * binary_operation[name[p].duty_cycle - constant[0.5]]]]]]] + binary_operation[constant[0.5] * call[name[np].sin, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[p].frequency * constant[2]] * name[pi]] * name[self].pattern_y] + name[p].phase]]]]]]]] | keyword[def] identifier[function] ( identifier[self] , identifier[p] ):
literal[string]
keyword[return] identifier[np] . identifier[around] (
literal[int] +
literal[int] * identifier[np] . identifier[sin] ( identifier[pi] *( identifier[p] . identifier[duty_cycle] - literal[int] ))+
literal[int] * identifier[np] . identifier[sin] ( identifier[p] . identifier[frequency] * literal[int] * identifier[pi] * identifier[self] . identifier[pattern_y] + identifier[p] . identifier[phase] )) | def function(self, p):
"""
Return a square-wave grating (alternating black and white bars).
"""
return np.around(0.5 + 0.5 * np.sin(pi * (p.duty_cycle - 0.5)) + 0.5 * np.sin(p.frequency * 2 * pi * self.pattern_y + p.phase)) |
def Managed(cls, manager, field, items):
    """Create a custom `ObjectSet` that is managed by a related `Object.`
    :param manager: The manager of the `ObjectSet`. This is the `Object`
        that manages this set of objects.
    :param field: The field on the `manager` that created this managed
        `ObjectSet`.
    :param items: The items in the `ObjectSet`.
    """
    namespace = {"_manager": manager, "_manager_field": field}
    if hasattr(cls, "create"):
        # Wrap `create` via ManagedCreate; note it receives the original
        # class, not the derived managed class built below.
        namespace["create"] = ManagedCreate(cls)
    managed_name = "%s.Managed#%s" % (cls.__name__, manager.__class__.__name__)
    managed_cls = type(managed_name, (cls,), namespace)
    return managed_cls(items)
constant[Create a custom `ObjectSet` that is managed by a related `Object.`
:param manager: The manager of the `ObjectSet`. This is the `Object`
that manages this set of objects.
:param field: The field on the `manager` that created this managed
`ObjectSet`.
:param items: The items in the `ObjectSet`.
]
variable[attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aee90>, <ast.Constant object at 0x7da1b26ae410>], [<ast.Name object at 0x7da1b26ae4d0>, <ast.Name object at 0x7da1b26af910>]]
if call[name[hasattr], parameter[name[cls], constant[create]]] begin[:]
call[name[attrs]][constant[create]] assign[=] call[name[ManagedCreate], parameter[name[cls]]]
variable[cls] assign[=] call[name[type], parameter[binary_operation[constant[%s.Managed#%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b26ac970>, <ast.Attribute object at 0x7da1b26ad810>]]], tuple[[<ast.Name object at 0x7da1b26ac3a0>]], name[attrs]]]
return[call[name[cls], parameter[name[items]]]] | keyword[def] identifier[Managed] ( identifier[cls] , identifier[manager] , identifier[field] , identifier[items] ):
literal[string]
identifier[attrs] ={
literal[string] : identifier[manager] ,
literal[string] : identifier[field] ,
}
keyword[if] identifier[hasattr] ( identifier[cls] , literal[string] ):
identifier[attrs] [ literal[string] ]= identifier[ManagedCreate] ( identifier[cls] )
identifier[cls] = identifier[type] (
literal[string] %(
identifier[cls] . identifier[__name__] , identifier[manager] . identifier[__class__] . identifier[__name__] ),( identifier[cls] ,), identifier[attrs] )
keyword[return] identifier[cls] ( identifier[items] ) | def Managed(cls, manager, field, items):
"""Create a custom `ObjectSet` that is managed by a related `Object.`
:param manager: The manager of the `ObjectSet`. This is the `Object`
that manages this set of objects.
:param field: The field on the `manager` that created this managed
`ObjectSet`.
:param items: The items in the `ObjectSet`.
"""
attrs = {'_manager': manager, '_manager_field': field}
if hasattr(cls, 'create'):
attrs['create'] = ManagedCreate(cls) # depends on [control=['if'], data=[]]
cls = type('%s.Managed#%s' % (cls.__name__, manager.__class__.__name__), (cls,), attrs)
return cls(items) |
def brutefore_passwords(ip, url, credentials, service):
    """
    Bruteforce function, will try all the credentials at the same time,
    splits the given credentials at the first ':'.
    Args:
        ip: host IP recorded on any credential found.
        url: URL to request with HTTP Basic auth.
        credentials: iterable of "username:password" strings.
        service: service object; its ``id`` is stored with found credentials.
    """
    auth_requests = []
    for credential in credentials:
        # Split on the first ':' only so passwords that themselves contain
        # ':' are kept intact (str.split(':') would truncate them).
        username, _, password = credential.strip().partition(':')
        auth_requests.append(grequests.get(url, auth=(username, password)))
    results = grequests.map(auth_requests)
    for result in results:
        if result and result.status_code == 200:
            # Recover the username/password from the Basic auth header of
            # the request that succeeded.
            encoded = result.request.headers['Authorization'].split(' ')[1]
            decoded = base64.b64decode(encoded).decode('utf-8')
            username, _, password = decoded.partition(':')
            print_success("Found a password for tomcat: {0}:{1} at: {2}".format(
                username, password, url))
            credential = Credential(secret=password, username=username, type='plaintext', access_level='administrator', service_id=service.id, host_ip=ip, description='Tomcat')
            credential.save()
constant[
Bruteforce function, will try all the credentials at the same time, splits the given credentials at a ':'.
]
variable[auth_requests] assign[=] list[[]]
for taget[name[credential]] in starred[name[credentials]] begin[:]
variable[split] assign[=] call[call[name[credential].strip, parameter[]].split, parameter[constant[:]]]
variable[username] assign[=] call[name[split]][constant[0]]
variable[password] assign[=] constant[]
if compare[call[name[len], parameter[name[split]]] greater[>] constant[1]] begin[:]
variable[password] assign[=] call[name[split]][constant[1]]
call[name[auth_requests].append, parameter[call[name[grequests].get, parameter[name[url]]]]]
variable[results] assign[=] call[name[grequests].map, parameter[name[auth_requests]]]
for taget[name[result]] in starred[name[results]] begin[:]
if <ast.BoolOp object at 0x7da1b004dcf0> begin[:]
variable[creds] assign[=] call[call[call[name[result].request.headers][constant[Authorization]].split, parameter[constant[ ]]]][constant[1]]
variable[creds] assign[=] call[call[name[base64].b64decode, parameter[name[creds]]].decode, parameter[constant[utf-8]]]
variable[creds] assign[=] call[name[creds].split, parameter[constant[:]]]
call[name[print_success], parameter[call[constant[Found a password for tomcat: {0}:{1} at: {2}].format, parameter[call[name[creds]][constant[0]], call[name[creds]][constant[1]], name[url]]]]]
variable[credential] assign[=] call[name[Credential], parameter[]]
call[name[credential].save, parameter[]] | keyword[def] identifier[brutefore_passwords] ( identifier[ip] , identifier[url] , identifier[credentials] , identifier[service] ):
literal[string]
identifier[auth_requests] =[]
keyword[for] identifier[credential] keyword[in] identifier[credentials] :
identifier[split] = identifier[credential] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[username] = identifier[split] [ literal[int] ]
identifier[password] = literal[string]
keyword[if] identifier[len] ( identifier[split] )> literal[int] :
identifier[password] = identifier[split] [ literal[int] ]
identifier[auth_requests] . identifier[append] ( identifier[grequests] . identifier[get] ( identifier[url] , identifier[auth] =( identifier[username] , identifier[password] )))
identifier[results] = identifier[grequests] . identifier[map] ( identifier[auth_requests] )
keyword[for] identifier[result] keyword[in] identifier[results] :
keyword[if] identifier[result] keyword[and] identifier[result] . identifier[status_code] == literal[int] :
identifier[creds] = identifier[result] . identifier[request] . identifier[headers] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[creds] = identifier[base64] . identifier[b64decode] ( identifier[creds] ). identifier[decode] ( literal[string] )
identifier[creds] = identifier[creds] . identifier[split] ( literal[string] )
identifier[print_success] ( literal[string] . identifier[format] (
identifier[creds] [ literal[int] ], identifier[creds] [ literal[int] ], identifier[url] ))
identifier[credential] = identifier[Credential] ( identifier[secret] = identifier[creds] [ literal[int] ], identifier[username] = identifier[creds] [ literal[int] ], identifier[type] = literal[string] , identifier[access_level] = literal[string] , identifier[service_id] = identifier[service] . identifier[id] , identifier[host_ip] = identifier[ip] , identifier[description] = literal[string] )
identifier[credential] . identifier[save] () | def brutefore_passwords(ip, url, credentials, service):
"""
Bruteforce function, will try all the credentials at the same time, splits the given credentials at a ':'.
"""
auth_requests = []
for credential in credentials:
split = credential.strip().split(':')
username = split[0]
password = ''
if len(split) > 1:
password = split[1] # depends on [control=['if'], data=[]]
auth_requests.append(grequests.get(url, auth=(username, password))) # depends on [control=['for'], data=['credential']]
results = grequests.map(auth_requests)
for result in results:
if result and result.status_code == 200:
creds = result.request.headers['Authorization'].split(' ')[1]
creds = base64.b64decode(creds).decode('utf-8')
creds = creds.split(':')
print_success('Found a password for tomcat: {0}:{1} at: {2}'.format(creds[0], creds[1], url))
credential = Credential(secret=creds[1], username=creds[0], type='plaintext', access_level='administrator', service_id=service.id, host_ip=ip, description='Tomcat')
credential.save() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']] |
def put(self, entity):
    """Stage an entity to be written when :meth:`commit` runs.

    .. note::
        The entity's properties as currently set on this instance replace
        any properties already stored for the same key; stored properties
        that are absent from this instance are removed from the datastore.

    .. note::
        Text values ('unicode' in Python2, 'str' in Python3) map to
        'string_value' in the datastore; byte values ('str' in Python2,
        'bytes' in Python3) map to 'blob_value'.

    An entity with a partial key is sent as an ``insert`` mutation and the
    key is completed: after :meth:`commit` returns, the key on the
    ``entity`` passed in is updated with the ID assigned by the server.

    :type entity: :class:`google.cloud.datastore.entity.Entity`
    :param entity: the entity to be saved.

    :raises: :class:`~exceptions.ValueError` if the batch is not in
             progress, if entity has no key assigned, or if the key's
             ``project`` does not match ours.
    """
    if self._status != self._IN_PROGRESS:
        raise ValueError("Batch must be in progress to put()")
    key = entity.key
    if key is None:
        raise ValueError("Entity must have a key")
    if key.project != self.project:
        raise ValueError("Key must be from same project as batch")
    if key.is_partial:
        # Track the entity so commit() can patch its key in place with
        # the server-assigned ID.
        pb = self._add_partial_key_entity_pb()
        self._partial_key_entities.append(entity)
    else:
        pb = self._add_complete_key_entity_pb()
    _assign_entity_to_pb(pb, entity)
constant[Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
]
if compare[name[self]._status not_equal[!=] name[self]._IN_PROGRESS] begin[:]
<ast.Raise object at 0x7da20e954a30>
if compare[name[entity].key is constant[None]] begin[:]
<ast.Raise object at 0x7da20e9549a0>
if compare[name[self].project not_equal[!=] name[entity].key.project] begin[:]
<ast.Raise object at 0x7da20e954520>
if name[entity].key.is_partial begin[:]
variable[entity_pb] assign[=] call[name[self]._add_partial_key_entity_pb, parameter[]]
call[name[self]._partial_key_entities.append, parameter[name[entity]]]
call[name[_assign_entity_to_pb], parameter[name[entity_pb], name[entity]]] | keyword[def] identifier[put] ( identifier[self] , identifier[entity] ):
literal[string]
keyword[if] identifier[self] . identifier[_status] != identifier[self] . identifier[_IN_PROGRESS] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[entity] . identifier[key] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[project] != identifier[entity] . identifier[key] . identifier[project] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[entity] . identifier[key] . identifier[is_partial] :
identifier[entity_pb] = identifier[self] . identifier[_add_partial_key_entity_pb] ()
identifier[self] . identifier[_partial_key_entities] . identifier[append] ( identifier[entity] )
keyword[else] :
identifier[entity_pb] = identifier[self] . identifier[_add_complete_key_entity_pb] ()
identifier[_assign_entity_to_pb] ( identifier[entity_pb] , identifier[entity] ) | def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError('Batch must be in progress to put()') # depends on [control=['if'], data=[]]
if entity.key is None:
raise ValueError('Entity must have a key') # depends on [control=['if'], data=[]]
if self.project != entity.key.project:
raise ValueError('Key must be from same project as batch') # depends on [control=['if'], data=[]]
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity) # depends on [control=['if'], data=[]]
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity) |
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id):
    """Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnAdminSession) - ``a
            GradebookColumnAdminSession``
    raise:  NotFound - ``gradebook_id`` not found
    raise:  NullArgument - ``gradebook_id`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_gradebook_column_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_admin()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if self.supports_gradebook_column_admin():
        ##
        # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
        ##
        # pylint: disable=no-member
        return sessions.GradebookColumnAdminSession(gradebook_id, runtime=self._runtime)
    raise errors.Unimplemented()
constant[Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradebookColumnAdminSession) - ``a
GradebookColumnAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` and
``supports_visible_federation()`` are ``true``.*
]
if <ast.UnaryOp object at 0x7da20e9568f0> begin[:]
<ast.Raise object at 0x7da20e956200>
return[call[name[sessions].GradebookColumnAdminSession, parameter[name[gradebook_id]]]] | keyword[def] identifier[get_gradebook_column_admin_session_for_gradebook] ( identifier[self] , identifier[gradebook_id] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_gradebook_column_admin] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[GradebookColumnAdminSession] ( identifier[gradebook_id] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradebookColumnAdminSession) - ``a
GradebookColumnAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_gradebook_column_admin():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.GradebookColumnAdminSession(gradebook_id, runtime=self._runtime) |
def get_template(name):
    """Normalize line endings and double up single-brace placeholders.

    CRLF sequences are collapsed to LF, then any ``{...}`` group whose
    first character is one of ``F``, ``I``, ``S``, ``D``, ``E`` or ``°``
    is rewritten as ``{{...}}`` (escaped for str.format-style templating).
    """
    unix_text = re.sub(r'\r\n', r'\n', name)
    return re.sub(r'\{([FISDE°].*?)\}', r'{{\1}}', unix_text)
constant[
Still unsure about best way to do this, hence cruft.
]
variable[text] assign[=] call[name[re].sub, parameter[constant[\r\n], constant[\n], name[name]]]
variable[text] assign[=] call[name[re].sub, parameter[constant[\{([FISDE°].*?)\}], constant[{{\1}}], name[text]]]
return[name[text]] | keyword[def] identifier[get_template] ( identifier[name] ):
literal[string]
identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[name] )
identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] )
keyword[return] identifier[text] | def get_template(name):
"""
Still unsure about best way to do this, hence cruft.
"""
text = re.sub('\\r\\n', '\\n', name)
text = re.sub('\\{([FISDE°].*?)\\}', '{{\\1}}', text)
return text |
def has_capabilities(self, *cap_names):
    """ Check if class has all of the specified capabilities

    :param cap_names: capabilities names to check
    :return: bool - True when every name is present (vacuously True for
        no arguments), False otherwise
    """
    # all() short-circuits on the first missing capability, matching the
    # original early-return loop; membership is tested against the class's
    # declared capability collection.
    return all(name in self.__class_capabilities__ for name in cap_names)
constant[ Check if class has all of the specified capabilities
:param cap_names: capabilities names to check
:return: bool
]
for taget[name[name]] in starred[name[cap_names]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].__class_capabilities__] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[has_capabilities] ( identifier[self] ,* identifier[cap_names] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[cap_names] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[__class_capabilities__] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def has_capabilities(self, *cap_names):
""" Check if class has all of the specified capabilities
:param cap_names: capabilities names to check
:return: bool
"""
for name in cap_names:
if name not in self.__class_capabilities__:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return True |
def synchronized(func, *args, **kwargs):
    """Call ``func`` while holding ``args[0]._lock``, if it has one.

    If the first positional argument (hopefully self) does not carry a
    ``_lock`` attribute — or there are no positional arguments at all —
    the function is simply invoked unguarded.
    """
    lock_holder = args[0] if args else None
    if lock_holder is None or not hasattr(lock_holder, '_lock'):
        return func(*args, **kwargs)
    with lock_holder._lock:  # pylint: disable=W0212
        return func(*args, **kwargs)
constant[Function decorator to make function synchronized on ``self._lock``.
If the first argument to the function (hopefully self) does not have a _lock
attribute, then this decorator does nothing.
]
if <ast.UnaryOp object at 0x7da20c794580> begin[:]
return[call[name[func], parameter[<ast.Starred object at 0x7da20c794d60>]]]
with call[name[args]][constant[0]]._lock begin[:]
return[call[name[func], parameter[<ast.Starred object at 0x7da20c795ed0>]]] | keyword[def] identifier[synchronized] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] ( identifier[args] keyword[and] identifier[hasattr] ( identifier[args] [ literal[int] ], literal[string] )):
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[with] identifier[args] [ literal[int] ]. identifier[_lock] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) | def synchronized(func, *args, **kwargs):
"""Function decorator to make function synchronized on ``self._lock``.
If the first argument to the function (hopefully self) does not have a _lock
attribute, then this decorator does nothing.
"""
if not (args and hasattr(args[0], '_lock')):
return func(*args, **kwargs) # depends on [control=['if'], data=[]]
with args[0]._lock: # pylint: disable=W0212
return func(*args, **kwargs) # depends on [control=['with'], data=[]] |
def formatResults(self, op, results):
    """
    This formats the results of the database operations for printing
    back to the caller

    @param op: operation to perform (add, remove, update, get)
    @type op: string
    @param results: results from db queries in perspective_commandline
    @type results: list
    @returns: string containing formatted results
    """
    # Collect pieces in a list and join once, instead of repeated +=.
    pieces = []
    if op == 'add':
        # list, alternating ident, uid
        pieces.append("user(s) added:\n")
        for entry in results:
            if isinstance(entry, str):
                pieces.append("identifier: %s\n" % entry)
            else:
                pieces.append("uid: %d\n\n" % entry)
    elif op == 'remove':
        # list of dictionaries
        pieces.append("user(s) removed:\n")
        for entry in results:
            if entry:
                pieces.append("identifier: %s\n" % (entry))
    elif op == 'update':
        # list, alternating ident, None
        pieces.append("user(s) updated:\n")
        for entry in results:
            if entry:
                pieces.append("identifier: %s\n" % (entry))
    elif op == 'get':
        # list of dictionaries
        pieces.append("user(s) found:\n")
        for entry in results:
            if not entry:
                pieces.append("no match found\n")
                continue
            for key in sorted(entry.keys()):
                # never echo stored password hashes back to the caller
                if key != 'bb_password':
                    pieces.append("%s: %s\n" % (key, entry[key]))
            pieces.append("\n")
    return "".join(pieces)
constant[
This formats the results of the database operations for printing
back to the caller
@param op: operation to perform (add, remove, update, get)
@type op: string
@param results: results from db queries in perspective_commandline
@type results: list
@returns: string containing formatted results
]
variable[formatted_results] assign[=] constant[]
if compare[name[op] equal[==] constant[add]] begin[:]
<ast.AugAssign object at 0x7da2041da740>
for taget[name[user]] in starred[name[results]] begin[:]
if call[name[isinstance], parameter[name[user], name[str]]] begin[:]
<ast.AugAssign object at 0x7da2041da110>
return[name[formatted_results]] | keyword[def] identifier[formatResults] ( identifier[self] , identifier[op] , identifier[results] ):
literal[string]
identifier[formatted_results] = literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[formatted_results] += literal[string]
keyword[for] identifier[user] keyword[in] identifier[results] :
keyword[if] identifier[isinstance] ( identifier[user] , identifier[str] ):
identifier[formatted_results] += literal[string] % identifier[user]
keyword[else] :
identifier[formatted_results] += literal[string] % identifier[user]
keyword[elif] identifier[op] == literal[string] :
identifier[formatted_results] += literal[string]
keyword[for] identifier[user] keyword[in] identifier[results] :
keyword[if] identifier[user] :
identifier[formatted_results] += literal[string] %( identifier[user] )
keyword[elif] identifier[op] == literal[string] :
identifier[formatted_results] += literal[string]
keyword[for] identifier[user] keyword[in] identifier[results] :
keyword[if] identifier[user] :
identifier[formatted_results] += literal[string] %( identifier[user] )
keyword[elif] identifier[op] == literal[string] :
identifier[formatted_results] += literal[string]
keyword[for] identifier[user] keyword[in] identifier[results] :
keyword[if] identifier[user] :
keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[user] . identifier[keys] ()):
keyword[if] identifier[key] != literal[string] :
identifier[formatted_results] += literal[string] %( identifier[key] , identifier[user] [ identifier[key] ])
identifier[formatted_results] += literal[string]
keyword[else] :
identifier[formatted_results] += literal[string]
keyword[return] identifier[formatted_results] | def formatResults(self, op, results):
"""
This formats the results of the database operations for printing
back to the caller
@param op: operation to perform (add, remove, update, get)
@type op: string
@param results: results from db queries in perspective_commandline
@type results: list
@returns: string containing formatted results
"""
formatted_results = ''
if op == 'add':
# list, alternating ident, uid
formatted_results += 'user(s) added:\n'
for user in results:
if isinstance(user, str):
formatted_results += 'identifier: %s\n' % user # depends on [control=['if'], data=[]]
else:
formatted_results += 'uid: %d\n\n' % user # depends on [control=['for'], data=['user']] # depends on [control=['if'], data=[]]
elif op == 'remove':
# list of dictionaries
formatted_results += 'user(s) removed:\n'
for user in results:
if user:
formatted_results += 'identifier: %s\n' % user # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['user']] # depends on [control=['if'], data=[]]
elif op == 'update':
# list, alternating ident, None
formatted_results += 'user(s) updated:\n'
for user in results:
if user:
formatted_results += 'identifier: %s\n' % user # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['user']] # depends on [control=['if'], data=[]]
elif op == 'get':
# list of dictionaries
formatted_results += 'user(s) found:\n'
for user in results:
if user:
for key in sorted(user.keys()):
if key != 'bb_password':
formatted_results += '%s: %s\n' % (key, user[key]) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
formatted_results += '\n' # depends on [control=['if'], data=[]]
else:
formatted_results += 'no match found\n' # depends on [control=['for'], data=['user']] # depends on [control=['if'], data=[]]
return formatted_results |
def push_fbo(self, fbo, offset, csize):
    """ Push an FBO on the stack.

    This activates the framebuffer and causes subsequent rendering to be
    written to the framebuffer rather than the canvas's back buffer. This
    will also set the canvas viewport to cover the boundaries of the
    framebuffer.

    Parameters
    ----------
    fbo : instance of FrameBuffer
        The framebuffer object .
    offset : tuple
        The location of the fbo origin relative to the canvas's framebuffer
        origin.
    csize : tuple
        The size of the region in the canvas's framebuffer that should be
        covered by this framebuffer object.
    """
    self._fb_stack.append((fbo, offset, csize))
    try:
        fbo.activate()
        # viewport covers the full color buffer; shape[:2] is (rows, cols)
        height, width = fbo.color_buffer.shape[:2]
        self.push_viewport((0, 0, width, height))
    except Exception:
        # keep the stack consistent if activation/viewport setup fails
        self._fb_stack.pop()
        raise
    self._update_transforms()
constant[ Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer object .
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
]
call[name[self]._fb_stack.append, parameter[tuple[[<ast.Name object at 0x7da1b0ebcd60>, <ast.Name object at 0x7da1b0ebfdc0>, <ast.Name object at 0x7da1b0ebea10>]]]]
<ast.Try object at 0x7da1b0ebd720>
call[name[self]._update_transforms, parameter[]] | keyword[def] identifier[push_fbo] ( identifier[self] , identifier[fbo] , identifier[offset] , identifier[csize] ):
literal[string]
identifier[self] . identifier[_fb_stack] . identifier[append] (( identifier[fbo] , identifier[offset] , identifier[csize] ))
keyword[try] :
identifier[fbo] . identifier[activate] ()
identifier[h] , identifier[w] = identifier[fbo] . identifier[color_buffer] . identifier[shape] [: literal[int] ]
identifier[self] . identifier[push_viewport] (( literal[int] , literal[int] , identifier[w] , identifier[h] ))
keyword[except] identifier[Exception] :
identifier[self] . identifier[_fb_stack] . identifier[pop] ()
keyword[raise]
identifier[self] . identifier[_update_transforms] () | def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer object .
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
"""
self._fb_stack.append((fbo, offset, csize))
try:
fbo.activate()
(h, w) = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h)) # depends on [control=['try'], data=[]]
except Exception:
self._fb_stack.pop()
raise # depends on [control=['except'], data=[]]
self._update_transforms() |
def _try_dump(cnf, outpath, otype, fmsg, extra_opts=None):
    """Dump ``cnf`` via the API, exiting with a message on known failures.

    :param cnf: Configuration object to print out
    :param outpath: Output file path or None
    :param otype: Output type or None
    :param fmsg: message if it cannot detect otype by 'inpath'
    :param extra_opts: Map object will be given to API.dump as extra options
    """
    opts = {} if extra_opts is None else extra_opts
    try:
        API.dump(cnf, outpath, otype, **opts)
    except API.UnknownFileTypeError:
        _exit_with_output(fmsg % outpath, 1)
    except API.UnknownProcessorTypeError:
        _exit_with_output("Invalid output type '%s'" % otype, 1)
constant[
:param cnf: Configuration object to print out
:param outpath: Output file path or None
:param otype: Output type or None
:param fmsg: message if it cannot detect otype by 'inpath'
:param extra_opts: Map object will be given to API.dump as extra options
]
if compare[name[extra_opts] is constant[None]] begin[:]
variable[extra_opts] assign[=] dictionary[[], []]
<ast.Try object at 0x7da20e9575e0> | keyword[def] identifier[_try_dump] ( identifier[cnf] , identifier[outpath] , identifier[otype] , identifier[fmsg] , identifier[extra_opts] = keyword[None] ):
literal[string]
keyword[if] identifier[extra_opts] keyword[is] keyword[None] :
identifier[extra_opts] ={}
keyword[try] :
identifier[API] . identifier[dump] ( identifier[cnf] , identifier[outpath] , identifier[otype] ,** identifier[extra_opts] )
keyword[except] identifier[API] . identifier[UnknownFileTypeError] :
identifier[_exit_with_output] ( identifier[fmsg] % identifier[outpath] , literal[int] )
keyword[except] identifier[API] . identifier[UnknownProcessorTypeError] :
identifier[_exit_with_output] ( literal[string] % identifier[otype] , literal[int] ) | def _try_dump(cnf, outpath, otype, fmsg, extra_opts=None):
"""
:param cnf: Configuration object to print out
:param outpath: Output file path or None
:param otype: Output type or None
:param fmsg: message if it cannot detect otype by 'inpath'
:param extra_opts: Map object will be given to API.dump as extra options
"""
if extra_opts is None:
extra_opts = {} # depends on [control=['if'], data=['extra_opts']]
try:
API.dump(cnf, outpath, otype, **extra_opts) # depends on [control=['try'], data=[]]
except API.UnknownFileTypeError:
_exit_with_output(fmsg % outpath, 1) # depends on [control=['except'], data=[]]
except API.UnknownProcessorTypeError:
_exit_with_output("Invalid output type '%s'" % otype, 1) # depends on [control=['except'], data=[]] |
def is_after(self, other):
    """
    Returns true if this vector clock is causally strictly after the
    provided vector clock. This means that it the provided clock is neither
    equal to, greater than or concurrent to this vector clock.

    :param other: (:class:`~hazelcast.cluster.VectorClock`), Vector clock to be compared
    :return: (bool), True if this vector clock is strictly after the other vector clock, False otherwise
    """
    strictly_greater = False
    for replica_id, remote_ts in other.entry_set():
        local_ts = self._replica_timestamps.get(replica_id)
        if local_ts is None or local_ts < remote_ts:
            # missing or older local entry -> cannot be strictly after
            return False
        if local_ts > remote_ts:
            strictly_greater = True
    # strictly after when at least one local timestamp is greater, or this
    # clock carries additional replica entries the other lacks
    return strictly_greater or self.size() > other.size()
constant[
Returns true if this vector clock is causally strictly after the
provided vector clock. This means that it the provided clock is neither
equal to, greater than or concurrent to this vector clock.
:param other: (:class:`~hazelcast.cluster.VectorClock`), Vector clock to be compared
:return: (bool), True if this vector clock is strictly after the other vector clock, False otherwise
]
variable[any_timestamp_greater] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b1668f70>, <ast.Name object at 0x7da1b166af80>]]] in starred[call[name[other].entry_set, parameter[]]] begin[:]
variable[local_timestamp] assign[=] call[name[self]._replica_timestamps.get, parameter[name[replica_id]]]
if <ast.BoolOp object at 0x7da1b1668190> begin[:]
return[constant[False]]
return[<ast.BoolOp object at 0x7da1b16d12d0>] | keyword[def] identifier[is_after] ( identifier[self] , identifier[other] ):
literal[string]
identifier[any_timestamp_greater] = keyword[False]
keyword[for] identifier[replica_id] , identifier[other_timestamp] keyword[in] identifier[other] . identifier[entry_set] ():
identifier[local_timestamp] = identifier[self] . identifier[_replica_timestamps] . identifier[get] ( identifier[replica_id] )
keyword[if] identifier[local_timestamp] keyword[is] keyword[None] keyword[or] identifier[local_timestamp] < identifier[other_timestamp] :
keyword[return] keyword[False]
keyword[elif] identifier[local_timestamp] > identifier[other_timestamp] :
identifier[any_timestamp_greater] = keyword[True]
keyword[return] identifier[any_timestamp_greater] keyword[or] identifier[other] . identifier[size] ()< identifier[self] . identifier[size] () | def is_after(self, other):
"""
Returns true if this vector clock is causally strictly after the
provided vector clock. This means that it the provided clock is neither
equal to, greater than or concurrent to this vector clock.
:param other: (:class:`~hazelcast.cluster.VectorClock`), Vector clock to be compared
:return: (bool), True if this vector clock is strictly after the other vector clock, False otherwise
"""
any_timestamp_greater = False
for (replica_id, other_timestamp) in other.entry_set():
local_timestamp = self._replica_timestamps.get(replica_id)
if local_timestamp is None or local_timestamp < other_timestamp:
return False # depends on [control=['if'], data=[]]
elif local_timestamp > other_timestamp:
any_timestamp_greater = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# there is at least one local timestamp greater or local vector clock has additional timestamps
return any_timestamp_greater or other.size() < self.size() |
def _input_as_multiline_string(self, data):
    """Write multiline string to temp file, return filename

    data: a multiline string to be written to a file.
    """
    tmp_path = self.getTmpFilename(self.WorkingDir, suffix='.fasta')
    self._input_filename = tmp_path
    with open(tmp_path, 'w') as out_handle:
        out_handle.write(data)
    return tmp_path
constant[Write multiline string to temp file, return filename
data: a multiline string to be written to a file.
]
name[self]._input_filename assign[=] call[name[self].getTmpFilename, parameter[name[self].WorkingDir]]
with call[name[open], parameter[name[self]._input_filename, constant[w]]] begin[:]
call[name[f].write, parameter[name[data]]]
return[name[self]._input_filename] | keyword[def] identifier[_input_as_multiline_string] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[_input_filename] = identifier[self] . identifier[getTmpFilename] (
identifier[self] . identifier[WorkingDir] , identifier[suffix] = literal[string] )
keyword[with] identifier[open] ( identifier[self] . identifier[_input_filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[data] )
keyword[return] identifier[self] . identifier[_input_filename] | def _input_as_multiline_string(self, data):
"""Write multiline string to temp file, return filename
data: a multiline string to be written to a file.
"""
self._input_filename = self.getTmpFilename(self.WorkingDir, suffix='.fasta')
with open(self._input_filename, 'w') as f:
f.write(data) # depends on [control=['with'], data=['f']]
return self._input_filename |
def _inherited_row(row, base_rows_from_pillar, ret):
    '''Return a row with properties from parents.'''
    # Collect parent rows in order; `row` itself goes last so its own
    # properties win over inherited ones.
    collected = []
    for pillar_key in base_rows_from_pillar:
        pillar_row = __salt__['pillar.get'](pillar_key)
        if pillar_row:
            collected.append(pillar_row)
        elif pillar_key != _DEFAULT_ROW_PILLAR:
            # warn (once per message) about pillars that could not be found
            warnings = ret.setdefault('warnings', [])
            warning_message = 'Cannot find row pillar "{0}".'.format(
                pillar_key)
            if warning_message not in warnings:
                warnings.append(warning_message)
    collected.append(row)
    merged = {}
    for partial in collected:
        merged.update(partial)
    return merged
constant[Return a row with properties from parents.]
variable[base_rows] assign[=] list[[]]
for taget[name[base_row_from_pillar]] in starred[name[base_rows_from_pillar]] begin[:]
variable[base_row] assign[=] call[call[name[__salt__]][constant[pillar.get]], parameter[name[base_row_from_pillar]]]
if name[base_row] begin[:]
call[name[base_rows].append, parameter[name[base_row]]]
call[name[base_rows].append, parameter[name[row]]]
variable[result_row] assign[=] dictionary[[], []]
for taget[name[row]] in starred[name[base_rows]] begin[:]
call[name[result_row].update, parameter[name[row]]]
return[name[result_row]] | keyword[def] identifier[_inherited_row] ( identifier[row] , identifier[base_rows_from_pillar] , identifier[ret] ):
literal[string]
identifier[base_rows] =[]
keyword[for] identifier[base_row_from_pillar] keyword[in] identifier[base_rows_from_pillar] :
identifier[base_row] = identifier[__salt__] [ literal[string] ]( identifier[base_row_from_pillar] )
keyword[if] identifier[base_row] :
identifier[base_rows] . identifier[append] ( identifier[base_row] )
keyword[elif] identifier[base_row_from_pillar] != identifier[_DEFAULT_ROW_PILLAR] :
identifier[ret] . identifier[setdefault] ( literal[string] ,[])
identifier[warning_message] = literal[string] . identifier[format] (
identifier[base_row_from_pillar] )
keyword[if] identifier[warning_message] keyword[not] keyword[in] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[warning_message] )
identifier[base_rows] . identifier[append] ( identifier[row] )
identifier[result_row] ={}
keyword[for] identifier[row] keyword[in] identifier[base_rows] :
identifier[result_row] . identifier[update] ( identifier[row] )
keyword[return] identifier[result_row] | def _inherited_row(row, base_rows_from_pillar, ret):
"""Return a row with properties from parents."""
base_rows = []
for base_row_from_pillar in base_rows_from_pillar:
base_row = __salt__['pillar.get'](base_row_from_pillar)
if base_row:
base_rows.append(base_row) # depends on [control=['if'], data=[]]
elif base_row_from_pillar != _DEFAULT_ROW_PILLAR:
ret.setdefault('warnings', [])
warning_message = 'Cannot find row pillar "{0}".'.format(base_row_from_pillar)
if warning_message not in ret['warnings']:
ret['warnings'].append(warning_message) # depends on [control=['if'], data=['warning_message']] # depends on [control=['if'], data=['base_row_from_pillar']] # depends on [control=['for'], data=['base_row_from_pillar']]
base_rows.append(row)
result_row = {}
for row in base_rows:
result_row.update(row) # depends on [control=['for'], data=['row']]
return result_row |
def enumerate(vendor_id=0, product_id=0):
    """ Enumerate the HID Devices.
    Returns a generator that yields all of the HID devices attached to the
    system.
    :param vendor_id: Only return devices which match this vendor id
    :type vendor_id: int
    :param product_id: Only return devices which match this product id
    :type product_id: int
    :return: Generator that yields informations about attached
        HID devices
    :rval: generator(DeviceInfo)
    """
    # Keep a reference to the head of the linked list returned by
    # hid_enumerate(): the walk below rebinds ``info``, so by the time the
    # loop ends ``info`` is the list terminator.  The previous code passed
    # that terminator to hid_free_enumeration(), leaking the whole list.
    head = hidapi.hid_enumerate(vendor_id, product_id)
    try:
        info = head
        while info:
            yield DeviceInfo(info)
            info = info.next
    finally:
        # Free in a ``finally`` so the list is released even when the
        # caller abandons the generator before exhausting it.
        hidapi.hid_free_enumeration(head)
constant[ Enumerate the HID Devices.
Returns a generator that yields all of the HID devices attached to the
system.
:param vendor_id: Only return devices which match this vendor id
:type vendor_id: int
:param product_id: Only return devices which match this product id
:type product_id: int
:return: Generator that yields informations about attached
HID devices
:rval: generator(DeviceInfo)
]
variable[info] assign[=] call[name[hidapi].hid_enumerate, parameter[name[vendor_id], name[product_id]]]
while name[info] begin[:]
<ast.Yield object at 0x7da1b0910970>
variable[info] assign[=] name[info].next
call[name[hidapi].hid_free_enumeration, parameter[name[info]]] | keyword[def] identifier[enumerate] ( identifier[vendor_id] = literal[int] , identifier[product_id] = literal[int] ):
literal[string]
identifier[info] = identifier[hidapi] . identifier[hid_enumerate] ( identifier[vendor_id] , identifier[product_id] )
keyword[while] identifier[info] :
keyword[yield] identifier[DeviceInfo] ( identifier[info] )
identifier[info] = identifier[info] . identifier[next]
identifier[hidapi] . identifier[hid_free_enumeration] ( identifier[info] ) | def enumerate(vendor_id=0, product_id=0):
""" Enumerate the HID Devices.
Returns a generator that yields all of the HID devices attached to the
system.
:param vendor_id: Only return devices which match this vendor id
:type vendor_id: int
:param product_id: Only return devices which match this product id
:type product_id: int
:return: Generator that yields informations about attached
HID devices
:rval: generator(DeviceInfo)
"""
info = hidapi.hid_enumerate(vendor_id, product_id)
while info:
yield DeviceInfo(info)
info = info.next # depends on [control=['while'], data=[]]
hidapi.hid_free_enumeration(info) |
def can_paste(self):
    """ Returns whether text can be pasted from the clipboard.
    """
    # Pasting is only meaningful when the control accepts edits.
    flags = self._control.textInteractionFlags()
    if not (flags & QtCore.Qt.TextEditable):
        return False
    # True only when the clipboard currently holds some text.
    return bool(QtGui.QApplication.clipboard().text())
constant[ Returns whether text can be pasted from the clipboard.
]
if binary_operation[call[name[self]._control.textInteractionFlags, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> name[QtCore].Qt.TextEditable] begin[:]
return[call[name[bool], parameter[call[call[name[QtGui].QApplication.clipboard, parameter[]].text, parameter[]]]]]
return[constant[False]] | keyword[def] identifier[can_paste] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_control] . identifier[textInteractionFlags] ()& identifier[QtCore] . identifier[Qt] . identifier[TextEditable] :
keyword[return] identifier[bool] ( identifier[QtGui] . identifier[QApplication] . identifier[clipboard] (). identifier[text] ())
keyword[return] keyword[False] | def can_paste(self):
""" Returns whether text can be pasted from the clipboard.
"""
if self._control.textInteractionFlags() & QtCore.Qt.TextEditable:
return bool(QtGui.QApplication.clipboard().text()) # depends on [control=['if'], data=[]]
return False |
def set_element_focus(self, locator):
    """Sets focus on the element identified by `locator`. Should
    be used with elements meant to have focus only, such as
    text fields. This keywords also waits for the focus to be
    active by calling the `Wait Until Element Has Focus` keyword.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |"""
    self._info("Setting focus on element '%s'" % (locator))
    target = self._element_find(locator, True, True)
    # Sending the NULL key gives the element focus without changing its value.
    target.send_keys(Keys.NULL)
    # Block until the browser actually reports the element as focused.
    self._wait_until_no_error(None, self._check_element_focus, True, locator)
constant[Sets focus on the element identified by `locator`. Should
be used with elements meant to have focus only, such as
text fields. This keywords also waits for the focus to be
active by calling the `Wait Until Element Has Focus` keyword.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |]
call[name[self]._info, parameter[binary_operation[constant[Setting focus on element '%s'] <ast.Mod object at 0x7da2590d6920> name[locator]]]]
variable[element] assign[=] call[name[self]._element_find, parameter[name[locator], constant[True], constant[True]]]
call[name[element].send_keys, parameter[name[Keys].NULL]]
call[name[self]._wait_until_no_error, parameter[constant[None], name[self]._check_element_focus, constant[True], name[locator]]] | keyword[def] identifier[set_element_focus] ( identifier[self] , identifier[locator] ):
literal[string]
identifier[self] . identifier[_info] ( literal[string] %( identifier[locator] ))
identifier[element] = identifier[self] . identifier[_element_find] ( identifier[locator] , keyword[True] , keyword[True] )
identifier[element] . identifier[send_keys] ( identifier[Keys] . identifier[NULL] )
identifier[self] . identifier[_wait_until_no_error] ( keyword[None] , identifier[self] . identifier[_check_element_focus] , keyword[True] , identifier[locator] ) | def set_element_focus(self, locator):
"""Sets focus on the element identified by `locator`. Should
be used with elements meant to have focus only, such as
text fields. This keywords also waits for the focus to be
active by calling the `Wait Until Element Has Focus` keyword.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Setting focus on element '%s'" % locator)
element = self._element_find(locator, True, True)
element.send_keys(Keys.NULL)
self._wait_until_no_error(None, self._check_element_focus, True, locator) |
def _fly(self, board, layers, things, the_plot):
    """Handles the behaviour of visible bolts flying toward the player."""
    # A bolt that struck a bunker this frame vanishes off the board.
    hit_bunker = self.character in the_plot['bunker_hitters']
    if hit_bunker:
        return self._teleport((-1, -1))
    # A bolt that reaches the player's position ends the episode.
    if self.position == things['P'].position:
        the_plot.terminate_episode()
    # Otherwise the bolt keeps falling toward the bottom of the screen.
    self._south(board, the_plot)
constant[Handles the behaviour of visible bolts flying toward the player.]
if compare[name[self].character in call[name[the_plot]][constant[bunker_hitters]]] begin[:]
return[call[name[self]._teleport, parameter[tuple[[<ast.UnaryOp object at 0x7da2041dab30>, <ast.UnaryOp object at 0x7da2041d86a0>]]]]]
if compare[name[self].position equal[==] call[name[things]][constant[P]].position] begin[:]
call[name[the_plot].terminate_episode, parameter[]]
call[name[self]._south, parameter[name[board], name[the_plot]]] | keyword[def] identifier[_fly] ( identifier[self] , identifier[board] , identifier[layers] , identifier[things] , identifier[the_plot] ):
literal[string]
keyword[if] identifier[self] . identifier[character] keyword[in] identifier[the_plot] [ literal[string] ]:
keyword[return] identifier[self] . identifier[_teleport] ((- literal[int] ,- literal[int] ))
keyword[if] identifier[self] . identifier[position] == identifier[things] [ literal[string] ]. identifier[position] : identifier[the_plot] . identifier[terminate_episode] ()
identifier[self] . identifier[_south] ( identifier[board] , identifier[the_plot] ) | def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward the player."""
# Disappear if we've hit a bunker.
if self.character in the_plot['bunker_hitters']:
return self._teleport((-1, -1)) # depends on [control=['if'], data=[]]
# End the game if we've hit the player.
if self.position == things['P'].position:
the_plot.terminate_episode() # depends on [control=['if'], data=[]]
self._south(board, the_plot) |
def cactiStyle(requestContext, seriesList, system=None, units=None):
    """
    Takes a series list and modifies the aliases to provide column aligned
    output with Current, Max, and Min values in the style of cacti. Optionally
    takes a "system" value to apply unit formatting in the same style as the
    Y-axis, or a "unit" string to append an arbitrary unit suffix.
    NOTE: column alignment only works with monospace fonts such as terminus.
    Example::
        &target=cactiStyle(ganglia.*.net.bytes_out,"si")
        &target=cactiStyle(ganglia.*.net.bytes_out,"si","b")
    """
    def fmt(x):
        # Four output shapes, depending on which options were supplied.
        if system and units:
            return "%.2f %s" % format_units(x, system=system, units=units)
        if system:
            return "%.2f%s" % format_units(x, system=system)
        if units:
            return "%.2f %s" % (x, units)
        return "%.2f" % x

    def column_width(extractor):
        # Width of the widest formatted statistic, plus three spaces padding.
        # Values of None/0 are estimated with a placeholder of 3.
        return max([0] + [len(fmt(int(extractor(s) or 3)))
                          for s in seriesList]) + 3

    nameLen = max([0] + [len(s.name) for s in seriesList])
    lastLen = column_width(safeLast)
    maxLen = column_width(safeMax)
    minLen = column_width(safeMin)
    for series in seriesList:
        stats = []
        for value in (safeLast(series), safeMax(series), safeMin(series)):
            stats.append(NAN if value is None else fmt(float(value)))
        last, maximum, minimum = stats
        series.name = "%*s Current:%*s Max:%*s Min:%*s " % (
            -nameLen, series.name, -lastLen, last,
            -maxLen, maximum, -minLen, minimum)
    return seriesList
constant[
Takes a series list and modifies the aliases to provide column aligned
output with Current, Max, and Min values in the style of cacti. Optionally
takes a "system" value to apply unit formatting in the same style as the
Y-axis, or a "unit" string to append an arbitrary unit suffix.
NOTE: column alignment only works with monospace fonts such as terminus.
Example::
&target=cactiStyle(ganglia.*.net.bytes_out,"si")
&target=cactiStyle(ganglia.*.net.bytes_out,"si","b")
]
def function[fmt, parameter[x]]:
if name[system] begin[:]
if name[units] begin[:]
return[binary_operation[constant[%.2f %s] <ast.Mod object at 0x7da2590d6920> call[name[format_units], parameter[name[x]]]]]
variable[nameLen] assign[=] call[name[max], parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe93370>]] + <ast.ListComp object at 0x7da18fe91cc0>]]]
variable[lastLen] assign[=] binary_operation[call[name[max], parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe92b30>]] + <ast.ListComp object at 0x7da18fe93c70>]]] + constant[3]]
variable[maxLen] assign[=] binary_operation[call[name[max], parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe92950>]] + <ast.ListComp object at 0x7da18fe912a0>]]] + constant[3]]
variable[minLen] assign[=] binary_operation[call[name[max], parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe916c0>]] + <ast.ListComp object at 0x7da18fe922c0>]]] + constant[3]]
for taget[name[series]] in starred[name[seriesList]] begin[:]
variable[last] assign[=] call[name[safeLast], parameter[name[series]]]
variable[maximum] assign[=] call[name[safeMax], parameter[name[series]]]
variable[minimum] assign[=] call[name[safeMin], parameter[name[series]]]
if compare[name[last] is constant[None]] begin[:]
variable[last] assign[=] name[NAN]
if compare[name[maximum] is constant[None]] begin[:]
variable[maximum] assign[=] name[NAN]
if compare[name[minimum] is constant[None]] begin[:]
variable[minimum] assign[=] name[NAN]
name[series].name assign[=] binary_operation[constant[%*s Current:%*s Max:%*s Min:%*s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.UnaryOp object at 0x7da207f02050>, <ast.Attribute object at 0x7da207f02ef0>, <ast.UnaryOp object at 0x7da207f017b0>, <ast.Name object at 0x7da207f013c0>, <ast.UnaryOp object at 0x7da207f02530>, <ast.Name object at 0x7da207f01570>, <ast.UnaryOp object at 0x7da207f00f10>, <ast.Name object at 0x7da207f024a0>]]]
return[name[seriesList]] | keyword[def] identifier[cactiStyle] ( identifier[requestContext] , identifier[seriesList] , identifier[system] = keyword[None] , identifier[units] = keyword[None] ):
literal[string]
keyword[def] identifier[fmt] ( identifier[x] ):
keyword[if] identifier[system] :
keyword[if] identifier[units] :
keyword[return] literal[string] % identifier[format_units] ( identifier[x] , identifier[system] = identifier[system] , identifier[units] = identifier[units] )
keyword[else] :
keyword[return] literal[string] % identifier[format_units] ( identifier[x] , identifier[system] = identifier[system] )
keyword[else] :
keyword[if] identifier[units] :
keyword[return] literal[string] %( identifier[x] , identifier[units] )
keyword[else] :
keyword[return] literal[string] % identifier[x]
identifier[nameLen] = identifier[max] ([ literal[int] ]+[ identifier[len] ( identifier[series] . identifier[name] ) keyword[for] identifier[series] keyword[in] identifier[seriesList] ])
identifier[lastLen] = identifier[max] ([ literal[int] ]+[ identifier[len] ( identifier[fmt] ( identifier[int] ( identifier[safeLast] ( identifier[series] ) keyword[or] literal[int] )))
keyword[for] identifier[series] keyword[in] identifier[seriesList] ])+ literal[int]
identifier[maxLen] = identifier[max] ([ literal[int] ]+[ identifier[len] ( identifier[fmt] ( identifier[int] ( identifier[safeMax] ( identifier[series] ) keyword[or] literal[int] )))
keyword[for] identifier[series] keyword[in] identifier[seriesList] ])+ literal[int]
identifier[minLen] = identifier[max] ([ literal[int] ]+[ identifier[len] ( identifier[fmt] ( identifier[int] ( identifier[safeMin] ( identifier[series] ) keyword[or] literal[int] )))
keyword[for] identifier[series] keyword[in] identifier[seriesList] ])+ literal[int]
keyword[for] identifier[series] keyword[in] identifier[seriesList] :
identifier[last] = identifier[safeLast] ( identifier[series] )
identifier[maximum] = identifier[safeMax] ( identifier[series] )
identifier[minimum] = identifier[safeMin] ( identifier[series] )
keyword[if] identifier[last] keyword[is] keyword[None] :
identifier[last] = identifier[NAN]
keyword[else] :
identifier[last] = identifier[fmt] ( identifier[float] ( identifier[last] ))
keyword[if] identifier[maximum] keyword[is] keyword[None] :
identifier[maximum] = identifier[NAN]
keyword[else] :
identifier[maximum] = identifier[fmt] ( identifier[float] ( identifier[maximum] ))
keyword[if] identifier[minimum] keyword[is] keyword[None] :
identifier[minimum] = identifier[NAN]
keyword[else] :
identifier[minimum] = identifier[fmt] ( identifier[float] ( identifier[minimum] ))
identifier[series] . identifier[name] = literal[string] %(
- identifier[nameLen] , identifier[series] . identifier[name] ,- identifier[lastLen] , identifier[last] ,
- identifier[maxLen] , identifier[maximum] ,- identifier[minLen] , identifier[minimum] )
keyword[return] identifier[seriesList] | def cactiStyle(requestContext, seriesList, system=None, units=None):
"""
Takes a series list and modifies the aliases to provide column aligned
output with Current, Max, and Min values in the style of cacti. Optionally
takes a "system" value to apply unit formatting in the same style as the
Y-axis, or a "unit" string to append an arbitrary unit suffix.
NOTE: column alignment only works with monospace fonts such as terminus.
Example::
&target=cactiStyle(ganglia.*.net.bytes_out,"si")
&target=cactiStyle(ganglia.*.net.bytes_out,"si","b")
"""
def fmt(x):
if system:
if units:
return '%.2f %s' % format_units(x, system=system, units=units) # depends on [control=['if'], data=[]]
else:
return '%.2f%s' % format_units(x, system=system) # depends on [control=['if'], data=[]]
elif units:
return '%.2f %s' % (x, units) # depends on [control=['if'], data=[]]
else:
return '%.2f' % x
nameLen = max([0] + [len(series.name) for series in seriesList])
lastLen = max([0] + [len(fmt(int(safeLast(series) or 3))) for series in seriesList]) + 3
maxLen = max([0] + [len(fmt(int(safeMax(series) or 3))) for series in seriesList]) + 3
minLen = max([0] + [len(fmt(int(safeMin(series) or 3))) for series in seriesList]) + 3
for series in seriesList:
last = safeLast(series)
maximum = safeMax(series)
minimum = safeMin(series)
if last is None:
last = NAN # depends on [control=['if'], data=['last']]
else:
last = fmt(float(last))
if maximum is None:
maximum = NAN # depends on [control=['if'], data=['maximum']]
else:
maximum = fmt(float(maximum))
if minimum is None:
minimum = NAN # depends on [control=['if'], data=['minimum']]
else:
minimum = fmt(float(minimum))
series.name = '%*s Current:%*s Max:%*s Min:%*s ' % (-nameLen, series.name, -lastLen, last, -maxLen, maximum, -minLen, minimum) # depends on [control=['for'], data=['series']]
return seriesList |
def sub_list(self, from_index, to_index):
    """
    Returns a view of this list covering the range ``[from_index, to_index)``.
    The returned sublist is backed by this list: non-structural changes made
    through either object are visible through the other.
    :param from_index: (int), start of the range (inclusive).
    :param to_index: (int), end of the range (exclusive).
    :return: (Sequence), a view of the specified range within this list.
    """
    return self._encode_invoke(list_sub_codec, from_=from_index, to=to_index)
constant[
Returns a sublist from this list, whose range is specified with from_index(inclusive) and to_index(exclusive).
The returned list is backed by this list, so non-structural changes in the returned list are reflected in this
list, and vice-versa.
:param from_index: (int), the start point(inclusive) of the sub_list.
:param to_index: (int), th end point(exclusive) of the sub_list.
:return: (Sequence), a view of the specified range within this list.
]
return[call[name[self]._encode_invoke, parameter[name[list_sub_codec]]]] | keyword[def] identifier[sub_list] ( identifier[self] , identifier[from_index] , identifier[to_index] ):
literal[string]
keyword[return] identifier[self] . identifier[_encode_invoke] ( identifier[list_sub_codec] , identifier[from_] = identifier[from_index] , identifier[to] = identifier[to_index] ) | def sub_list(self, from_index, to_index):
"""
Returns a sublist from this list, whose range is specified with from_index(inclusive) and to_index(exclusive).
The returned list is backed by this list, so non-structural changes in the returned list are reflected in this
list, and vice-versa.
:param from_index: (int), the start point(inclusive) of the sub_list.
:param to_index: (int), th end point(exclusive) of the sub_list.
:return: (Sequence), a view of the specified range within this list.
"""
return self._encode_invoke(list_sub_codec, from_=from_index, to=to_index) |
def as_xml(self, parent):
    """Create vcard-tmp XML representation of the field.
    :Parameters:
        - `parent`: parent node for the element
    :Types:
        - `parent`: `libxml2.xmlNode`
    :return: xml node with the field data.
    :returntype: `libxml2.xmlNode`"""
    org_node = parent.newChild(None, "ORG", None)
    # Name and unit are emitted as UTF-8 text children of the ORG element.
    org_node.newTextChild(None, "ORGNAME", to_utf8(self.name))
    org_node.newTextChild(None, "ORGUNIT", to_utf8(self.unit))
    return org_node
constant[Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`]
variable[n] assign[=] call[name[parent].newChild, parameter[constant[None], constant[ORG], constant[None]]]
call[name[n].newTextChild, parameter[constant[None], constant[ORGNAME], call[name[to_utf8], parameter[name[self].name]]]]
call[name[n].newTextChild, parameter[constant[None], constant[ORGUNIT], call[name[to_utf8], parameter[name[self].unit]]]]
return[name[n]] | keyword[def] identifier[as_xml] ( identifier[self] , identifier[parent] ):
literal[string]
identifier[n] = identifier[parent] . identifier[newChild] ( keyword[None] , literal[string] , keyword[None] )
identifier[n] . identifier[newTextChild] ( keyword[None] , literal[string] , identifier[to_utf8] ( identifier[self] . identifier[name] ))
identifier[n] . identifier[newTextChild] ( keyword[None] , literal[string] , identifier[to_utf8] ( identifier[self] . identifier[unit] ))
keyword[return] identifier[n] | def as_xml(self, parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
n = parent.newChild(None, 'ORG', None)
n.newTextChild(None, 'ORGNAME', to_utf8(self.name))
n.newTextChild(None, 'ORGUNIT', to_utf8(self.unit))
return n |
def close(self) -> None:
    """Closes connection to the LifeSOS ethernet interface."""
    self.cancel_pending_tasks()
    _LOGGER.debug("Disconnected")
    # Only close the transport if one was ever established.
    transport = self._transport
    if transport:
        transport.close()
    self._is_connected = False
self._is_connected = False | def function[close, parameter[self]]:
constant[Closes connection to the LifeSOS ethernet interface.]
call[name[self].cancel_pending_tasks, parameter[]]
call[name[_LOGGER].debug, parameter[constant[Disconnected]]]
if name[self]._transport begin[:]
call[name[self]._transport.close, parameter[]]
name[self]._is_connected assign[=] constant[False] | keyword[def] identifier[close] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[self] . identifier[cancel_pending_tasks] ()
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[if] identifier[self] . identifier[_transport] :
identifier[self] . identifier[_transport] . identifier[close] ()
identifier[self] . identifier[_is_connected] = keyword[False] | def close(self) -> None:
"""Closes connection to the LifeSOS ethernet interface."""
self.cancel_pending_tasks()
_LOGGER.debug('Disconnected')
if self._transport:
self._transport.close() # depends on [control=['if'], data=[]]
self._is_connected = False |
def _convert_to_var(self, graph, var_res):
    """
    Create tf.Variables from a list of numpy arrays
    var_res: dictionary of numpy arrays with the key names corresponding to var
    """
    with graph.as_default():
        # ``None`` entries are carried through unchanged; everything else
        # becomes a tf.Variable named after its key.
        return {
            key: (tf.Variable(value, name="tf_%s" % key)
                  if value is not None else None)
            for key, value in var_res.items()
        }
constant[
Create tf.Variables from a list of numpy arrays
var_res: dictionary of numpy arrays with the key names corresponding to var
]
with call[name[graph].as_default, parameter[]] begin[:]
variable[var] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20e956590>, <ast.Name object at 0x7da20e954430>]]] in starred[call[name[var_res].items, parameter[]]] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
call[name[var]][name[key]] assign[=] call[name[tf].Variable, parameter[name[value]]]
return[name[var]] | keyword[def] identifier[_convert_to_var] ( identifier[self] , identifier[graph] , identifier[var_res] ):
literal[string]
keyword[with] identifier[graph] . identifier[as_default] ():
identifier[var] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[var_res] . identifier[items] ():
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[var] [ identifier[key] ]= identifier[tf] . identifier[Variable] ( identifier[value] , identifier[name] = literal[string] % identifier[key] )
keyword[else] :
identifier[var] [ identifier[key] ]= keyword[None]
keyword[return] identifier[var] | def _convert_to_var(self, graph, var_res):
"""
Create tf.Variables from a list of numpy arrays
var_res: dictionary of numpy arrays with the key names corresponding to var
"""
with graph.as_default():
var = {}
for (key, value) in var_res.items():
if value is not None:
var[key] = tf.Variable(value, name='tf_%s' % key) # depends on [control=['if'], data=['value']]
else:
var[key] = None # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
return var |
def setup_directories(self, use_sudo=True):
    """Create the minimal required directories for deploying multiple
    releases of a project.
    By default, creation of directories is done with the Fabric
    ``sudo`` function but can optionally use the ``run`` function.
    This method performs one network operation.
    :param bool use_sudo: If ``True``, use ``sudo()`` to create required
        directories. If ``False`` try to create directories using the
        ``run()`` command.
    """
    # Pick the privileged or unprivileged command runner up front.
    if use_sudo:
        run_command = self._runner.sudo
    else:
        run_command = self._runner.run
    run_command("mkdir -p '{0}'".format(self._releases))
constant[Create the minimal required directories for deploying multiple
releases of a project.
By default, creation of directories is done with the Fabric
``sudo`` function but can optionally use the ``run`` function.
This method performs one network operation.
:param bool use_sudo: If ``True``, use ``sudo()`` to create required
directories. If ``False`` try to create directories using the
``run()`` command.
]
variable[runner] assign[=] <ast.IfExp object at 0x7da1b14604f0>
call[name[runner], parameter[call[constant[mkdir -p '{0}'].format, parameter[name[self]._releases]]]] | keyword[def] identifier[setup_directories] ( identifier[self] , identifier[use_sudo] = keyword[True] ):
literal[string]
identifier[runner] = identifier[self] . identifier[_runner] . identifier[sudo] keyword[if] identifier[use_sudo] keyword[else] identifier[self] . identifier[_runner] . identifier[run]
identifier[runner] ( literal[string] . identifier[format] ( identifier[self] . identifier[_releases] )) | def setup_directories(self, use_sudo=True):
"""Create the minimal required directories for deploying multiple
releases of a project.
By default, creation of directories is done with the Fabric
``sudo`` function but can optionally use the ``run`` function.
This method performs one network operation.
:param bool use_sudo: If ``True``, use ``sudo()`` to create required
directories. If ``False`` try to create directories using the
``run()`` command.
"""
runner = self._runner.sudo if use_sudo else self._runner.run
runner("mkdir -p '{0}'".format(self._releases)) |
def _validated(self, data):
    """Convert data or die trying."""
    try:
        converted = self.convert(data)
    except (TypeError, ValueError) as err:
        # Re-raise conversion failures as the schema's own error type,
        # preserving the original exception arguments.
        raise NotValid(*err.args)
    return converted
constant[Convert data or die trying.]
<ast.Try object at 0x7da1b1471db0> | keyword[def] identifier[_validated] ( identifier[self] , identifier[data] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[convert] ( identifier[data] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[ex] :
keyword[raise] identifier[NotValid] (* identifier[ex] . identifier[args] ) | def _validated(self, data):
"""Convert data or die trying."""
try:
return self.convert(data) # depends on [control=['try'], data=[]]
except (TypeError, ValueError) as ex:
raise NotValid(*ex.args) # depends on [control=['except'], data=['ex']] |
def add_volume(self, volume):
    """
    Register *volume* with this object by delegating to ``_add_volume``.
    Args:
        volume (Volume): the volume to add; only its ``name`` and
            ``configs`` attributes are read here.
    """
    self._add_volume(name=volume.name, configs=volume.configs)
constant[
Args:
volume (Volume):
]
call[name[self]._add_volume, parameter[]] | keyword[def] identifier[add_volume] ( identifier[self] , identifier[volume] ):
literal[string]
identifier[self] . identifier[_add_volume] ( identifier[name] = identifier[volume] . identifier[name] , identifier[configs] = identifier[volume] . identifier[configs] ) | def add_volume(self, volume):
"""
Args:
volume (Volume):
"""
self._add_volume(name=volume.name, configs=volume.configs) |
def remote_app(self, name, version=None, **kwargs):
    """Creates and adds new remote application.
    :param name: the remote application's name.
    :param version: '1' or '2', the version code of OAuth protocol.
    :param kwargs: the attributes of remote application.
    :raises ValueError: if ``version`` is neither ``'1'`` nor ``'2'``.
    """
    if version is None:
        # The presence of a request token URL implies the OAuth 1 flow.
        version = '1' if 'request_token_url' in kwargs else '2'
    if version == '1':
        remote_app = OAuth1Application(name, clients=cached_clients)
    elif version == '2':
        remote_app = OAuth2Application(name, clients=cached_clients)
    else:
        # Fixed typo in the error message (was "unkonwn").
        raise ValueError('unknown version %r' % version)
    return self.add_remote_app(remote_app, **kwargs)
constant[Creates and adds new remote application.
:param name: the remote application's name.
:param version: '1' or '2', the version code of OAuth protocol.
:param kwargs: the attributes of remote application.
]
if compare[name[version] is constant[None]] begin[:]
if compare[constant[request_token_url] in name[kwargs]] begin[:]
variable[version] assign[=] constant[1]
if compare[name[version] equal[==] constant[1]] begin[:]
variable[remote_app] assign[=] call[name[OAuth1Application], parameter[name[name]]]
return[call[name[self].add_remote_app, parameter[name[remote_app]]]] | keyword[def] identifier[remote_app] ( identifier[self] , identifier[name] , identifier[version] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[version] keyword[is] keyword[None] :
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[version] = literal[string]
keyword[else] :
identifier[version] = literal[string]
keyword[if] identifier[version] == literal[string] :
identifier[remote_app] = identifier[OAuth1Application] ( identifier[name] , identifier[clients] = identifier[cached_clients] )
keyword[elif] identifier[version] == literal[string] :
identifier[remote_app] = identifier[OAuth2Application] ( identifier[name] , identifier[clients] = identifier[cached_clients] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[version] )
keyword[return] identifier[self] . identifier[add_remote_app] ( identifier[remote_app] ,** identifier[kwargs] ) | def remote_app(self, name, version=None, **kwargs):
"""Creates and adds new remote application.
:param name: the remote application's name.
:param version: '1' or '2', the version code of OAuth protocol.
:param kwargs: the attributes of remote application.
"""
if version is None:
if 'request_token_url' in kwargs:
version = '1' # depends on [control=['if'], data=[]]
else:
version = '2' # depends on [control=['if'], data=['version']]
if version == '1':
remote_app = OAuth1Application(name, clients=cached_clients) # depends on [control=['if'], data=[]]
elif version == '2':
remote_app = OAuth2Application(name, clients=cached_clients) # depends on [control=['if'], data=[]]
else:
raise ValueError('unkonwn version %r' % version)
return self.add_remote_app(remote_app, **kwargs) |
def deriv2(self, p):
    """
    Second derivative of the C-Log-Log link function
    Parameters
    ----------
    p : array-like
        Mean parameters
    Returns
    -------
    g''(p) : array
        The second derivative of the CLogLog link function
    """
    p = self._clean(p)
    # With u = 1 - p and fl = log(u):  g''(p) = -(1 + fl) / (u**2 * fl**2),
    # computed here in the same factored form as before.
    log_one_minus_p = np.log(1 - p)
    second = -1 / ((1 - p)**2 * log_one_minus_p)
    second *= 1 + 1 / log_one_minus_p
    return second
constant[
Second derivative of the C-Log-Log ink function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the CLogLog link function
]
variable[p] assign[=] call[name[self]._clean, parameter[name[p]]]
variable[fl] assign[=] call[name[np].log, parameter[binary_operation[constant[1] - name[p]]]]
variable[d2] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18f58f370> / binary_operation[binary_operation[binary_operation[constant[1] - name[p]] ** constant[2]] * name[fl]]]
<ast.AugAssign object at 0x7da18f58dfc0>
return[name[d2]] | keyword[def] identifier[deriv2] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] = identifier[self] . identifier[_clean] ( identifier[p] )
identifier[fl] = identifier[np] . identifier[log] ( literal[int] - identifier[p] )
identifier[d2] =- literal[int] /(( literal[int] - identifier[p] )** literal[int] * identifier[fl] )
identifier[d2] *= literal[int] + literal[int] / identifier[fl]
keyword[return] identifier[d2] | def deriv2(self, p):
"""
Second derivative of the C-Log-Log ink function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p) ** 2 * fl)
d2 *= 1 + 1 / fl
return d2 |
def stem(self, s):
    """
    Performs stemming on the string.

    :param s: the string to stem
    :type s: str
    :return: the stemmed string
    :rtype: str
    """
    # Preserve the original evaluation order: the env used for reading the
    # result is obtained before the env used to build the Java input string.
    outer_env = javabridge.get_env()
    inner_env = javabridge.get_env()
    java_input = inner_env.new_string_utf(s)
    java_result = self.__stem(java_input)
    return outer_env.get_string(java_result)
constant[
Performs stemming on the string.
:param s: the string to stem
:type s: str
:return: the stemmed string
:rtype: str
]
return[call[call[name[javabridge].get_env, parameter[]].get_string, parameter[call[name[self].__stem, parameter[call[call[name[javabridge].get_env, parameter[]].new_string_utf, parameter[name[s]]]]]]]] | keyword[def] identifier[stem] ( identifier[self] , identifier[s] ):
literal[string]
keyword[return] identifier[javabridge] . identifier[get_env] (). identifier[get_string] ( identifier[self] . identifier[__stem] ( identifier[javabridge] . identifier[get_env] (). identifier[new_string_utf] ( identifier[s] ))) | def stem(self, s):
"""
Performs stemming on the string.
:param s: the string to stem
:type s: str
:return: the stemmed string
:rtype: str
"""
return javabridge.get_env().get_string(self.__stem(javabridge.get_env().new_string_utf(s))) |
def validate(self, validators, resp_obj):
    """ make validation with comparators

    Runs every parsed validator against the response object.  The outcome
    of each single check is appended to ``self.validation_results``; all
    failures are collected so that one ``ValidationFailure`` reporting
    every failed check is raised at the end, instead of stopping at the
    first failure.

    Args:
        validators: list of parsed validators; each item must be a
            ``parser.LazyFunction`` whose args are
            ``(check_item, expect_item)`` and whose ``func_name`` names
            the comparator.
        resp_obj: response object the check expressions are evaluated
            against.

    Raises:
        exceptions.ValidationFailure: if a validator has not been parsed
            into a ``parser.LazyFunction``, or if at least one
            comparison fails.
    """
    self.validation_results = []
    if not validators:
        return

    logger.log_debug("start to validate.")
    validate_pass = True
    failures = []

    for validator in validators:
        # validator should be LazyFunction object
        if not isinstance(validator, parser.LazyFunction):
            raise exceptions.ValidationFailure(
                "validator should be parsed first: {}".format(validators))

        # evaluate validator args with context variable mapping.
        validator_args = validator.get_args()
        check_item, expect_item = validator_args
        check_value = self.__eval_validator_check(
            check_item,
            resp_obj
        )
        expect_value = self.__eval_validator_expect(expect_item)
        validator.update_args([check_value, expect_value])

        comparator = validator.func_name
        validator_dict = {
            "comparator": comparator,
            "check": check_item,
            "check_value": check_value,
            "expect": expect_item,
            "expect_value": expect_value
        }
        validate_msg = "\nvalidate: {} {} {}({})".format(
            check_item,
            comparator,
            expect_value,
            type(expect_value).__name__
        )

        try:
            # to_value() performs the actual comparison; it raises
            # AssertionError on mismatch and TypeError on incompatible
            # operand types.
            validator.to_value(self.test_variables_mapping)
            validator_dict["check_result"] = "pass"
            validate_msg += "\t==> pass"
            logger.log_debug(validate_msg)
        except (AssertionError, TypeError):
            validate_pass = False
            validator_dict["check_result"] = "fail"
            validate_msg += "\t==> fail"
            validate_msg += "\n{}({}) {} {}({})".format(
                check_value,
                type(check_value).__name__,
                comparator,
                expect_value,
                type(expect_value).__name__
            )
            logger.log_error(validate_msg)
            failures.append(validate_msg)

        self.validation_results.append(validator_dict)

        # restore validator args, in case of running multiple times
        validator.update_args(validator_args)

    if not validate_pass:
        # join the collected messages directly; wrapping `failures` in an
        # identity comprehension was redundant.
        failures_string = "\n".join(failures)
        raise exceptions.ValidationFailure(failures_string)
constant[ make validation with comparators
]
name[self].validation_results assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b21c7c70> begin[:]
return[None]
call[name[logger].log_debug, parameter[constant[start to validate.]]]
variable[validate_pass] assign[=] constant[True]
variable[failures] assign[=] list[[]]
for taget[name[validator]] in starred[name[validators]] begin[:]
if <ast.UnaryOp object at 0x7da1b21c7d00> begin[:]
<ast.Raise object at 0x7da1b21c6ef0>
variable[validator_args] assign[=] call[name[validator].get_args, parameter[]]
<ast.Tuple object at 0x7da1b21c7ac0> assign[=] name[validator_args]
variable[check_value] assign[=] call[name[self].__eval_validator_check, parameter[name[check_item], name[resp_obj]]]
variable[expect_value] assign[=] call[name[self].__eval_validator_expect, parameter[name[expect_item]]]
call[name[validator].update_args, parameter[list[[<ast.Name object at 0x7da18dc99900>, <ast.Name object at 0x7da18dc9bca0>]]]]
variable[comparator] assign[=] name[validator].func_name
variable[validator_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18dc981c0>, <ast.Constant object at 0x7da18dc9b280>, <ast.Constant object at 0x7da18dc983a0>, <ast.Constant object at 0x7da18dc98070>, <ast.Constant object at 0x7da18dc9b970>], [<ast.Name object at 0x7da18dc9a9b0>, <ast.Name object at 0x7da18dc9bfa0>, <ast.Name object at 0x7da18dc98d00>, <ast.Name object at 0x7da18dc9bbe0>, <ast.Name object at 0x7da18dc9b070>]]
variable[validate_msg] assign[=] call[constant[
validate: {} {} {}({})].format, parameter[name[check_item], name[comparator], name[expect_value], call[name[type], parameter[name[expect_value]]].__name__]]
<ast.Try object at 0x7da18dc98d60>
call[name[self].validation_results.append, parameter[name[validator_dict]]]
call[name[validator].update_args, parameter[name[validator_args]]]
if <ast.UnaryOp object at 0x7da18ede7280> begin[:]
variable[failures_string] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da18ede6cb0>]]
<ast.Raise object at 0x7da18ede50c0> | keyword[def] identifier[validate] ( identifier[self] , identifier[validators] , identifier[resp_obj] ):
literal[string]
identifier[self] . identifier[validation_results] =[]
keyword[if] keyword[not] identifier[validators] :
keyword[return]
identifier[logger] . identifier[log_debug] ( literal[string] )
identifier[validate_pass] = keyword[True]
identifier[failures] =[]
keyword[for] identifier[validator] keyword[in] identifier[validators] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[validator] , identifier[parser] . identifier[LazyFunction] ):
keyword[raise] identifier[exceptions] . identifier[ValidationFailure] (
literal[string] . identifier[format] ( identifier[validators] ))
identifier[validator_args] = identifier[validator] . identifier[get_args] ()
identifier[check_item] , identifier[expect_item] = identifier[validator_args]
identifier[check_value] = identifier[self] . identifier[__eval_validator_check] (
identifier[check_item] ,
identifier[resp_obj]
)
identifier[expect_value] = identifier[self] . identifier[__eval_validator_expect] ( identifier[expect_item] )
identifier[validator] . identifier[update_args] ([ identifier[check_value] , identifier[expect_value] ])
identifier[comparator] = identifier[validator] . identifier[func_name]
identifier[validator_dict] ={
literal[string] : identifier[comparator] ,
literal[string] : identifier[check_item] ,
literal[string] : identifier[check_value] ,
literal[string] : identifier[expect_item] ,
literal[string] : identifier[expect_value]
}
identifier[validate_msg] = literal[string] . identifier[format] (
identifier[check_item] ,
identifier[comparator] ,
identifier[expect_value] ,
identifier[type] ( identifier[expect_value] ). identifier[__name__]
)
keyword[try] :
identifier[validator] . identifier[to_value] ( identifier[self] . identifier[test_variables_mapping] )
identifier[validator_dict] [ literal[string] ]= literal[string]
identifier[validate_msg] += literal[string]
identifier[logger] . identifier[log_debug] ( identifier[validate_msg] )
keyword[except] ( identifier[AssertionError] , identifier[TypeError] ):
identifier[validate_pass] = keyword[False]
identifier[validator_dict] [ literal[string] ]= literal[string]
identifier[validate_msg] += literal[string]
identifier[validate_msg] += literal[string] . identifier[format] (
identifier[check_value] ,
identifier[type] ( identifier[check_value] ). identifier[__name__] ,
identifier[comparator] ,
identifier[expect_value] ,
identifier[type] ( identifier[expect_value] ). identifier[__name__]
)
identifier[logger] . identifier[log_error] ( identifier[validate_msg] )
identifier[failures] . identifier[append] ( identifier[validate_msg] )
identifier[self] . identifier[validation_results] . identifier[append] ( identifier[validator_dict] )
identifier[validator] . identifier[update_args] ( identifier[validator_args] )
keyword[if] keyword[not] identifier[validate_pass] :
identifier[failures_string] = literal[string] . identifier[join] ([ identifier[failure] keyword[for] identifier[failure] keyword[in] identifier[failures] ])
keyword[raise] identifier[exceptions] . identifier[ValidationFailure] ( identifier[failures_string] ) | def validate(self, validators, resp_obj):
""" make validation with comparators
"""
self.validation_results = []
if not validators:
return # depends on [control=['if'], data=[]]
logger.log_debug('start to validate.')
validate_pass = True
failures = []
for validator in validators:
# validator should be LazyFunction object
if not isinstance(validator, parser.LazyFunction):
raise exceptions.ValidationFailure('validator should be parsed first: {}'.format(validators)) # depends on [control=['if'], data=[]]
# evaluate validator args with context variable mapping.
validator_args = validator.get_args()
(check_item, expect_item) = validator_args
check_value = self.__eval_validator_check(check_item, resp_obj)
expect_value = self.__eval_validator_expect(expect_item)
validator.update_args([check_value, expect_value])
comparator = validator.func_name
validator_dict = {'comparator': comparator, 'check': check_item, 'check_value': check_value, 'expect': expect_item, 'expect_value': expect_value}
validate_msg = '\nvalidate: {} {} {}({})'.format(check_item, comparator, expect_value, type(expect_value).__name__)
try:
validator.to_value(self.test_variables_mapping)
validator_dict['check_result'] = 'pass'
validate_msg += '\t==> pass'
logger.log_debug(validate_msg) # depends on [control=['try'], data=[]]
except (AssertionError, TypeError):
validate_pass = False
validator_dict['check_result'] = 'fail'
validate_msg += '\t==> fail'
validate_msg += '\n{}({}) {} {}({})'.format(check_value, type(check_value).__name__, comparator, expect_value, type(expect_value).__name__)
logger.log_error(validate_msg)
failures.append(validate_msg) # depends on [control=['except'], data=[]]
self.validation_results.append(validator_dict)
# restore validator args, in case of running multiple times
validator.update_args(validator_args) # depends on [control=['for'], data=['validator']]
if not validate_pass:
failures_string = '\n'.join([failure for failure in failures])
raise exceptions.ValidationFailure(failures_string) # depends on [control=['if'], data=[]] |
def BooleanField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None):
    """
    Create new bool field on a model.

    :param default: any boolean value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: whether this field should appear in the object's repr.
    :param bool cmp: whether to include this field in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    field_default = _init_fields.init_default(required, default, None)
    field_validator = _init_fields.init_validator(required, bool)
    return attrib(
        default=field_default,
        validator=field_validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
constant[
Create new bool field on a model.
:param default: any boolean value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
]
variable[default] assign[=] call[name[_init_fields].init_default, parameter[name[required], name[default], constant[None]]]
variable[validator] assign[=] call[name[_init_fields].init_validator, parameter[name[required], name[bool]]]
return[call[name[attrib], parameter[]]] | keyword[def] identifier[BooleanField] ( identifier[default] = identifier[NOTHING] , identifier[required] = keyword[True] , identifier[repr] = keyword[True] , identifier[cmp] = keyword[True] ,
identifier[key] = keyword[None] ):
literal[string]
identifier[default] = identifier[_init_fields] . identifier[init_default] ( identifier[required] , identifier[default] , keyword[None] )
identifier[validator] = identifier[_init_fields] . identifier[init_validator] ( identifier[required] , identifier[bool] )
keyword[return] identifier[attrib] ( identifier[default] = identifier[default] , identifier[validator] = identifier[validator] , identifier[repr] = identifier[repr] , identifier[cmp] = identifier[cmp] ,
identifier[metadata] = identifier[dict] ( identifier[key] = identifier[key] )) | def BooleanField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
"""
Create new bool field on a model.
:param default: any boolean value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
"""
default = _init_fields.init_default(required, default, None)
validator = _init_fields.init_validator(required, bool)
return attrib(default=default, validator=validator, repr=repr, cmp=cmp, metadata=dict(key=key)) |
def set_dependencies(analysis, dependencies, path):
    """
    Syncronize the Analysis result with the needed dependencies.

    Walks the ``binaries`` and ``datas`` TOCs of *analysis*.  The first
    time a file name is seen it is recorded in *dependencies* (a mapping
    of file name -> path of the build that provides it).  On later
    sightings the TOC entry is removed and an entry referencing the
    providing build is appended to ``analysis.dependencies`` instead.

    :param analysis: Analysis object; its TOCs and ``dependencies`` list
        are modified in place.
    :param dependencies: dict mapping file name to the path that first
        provided it; updated in place.
    :param path: path associated with the current Analysis.
    """
    for toc in (analysis.binaries, analysis.datas):
        for i, tpl in enumerate(toc):
            # membership test on the dict itself; `not x in d.keys()` was
            # both unidiomatic and slower.
            if tpl[1] not in dependencies:
                logger.info("Adding dependency %s located in %s" % (tpl[1], path))
                dependencies[tpl[1]] = path
            else:
                dep_path = get_relative_path(path, dependencies[tpl[1]])
                logger.info("Referencing %s to be a dependecy for %s, located in %s" % (tpl[1], path, dep_path))
                analysis.dependencies.append((":".join((dep_path, tpl[0])), tpl[1], "DEPENDENCY"))
                # Mark for removal; shrinking the list while enumerate()
                # is iterating it would skip entries.
                toc[i] = (None, None, None)
        # Clean the list: drop the entries replaced by DEPENDENCY refs.
        toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]
constant[
Syncronize the Analysis result with the needed dependencies.
]
for taget[name[toc]] in starred[tuple[[<ast.Attribute object at 0x7da18fe918a0>, <ast.Attribute object at 0x7da18fe93910>]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18fe92e00>, <ast.Name object at 0x7da18fe91750>]]] in starred[call[name[enumerate], parameter[name[toc]]]] begin[:]
if <ast.UnaryOp object at 0x7da18fe92260> begin[:]
call[name[logger].info, parameter[binary_operation[constant[Adding dependency %s located in %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18fe921d0>, <ast.Name object at 0x7da18fe93820>]]]]]
call[name[dependencies]][call[name[tpl]][constant[1]]] assign[=] name[path]
call[name[toc]][<ast.Slice object at 0x7da204621e40>] assign[=] <ast.ListComp object at 0x7da204620ca0> | keyword[def] identifier[set_dependencies] ( identifier[analysis] , identifier[dependencies] , identifier[path] ):
literal[string]
keyword[for] identifier[toc] keyword[in] ( identifier[analysis] . identifier[binaries] , identifier[analysis] . identifier[datas] ):
keyword[for] identifier[i] , identifier[tpl] keyword[in] identifier[enumerate] ( identifier[toc] ):
keyword[if] keyword[not] identifier[tpl] [ literal[int] ] keyword[in] identifier[dependencies] . identifier[keys] ():
identifier[logger] . identifier[info] ( literal[string] %( identifier[tpl] [ literal[int] ], identifier[path] ))
identifier[dependencies] [ identifier[tpl] [ literal[int] ]]= identifier[path]
keyword[else] :
identifier[dep_path] = identifier[get_relative_path] ( identifier[path] , identifier[dependencies] [ identifier[tpl] [ literal[int] ]])
identifier[logger] . identifier[info] ( literal[string] %( identifier[tpl] [ literal[int] ], identifier[path] , identifier[dep_path] ))
identifier[analysis] . identifier[dependencies] . identifier[append] (( literal[string] . identifier[join] (( identifier[dep_path] , identifier[tpl] [ literal[int] ])), identifier[tpl] [ literal[int] ], literal[string] ))
identifier[toc] [ identifier[i] ]=( keyword[None] , keyword[None] , keyword[None] )
identifier[toc] [:]=[ identifier[tpl] keyword[for] identifier[tpl] keyword[in] identifier[toc] keyword[if] identifier[tpl] !=( keyword[None] , keyword[None] , keyword[None] )] | def set_dependencies(analysis, dependencies, path):
"""
Syncronize the Analysis result with the needed dependencies.
"""
for toc in (analysis.binaries, analysis.datas):
for (i, tpl) in enumerate(toc):
if not tpl[1] in dependencies.keys():
logger.info('Adding dependency %s located in %s' % (tpl[1], path))
dependencies[tpl[1]] = path # depends on [control=['if'], data=[]]
else:
dep_path = get_relative_path(path, dependencies[tpl[1]])
logger.info('Referencing %s to be a dependecy for %s, located in %s' % (tpl[1], path, dep_path))
analysis.dependencies.append((':'.join((dep_path, tpl[0])), tpl[1], 'DEPENDENCY'))
toc[i] = (None, None, None) # depends on [control=['for'], data=[]]
# Clean the list
toc[:] = [tpl for tpl in toc if tpl != (None, None, None)] # depends on [control=['for'], data=['toc']] |
def register_presence_callback(self, type_, from_, cb):
    """
    Register a callback to be called when a presence stanza is received.
    :param type_: Presence type to listen for.
    :type type_: :class:`~.PresenceType`
    :param from_: Sender JID to listen for, or :data:`None` for a wildcard
        match.
    :type from_: :class:`~aioxmpp.JID` or :data:`None`.
    :param cb: Callback function
    :raises ValueError: if another listener with the same ``(type_,
        from_)`` pair is already registered
    :raises ValueError: if `type_` is not a valid
        :class:`~.PresenceType` (and cannot be cast
        to a :class:`~.PresenceType`)
    `cb` will be called whenever a presence stanza matching the `type_` is
    received from the specified sender. `from_` may be :data:`None` to
    indicate a wildcard. Like with :meth:`register_message_callback`, more
    specific callbacks win over less specific callbacks. The fallback order
    is identical, except that the ``type_=None`` entries described there do
    not apply for presence stanzas and are thus omitted.
    See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
    wildcarding rules.
    .. versionchanged:: 0.7
       The `type_` argument is now supposed to be a
       :class:`~.PresenceType` member.
    .. deprecated:: 0.7
       Passing a :class:`str` as `type_` argument is deprecated and will
       raise a :class:`TypeError` as of the 1.0 release. See the Changelog
       for :ref:`api-changelog-0.7` for further details on how to upgrade
       your code efficiently.
    .. deprecated:: 0.9
        This method has been deprecated. It is recommended to use
        :class:`aioxmpp.PresenceClient` instead.
    """
    # Coercion happens first so a bad `type_` raises before the
    # deprecation warning is emitted (same order as before).
    coerced_type = self._coerce_enum(type_, structs.PresenceType)
    deprecation_message = (
        "register_presence_callback is deprecated; use "
        "aioxmpp.dispatcher.SimplePresenceDispatcher or "
        "aioxmpp.PresenceClient instead"
    )
    warnings.warn(deprecation_message, DeprecationWarning, stacklevel=2)
    self._xxx_presence_dispatcher.register_callback(coerced_type, from_, cb)
constant[
Register a callback to be called when a presence stanza is received.
:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_,
from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid
:class:`~.PresenceType` (and cannot be cast
to a :class:`~.PresenceType`)
`cb` will be called whenever a presence stanza matching the `type_` is
received from the specified sender. `from_` may be :data:`None` to
indicate a wildcard. Like with :meth:`register_message_callback`, more
specific callbacks win over less specific callbacks. The fallback order
is identical, except that the ``type_=None`` entries described there do
not apply for presence stanzas and are thus omitted.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.PresenceType` member.
.. deprecated:: 0.7
Passing a :class:`str` as `type_` argument is deprecated and will
raise a :class:`TypeError` as of the 1.0 release. See the Changelog
for :ref:`api-changelog-0.7` for further details on how to upgrade
your code efficiently.
.. deprecated:: 0.9
This method has been deprecated. It is recommended to use
:class:`aioxmpp.PresenceClient` instead.
]
variable[type_] assign[=] call[name[self]._coerce_enum, parameter[name[type_], name[structs].PresenceType]]
call[name[warnings].warn, parameter[constant[register_presence_callback is deprecated; use aioxmpp.dispatcher.SimplePresenceDispatcher or aioxmpp.PresenceClient instead], name[DeprecationWarning]]]
call[name[self]._xxx_presence_dispatcher.register_callback, parameter[name[type_], name[from_], name[cb]]] | keyword[def] identifier[register_presence_callback] ( identifier[self] , identifier[type_] , identifier[from_] , identifier[cb] ):
literal[string]
identifier[type_] = identifier[self] . identifier[_coerce_enum] ( identifier[type_] , identifier[structs] . identifier[PresenceType] )
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] ,
identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int]
)
identifier[self] . identifier[_xxx_presence_dispatcher] . identifier[register_callback] (
identifier[type_] ,
identifier[from_] ,
identifier[cb] ,
) | def register_presence_callback(self, type_, from_, cb):
"""
Register a callback to be called when a presence stanza is received.
:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_,
from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid
:class:`~.PresenceType` (and cannot be cast
to a :class:`~.PresenceType`)
`cb` will be called whenever a presence stanza matching the `type_` is
received from the specified sender. `from_` may be :data:`None` to
indicate a wildcard. Like with :meth:`register_message_callback`, more
specific callbacks win over less specific callbacks. The fallback order
is identical, except that the ``type_=None`` entries described there do
not apply for presence stanzas and are thus omitted.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.PresenceType` member.
.. deprecated:: 0.7
Passing a :class:`str` as `type_` argument is deprecated and will
raise a :class:`TypeError` as of the 1.0 release. See the Changelog
for :ref:`api-changelog-0.7` for further details on how to upgrade
your code efficiently.
.. deprecated:: 0.9
This method has been deprecated. It is recommended to use
:class:`aioxmpp.PresenceClient` instead.
"""
type_ = self._coerce_enum(type_, structs.PresenceType)
warnings.warn('register_presence_callback is deprecated; use aioxmpp.dispatcher.SimplePresenceDispatcher or aioxmpp.PresenceClient instead', DeprecationWarning, stacklevel=2)
self._xxx_presence_dispatcher.register_callback(type_, from_, cb) |
def indented_script(self) -> int:
    '''Return the length of the leading whitespace of ``self._script``.

    Despite the bool-sounding name, the value is an int (the original
    ``-> bool`` annotation was wrong): 0 when ``INDENTED`` does not match
    (script not indented), otherwise ``len`` of the regex's second capture
    group (presumably the leading space/tab/newline run — confirm against
    the ``INDENTED`` pattern).  The result is truthy exactly when the
    script is indented, so boolean callers keep working.
    '''
    # get all leading space, tab and newline
    leading = INDENTED.match(self._script)
    return 0 if leading is None else len(leading.group(2))
constant[ check self._script and see if it is indented ]
variable[leading] assign[=] call[name[INDENTED].match, parameter[name[self]._script]]
return[<ast.IfExp object at 0x7da204622650>] | keyword[def] identifier[indented_script] ( identifier[self] )-> identifier[bool] :
literal[string]
identifier[leading] = identifier[INDENTED] . identifier[match] ( identifier[self] . identifier[_script] )
keyword[return] literal[int] keyword[if] identifier[leading] keyword[is] keyword[None] keyword[else] identifier[len] ( identifier[leading] . identifier[group] ( literal[int] )) | def indented_script(self) -> bool:
""" check self._script and see if it is indented """
# get all leading space, tab and newline
leading = INDENTED.match(self._script)
return 0 if leading is None else len(leading.group(2)) |
def generateCertificate(self, alias,
                        commonName, organizationalUnit,
                        city, state, country,
                        keyalg="RSA", keysize=1024,
                        sigalg="SHA256withRSA",
                        validity=90
                        ):
    """
    Use this operation to create a self-signed certificate or as a
    starting point for getting a production-ready CA-signed
    certificate. The portal will generate a certificate for you and
    store it in its keystore.

    :param alias: keystore alias under which the certificate is stored.
    :param commonName: certificate common name (CN).
    :param organizationalUnit: organizational unit (OU).
    :param city: locality (L).
    :param state: state or province (ST).
    :param country: two-letter country code (C).
    :param keyalg: key algorithm, default "RSA".
    :param keysize: key size in bits, default 1024.
    :param sigalg: signature algorithm, default "SHA256withRSA".
    :param validity: validity period in days, default 90.
    :return: response of the POST request to the portal admin endpoint.
    """
    params = {"f" : "json",
              "alias" : alias,
              "commonName" : commonName,
              "organizationalUnit" : organizationalUnit,
              "city" : city,
              "state" : state,
              "country" : country,
              "keyalg" : keyalg,
              "keysize" : keysize,
              "sigalg" : sigalg,
              "validity" : validity
              }
    # NOTE(review): the original path contained a stray space before
    # "generateCertificate", which yields an invalid request URL.
    url = self._url + "/SSLCertificate/generateCertificate"
    return self._post(url=url,
                      param_dict=params,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
constant[
Use this operation to create a self-signed certificate or as a
starting point for getting a production-ready CA-signed
certificate. The portal will generate a certificate for you and
store it in its keystore.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6aad10>, <ast.Constant object at 0x7da20c6aa080>, <ast.Constant object at 0x7da20c6a89a0>, <ast.Constant object at 0x7da20c6abd60>, <ast.Constant object at 0x7da20c6ab8e0>, <ast.Constant object at 0x7da20c6a9e40>, <ast.Constant object at 0x7da20c6a9ae0>, <ast.Constant object at 0x7da20c6aa6e0>, <ast.Constant object at 0x7da20c6a86a0>, <ast.Constant object at 0x7da20c6abf70>, <ast.Constant object at 0x7da20c6a8370>], [<ast.Constant object at 0x7da20c6a9030>, <ast.Name object at 0x7da20c6abbe0>, <ast.Name object at 0x7da20c6aa740>, <ast.Name object at 0x7da20c6ab2b0>, <ast.Name object at 0x7da20c6a8c10>, <ast.Name object at 0x7da20c6aa8f0>, <ast.Name object at 0x7da20c6a9150>, <ast.Name object at 0x7da20c6a9d50>, <ast.Name object at 0x7da20c6aaf80>, <ast.Name object at 0x7da20c6aa6b0>, <ast.Name object at 0x7da20c6aad40>]]
variable[url] assign[=] binary_operation[name[self]._url + constant[/SSLCertificate/ generateCertificate]]
return[call[name[self]._post, parameter[]]] | keyword[def] identifier[generateCertificate] ( identifier[self] , identifier[alias] ,
identifier[commonName] , identifier[organizationalUnit] ,
identifier[city] , identifier[state] , identifier[country] ,
identifier[keyalg] = literal[string] , identifier[keysize] = literal[int] ,
identifier[sigalg] = literal[string] ,
identifier[validity] = literal[int]
):
literal[string]
identifier[params] ={ literal[string] : literal[string] ,
literal[string] : identifier[alias] ,
literal[string] : identifier[commonName] ,
literal[string] : identifier[organizationalUnit] ,
literal[string] : identifier[city] ,
literal[string] : identifier[state] ,
literal[string] : identifier[country] ,
literal[string] : identifier[keyalg] ,
literal[string] : identifier[keysize] ,
literal[string] : identifier[sigalg] ,
literal[string] : identifier[validity]
}
identifier[url] = identifier[self] . identifier[_url] + literal[string]
keyword[return] identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ) | def generateCertificate(self, alias, commonName, organizationalUnit, city, state, country, keyalg='RSA', keysize=1024, sigalg='SHA256withRSA', validity=90):
"""
Use this operation to create a self-signed certificate or as a
starting point for getting a production-ready CA-signed
certificate. The portal will generate a certificate for you and
store it in its keystore.
"""
params = {'f': 'json', 'alias': alias, 'commonName': commonName, 'organizationalUnit': organizationalUnit, 'city': city, 'state': state, 'country': country, 'keyalg': keyalg, 'keysize': keysize, 'sigalg': sigalg, 'validity': validity}
url = self._url + '/SSLCertificate/ generateCertificate'
return self._post(url=url, param_dict=params, proxy_port=self._proxy_port, proxy_url=self._proxy_url) |
def export_sync(self, **kwargs):
    """
    EXPORT: exports data from the Firebase into a File, if given.

    Requires the 'point' parameter as a keyworded argument.  Optional
    kwargs: 'auth' (defaults to the stored auth token), 'path' (file to
    write the exported content to) and 'mode' (file open mode, default
    "w").  Returns the raw response content.
    """
    self.amust(("point",), kwargs)
    export_url = self.url_correct(
        kwargs["point"],
        kwargs.get("auth", self.__auth),
        True,
    )
    response = requests.get(export_url)
    self.catch_error(response)
    destination = kwargs.get("path", None)
    if destination:
        self.__write(destination, response.content, kwargs.get("mode", "w"))
    return response.content
constant[
EXPORT: exports data from the Firebase into a File, if given.
Requires the 'point' parameter as a keyworded argument.
]
call[name[self].amust, parameter[tuple[[<ast.Constant object at 0x7da20e954ee0>]], name[kwargs]]]
variable[response] assign[=] call[name[requests].get, parameter[call[name[self].url_correct, parameter[call[name[kwargs]][constant[point]], call[name[kwargs].get, parameter[constant[auth], name[self].__auth]], constant[True]]]]]
call[name[self].catch_error, parameter[name[response]]]
variable[path] assign[=] call[name[kwargs].get, parameter[constant[path], constant[None]]]
if name[path] begin[:]
call[name[self].__write, parameter[name[path], name[response].content, call[name[kwargs].get, parameter[constant[mode], constant[w]]]]]
return[name[response].content] | keyword[def] identifier[export_sync] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[amust] (( literal[string] ,), identifier[kwargs] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[self] . identifier[url_correct] ( identifier[kwargs] [ literal[string] ],
identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[__auth] ), keyword[True] ))
identifier[self] . identifier[catch_error] ( identifier[response] )
identifier[path] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[path] :
identifier[self] . identifier[__write] ( identifier[path] , identifier[response] . identifier[content] , identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ))
keyword[return] identifier[response] . identifier[content] | def export_sync(self, **kwargs):
"""
EXPORT: exports data from the Firebase into a File, if given.
Requires the 'point' parameter as a keyworded argument.
"""
self.amust(('point',), kwargs)
response = requests.get(self.url_correct(kwargs['point'], kwargs.get('auth', self.__auth), True))
self.catch_error(response)
path = kwargs.get('path', None)
if path:
self.__write(path, response.content, kwargs.get('mode', 'w')) # depends on [control=['if'], data=[]]
return response.content |
def delete(self, key, cas=0):
    """
    Delete a key/value from server. If key does not exist, it returns True.

    :param key: Key's name to be deleted
    :param cas: CAS of the key
    :return: True in case of success and False in case of failure.
    """
    return self._get_server(key).delete(key, cas)
constant[
Delete a key/value from server. If key does not exist, it returns True.
:param key: Key's name to be deleted
:param cas: CAS of the key
:return: True in case o success and False in case of failure.
]
variable[server] assign[=] call[name[self]._get_server, parameter[name[key]]]
return[call[name[server].delete, parameter[name[key], name[cas]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[key] , identifier[cas] = literal[int] ):
literal[string]
identifier[server] = identifier[self] . identifier[_get_server] ( identifier[key] )
keyword[return] identifier[server] . identifier[delete] ( identifier[key] , identifier[cas] ) | def delete(self, key, cas=0):
"""
Delete a key/value from server. If key does not exist, it returns True.
:param key: Key's name to be deleted
:param cas: CAS of the key
:return: True in case o success and False in case of failure.
"""
server = self._get_server(key)
return server.delete(key, cas) |
def unpickle(self, throw=True, throw_dead=True):
"""
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
"""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
self._throw_dead()
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
obj = unpickler.load()
self._unpickled = obj
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e)
if throw:
if isinstance(obj, CallError):
raise obj
return obj | def function[unpickle, parameter[self, throw, throw_dead]]:
constant[
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
]
<ast.BoolOp object at 0x7da1b1eb5660>
if <ast.BoolOp object at 0x7da1b1eb6cb0> begin[:]
call[name[self]._throw_dead, parameter[]]
variable[obj] assign[=] name[self]._unpickled
if compare[name[obj] is name[Message]._unpickled] begin[:]
variable[fp] assign[=] call[name[BytesIO], parameter[name[self].data]]
variable[unpickler] assign[=] call[name[_Unpickler], parameter[name[fp]]]
name[unpickler].find_global assign[=] name[self]._find_global
<ast.Try object at 0x7da1b1eb5b70>
if name[throw] begin[:]
if call[name[isinstance], parameter[name[obj], name[CallError]]] begin[:]
<ast.Raise object at 0x7da1b1eb5120>
return[name[obj]] | keyword[def] identifier[unpickle] ( identifier[self] , identifier[throw] = keyword[True] , identifier[throw_dead] = keyword[True] ):
literal[string]
identifier[_vv] keyword[and] identifier[IOLOG] . identifier[debug] ( literal[string] , identifier[self] )
keyword[if] identifier[throw_dead] keyword[and] identifier[self] . identifier[is_dead] :
identifier[self] . identifier[_throw_dead] ()
identifier[obj] = identifier[self] . identifier[_unpickled]
keyword[if] identifier[obj] keyword[is] identifier[Message] . identifier[_unpickled] :
identifier[fp] = identifier[BytesIO] ( identifier[self] . identifier[data] )
identifier[unpickler] = identifier[_Unpickler] ( identifier[fp] ,** identifier[self] . identifier[UNPICKLER_KWARGS] )
identifier[unpickler] . identifier[find_global] = identifier[self] . identifier[_find_global]
keyword[try] :
identifier[obj] = identifier[unpickler] . identifier[load] ()
identifier[self] . identifier[_unpickled] = identifier[obj]
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
keyword[raise] identifier[StreamError] ( literal[string] , identifier[e] )
keyword[if] identifier[throw] :
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[CallError] ):
keyword[raise] identifier[obj]
keyword[return] identifier[obj] | def unpickle(self, throw=True, throw_dead=True):
"""
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
"""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
self._throw_dead() # depends on [control=['if'], data=[]]
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
obj = unpickler.load()
self._unpickled = obj # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['obj']]
if throw:
if isinstance(obj, CallError):
raise obj # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return obj |
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error) | def function[_handle_error, parameter[self, text, ErrorClass, infile, cur_index]]:
constant[
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
]
variable[line] assign[=] call[name[infile]][name[cur_index]]
<ast.AugAssign object at 0x7da1b0f0da50>
variable[message] assign[=] binary_operation[name[text] <ast.Mod object at 0x7da2590d6920> name[cur_index]]
variable[error] assign[=] call[name[ErrorClass], parameter[name[message], name[cur_index], name[line]]]
if name[self].raise_errors begin[:]
<ast.Raise object at 0x7da1b0f0c040>
call[name[self]._errors.append, parameter[name[error]]] | keyword[def] identifier[_handle_error] ( identifier[self] , identifier[text] , identifier[ErrorClass] , identifier[infile] , identifier[cur_index] ):
literal[string]
identifier[line] = identifier[infile] [ identifier[cur_index] ]
identifier[cur_index] += literal[int]
identifier[message] = identifier[text] % identifier[cur_index]
identifier[error] = identifier[ErrorClass] ( identifier[message] , identifier[cur_index] , identifier[line] )
keyword[if] identifier[self] . identifier[raise_errors] :
keyword[raise] identifier[error]
identifier[self] . identifier[_errors] . identifier[append] ( identifier[error] ) | def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error # depends on [control=['if'], data=[]]
# store the error
# reraise when parsing has finished
self._errors.append(error) |
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Install all test dependencies.
session.install('-r', 'requirements-test.txt')
# Install dev packages.
_install_dev_packages(session)
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=opencensus',
'--cov=context',
'--cov=contrib',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit/',
'context/',
'contrib/',
*session.posargs
) | def function[unit, parameter[session, py]]:
constant[Run the unit test suite.]
name[session].interpreter assign[=] call[constant[python{}].format, parameter[name[py]]]
call[name[session].install, parameter[constant[-r], constant[requirements-test.txt]]]
call[name[_install_dev_packages], parameter[name[session]]]
call[name[session].run, parameter[constant[py.test], constant[--quiet], constant[--cov=opencensus], constant[--cov=context], constant[--cov=contrib], constant[--cov-append], constant[--cov-config=.coveragerc], constant[--cov-report=], constant[--cov-fail-under=97], constant[tests/unit/], constant[context/], constant[contrib/], <ast.Starred object at 0x7da18f8126b0>]] | keyword[def] identifier[unit] ( identifier[session] , identifier[py] ):
literal[string]
identifier[session] . identifier[interpreter] = literal[string] . identifier[format] ( identifier[py] )
identifier[session] . identifier[install] ( literal[string] , literal[string] )
identifier[_install_dev_packages] ( identifier[session] )
identifier[session] . identifier[run] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
* identifier[session] . identifier[posargs]
) | def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Install all test dependencies.
session.install('-r', 'requirements-test.txt')
# Install dev packages.
_install_dev_packages(session)
# Run py.test against the unit tests.
session.run('py.test', '--quiet', '--cov=opencensus', '--cov=context', '--cov=contrib', '--cov-append', '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', 'tests/unit/', 'context/', 'contrib/', *session.posargs) |
def quoted(arg):
""" Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license
"""
if isinstance(arg, str):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
q = '"'
else:
arg = arg.replace(b'\\', b'\\\\')
arg = arg.replace(b'"', b'\\"')
q = b'"'
return q + arg + q | def function[quoted, parameter[arg]]:
constant[ Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license
]
if call[name[isinstance], parameter[name[arg], name[str]]] begin[:]
variable[arg] assign[=] call[name[arg].replace, parameter[constant[\], constant[\\]]]
variable[arg] assign[=] call[name[arg].replace, parameter[constant["], constant[\"]]]
variable[q] assign[=] constant["]
return[binary_operation[binary_operation[name[q] + name[arg]] + name[q]]] | keyword[def] identifier[quoted] ( identifier[arg] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[str] ):
identifier[arg] = identifier[arg] . identifier[replace] ( literal[string] , literal[string] )
identifier[arg] = identifier[arg] . identifier[replace] ( literal[string] , literal[string] )
identifier[q] = literal[string]
keyword[else] :
identifier[arg] = identifier[arg] . identifier[replace] ( literal[string] , literal[string] )
identifier[arg] = identifier[arg] . identifier[replace] ( literal[string] , literal[string] )
identifier[q] = literal[string]
keyword[return] identifier[q] + identifier[arg] + identifier[q] | def quoted(arg):
""" Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license
"""
if isinstance(arg, str):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
q = '"' # depends on [control=['if'], data=[]]
else:
arg = arg.replace(b'\\', b'\\\\')
arg = arg.replace(b'"', b'\\"')
q = b'"'
return q + arg + q |
def setCurrentSchemaPath(self, path):
"""
Sets the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:param path | <str>
"""
if not path:
self.setCurrentItem(None)
return False
for item in self.topLevelItems():
if item.setCurrentSchemaPath(nativestring(path)):
return True
self.setCurrentItem(None)
return False | def function[setCurrentSchemaPath, parameter[self, path]]:
constant[
Sets the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:param path | <str>
]
if <ast.UnaryOp object at 0x7da18f09e560> begin[:]
call[name[self].setCurrentItem, parameter[constant[None]]]
return[constant[False]]
for taget[name[item]] in starred[call[name[self].topLevelItems, parameter[]]] begin[:]
if call[name[item].setCurrentSchemaPath, parameter[call[name[nativestring], parameter[name[path]]]]] begin[:]
return[constant[True]]
call[name[self].setCurrentItem, parameter[constant[None]]]
return[constant[False]] | keyword[def] identifier[setCurrentSchemaPath] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
identifier[self] . identifier[setCurrentItem] ( keyword[None] )
keyword[return] keyword[False]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[topLevelItems] ():
keyword[if] identifier[item] . identifier[setCurrentSchemaPath] ( identifier[nativestring] ( identifier[path] )):
keyword[return] keyword[True]
identifier[self] . identifier[setCurrentItem] ( keyword[None] )
keyword[return] keyword[False] | def setCurrentSchemaPath(self, path):
"""
Sets the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:param path | <str>
"""
if not path:
self.setCurrentItem(None)
return False # depends on [control=['if'], data=[]]
for item in self.topLevelItems():
if item.setCurrentSchemaPath(nativestring(path)):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
self.setCurrentItem(None)
return False |
def _write_vvr(self, f, data):
'''
Writes a vvr to the end of file "f" with the byte stream "data".
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.VVR_BASE_SIZE64 + len(data)
section_type = CDF.VVR_
vvr1 = bytearray(12)
vvr1[0:8] = struct.pack('>q', block_size)
vvr1[8:12] = struct.pack('>i', section_type)
f.write(vvr1)
f.write(data)
return byte_loc | def function[_write_vvr, parameter[self, f, data]]:
constant[
Writes a vvr to the end of file "f" with the byte stream "data".
]
call[name[f].seek, parameter[constant[0], constant[2]]]
variable[byte_loc] assign[=] call[name[f].tell, parameter[]]
variable[block_size] assign[=] binary_operation[name[CDF].VVR_BASE_SIZE64 + call[name[len], parameter[name[data]]]]
variable[section_type] assign[=] name[CDF].VVR_
variable[vvr1] assign[=] call[name[bytearray], parameter[constant[12]]]
call[name[vvr1]][<ast.Slice object at 0x7da1b064d630>] assign[=] call[name[struct].pack, parameter[constant[>q], name[block_size]]]
call[name[vvr1]][<ast.Slice object at 0x7da1b064e500>] assign[=] call[name[struct].pack, parameter[constant[>i], name[section_type]]]
call[name[f].write, parameter[name[vvr1]]]
call[name[f].write, parameter[name[data]]]
return[name[byte_loc]] | keyword[def] identifier[_write_vvr] ( identifier[self] , identifier[f] , identifier[data] ):
literal[string]
identifier[f] . identifier[seek] ( literal[int] , literal[int] )
identifier[byte_loc] = identifier[f] . identifier[tell] ()
identifier[block_size] = identifier[CDF] . identifier[VVR_BASE_SIZE64] + identifier[len] ( identifier[data] )
identifier[section_type] = identifier[CDF] . identifier[VVR_]
identifier[vvr1] = identifier[bytearray] ( literal[int] )
identifier[vvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[block_size] )
identifier[vvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[section_type] )
identifier[f] . identifier[write] ( identifier[vvr1] )
identifier[f] . identifier[write] ( identifier[data] )
keyword[return] identifier[byte_loc] | def _write_vvr(self, f, data):
"""
Writes a vvr to the end of file "f" with the byte stream "data".
"""
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.VVR_BASE_SIZE64 + len(data)
section_type = CDF.VVR_
vvr1 = bytearray(12)
vvr1[0:8] = struct.pack('>q', block_size)
vvr1[8:12] = struct.pack('>i', section_type)
f.write(vvr1)
f.write(data)
return byte_loc |
def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes:
"""
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
"""
try:
validate_empty_bytes(offset, length, bytecode)
except ValidationError:
raise BytecodeLinkingError("Link references cannot be applied to bytecode")
new_bytes = (
# Ignore linting error b/c conflict b/w black & flake8
bytecode[:offset]
+ value
+ bytecode[offset + length :] # noqa: E201, E203
)
return new_bytes | def function[apply_link_ref, parameter[offset, length, value, bytecode]]:
constant[
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
]
<ast.Try object at 0x7da20e960520>
variable[new_bytes] assign[=] binary_operation[binary_operation[call[name[bytecode]][<ast.Slice object at 0x7da20e9638e0>] + name[value]] + call[name[bytecode]][<ast.Slice object at 0x7da20e960df0>]]
return[name[new_bytes]] | keyword[def] identifier[apply_link_ref] ( identifier[offset] : identifier[int] , identifier[length] : identifier[int] , identifier[value] : identifier[bytes] , identifier[bytecode] : identifier[bytes] )-> identifier[bytes] :
literal[string]
keyword[try] :
identifier[validate_empty_bytes] ( identifier[offset] , identifier[length] , identifier[bytecode] )
keyword[except] identifier[ValidationError] :
keyword[raise] identifier[BytecodeLinkingError] ( literal[string] )
identifier[new_bytes] =(
identifier[bytecode] [: identifier[offset] ]
+ identifier[value]
+ identifier[bytecode] [ identifier[offset] + identifier[length] :]
)
keyword[return] identifier[new_bytes] | def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes:
"""
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
"""
try:
validate_empty_bytes(offset, length, bytecode) # depends on [control=['try'], data=[]]
except ValidationError:
raise BytecodeLinkingError('Link references cannot be applied to bytecode') # depends on [control=['except'], data=[]]
# Ignore linting error b/c conflict b/w black & flake8
# noqa: E201, E203
new_bytes = bytecode[:offset] + value + bytecode[offset + length:]
return new_bytes |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.