code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _compute_gradients(self, loss_fn, x, unused_optim_state):
"""Compute a new value of `x` to minimize `loss_fn`.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns
a batch of loss values. `x` will be optimized to minimize
`loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous
to the `var_list` argument in standard TF Optimizer.
unused_optim_state: A (possibly nested) dict, containing any state
info needed for the optimizer.
Returns:
new_x: A list of Tensors, the same length as `x`, which are updated
new_optim_state: A dict, with the same structure as `optim_state`,
which have been updated.
"""
# Assumes `x` is a list,
# and contains a tensor representing a batch of images
assert len(x) == 1 and isinstance(x, list), \
'x should be a list and contain only one image tensor'
x = x[0]
loss = reduce_mean(loss_fn(x), axis=0)
return tf.gradients(loss, x) | def function[_compute_gradients, parameter[self, loss_fn, x, unused_optim_state]]:
constant[Compute a new value of `x` to minimize `loss_fn`.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns
a batch of loss values. `x` will be optimized to minimize
`loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous
to the `var_list` argument in standard TF Optimizer.
unused_optim_state: A (possibly nested) dict, containing any state
info needed for the optimizer.
Returns:
new_x: A list of Tensors, the same length as `x`, which are updated
new_optim_state: A dict, with the same structure as `optim_state`,
which have been updated.
]
assert[<ast.BoolOp object at 0x7da18dc9b8b0>]
variable[x] assign[=] call[name[x]][constant[0]]
variable[loss] assign[=] call[name[reduce_mean], parameter[call[name[loss_fn], parameter[name[x]]]]]
return[call[name[tf].gradients, parameter[name[loss], name[x]]]] | keyword[def] identifier[_compute_gradients] ( identifier[self] , identifier[loss_fn] , identifier[x] , identifier[unused_optim_state] ):
literal[string]
keyword[assert] identifier[len] ( identifier[x] )== literal[int] keyword[and] identifier[isinstance] ( identifier[x] , identifier[list] ), literal[string]
identifier[x] = identifier[x] [ literal[int] ]
identifier[loss] = identifier[reduce_mean] ( identifier[loss_fn] ( identifier[x] ), identifier[axis] = literal[int] )
keyword[return] identifier[tf] . identifier[gradients] ( identifier[loss] , identifier[x] ) | def _compute_gradients(self, loss_fn, x, unused_optim_state):
"""Compute a new value of `x` to minimize `loss_fn`.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns
a batch of loss values. `x` will be optimized to minimize
`loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous
to the `var_list` argument in standard TF Optimizer.
unused_optim_state: A (possibly nested) dict, containing any state
info needed for the optimizer.
Returns:
new_x: A list of Tensors, the same length as `x`, which are updated
new_optim_state: A dict, with the same structure as `optim_state`,
which have been updated.
"""
# Assumes `x` is a list,
# and contains a tensor representing a batch of images
assert len(x) == 1 and isinstance(x, list), 'x should be a list and contain only one image tensor'
x = x[0]
loss = reduce_mean(loss_fn(x), axis=0)
return tf.gradients(loss, x) |
def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g) | def function[tile, parameter[lt, width, gap]]:
constant[
Pretty print list of items.
]
from relative_module[jcvi.utils.iter] import module[grouper]
variable[max_len] assign[=] binary_operation[call[name[max], parameter[<ast.GeneratorExp object at 0x7da204622da0>]] + name[gap]]
variable[items_per_line] assign[=] call[name[max], parameter[binary_operation[name[width] <ast.FloorDiv object at 0x7da2590d6bc0> name[max_len]], constant[1]]]
variable[lt] assign[=] <ast.ListComp object at 0x7da204620e20>
variable[g] assign[=] call[name[list], parameter[call[name[grouper], parameter[name[lt], name[items_per_line]]]]]
return[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da204623310>]]] | keyword[def] identifier[tile] ( identifier[lt] , identifier[width] = literal[int] , identifier[gap] = literal[int] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[iter] keyword[import] identifier[grouper]
identifier[max_len] = identifier[max] ( identifier[len] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[lt] )+ identifier[gap]
identifier[items_per_line] = identifier[max] ( identifier[width] // identifier[max_len] , literal[int] )
identifier[lt] =[ identifier[x] . identifier[rjust] ( identifier[max_len] ) keyword[for] identifier[x] keyword[in] identifier[lt] ]
identifier[g] = identifier[list] ( identifier[grouper] ( identifier[lt] , identifier[items_per_line] , identifier[fillvalue] = literal[string] ))
keyword[return] literal[string] . identifier[join] ( literal[string] . identifier[join] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[g] ) | def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max((len(x) for x in lt)) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=''))
return '\n'.join((''.join(x) for x in g)) |
def batch_taxids(list_of_names):
"""
Opposite of batch_taxonomy():
Convert list of Latin names to taxids
"""
for name in list_of_names:
handle = Entrez.esearch(db='Taxonomy', term=name, retmode="xml")
records = Entrez.read(handle)
yield records["IdList"][0] | def function[batch_taxids, parameter[list_of_names]]:
constant[
Opposite of batch_taxonomy():
Convert list of Latin names to taxids
]
for taget[name[name]] in starred[name[list_of_names]] begin[:]
variable[handle] assign[=] call[name[Entrez].esearch, parameter[]]
variable[records] assign[=] call[name[Entrez].read, parameter[name[handle]]]
<ast.Yield object at 0x7da1b2346140> | keyword[def] identifier[batch_taxids] ( identifier[list_of_names] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[list_of_names] :
identifier[handle] = identifier[Entrez] . identifier[esearch] ( identifier[db] = literal[string] , identifier[term] = identifier[name] , identifier[retmode] = literal[string] )
identifier[records] = identifier[Entrez] . identifier[read] ( identifier[handle] )
keyword[yield] identifier[records] [ literal[string] ][ literal[int] ] | def batch_taxids(list_of_names):
"""
Opposite of batch_taxonomy():
Convert list of Latin names to taxids
"""
for name in list_of_names:
handle = Entrez.esearch(db='Taxonomy', term=name, retmode='xml')
records = Entrez.read(handle)
yield records['IdList'][0] # depends on [control=['for'], data=['name']] |
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping | def function[setStateCodes, parameter[self]]:
constant[
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
]
variable[statespace] assign[=] call[name[self].statespace, parameter[]]
name[self].minvalues assign[=] call[name[np].amin, parameter[name[statespace]]]
name[self].maxvalues assign[=] call[name[np].amax, parameter[name[statespace]]]
variable[statesize] assign[=] call[name[statespace].shape][constant[1]]
variable[largestRange] assign[=] binary_operation[constant[1] + call[name[np].max, parameter[binary_operation[name[self].maxvalues - name[self].minvalues]]]]
name[self].statecode assign[=] call[name[np].power, parameter[name[largestRange], call[name[np].arange, parameter[name[statesize]]]]]
variable[codes] assign[=] call[name[self].getStateCode, parameter[name[statespace]]]
variable[sorted_indices] assign[=] call[name[np].argsort, parameter[name[codes]]]
name[self].codes assign[=] call[name[codes]][name[sorted_indices]]
if compare[call[name[np].unique, parameter[name[self].codes]].shape not_equal[!=] name[self].codes.shape] begin[:]
<ast.Raise object at 0x7da2043472b0>
variable[mapping] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2043467a0>, <ast.Name object at 0x7da204344670>]]] in starred[call[name[enumerate], parameter[call[name[statespace]][name[sorted_indices]]]]] begin[:]
call[name[mapping]][name[index]] assign[=] name[state]
name[self].mapping assign[=] name[mapping] | keyword[def] identifier[setStateCodes] ( identifier[self] ):
literal[string]
identifier[statespace] = identifier[self] . identifier[statespace] ()
identifier[self] . identifier[minvalues] = identifier[np] . identifier[amin] ( identifier[statespace] , identifier[axis] = literal[int] )
identifier[self] . identifier[maxvalues] = identifier[np] . identifier[amax] ( identifier[statespace] , identifier[axis] = literal[int] )
identifier[statesize] = identifier[statespace] . identifier[shape] [ literal[int] ]
identifier[largestRange] = literal[int] + identifier[np] . identifier[max] ( identifier[self] . identifier[maxvalues] - identifier[self] . identifier[minvalues] )
identifier[self] . identifier[statecode] = identifier[np] . identifier[power] ( identifier[largestRange] , identifier[np] . identifier[arange] ( identifier[statesize] ), identifier[dtype] = identifier[int] )
identifier[codes] = identifier[self] . identifier[getStateCode] ( identifier[statespace] )
identifier[sorted_indices] = identifier[np] . identifier[argsort] ( identifier[codes] )
identifier[self] . identifier[codes] = identifier[codes] [ identifier[sorted_indices] ]
keyword[if] identifier[np] . identifier[unique] ( identifier[self] . identifier[codes] ). identifier[shape] != identifier[self] . identifier[codes] . identifier[shape] :
keyword[raise] literal[string]
identifier[mapping] = identifier[OrderedDict] ()
keyword[for] identifier[index] , identifier[state] keyword[in] identifier[enumerate] ( identifier[statespace] [ identifier[sorted_indices] ]):
identifier[mapping] [ identifier[index] ]= identifier[state]
identifier[self] . identifier[mapping] = identifier[mapping] | def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
""" #calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace, axis=0)
self.maxvalues = np.amax(statespace, axis=0) #calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1 + np.max(self.maxvalues - self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize), dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise 'Non-unique coding of states, results are unreliable' # depends on [control=['if'], data=[]] #For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for (index, state) in enumerate(statespace[sorted_indices]):
mapping[index] = state # depends on [control=['for'], data=[]]
self.mapping = mapping |
def delete_user(self, auth, username):
"""
Deletes the user with username ``username``. Should only be called if the
to-be-deleted user has no repositories.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to delete
"""
path = "/admin/users/{}".format(username)
self.delete(path, auth=auth) | def function[delete_user, parameter[self, auth, username]]:
constant[
Deletes the user with username ``username``. Should only be called if the
to-be-deleted user has no repositories.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to delete
]
variable[path] assign[=] call[constant[/admin/users/{}].format, parameter[name[username]]]
call[name[self].delete, parameter[name[path]]] | keyword[def] identifier[delete_user] ( identifier[self] , identifier[auth] , identifier[username] ):
literal[string]
identifier[path] = literal[string] . identifier[format] ( identifier[username] )
identifier[self] . identifier[delete] ( identifier[path] , identifier[auth] = identifier[auth] ) | def delete_user(self, auth, username):
"""
Deletes the user with username ``username``. Should only be called if the
to-be-deleted user has no repositories.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to delete
"""
path = '/admin/users/{}'.format(username)
self.delete(path, auth=auth) |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(ContainerValuePicker, self).fix_config(options)
opt = "value"
if opt not in options:
options[opt] = "Model"
if opt not in self.help:
self.help[opt] = "The name of the container value to pick from the container (string)."
opt = "switch"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to switch the ouputs, i.e., forward the container to the sub-flow and the " \
+ "container value to the following actor instead (bool)."
return options | def function[fix_config, parameter[self, options]]:
constant[
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
]
variable[options] assign[=] call[call[name[super], parameter[name[ContainerValuePicker], name[self]]].fix_config, parameter[name[options]]]
variable[opt] assign[=] constant[value]
if compare[name[opt] <ast.NotIn object at 0x7da2590d7190> name[options]] begin[:]
call[name[options]][name[opt]] assign[=] constant[Model]
if compare[name[opt] <ast.NotIn object at 0x7da2590d7190> name[self].help] begin[:]
call[name[self].help][name[opt]] assign[=] constant[The name of the container value to pick from the container (string).]
variable[opt] assign[=] constant[switch]
if compare[name[opt] <ast.NotIn object at 0x7da2590d7190> name[options]] begin[:]
call[name[options]][name[opt]] assign[=] constant[False]
if compare[name[opt] <ast.NotIn object at 0x7da2590d7190> name[self].help] begin[:]
call[name[self].help][name[opt]] assign[=] binary_operation[constant[Whether to switch the ouputs, i.e., forward the container to the sub-flow and the ] + constant[container value to the following actor instead (bool).]]
return[name[options]] | keyword[def] identifier[fix_config] ( identifier[self] , identifier[options] ):
literal[string]
identifier[options] = identifier[super] ( identifier[ContainerValuePicker] , identifier[self] ). identifier[fix_config] ( identifier[options] )
identifier[opt] = literal[string]
keyword[if] identifier[opt] keyword[not] keyword[in] identifier[options] :
identifier[options] [ identifier[opt] ]= literal[string]
keyword[if] identifier[opt] keyword[not] keyword[in] identifier[self] . identifier[help] :
identifier[self] . identifier[help] [ identifier[opt] ]= literal[string]
identifier[opt] = literal[string]
keyword[if] identifier[opt] keyword[not] keyword[in] identifier[options] :
identifier[options] [ identifier[opt] ]= keyword[False]
keyword[if] identifier[opt] keyword[not] keyword[in] identifier[self] . identifier[help] :
identifier[self] . identifier[help] [ identifier[opt] ]= literal[string] + literal[string]
keyword[return] identifier[options] | def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(ContainerValuePicker, self).fix_config(options)
opt = 'value'
if opt not in options:
options[opt] = 'Model' # depends on [control=['if'], data=['opt', 'options']]
if opt not in self.help:
self.help[opt] = 'The name of the container value to pick from the container (string).' # depends on [control=['if'], data=['opt']]
opt = 'switch'
if opt not in options:
options[opt] = False # depends on [control=['if'], data=['opt', 'options']]
if opt not in self.help:
self.help[opt] = 'Whether to switch the ouputs, i.e., forward the container to the sub-flow and the ' + 'container value to the following actor instead (bool).' # depends on [control=['if'], data=['opt']]
return options |
def update_script(self, script_body):
"""
Updates the configuration script of the enclosure-group with the specified URI.
Args:
id_or_uri: Resource id or resource uri.
script_body: Configuration script.
Returns:
dict: Updated enclosure group.
"""
uri = "{}/script".format(self.data['uri'])
return self._helper.update(script_body, uri=uri) | def function[update_script, parameter[self, script_body]]:
constant[
Updates the configuration script of the enclosure-group with the specified URI.
Args:
id_or_uri: Resource id or resource uri.
script_body: Configuration script.
Returns:
dict: Updated enclosure group.
]
variable[uri] assign[=] call[constant[{}/script].format, parameter[call[name[self].data][constant[uri]]]]
return[call[name[self]._helper.update, parameter[name[script_body]]]] | keyword[def] identifier[update_script] ( identifier[self] , identifier[script_body] ):
literal[string]
identifier[uri] = literal[string] . identifier[format] ( identifier[self] . identifier[data] [ literal[string] ])
keyword[return] identifier[self] . identifier[_helper] . identifier[update] ( identifier[script_body] , identifier[uri] = identifier[uri] ) | def update_script(self, script_body):
"""
Updates the configuration script of the enclosure-group with the specified URI.
Args:
id_or_uri: Resource id or resource uri.
script_body: Configuration script.
Returns:
dict: Updated enclosure group.
"""
uri = '{}/script'.format(self.data['uri'])
return self._helper.update(script_body, uri=uri) |
def build_hypo_list_node(hypo_list):
"""
:param hypo_list:
an array of shape (N, 3) with columns (alongStrike, downDip, weight)
:returns:
a hypoList node containing N hypo nodes
"""
hypolist = Node('hypoList', {})
for row in hypo_list:
n = Node(
'hypo', dict(alongStrike=row[0], downDip=row[1], weight=row[2]))
hypolist.append(n)
return hypolist | def function[build_hypo_list_node, parameter[hypo_list]]:
constant[
:param hypo_list:
an array of shape (N, 3) with columns (alongStrike, downDip, weight)
:returns:
a hypoList node containing N hypo nodes
]
variable[hypolist] assign[=] call[name[Node], parameter[constant[hypoList], dictionary[[], []]]]
for taget[name[row]] in starred[name[hypo_list]] begin[:]
variable[n] assign[=] call[name[Node], parameter[constant[hypo], call[name[dict], parameter[]]]]
call[name[hypolist].append, parameter[name[n]]]
return[name[hypolist]] | keyword[def] identifier[build_hypo_list_node] ( identifier[hypo_list] ):
literal[string]
identifier[hypolist] = identifier[Node] ( literal[string] ,{})
keyword[for] identifier[row] keyword[in] identifier[hypo_list] :
identifier[n] = identifier[Node] (
literal[string] , identifier[dict] ( identifier[alongStrike] = identifier[row] [ literal[int] ], identifier[downDip] = identifier[row] [ literal[int] ], identifier[weight] = identifier[row] [ literal[int] ]))
identifier[hypolist] . identifier[append] ( identifier[n] )
keyword[return] identifier[hypolist] | def build_hypo_list_node(hypo_list):
"""
:param hypo_list:
an array of shape (N, 3) with columns (alongStrike, downDip, weight)
:returns:
a hypoList node containing N hypo nodes
"""
hypolist = Node('hypoList', {})
for row in hypo_list:
n = Node('hypo', dict(alongStrike=row[0], downDip=row[1], weight=row[2]))
hypolist.append(n) # depends on [control=['for'], data=['row']]
return hypolist |
def is_subgroup(self, supergroup):
"""
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops) | def function[is_subgroup, parameter[self, supergroup]]:
constant[
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
]
call[name[warnings].warn, parameter[constant[This is not fully functional. Only trivial subsets are tested right now. ]]]
return[call[call[name[set], parameter[name[self].symmetry_ops]].issubset, parameter[name[supergroup].symmetry_ops]]] | keyword[def] identifier[is_subgroup] ( identifier[self] , identifier[supergroup] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[set] ( identifier[self] . identifier[symmetry_ops] ). identifier[issubset] ( identifier[supergroup] . identifier[symmetry_ops] ) | def is_subgroup(self, supergroup):
"""
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
"""
warnings.warn('This is not fully functional. Only trivial subsets are tested right now. ')
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops) |
def minimise_tables(routing_tables, target_lengths,
methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda: target_lengths)
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for chip, table in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods)
except MinimisationFailedError as exc:
exc.chip = chip
raise
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table
return new_tables | def function[minimise_tables, parameter[routing_tables, target_lengths, methods]]:
constant[Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [ :py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
]
if <ast.UnaryOp object at 0x7da1b195d600> begin[:]
variable[lengths] assign[=] call[name[collections].defaultdict, parameter[<ast.Lambda object at 0x7da1b195fd60>]]
variable[new_tables] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b195c460>, <ast.Name object at 0x7da1b195fcd0>]]] in starred[call[name[iteritems], parameter[name[routing_tables]]]] begin[:]
<ast.Try object at 0x7da1b195cd60>
if name[new_table] begin[:]
call[name[new_tables]][name[chip]] assign[=] name[new_table]
return[name[new_tables]] | keyword[def] identifier[minimise_tables] ( identifier[routing_tables] , identifier[target_lengths] ,
identifier[methods] =( identifier[remove_default_entries] , identifier[ordered_covering] )):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[target_lengths] , identifier[dict] ):
identifier[lengths] = identifier[collections] . identifier[defaultdict] ( keyword[lambda] : identifier[target_lengths] )
keyword[else] :
identifier[lengths] = identifier[target_lengths]
identifier[new_tables] = identifier[dict] ()
keyword[for] identifier[chip] , identifier[table] keyword[in] identifier[iteritems] ( identifier[routing_tables] ):
keyword[try] :
identifier[new_table] = identifier[minimise_table] ( identifier[table] , identifier[lengths] [ identifier[chip] ], identifier[methods] )
keyword[except] identifier[MinimisationFailedError] keyword[as] identifier[exc] :
identifier[exc] . identifier[chip] = identifier[chip]
keyword[raise]
keyword[if] identifier[new_table] :
identifier[new_tables] [ identifier[chip] ]= identifier[new_table]
keyword[return] identifier[new_tables] | def minimise_tables(routing_tables, target_lengths, methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [ :py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda : target_lengths) # depends on [control=['if'], data=[]]
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for (chip, table) in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods) # depends on [control=['try'], data=[]]
except MinimisationFailedError as exc:
exc.chip = chip
raise # depends on [control=['except'], data=['exc']]
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return new_tables |
def get_atten(self, idx=0):
    """Query the instrument for the current attenuation of one attenuator.

    Args:
        idx: Zero-based index identifying a particular attenuator within
            the instrument.

    Raises:
        Error: The underlying telnet connection to the instrument is not
            open.
        IndexError: ``idx`` does not address a valid attenuation path.

    Returns:
        The current attenuation value as a float.
    """
    if not self.is_open:
        raise attenuator.Error(
            "Connection to attenuator at %s is not open!" %
            self._telnet_client.host)
    if not 0 <= idx < self.path_count:
        raise IndexError("Attenuator index out of range!", self.path_count,
                         idx)
    # The instrument numbers its channels starting from 1.
    channel = idx + 1
    response = self._telnet_client.cmd("CHAN:%s:ATT?" % channel)
    return float(response)
constant[This function returns the current attenuation from an attenuator at a
given index in the instrument.
Args:
idx: This zero-based index is the identifier for a particular
attenuator in an instrument.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
Returns:
A float that is the current attenuation value.
]
if <ast.UnaryOp object at 0x7da1b0633a00> begin[:]
<ast.Raise object at 0x7da1b0633a60>
if <ast.BoolOp object at 0x7da1b0631db0> begin[:]
<ast.Raise object at 0x7da1b0632da0>
variable[atten_val_str] assign[=] call[name[self]._telnet_client.cmd, parameter[binary_operation[constant[CHAN:%s:ATT?] <ast.Mod object at 0x7da2590d6920> binary_operation[name[idx] + constant[1]]]]]
variable[atten_val] assign[=] call[name[float], parameter[name[atten_val_str]]]
return[name[atten_val]] | keyword[def] identifier[get_atten] ( identifier[self] , identifier[idx] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_open] :
keyword[raise] identifier[attenuator] . identifier[Error] (
literal[string] %
identifier[self] . identifier[_telnet_client] . identifier[host] )
keyword[if] identifier[idx] + literal[int] > identifier[self] . identifier[path_count] keyword[or] identifier[idx] < literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] , identifier[self] . identifier[path_count] ,
identifier[idx] )
identifier[atten_val_str] = identifier[self] . identifier[_telnet_client] . identifier[cmd] ( literal[string] %( identifier[idx] + literal[int] ))
identifier[atten_val] = identifier[float] ( identifier[atten_val_str] )
keyword[return] identifier[atten_val] | def get_atten(self, idx=0):
"""This function returns the current attenuation from an attenuator at a
given index in the instrument.
Args:
idx: This zero-based index is the identifier for a particular
attenuator in an instrument.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
Returns:
A float that is the current attenuation value.
"""
if not self.is_open:
raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host) # depends on [control=['if'], data=[]]
if idx + 1 > self.path_count or idx < 0:
raise IndexError('Attenuator index out of range!', self.path_count, idx) # depends on [control=['if'], data=[]]
atten_val_str = self._telnet_client.cmd('CHAN:%s:ATT?' % (idx + 1))
atten_val = float(atten_val_str)
return atten_val |
def calc_empirical_retinotopy(cortex,
                              polar_angle=None, eccentricity=None, pRF_radius=None, weight=None,
                              eccentricity_range=None, weight_min=0,
                              invert_rh_angle=False,
                              partial_voluming_correction=False):
    '''
    calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object
    storing the retinotopy data for the registration.
      Required afferent parameters:
        @ cortex Must be the cortex object that is to be registered to the model of retinotopy.
      Optional afferent parameters:
        @ polar_angle May be an array of polar angle values or a polar angle property name; if None
          (the default), attempts to auto-detect an empirical polar angle property.
        @ eccentricity May be an array of eccentricity values or an eccentricity property name; if
          None (the default), attempts to auto-detect an empirical eccentricity property.
        @ pRF_radius May be an array of receptive field radius values or the property name for such an
          array; if None (the default), attempts to auto-detect an empirical radius property.
        @ weight May be an array of weight values or a weight property name; if None (the default),
          attempts to auto-detect an empirical weight property, such as variance_explained.
        @ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range
          to be used in the registration; if None, then no clipping is done.
        @ weight_min May be given to indicate that weight values below this value should not be
          included in the registration; the default is 0.
        @ partial_voluming_correction May be set to True (default is False) to indicate that partial
          voluming correction should be used to adjust the weights.
        @ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere
          has its polar angle stored with opposite sign to the model polar angle.
      Efferent values:
        @ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in
          the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight';
          values that should be excluded for any reason will have 0 weight and undefined angles.
    '''
    # NOTE(review): invert_rh_angle is accepted (and documented above) but is not
    # referenced anywhere in this body; presumably it is consumed by a downstream
    # calculation in the same pimms plan -- confirm before relying on it here.
    n = cortex.vertex_count
    # Normalize the eccentricity range to a (min, max) pair: None means no
    # clipping, a bare number means "from 0 up to that maximum".
    (emin,emax) = (-np.inf,np.inf) if eccentricity_range is None else \
                  (0,eccentricity_range) if pimms.is_number(eccentricity_range) else \
                  eccentricity_range
    # Step 1: get our properties straight ##########################################################
    (ang, ecc, rad, wgt) = [
        np.array(extract_retinotopy_argument(cortex, name, arg, default='empirical'))
        for (name, arg) in [
            ('polar_angle', polar_angle),
            ('eccentricity', eccentricity),
            ('radius', pRF_radius),
            ('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]]
    # Fall back on uniform weights when none were found.
    # NOTE(review): np.array(None) is a 0-d object array, not None, so this guard
    # only fires if extract_retinotopy_argument's wrapper yields None -- verify.
    if wgt is None: wgt = np.ones(len(ecc))
    # Mark any vertex whose angle/eccentricity/weight is NaN or infinite so it
    # cannot influence the registration.
    bad = np.logical_not(np.isfinite(np.prod([ang, ecc, wgt], axis=0)))
    ecc[bad] = 0
    wgt[bad] = 0
    if rad is not None: rad[bad] = 0
    # do partial voluming correction if requested
    if partial_voluming_correction: wgt = wgt * (1 - cortex.partial_voluming_factor)
    # Now trim and finalize: additionally exclude vertices whose weight is at or
    # below the threshold or whose eccentricity lies outside the allowed range.
    bad = bad | (wgt <= weight_min) | (ecc < emin) | (ecc > emax)
    wgt[bad] = 0
    ang[bad] = 0
    ecc[bad] = 0
    # Freeze the arrays so downstream consumers cannot mutate shared data.
    for x in [ang, ecc, wgt, rad]:
        if x is not None:
            x.setflags(write=False)
    # that's it!
    dat = dict(polar_angle=ang, eccentricity=ecc, weight=wgt)
    if rad is not None: dat['radius'] = rad
    return (pimms.itable(dat),)
constant[
calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object
storing the retinotopy data for the registration.
Required afferent parameters:
@ cortex Must be the cortex object that is to be registered to the model of retinotopy.
Optional afferent parameters:
@ polar_angle May be an array of polar angle values or a polar angle property name; if None
(the default), attempts to auto-detect an empirical polar angle property.
@ eccentricity May be an array of eccentricity values or an eccentricity property name; if
None (the default), attempts to auto-detect an empirical eccentricity property.
@ pRF_radius May be an array of receptive field radius values or the property name for such an
array; if None (the default), attempts to auto-detect an empirical radius property.
@ weight May be an array of weight values or a weight property name; if None (the default),
attempts to auto-detect an empirical weight property, such as variance_explained.
@ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range
to be used in the registration; if None, then no clipping is done.
@ weight_min May be given to indicate that weight values below this value should not be
included in the registration; the default is 0.
@ partial_voluming_correction May be set to True (default is False) to indicate that partial
voluming correction should be used to adjust the weights.
@ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere
has its polar angle stored with opposite sign to the model polar angle.
Efferent values:
@ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in
the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight';
values that should be excluded for any reason will have 0 weight and undefined angles.
]
variable[data] assign[=] dictionary[[], []]
variable[n] assign[=] name[cortex].vertex_count
<ast.Tuple object at 0x7da20e9b1120> assign[=] <ast.IfExp object at 0x7da20e9b32e0>
<ast.Tuple object at 0x7da20e9b05b0> assign[=] <ast.ListComp object at 0x7da20e9b0970>
if compare[name[wgt] is constant[None]] begin[:]
variable[wgt] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[ecc]]]]]
variable[bad] assign[=] call[name[np].logical_not, parameter[call[name[np].isfinite, parameter[call[name[np].prod, parameter[list[[<ast.Name object at 0x7da20e9b24d0>, <ast.Name object at 0x7da20e9b3fd0>, <ast.Name object at 0x7da20e9b23b0>]]]]]]]]
call[name[ecc]][name[bad]] assign[=] constant[0]
call[name[wgt]][name[bad]] assign[=] constant[0]
if compare[name[rad] is_not constant[None]] begin[:]
call[name[rad]][name[bad]] assign[=] constant[0]
if name[partial_voluming_correction] begin[:]
variable[wgt] assign[=] binary_operation[name[wgt] * binary_operation[constant[1] - name[cortex].partial_voluming_factor]]
variable[bad] assign[=] binary_operation[binary_operation[binary_operation[name[bad] <ast.BitOr object at 0x7da2590d6aa0> compare[name[wgt] less_or_equal[<=] name[weight_min]]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[ecc] less[<] name[emin]]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[ecc] greater[>] name[emax]]]
call[name[wgt]][name[bad]] assign[=] constant[0]
call[name[ang]][name[bad]] assign[=] constant[0]
call[name[ecc]][name[bad]] assign[=] constant[0]
for taget[name[x]] in starred[list[[<ast.Name object at 0x7da1b0e39480>, <ast.Name object at 0x7da1b0e38820>, <ast.Name object at 0x7da1b0e389d0>, <ast.Name object at 0x7da1b0e3a140>]]] begin[:]
if compare[name[x] is_not constant[None]] begin[:]
call[name[x].setflags, parameter[]]
variable[dat] assign[=] call[name[dict], parameter[]]
if compare[name[rad] is_not constant[None]] begin[:]
call[name[dat]][constant[radius]] assign[=] name[rad]
return[tuple[[<ast.Call object at 0x7da1b0e3b760>]]] | keyword[def] identifier[calc_empirical_retinotopy] ( identifier[cortex] ,
identifier[polar_angle] = keyword[None] , identifier[eccentricity] = keyword[None] , identifier[pRF_radius] = keyword[None] , identifier[weight] = keyword[None] ,
identifier[eccentricity_range] = keyword[None] , identifier[weight_min] = literal[int] ,
identifier[invert_rh_angle] = keyword[False] ,
identifier[partial_voluming_correction] = keyword[False] ):
literal[string]
identifier[data] ={}
identifier[n] = identifier[cortex] . identifier[vertex_count]
( identifier[emin] , identifier[emax] )=(- identifier[np] . identifier[inf] , identifier[np] . identifier[inf] ) keyword[if] identifier[eccentricity_range] keyword[is] keyword[None] keyword[else] ( literal[int] , identifier[eccentricity_range] ) keyword[if] identifier[pimms] . identifier[is_number] ( identifier[eccentricity_range] ) keyword[else] identifier[eccentricity_range]
( identifier[ang] , identifier[ecc] , identifier[rad] , identifier[wgt] )=[
identifier[np] . identifier[array] ( identifier[extract_retinotopy_argument] ( identifier[cortex] , identifier[name] , identifier[arg] , identifier[default] = literal[string] ))
keyword[for] ( identifier[name] , identifier[arg] ) keyword[in] [
( literal[string] , identifier[polar_angle] ),
( literal[string] , identifier[eccentricity] ),
( literal[string] , identifier[pRF_radius] ),
( literal[string] , identifier[np] . identifier[full] ( identifier[n] , identifier[weight] ) keyword[if] identifier[pimms] . identifier[is_number] ( identifier[weight] ) keyword[else] identifier[weight] )]]
keyword[if] identifier[wgt] keyword[is] keyword[None] : identifier[wgt] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[ecc] ))
identifier[bad] = identifier[np] . identifier[logical_not] ( identifier[np] . identifier[isfinite] ( identifier[np] . identifier[prod] ([ identifier[ang] , identifier[ecc] , identifier[wgt] ], identifier[axis] = literal[int] )))
identifier[ecc] [ identifier[bad] ]= literal[int]
identifier[wgt] [ identifier[bad] ]= literal[int]
keyword[if] identifier[rad] keyword[is] keyword[not] keyword[None] : identifier[rad] [ identifier[bad] ]= literal[int]
keyword[if] identifier[partial_voluming_correction] : identifier[wgt] = identifier[wgt] *( literal[int] - identifier[cortex] . identifier[partial_voluming_factor] )
identifier[bad] = identifier[bad] |( identifier[wgt] <= identifier[weight_min] )|( identifier[ecc] < identifier[emin] )|( identifier[ecc] > identifier[emax] )
identifier[wgt] [ identifier[bad] ]= literal[int]
identifier[ang] [ identifier[bad] ]= literal[int]
identifier[ecc] [ identifier[bad] ]= literal[int]
keyword[for] identifier[x] keyword[in] [ identifier[ang] , identifier[ecc] , identifier[wgt] , identifier[rad] ]:
keyword[if] identifier[x] keyword[is] keyword[not] keyword[None] :
identifier[x] . identifier[setflags] ( identifier[write] = keyword[False] )
identifier[dat] = identifier[dict] ( identifier[polar_angle] = identifier[ang] , identifier[eccentricity] = identifier[ecc] , identifier[weight] = identifier[wgt] )
keyword[if] identifier[rad] keyword[is] keyword[not] keyword[None] : identifier[dat] [ literal[string] ]= identifier[rad]
keyword[return] ( identifier[pimms] . identifier[itable] ( identifier[dat] ),) | def calc_empirical_retinotopy(cortex, polar_angle=None, eccentricity=None, pRF_radius=None, weight=None, eccentricity_range=None, weight_min=0, invert_rh_angle=False, partial_voluming_correction=False):
"""
calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object
storing the retinotopy data for the registration.
Required afferent parameters:
@ cortex Must be the cortex object that is to be registered to the model of retinotopy.
Optional afferent parameters:
@ polar_angle May be an array of polar angle values or a polar angle property name; if None
(the default), attempts to auto-detect an empirical polar angle property.
@ eccentricity May be an array of eccentricity values or an eccentricity property name; if
None (the default), attempts to auto-detect an empirical eccentricity property.
@ pRF_radius May be an array of receptive field radius values or the property name for such an
array; if None (the default), attempts to auto-detect an empirical radius property.
@ weight May be an array of weight values or a weight property name; if None (the default),
attempts to auto-detect an empirical weight property, such as variance_explained.
@ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range
to be used in the registration; if None, then no clipping is done.
@ weight_min May be given to indicate that weight values below this value should not be
included in the registration; the default is 0.
@ partial_voluming_correction May be set to True (default is False) to indicate that partial
voluming correction should be used to adjust the weights.
@ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere
has its polar angle stored with opposite sign to the model polar angle.
Efferent values:
@ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in
the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight';
values that should be excluded for any reason will have 0 weight and undefined angles.
"""
data = {} # the map we build up in this function
n = cortex.vertex_count
(emin, emax) = (-np.inf, np.inf) if eccentricity_range is None else (0, eccentricity_range) if pimms.is_number(eccentricity_range) else eccentricity_range
# Step 1: get our properties straight ##########################################################
(ang, ecc, rad, wgt) = [np.array(extract_retinotopy_argument(cortex, name, arg, default='empirical')) for (name, arg) in [('polar_angle', polar_angle), ('eccentricity', eccentricity), ('radius', pRF_radius), ('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]]
if wgt is None:
wgt = np.ones(len(ecc)) # depends on [control=['if'], data=['wgt']]
bad = np.logical_not(np.isfinite(np.prod([ang, ecc, wgt], axis=0)))
ecc[bad] = 0
wgt[bad] = 0
if rad is not None:
rad[bad] = 0 # depends on [control=['if'], data=['rad']]
# do partial voluming correction if requested
if partial_voluming_correction:
wgt = wgt * (1 - cortex.partial_voluming_factor) # depends on [control=['if'], data=[]]
# now trim and finalize
bad = bad | (wgt <= weight_min) | (ecc < emin) | (ecc > emax)
wgt[bad] = 0
ang[bad] = 0
ecc[bad] = 0
for x in [ang, ecc, wgt, rad]:
if x is not None:
x.setflags(write=False) # depends on [control=['if'], data=['x']] # depends on [control=['for'], data=['x']]
# that's it!
dat = dict(polar_angle=ang, eccentricity=ecc, weight=wgt)
if rad is not None:
dat['radius'] = rad # depends on [control=['if'], data=['rad']]
return (pimms.itable(dat),) |
def grep(path,
         pattern,
         *opts):
    '''
    Search for ``pattern`` in the file(s) at ``path`` using the system grep
    command.

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path of the file(s) to be searched

        .. note::
            Glob expressions are supported (i.e. ``/var/log/foo/*.log``);
            when a glob is used the path should be quoted so that the shell
            does not expand the glob expression itself.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    '''
    path = os.path.expanduser(path)
    # Keep the pre-glob path around: when the glob matches nothing we fall
    # back on the original string so grep itself reports the error.
    requested = path
    matches = glob.glob(path)
    path = matches if matches else requested

    split_opts = []
    for opt in opts:
        try:
            pieces = salt.utils.args.shlex_split(opt)
        except AttributeError:
            pieces = salt.utils.args.shlex_split(six.text_type(opt))
        # Each opt must be a single flag; multiple flags in one string are
        # rejected so the caller passes them separately.
        if len(pieces) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        split_opts.extend(pieces)

    targets = path if isinstance(path, list) else [path]
    cmd = ['grep'] + split_opts + [pattern] + targets
    try:
        return __salt__['cmd.run_all'](cmd, python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
constant[
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
is being used then the path should be quoted to keep the shell from
attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
opts
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[_path] assign[=] name[path]
variable[path] assign[=] call[name[glob].glob, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da2041db6d0> begin[:]
variable[path] assign[=] name[_path]
variable[split_opts] assign[=] list[[]]
for taget[name[opt]] in starred[name[opts]] begin[:]
<ast.Try object at 0x7da2041dbc40>
if compare[call[name[len], parameter[name[split]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da2041d8a30>
call[name[split_opts].extend, parameter[name[split]]]
if call[name[isinstance], parameter[name[path], name[list]]] begin[:]
variable[cmd] assign[=] binary_operation[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da2041da320>]] + name[split_opts]] + list[[<ast.Name object at 0x7da2041dbf70>]]] + name[path]]
<ast.Try object at 0x7da2041daa70>
return[name[ret]] | keyword[def] identifier[grep] ( identifier[path] ,
identifier[pattern] ,
* identifier[opts] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[_path] = identifier[path]
identifier[path] = identifier[glob] . identifier[glob] ( identifier[path] )
keyword[if] keyword[not] identifier[path] :
identifier[path] = identifier[_path]
identifier[split_opts] =[]
keyword[for] identifier[opt] keyword[in] identifier[opts] :
keyword[try] :
identifier[split] = identifier[salt] . identifier[utils] . identifier[args] . identifier[shlex_split] ( identifier[opt] )
keyword[except] identifier[AttributeError] :
identifier[split] = identifier[salt] . identifier[utils] . identifier[args] . identifier[shlex_split] ( identifier[six] . identifier[text_type] ( identifier[opt] ))
keyword[if] identifier[len] ( identifier[split] )> literal[int] :
keyword[raise] identifier[SaltInvocationError] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[opt] )
)
identifier[split_opts] . identifier[extend] ( identifier[split] )
keyword[if] identifier[isinstance] ( identifier[path] , identifier[list] ):
identifier[cmd] =[ literal[string] ]+ identifier[split_opts] +[ identifier[pattern] ]+ identifier[path]
keyword[else] :
identifier[cmd] =[ literal[string] ]+ identifier[split_opts] +[ identifier[pattern] , identifier[path] ]
keyword[try] :
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] )
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] . identifier[strerror] )
keyword[return] identifier[ret] | def grep(path, pattern, *opts):
"""
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
is being used then the path should be quoted to keep the shell from
attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
opts
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
"""
path = os.path.expanduser(path)
# Backup the path in case the glob returns nothing
_path = path
path = glob.glob(path)
# If the list is empty no files exist
# so we revert back to the original path
# so the result is an error.
if not path:
path = _path # depends on [control=['if'], data=[]]
split_opts = []
for opt in opts:
try:
split = salt.utils.args.shlex_split(opt) # depends on [control=['try'], data=[]]
except AttributeError:
split = salt.utils.args.shlex_split(six.text_type(opt)) # depends on [control=['except'], data=[]]
if len(split) > 1:
raise SaltInvocationError('Passing multiple command line arguments in a single string is not supported, please pass the following arguments separately: {0}'.format(opt)) # depends on [control=['if'], data=[]]
split_opts.extend(split) # depends on [control=['for'], data=['opt']]
if isinstance(path, list):
cmd = ['grep'] + split_opts + [pattern] + path # depends on [control=['if'], data=[]]
else:
cmd = ['grep'] + split_opts + [pattern, path]
try:
ret = __salt__['cmd.run_all'](cmd, python_shell=False) # depends on [control=['try'], data=[]]
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror) # depends on [control=['except'], data=['exc']]
return ret |
def get_context_data(self, **kwargs):
    """
    Adds available urls and names.
    """
    context = super(CRUDMixin, self).get_context_data(**kwargs)
    meta = self.model._meta
    context['model_verbose_name'] = meta.verbose_name
    context['model_verbose_name_plural'] = meta.verbose_name_plural
    context['fields'] = utils.get_fields(self.model)

    def url_or_none(action, reverse_kwargs=None):
        # Unregistered crud url names resolve to None instead of raising.
        try:
            if reverse_kwargs is None:
                return reverse(utils.crud_url_name(self.model, action))
            return reverse(utils.crud_url_name(self.model, action),
                           kwargs=reverse_kwargs)
        except NoReverseMatch:  # pragma: no cover
            return None

    if hasattr(self, 'object') and self.object:
        for action in utils.INSTANCE_ACTIONS:
            context['url_%s' % action] = url_or_none(
                action, {'pk': self.object.pk})
    for action in utils.LIST_ACTIONS:
        context['url_%s' % action] = url_or_none(action)
    return context
constant[
Adds available urls and names.
]
variable[context] assign[=] call[call[name[super], parameter[name[CRUDMixin], name[self]]].get_context_data, parameter[]]
call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da18ede6200>, <ast.Constant object at 0x7da18ede7b50>], [<ast.Attribute object at 0x7da18ede4bb0>, <ast.Attribute object at 0x7da18ede53f0>]]]]
call[name[context]][constant[fields]] assign[=] call[name[utils].get_fields, parameter[name[self].model]]
if <ast.BoolOp object at 0x7da18ede4c10> begin[:]
for taget[name[action]] in starred[name[utils].INSTANCE_ACTIONS] begin[:]
<ast.Try object at 0x7da18ede46d0>
call[name[context]][binary_operation[constant[url_%s] <ast.Mod object at 0x7da2590d6920> name[action]]] assign[=] name[url]
for taget[name[action]] in starred[name[utils].LIST_ACTIONS] begin[:]
<ast.Try object at 0x7da18f09f8e0>
call[name[context]][binary_operation[constant[url_%s] <ast.Mod object at 0x7da2590d6920> name[action]]] assign[=] name[url]
return[name[context]] | keyword[def] identifier[get_context_data] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[super] ( identifier[CRUDMixin] , identifier[self] ). identifier[get_context_data] (** identifier[kwargs] )
identifier[context] . identifier[update] ({
literal[string] : identifier[self] . identifier[model] . identifier[_meta] . identifier[verbose_name] ,
literal[string] : identifier[self] . identifier[model] . identifier[_meta] . identifier[verbose_name_plural] ,
})
identifier[context] [ literal[string] ]= identifier[utils] . identifier[get_fields] ( identifier[self] . identifier[model] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[object] :
keyword[for] identifier[action] keyword[in] identifier[utils] . identifier[INSTANCE_ACTIONS] :
keyword[try] :
identifier[url] = identifier[reverse] (
identifier[utils] . identifier[crud_url_name] ( identifier[self] . identifier[model] , identifier[action] ),
identifier[kwargs] ={ literal[string] : identifier[self] . identifier[object] . identifier[pk] })
keyword[except] identifier[NoReverseMatch] :
identifier[url] = keyword[None]
identifier[context] [ literal[string] % identifier[action] ]= identifier[url]
keyword[for] identifier[action] keyword[in] identifier[utils] . identifier[LIST_ACTIONS] :
keyword[try] :
identifier[url] = identifier[reverse] ( identifier[utils] . identifier[crud_url_name] ( identifier[self] . identifier[model] , identifier[action] ))
keyword[except] identifier[NoReverseMatch] :
identifier[url] = keyword[None]
identifier[context] [ literal[string] % identifier[action] ]= identifier[url]
keyword[return] identifier[context] | def get_context_data(self, **kwargs):
"""
Adds available urls and names.
"""
context = super(CRUDMixin, self).get_context_data(**kwargs)
context.update({'model_verbose_name': self.model._meta.verbose_name, 'model_verbose_name_plural': self.model._meta.verbose_name_plural})
context['fields'] = utils.get_fields(self.model)
if hasattr(self, 'object') and self.object:
for action in utils.INSTANCE_ACTIONS:
try:
url = reverse(utils.crud_url_name(self.model, action), kwargs={'pk': self.object.pk}) # depends on [control=['try'], data=[]]
except NoReverseMatch: # pragma: no cover
url = None # depends on [control=['except'], data=[]]
context['url_%s' % action] = url # depends on [control=['for'], data=['action']] # depends on [control=['if'], data=[]]
for action in utils.LIST_ACTIONS:
try:
url = reverse(utils.crud_url_name(self.model, action)) # depends on [control=['try'], data=[]]
except NoReverseMatch: # pragma: no cover
url = None # depends on [control=['except'], data=[]]
context['url_%s' % action] = url # depends on [control=['for'], data=['action']]
return context |
def helioY(self, *args, **kwargs):
    """
    NAME:

       helioY

    PURPOSE:

       return the Heliocentric Galactic rectangular y-coordinate (aka "Y")

    INPUT:

       t - (optional) time at which to get Y (can be Quantity)

       obs=[X,Y,Z] - (optional) position and of observer
                     in the Galactocentric frame
                     (in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity))
                     OR Orbit object that corresponds to the orbit
                     of the observer
                     Y is ignored and always assumed to be zero

       ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)

    OUTPUT:

       helioY(t) in kpc

    HISTORY:

       2011-02-24 - Written - Bovy (NYU)

    """
    # Delegate to the underlying orbit object; a single-element result is
    # unwrapped to a scalar.
    result = self._orb.helioY(*args, **kwargs)
    return result[0] if len(result) == 1 else result
constant[
NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y (can be Quantity)
obs=[X,Y,Z] - (optional) position and of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity))
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
helioY(t) in kpc
HISTORY:
2011-02-24 - Written - Bovy (NYU)
]
variable[out] assign[=] call[name[self]._orb.helioY, parameter[<ast.Starred object at 0x7da1b0ec2650>]]
if compare[call[name[len], parameter[name[out]]] equal[==] constant[1]] begin[:]
return[call[name[out]][constant[0]]] | keyword[def] identifier[helioY] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[out] = identifier[self] . identifier[_orb] . identifier[helioY] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[out] )== literal[int] : keyword[return] identifier[out] [ literal[int] ]
keyword[else] : keyword[return] identifier[out] | def helioY(self, *args, **kwargs):
"""
NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y (can be Quantity)
obs=[X,Y,Z] - (optional) position and of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity))
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
helioY(t) in kpc
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
out = self._orb.helioY(*args, **kwargs)
if len(out) == 1:
return out[0] # depends on [control=['if'], data=[]]
else:
return out |
def strip_z(pts):
    """Strips a Z component from `pts` if it is present."""
    arr = np.asarray(pts)
    if arr.shape[-1] > 2:
        # Keep only the leading X/Y entries along the last axis; copy so the
        # result does not alias the caller's array.
        arr = arr[..., :2].copy()
    return arr
constant[Strips a Z component from `pts` if it is present.]
variable[pts] assign[=] call[name[np].asarray, parameter[name[pts]]]
if compare[call[name[pts].shape][<ast.UnaryOp object at 0x7da1b0d0d360>] greater[>] constant[2]] begin[:]
variable[pts] assign[=] call[name[np].asarray, parameter[tuple[[<ast.Subscript object at 0x7da1b0d1e200>, <ast.Subscript object at 0x7da1b0d1e260>]]]].T
return[name[pts]] | keyword[def] identifier[strip_z] ( identifier[pts] ):
literal[string]
identifier[pts] = identifier[np] . identifier[asarray] ( identifier[pts] )
keyword[if] identifier[pts] . identifier[shape] [- literal[int] ]> literal[int] :
identifier[pts] = identifier[np] . identifier[asarray] (( identifier[pts] . identifier[T] [ literal[int] ], identifier[pts] . identifier[T] [ literal[int] ])). identifier[T]
keyword[return] identifier[pts] | def strip_z(pts):
"""Strips a Z component from `pts` if it is present."""
pts = np.asarray(pts)
if pts.shape[-1] > 2:
pts = np.asarray((pts.T[0], pts.T[1])).T # depends on [control=['if'], data=[]]
return pts |
def get_station_id(self):
    '''
    Resolve and store the nearest weather station ID for this location.

    Performs a geolookup API request for ``self.location_code`` and picks
    the nearest station of the configured kind:

    * ``self.use_pws`` truthy -> nearest personal weather station,
      stored as ``self.station_id = 'pws:<id>'``
    * otherwise -> nearest airport, stored as ``'icao:<icao>'``

    Raises:
        Exception: if the response contains no matching stations, or the
            nearest entry lacks the expected ``id``/``icao`` field.
    '''
    # '/pws:0' asks the geolookup API to exclude personal weather stations
    # when we only care about airports.
    extra_opts = '/pws:0' if not self.use_pws else ''
    api_url = GEOLOOKUP_URL % (self.api_key,
                               extra_opts,
                               self.location_code)
    response = self.api_request(api_url)
    station_type = 'pws' if self.use_pws else 'airport'
    try:
        stations = response['location']['nearby_weather_stations']
        nearest = stations[station_type]['station'][0]
    except (KeyError, IndexError):
        raise Exception(
            'No locations matched location_code %s' % self.location_code)
    # BUG FIX: this is an informational trace, not a failure; it was
    # previously logged at ERROR level, which pollutes error monitoring.
    self.logger.debug('nearest = %s', nearest)
    if self.use_pws:
        nearest_pws = nearest.get('id', '')
        if not nearest_pws:
            raise Exception('No id entry for nearest PWS')
        self.station_id = 'pws:%s' % nearest_pws
    else:
        nearest_airport = nearest.get('icao', '')
        if not nearest_airport:
            raise Exception('No icao entry for nearest airport')
        self.station_id = 'icao:%s' % nearest_airport
constant[
Use geolocation to get the station ID
]
variable[extra_opts] assign[=] <ast.IfExp object at 0x7da204347a00>
variable[api_url] assign[=] binary_operation[name[GEOLOOKUP_URL] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2043465c0>, <ast.Name object at 0x7da204345030>, <ast.Attribute object at 0x7da204346c80>]]]
variable[response] assign[=] call[name[self].api_request, parameter[name[api_url]]]
variable[station_type] assign[=] <ast.IfExp object at 0x7da204346770>
<ast.Try object at 0x7da204567be0>
call[name[self].logger.error, parameter[constant[nearest = %s], name[nearest]]]
if name[self].use_pws begin[:]
variable[nearest_pws] assign[=] call[name[nearest].get, parameter[constant[id], constant[]]]
if <ast.UnaryOp object at 0x7da2045663b0> begin[:]
<ast.Raise object at 0x7da204565480>
name[self].station_id assign[=] binary_operation[constant[pws:%s] <ast.Mod object at 0x7da2590d6920> name[nearest_pws]] | keyword[def] identifier[get_station_id] ( identifier[self] ):
literal[string]
identifier[extra_opts] = literal[string] keyword[if] keyword[not] identifier[self] . identifier[use_pws] keyword[else] literal[string]
identifier[api_url] = identifier[GEOLOOKUP_URL] %( identifier[self] . identifier[api_key] ,
identifier[extra_opts] ,
identifier[self] . identifier[location_code] )
identifier[response] = identifier[self] . identifier[api_request] ( identifier[api_url] )
identifier[station_type] = literal[string] keyword[if] identifier[self] . identifier[use_pws] keyword[else] literal[string]
keyword[try] :
identifier[stations] = identifier[response] [ literal[string] ][ literal[string] ]
identifier[nearest] = identifier[stations] [ identifier[station_type] ][ literal[string] ][ literal[int] ]
keyword[except] ( identifier[KeyError] , identifier[IndexError] ):
keyword[raise] identifier[Exception] (
literal[string] % identifier[self] . identifier[location_code] )
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[nearest] )
keyword[if] identifier[self] . identifier[use_pws] :
identifier[nearest_pws] = identifier[nearest] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[nearest_pws] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[station_id] = literal[string] % identifier[nearest_pws]
keyword[else] :
identifier[nearest_airport] = identifier[nearest] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[nearest_airport] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[station_id] = literal[string] % identifier[nearest_airport] | def get_station_id(self):
"""
Use geolocation to get the station ID
"""
extra_opts = '/pws:0' if not self.use_pws else ''
api_url = GEOLOOKUP_URL % (self.api_key, extra_opts, self.location_code)
response = self.api_request(api_url)
station_type = 'pws' if self.use_pws else 'airport'
try:
stations = response['location']['nearby_weather_stations']
nearest = stations[station_type]['station'][0] # depends on [control=['try'], data=[]]
except (KeyError, IndexError):
raise Exception('No locations matched location_code %s' % self.location_code) # depends on [control=['except'], data=[]]
self.logger.error('nearest = %s', nearest)
if self.use_pws:
nearest_pws = nearest.get('id', '')
if not nearest_pws:
raise Exception('No id entry for nearest PWS') # depends on [control=['if'], data=[]]
self.station_id = 'pws:%s' % nearest_pws # depends on [control=['if'], data=[]]
else:
nearest_airport = nearest.get('icao', '')
if not nearest_airport:
raise Exception('No icao entry for nearest airport') # depends on [control=['if'], data=[]]
self.station_id = 'icao:%s' % nearest_airport |
def ensure_float_vector(F, require_order=False):
    """Ensures that F is a numpy array of floats
    If F is already a numpy array of floats, F is returned (no copied!)
    Otherwise, checks if the argument can be converted to an array of floats and does that.
    Parameters
    ----------
    F: float, or iterable of float
    require_order : bool
        If False (default), an unordered set is accepted. If True, a set is not accepted.
    Returns
    -------
    arr : ndarray(n)
        numpy array with the floats contained in the argument
    Raises
    ------
    TypeError
        If the argument cannot be converted to an array of floats.
    """
    if is_float_vector(F):
        return F
    elif is_float(F):
        return np.array([F])
    elif is_iterable_of_float(F):
        return np.array(F)
    elif isinstance(F, set):
        if require_order:
            raise TypeError('Argument is an unordered set, but I require an ordered array of floats')
        lF = list(F)
        if is_list_of_float(lF):
            return np.array(lF)
        # BUG FIX: previously a set whose elements were not all floats fell
        # through this branch and the function implicitly returned None.
        raise TypeError('Argument is not of a type that is convertible to an array of floats.')
    else:
        raise TypeError('Argument is not of a type that is convertible to an array of floats.')
constant[Ensures that F is a numpy array of floats
If F is already a numpy array of floats, F is returned (no copied!)
Otherwise, checks if the argument can be converted to an array of floats and does that.
Parameters
----------
F: float, or iterable of float
require_order : bool
If False (default), an unordered set is accepted. If True, a set is not accepted.
Returns
-------
arr : ndarray(n)
numpy array with the floats contained in the argument
]
if call[name[is_float_vector], parameter[name[F]]] begin[:]
return[name[F]] | keyword[def] identifier[ensure_float_vector] ( identifier[F] , identifier[require_order] = keyword[False] ):
literal[string]
keyword[if] identifier[is_float_vector] ( identifier[F] ):
keyword[return] identifier[F]
keyword[elif] identifier[is_float] ( identifier[F] ):
keyword[return] identifier[np] . identifier[array] ([ identifier[F] ])
keyword[elif] identifier[is_iterable_of_float] ( identifier[F] ):
keyword[return] identifier[np] . identifier[array] ( identifier[F] )
keyword[elif] identifier[isinstance] ( identifier[F] , identifier[set] ):
keyword[if] identifier[require_order] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[else] :
identifier[lF] = identifier[list] ( identifier[F] )
keyword[if] identifier[is_list_of_float] ( identifier[lF] ):
keyword[return] identifier[np] . identifier[array] ( identifier[lF] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def ensure_float_vector(F, require_order=False):
"""Ensures that F is a numpy array of floats
If F is already a numpy array of floats, F is returned (no copied!)
Otherwise, checks if the argument can be converted to an array of floats and does that.
Parameters
----------
F: float, or iterable of float
require_order : bool
If False (default), an unordered set is accepted. If True, a set is not accepted.
Returns
-------
arr : ndarray(n)
numpy array with the floats contained in the argument
"""
if is_float_vector(F):
return F # depends on [control=['if'], data=[]]
elif is_float(F):
return np.array([F]) # depends on [control=['if'], data=[]]
elif is_iterable_of_float(F):
return np.array(F) # depends on [control=['if'], data=[]]
elif isinstance(F, set):
if require_order:
raise TypeError('Argument is an unordered set, but I require an ordered array of floats') # depends on [control=['if'], data=[]]
else:
lF = list(F)
if is_list_of_float(lF):
return np.array(lF) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise TypeError('Argument is not of a type that is convertible to an array of floats.') |
def set_environment_var_options(config, env=None, prefix='CONFPY'):
    """Set any configuration options which have an environment var set.
    Args:
        config (confpy.core.config.Configuration): A configuration object which
            has been initialized with options.
        env (dict): Optional dictionary which contains environment variables.
            The default is os.environ if no value is given.
        prefix (str): The string prefix prepended to all environment variables.
            This value will be set to upper case. The default is CONFPY.
    Returns:
        confpy.core.config.Configuration: A configuration object with
            environment variables set.
    The pattern to follow when setting environment variables is:
        <PREFIX>_<SECTION>_<OPTION>
    Each value should be upper case and separated by underscores.
    """
    # An empty/falsy mapping also falls back to the process environment,
    # matching the documented default.
    environment = env or os.environ
    for section_name, section in config:
        for option_name, _ in section:
            # <PREFIX>_<SECTION>_<OPTION>, all upper case.
            var_name = '_'.join(
                part.upper() for part in (prefix, section_name, option_name)
            )
            value = environment.get(var_name)
            if value:
                setattr(section, option_name, value)
    return config
constant[Set any configuration options which have an environment var set.
Args:
config (confpy.core.config.Configuration): A configuration object which
has been initialized with options.
env (dict): Optional dictionary which contains environment variables.
The default is os.environ if no value is given.
prefix (str): The string prefix prepended to all environment variables.
This value will be set to upper case. The default is CONFPY.
Returns:
confpy.core.config.Configuration: A configuration object with
environment variables set.
The pattern to follow when setting environment variables is:
<PREFIX>_<SECTION>_<OPTION>
Each value should be upper case and separated by underscores.
]
variable[env] assign[=] <ast.BoolOp object at 0x7da18bc72800>
for taget[tuple[[<ast.Name object at 0x7da18bc71390>, <ast.Name object at 0x7da18bc70700>]]] in starred[name[config]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bc73670>, <ast.Name object at 0x7da18bc73a30>]]] in starred[name[section]] begin[:]
variable[var_name] assign[=] call[constant[{0}_{1}_{2}].format, parameter[call[name[prefix].upper, parameter[]], call[name[section_name].upper, parameter[]], call[name[option_name].upper, parameter[]]]]
variable[env_var] assign[=] call[name[env].get, parameter[name[var_name]]]
if name[env_var] begin[:]
call[name[setattr], parameter[name[section], name[option_name], name[env_var]]]
return[name[config]] | keyword[def] identifier[set_environment_var_options] ( identifier[config] , identifier[env] = keyword[None] , identifier[prefix] = literal[string] ):
literal[string]
identifier[env] = identifier[env] keyword[or] identifier[os] . identifier[environ]
keyword[for] identifier[section_name] , identifier[section] keyword[in] identifier[config] :
keyword[for] identifier[option_name] , identifier[_] keyword[in] identifier[section] :
identifier[var_name] = literal[string] . identifier[format] (
identifier[prefix] . identifier[upper] (),
identifier[section_name] . identifier[upper] (),
identifier[option_name] . identifier[upper] (),
)
identifier[env_var] = identifier[env] . identifier[get] ( identifier[var_name] )
keyword[if] identifier[env_var] :
identifier[setattr] ( identifier[section] , identifier[option_name] , identifier[env_var] )
keyword[return] identifier[config] | def set_environment_var_options(config, env=None, prefix='CONFPY'):
"""Set any configuration options which have an environment var set.
Args:
config (confpy.core.config.Configuration): A configuration object which
has been initialized with options.
env (dict): Optional dictionary which contains environment variables.
The default is os.environ if no value is given.
prefix (str): The string prefix prepended to all environment variables.
This value will be set to upper case. The default is CONFPY.
Returns:
confpy.core.config.Configuration: A configuration object with
environment variables set.
The pattern to follow when setting environment variables is:
<PREFIX>_<SECTION>_<OPTION>
Each value should be upper case and separated by underscores.
"""
env = env or os.environ
for (section_name, section) in config:
for (option_name, _) in section:
var_name = '{0}_{1}_{2}'.format(prefix.upper(), section_name.upper(), option_name.upper())
env_var = env.get(var_name)
if env_var:
setattr(section, option_name, env_var) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return config |
def technical_404_response(request, exception):
    """Create a technical 404 error response. The exception should be the Http404.

    Renders ``404_technical.html`` (falling back to Django's built-in
    technical 404 template) with resolver details, the matched FeinCMS page
    if one exists, and the safe settings snapshot.
    """
    # The Http404 may carry structured resolver info in its args; fall back
    # to the raw request path when it does not.
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash
    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if (not tried  # empty URLconf
                or (request.path == '/'
                    and len(tried) == 1  # default URLconf
                    and len(tried[0]) == 1
                    and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
            return default_urlconf(request)
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__
    # Work out a dotted name for the view that raised, if any resolves.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        obj = resolver_match.func
        if hasattr(obj, '__name__'):
            caller = obj.__name__
        elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
            caller = obj.__class__.__name__
        if hasattr(obj, '__module__'):
            module = obj.__module__
            caller = '%s.%s' % (module, caller)
    feincms_page = slug = template = None
    # BUG FIX: this import used to sit inside the ``try`` below, so when
    # leonardo was missing the handler crashed with NameError on ``Page``.
    # Import first so an absent dependency fails with a clear ImportError.
    from leonardo.module.web.models import Page
    try:
        feincms_page = Page.objects.for_request(request, best_match=True)
        template = feincms_page.theme.template
    except Exception:  # was a bare ``except:``; don't swallow SystemExit etc.
        if Page.objects.exists():
            feincms_page = Page.objects.filter(parent=None).first()
            template = feincms_page.theme.template
        else:
            # nested path is not allowed for this time
            try:
                slug = request.path_info.split("/")[-2:-1][0]
            # BUG FIX: an empty ``[-2:-1]`` slice raises IndexError, never
            # KeyError, so the original handler was dead code.
            except (IndexError, KeyError):
                raise Exception("Nested path is not allowed !")
    c = RequestContext(request, {
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
        'feincms_page': feincms_page,
        'template': template or 'base.html',
        'standalone': True,
        'slug': slug,
    })
    try:
        t = render_to_string('404_technical.html', c)
    except Exception:  # fall back to Django's built-in technical 404 page
        from django.views.debug import TECHNICAL_404_TEMPLATE
        t = Template(TECHNICAL_404_TEMPLATE).render(c)
    return HttpResponseNotFound(t, content_type='text/html')
constant[Create a technical 404 error response. The exception should be the Http404.]
<ast.Try object at 0x7da1b0e47e50>
<ast.Try object at 0x7da1b0e47a60>
variable[urlconf] assign[=] call[name[getattr], parameter[name[request], constant[urlconf], name[settings].ROOT_URLCONF]]
if call[name[isinstance], parameter[name[urlconf], name[types].ModuleType]] begin[:]
variable[urlconf] assign[=] name[urlconf].__name__
variable[caller] assign[=] constant[]
<ast.Try object at 0x7da1b0e46aa0>
variable[feincms_page] assign[=] constant[None]
<ast.Try object at 0x7da1b0e45f30>
variable[c] assign[=] call[name[RequestContext], parameter[name[request], dictionary[[<ast.Constant object at 0x7da1b0e452d0>, <ast.Constant object at 0x7da1b0e452a0>, <ast.Constant object at 0x7da1b0e45270>, <ast.Constant object at 0x7da1b0e45240>, <ast.Constant object at 0x7da1b0e45210>, <ast.Constant object at 0x7da1b0e451e0>, <ast.Constant object at 0x7da1b0e451b0>, <ast.Constant object at 0x7da1b0e45180>, <ast.Constant object at 0x7da1b0e45150>, <ast.Constant object at 0x7da1b0e45120>, <ast.Constant object at 0x7da1b0e450f0>, <ast.Constant object at 0x7da1b0e450c0>], [<ast.Name object at 0x7da1b0effb50>, <ast.Attribute object at 0x7da1b0efd6f0>, <ast.Name object at 0x7da1b0effbe0>, <ast.Name object at 0x7da1b0efc850>, <ast.Call object at 0x7da1b0efc100>, <ast.Name object at 0x7da1b0eff2e0>, <ast.Call object at 0x7da1b0efe3b0>, <ast.Name object at 0x7da1b0efe5c0>, <ast.Name object at 0x7da1b0efcc70>, <ast.BoolOp object at 0x7da1b0efcee0>, <ast.Constant object at 0x7da1b0e45090>, <ast.Name object at 0x7da1b0e45060>]]]]
<ast.Try object at 0x7da1b0e45030>
return[call[name[HttpResponseNotFound], parameter[name[t]]]] | keyword[def] identifier[technical_404_response] ( identifier[request] , identifier[exception] ):
literal[string]
keyword[try] :
identifier[error_url] = identifier[exception] . identifier[args] [ literal[int] ][ literal[string] ]
keyword[except] ( identifier[IndexError] , identifier[TypeError] , identifier[KeyError] ):
identifier[error_url] = identifier[request] . identifier[path_info] [ literal[int] :]
keyword[try] :
identifier[tried] = identifier[exception] . identifier[args] [ literal[int] ][ literal[string] ]
keyword[except] ( identifier[IndexError] , identifier[TypeError] , identifier[KeyError] ):
identifier[tried] =[]
keyword[else] :
keyword[if] ( keyword[not] identifier[tried]
keyword[or] ( identifier[request] . identifier[path] == literal[string]
keyword[and] identifier[len] ( identifier[tried] )== literal[int]
keyword[and] identifier[len] ( identifier[tried] [ literal[int] ])== literal[int]
keyword[and] identifier[getattr] ( identifier[tried] [ literal[int] ][ literal[int] ], literal[string] , literal[string] )== identifier[getattr] ( identifier[tried] [ literal[int] ][ literal[int] ], literal[string] , literal[string] )== literal[string] )):
keyword[return] identifier[default_urlconf] ( identifier[request] )
identifier[urlconf] = identifier[getattr] ( identifier[request] , literal[string] , identifier[settings] . identifier[ROOT_URLCONF] )
keyword[if] identifier[isinstance] ( identifier[urlconf] , identifier[types] . identifier[ModuleType] ):
identifier[urlconf] = identifier[urlconf] . identifier[__name__]
identifier[caller] = literal[string]
keyword[try] :
identifier[resolver_match] = identifier[resolve] ( identifier[request] . identifier[path] )
keyword[except] identifier[Resolver404] :
keyword[pass]
keyword[else] :
identifier[obj] = identifier[resolver_match] . identifier[func]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[caller] = identifier[obj] . identifier[__name__]
keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[obj] . identifier[__class__] , literal[string] ):
identifier[caller] = identifier[obj] . identifier[__class__] . identifier[__name__]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[module] = identifier[obj] . identifier[__module__]
identifier[caller] = literal[string] %( identifier[module] , identifier[caller] )
identifier[feincms_page] = identifier[slug] = identifier[template] = keyword[None]
keyword[try] :
keyword[from] identifier[leonardo] . identifier[module] . identifier[web] . identifier[models] keyword[import] identifier[Page]
identifier[feincms_page] = identifier[Page] . identifier[objects] . identifier[for_request] ( identifier[request] , identifier[best_match] = keyword[True] )
identifier[template] = identifier[feincms_page] . identifier[theme] . identifier[template]
keyword[except] :
keyword[if] identifier[Page] . identifier[objects] . identifier[exists] ():
identifier[feincms_page] = identifier[Page] . identifier[objects] . identifier[filter] ( identifier[parent] = keyword[None] ). identifier[first] ()
identifier[template] = identifier[feincms_page] . identifier[theme] . identifier[template]
keyword[else] :
keyword[try] :
identifier[slug] = identifier[request] . identifier[path_info] . identifier[split] ( literal[string] )[- literal[int] :- literal[int] ][ literal[int] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[c] = identifier[RequestContext] ( identifier[request] ,{
literal[string] : identifier[urlconf] ,
literal[string] : identifier[settings] . identifier[ROOT_URLCONF] ,
literal[string] : identifier[error_url] ,
literal[string] : identifier[tried] ,
literal[string] : identifier[force_bytes] ( identifier[exception] , identifier[errors] = literal[string] ),
literal[string] : identifier[request] ,
literal[string] : identifier[get_safe_settings] (),
literal[string] : identifier[caller] ,
literal[string] : identifier[feincms_page] ,
literal[string] : identifier[template] keyword[or] literal[string] ,
literal[string] : keyword[True] ,
literal[string] : identifier[slug] ,
})
keyword[try] :
identifier[t] = identifier[render_to_string] ( literal[string] , identifier[c] )
keyword[except] :
keyword[from] identifier[django] . identifier[views] . identifier[debug] keyword[import] identifier[TECHNICAL_404_TEMPLATE]
identifier[t] = identifier[Template] ( identifier[TECHNICAL_404_TEMPLATE] ). identifier[render] ( identifier[c] )
keyword[return] identifier[HttpResponseNotFound] ( identifier[t] , identifier[content_type] = literal[string] ) | def technical_404_response(request, exception):
"""Create a technical 404 error response. The exception should be the Http404."""
try:
error_url = exception.args[0]['path'] # depends on [control=['try'], data=[]]
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash # depends on [control=['except'], data=[]]
try:
tried = exception.args[0]['tried'] # depends on [control=['try'], data=[]]
except (IndexError, TypeError, KeyError):
tried = [] # depends on [control=['except'], data=[]]
else:
if not tried or (request.path == '/' and len(tried) == 1 and (len(tried[0]) == 1) and (getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): # empty URLconf
# default URLconf
return default_urlconf(request) # depends on [control=['if'], data=[]]
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__ # depends on [control=['if'], data=[]]
caller = ''
try:
resolver_match = resolve(request.path) # depends on [control=['try'], data=[]]
except Resolver404:
pass # depends on [control=['except'], data=[]]
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__ # depends on [control=['if'], data=[]]
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__ # depends on [control=['if'], data=[]]
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller) # depends on [control=['if'], data=[]]
feincms_page = slug = template = None
try:
from leonardo.module.web.models import Page
feincms_page = Page.objects.for_request(request, best_match=True)
template = feincms_page.theme.template # depends on [control=['try'], data=[]]
except:
if Page.objects.exists():
feincms_page = Page.objects.filter(parent=None).first()
template = feincms_page.theme.template # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
else:
# nested path is not allowed for this time
try:
slug = request.path_info.split('/')[-2:-1][0] # depends on [control=['try'], data=[]]
except KeyError:
raise Exception('Nested path is not allowed !') # depends on [control=['except'], data=[]]
c = RequestContext(request, {'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, 'feincms_page': feincms_page, 'template': template or 'base.html', 'standalone': True, 'slug': slug})
try:
t = render_to_string('404_technical.html', c) # depends on [control=['try'], data=[]]
except:
from django.views.debug import TECHNICAL_404_TEMPLATE
t = Template(TECHNICAL_404_TEMPLATE).render(c) # depends on [control=['except'], data=[]]
return HttpResponseNotFound(t, content_type='text/html') |
def rollback_transaction(self):
    """Roll back the currently open transaction.

    :raise: :class:`.TransactionError` if no transaction is currently open
    """
    self._assert_open()
    if not self._transaction:
        raise TransactionError("No transaction to rollback")
    connection = self._connection
    if connection:
        # Collect any server metadata reported on a successful rollback.
        server_metadata = {}
        try:
            connection.rollback(on_success=server_metadata.update)
        finally:
            # Always release the connection, even if the rollback raised.
            self._disconnect(sync=True)
    self._transaction = None
constant[ Rollback the current transaction.
:raise: :class:`.TransactionError` if no transaction is currently open
]
call[name[self]._assert_open, parameter[]]
if <ast.UnaryOp object at 0x7da18c4cc850> begin[:]
<ast.Raise object at 0x7da18c4cd8a0>
variable[cx] assign[=] name[self]._connection
if name[cx] begin[:]
variable[metadata] assign[=] dictionary[[], []]
<ast.Try object at 0x7da20c6c6f20> | keyword[def] identifier[rollback_transaction] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_assert_open] ()
keyword[if] keyword[not] identifier[self] . identifier[_transaction] :
keyword[raise] identifier[TransactionError] ( literal[string] )
identifier[cx] = identifier[self] . identifier[_connection]
keyword[if] identifier[cx] :
identifier[metadata] ={}
keyword[try] :
identifier[cx] . identifier[rollback] ( identifier[on_success] = identifier[metadata] . identifier[update] )
keyword[finally] :
identifier[self] . identifier[_disconnect] ( identifier[sync] = keyword[True] )
identifier[self] . identifier[_transaction] = keyword[None] | def rollback_transaction(self):
""" Rollback the current transaction.
:raise: :class:`.TransactionError` if no transaction is currently open
"""
self._assert_open()
if not self._transaction:
raise TransactionError('No transaction to rollback') # depends on [control=['if'], data=[]]
cx = self._connection
if cx:
metadata = {}
try:
cx.rollback(on_success=metadata.update) # depends on [control=['try'], data=[]]
finally:
self._disconnect(sync=True)
self._transaction = None # depends on [control=['if'], data=[]] |
def add_generic_info_message_for_error(request):
    """
    Add message to request indicating that there was an issue processing request.
    Arguments:
        request: The current request.
    """
    # Translate first, then substitute the markup placeholders.
    message_template = _(
        '{strong_start}Something happened.{strong_end} '
        '{span_start}This course link is currently invalid. '
        'Please reach out to your Administrator for assistance to this course.{span_end}'
    )
    rendered_message = message_template.format(
        span_start='<span>',
        span_end='</span>',
        strong_start='<strong>',
        strong_end='</strong>',
    )
    messages.info(request, rendered_message)
constant[
Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request.
]
call[name[messages].info, parameter[name[request], call[call[name[_], parameter[constant[{strong_start}Something happened.{strong_end} {span_start}This course link is currently invalid. Please reach out to your Administrator for assistance to this course.{span_end}]]].format, parameter[]]]] | keyword[def] identifier[add_generic_info_message_for_error] ( identifier[request] ):
literal[string]
identifier[messages] . identifier[info] (
identifier[request] ,
identifier[_] (
literal[string]
literal[string]
literal[string]
). identifier[format] (
identifier[span_start] = literal[string] ,
identifier[span_end] = literal[string] ,
identifier[strong_start] = literal[string] ,
identifier[strong_end] = literal[string] ,
)
) | def add_generic_info_message_for_error(request):
"""
Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request.
"""
messages.info(request, _('{strong_start}Something happened.{strong_end} {span_start}This course link is currently invalid. Please reach out to your Administrator for assistance to this course.{span_end}').format(span_start='<span>', span_end='</span>', strong_start='<strong>', strong_end='</strong>')) |
def cli(yamlfile, inline, format):
    """ Generate JSON Schema representation of a biolink model """
    generator = JsonSchemaGenerator(yamlfile, format)
    print(generator.serialize(inline=inline))
constant[ Generate JSON Schema representation of a biolink model ]
call[name[print], parameter[call[call[name[JsonSchemaGenerator], parameter[name[yamlfile], name[format]]].serialize, parameter[]]]] | keyword[def] identifier[cli] ( identifier[yamlfile] , identifier[inline] , identifier[format] ):
literal[string]
identifier[print] ( identifier[JsonSchemaGenerator] ( identifier[yamlfile] , identifier[format] ). identifier[serialize] ( identifier[inline] = identifier[inline] )) | def cli(yamlfile, inline, format):
""" Generate JSON Schema representation of a biolink model """
print(JsonSchemaGenerator(yamlfile, format).serialize(inline=inline)) |
def part_edit_cmd():
    'Edit a part from an OOXML Package without unzipping it'
    # NOTE: the docstring above doubles as the CLI description via
    # inspect.getdoc below, so it is user-visible help text.
    parser = argparse.ArgumentParser(description=inspect.getdoc(part_edit_cmd))
    part_path_help = (
        'Path to part (including path to zip file, i.e. ./file.zipx/part)'
    )
    parser.add_argument('path', help=part_path_help)
    reformat_help = (
        'run the content through an XML pretty-printer '
        'first for improved editability'
    )
    parser.add_argument('--reformat-xml', action='store_true', help=reformat_help)
    options = parser.parse_args()
    part_edit(options.path, options.reformat_xml)
constant[Edit a part from an OOXML Package without unzipping it]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[path]]]
call[name[parser].add_argument, parameter[constant[--reformat-xml]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
call[name[part_edit], parameter[name[args].path, name[args].reformat_xml]] | keyword[def] identifier[part_edit_cmd] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[inspect] . identifier[getdoc] ( identifier[part_edit_cmd] ))
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[help] =(
literal[string]
literal[string]
),
)
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[part_edit] ( identifier[args] . identifier[path] , identifier[args] . identifier[reformat_xml] ) | def part_edit_cmd():
"""Edit a part from an OOXML Package without unzipping it"""
parser = argparse.ArgumentParser(description=inspect.getdoc(part_edit_cmd))
parser.add_argument('path', help='Path to part (including path to zip file, i.e. ./file.zipx/part)')
parser.add_argument('--reformat-xml', action='store_true', help='run the content through an XML pretty-printer first for improved editability')
args = parser.parse_args()
part_edit(args.path, args.reformat_xml) |
def from_locale(cls, locale):
    """
    Create a new Language instance from a locale string.

    :param locale: locale as string
    :return: Language instance with instance.locale() == locale if locale is
        valid, else an UnknownLanguage instance
    """
    locale = str(locale)
    # Fix: the original used `locale is 'unknown'`, an identity comparison
    # against a string literal. That is implementation-dependent (and a
    # SyntaxWarning on CPython >= 3.8): a runtime-built 'unknown' string would
    # fail the check and fall through to _from_xyz, logging a spurious
    # warning. Value equality is what is meant here.
    if locale == 'unknown':
        return UnknownLanguage(locale)
    try:
        return cls._from_xyz('locale', locale)
    except NotALanguageException:
        # Unrecognized locales degrade gracefully to an UnknownLanguage
        # placeholder rather than propagating the exception.
        log.warning('Unknown locale: {}'.format(locale))
        return UnknownLanguage(locale)
constant[
Create a new Language instance from a locale string
:param locale: locale as string
:return: Language instance with instance.locale() == locale if locale is valid else instance of Unknown Language
]
variable[locale] assign[=] call[name[str], parameter[name[locale]]]
if compare[name[locale] is constant[unknown]] begin[:]
return[call[name[UnknownLanguage], parameter[name[locale]]]]
<ast.Try object at 0x7da1b10bd630> | keyword[def] identifier[from_locale] ( identifier[cls] , identifier[locale] ):
literal[string]
identifier[locale] = identifier[str] ( identifier[locale] )
keyword[if] identifier[locale] keyword[is] literal[string] :
keyword[return] identifier[UnknownLanguage] ( identifier[locale] )
keyword[try] :
keyword[return] identifier[cls] . identifier[_from_xyz] ( literal[string] , identifier[locale] )
keyword[except] identifier[NotALanguageException] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[locale] ))
keyword[return] identifier[UnknownLanguage] ( identifier[locale] ) | def from_locale(cls, locale):
"""
Create a new Language instance from a locale string
:param locale: locale as string
:return: Language instance with instance.locale() == locale if locale is valid else instance of Unknown Language
"""
locale = str(locale)
if locale is 'unknown':
return UnknownLanguage(locale) # depends on [control=['if'], data=['locale']]
try:
return cls._from_xyz('locale', locale) # depends on [control=['try'], data=[]]
except NotALanguageException:
log.warning('Unknown locale: {}'.format(locale))
return UnknownLanguage(locale) # depends on [control=['except'], data=[]] |
def _search_for_executable(self, executable):
"""
Search for file give in "executable". If it is not found, we try the environment PATH.
Returns either the absolute path to the found executable, or None if the executable
couldn't be found.
"""
if os.path.isfile(executable):
return os.path.abspath(executable)
else:
envpath = os.getenv('PATH')
if envpath is None:
return
for path in envpath.split(os.pathsep):
exe = os.path.join(path, executable)
if os.path.isfile(exe):
return os.path.abspath(exe) | def function[_search_for_executable, parameter[self, executable]]:
constant[
Search for file give in "executable". If it is not found, we try the environment PATH.
Returns either the absolute path to the found executable, or None if the executable
couldn't be found.
]
if call[name[os].path.isfile, parameter[name[executable]]] begin[:]
return[call[name[os].path.abspath, parameter[name[executable]]]] | keyword[def] identifier[_search_for_executable] ( identifier[self] , identifier[executable] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[executable] ):
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[executable] )
keyword[else] :
identifier[envpath] = identifier[os] . identifier[getenv] ( literal[string] )
keyword[if] identifier[envpath] keyword[is] keyword[None] :
keyword[return]
keyword[for] identifier[path] keyword[in] identifier[envpath] . identifier[split] ( identifier[os] . identifier[pathsep] ):
identifier[exe] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[executable] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[exe] ):
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[exe] ) | def _search_for_executable(self, executable):
"""
Search for file give in "executable". If it is not found, we try the environment PATH.
Returns either the absolute path to the found executable, or None if the executable
couldn't be found.
"""
if os.path.isfile(executable):
return os.path.abspath(executable) # depends on [control=['if'], data=[]]
else:
envpath = os.getenv('PATH')
if envpath is None:
return # depends on [control=['if'], data=[]]
for path in envpath.split(os.pathsep):
exe = os.path.join(path, executable)
if os.path.isfile(exe):
return os.path.abspath(exe) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']] |
def to_ip(self):
    """Return of copy of the data inside a TDIP container
    """
    # Guard clause: refuse to build a TDIP container without the
    # chargeability column.
    if 'chargeability' not in self.data.columns:
        raise Exception('Missing column "chargeability"')
    return reda.TDIP(data=self.data)
constant[Return of copy of the data inside a TDIP container
]
if compare[constant[chargeability] in name[self].data.columns] begin[:]
variable[tdip] assign[=] call[name[reda].TDIP, parameter[]]
return[name[tdip]] | keyword[def] identifier[to_ip] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[data] . identifier[columns] :
identifier[tdip] = identifier[reda] . identifier[TDIP] ( identifier[data] = identifier[self] . identifier[data] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[tdip] | def to_ip(self):
"""Return of copy of the data inside a TDIP container
"""
if 'chargeability' in self.data.columns:
tdip = reda.TDIP(data=self.data) # depends on [control=['if'], data=[]]
else:
raise Exception('Missing column "chargeability"')
return tdip |
def is_edge_consistent(graph, u, v):
    """Check if all edges between two nodes have the same relation.

    :param pybel.BELGraph graph: A BEL Graph
    :param tuple u: The source BEL node
    :param tuple v: The target BEL node
    :return: If all edges from the source to target node have the same relation
    :rtype: bool
    :raises ValueError: if no edge exists between ``u`` and ``v``
    """
    if not graph.has_edge(u, v):
        raise ValueError('{} does not contain an edge ({}, {})'.format(graph, u, v))
    # After the has_edge guard at least one parallel edge exists, so the set
    # of relations can never be empty. "Consistent" therefore means exactly
    # one distinct relation. The original compared the size against 0, which
    # is unreachable here and made the function always return False.
    return 1 == len({d[RELATION] for d in graph.edge[u][v].values()})
constant[Check if all edges between two nodes have the same relation.
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:return: If all edges from the source to target node have the same relation
:rtype: bool
]
if <ast.UnaryOp object at 0x7da18dc07640> begin[:]
<ast.Raise object at 0x7da18dc05030>
return[compare[constant[0] equal[==] call[name[len], parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da18dc079a0>]]]]]] | keyword[def] identifier[is_edge_consistent] ( identifier[graph] , identifier[u] , identifier[v] ):
literal[string]
keyword[if] keyword[not] identifier[graph] . identifier[has_edge] ( identifier[u] , identifier[v] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[graph] , identifier[u] , identifier[v] ))
keyword[return] literal[int] == identifier[len] ( identifier[set] ( identifier[d] [ identifier[RELATION] ] keyword[for] identifier[d] keyword[in] identifier[graph] . identifier[edge] [ identifier[u] ][ identifier[v] ]. identifier[values] ())) | def is_edge_consistent(graph, u, v):
"""Check if all edges between two nodes have the same relation.
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:return: If all edges from the source to target node have the same relation
:rtype: bool
"""
if not graph.has_edge(u, v):
raise ValueError('{} does not contain an edge ({}, {})'.format(graph, u, v)) # depends on [control=['if'], data=[]]
return 0 == len(set((d[RELATION] for d in graph.edge[u][v].values()))) |
def keep_on_one_line():
    """
    Keep all the output generated within a with-block on one line. Whenever a
    new line would be printed, instead reset the cursor to the beginning of the
    line and print the new line without a line break.

    NOTE(review): this is a generator that yields once, so it is presumably
    wrapped with ``contextlib.contextmanager`` at a decoration site outside
    this view -- TODO confirm.
    """
    class CondensedStream:
        # File-like stand-in for sys.stdout: collapses newlines so every
        # write lands on the current terminal line.
        def __init__(self):
            # Capture the real stdout now so writes can be forwarded to it
            # later, while it is swapped out for this stream.
            self.sys_stdout = sys.stdout
        def write(self, string):
            # Temporarily restore the real stdout, flatten newlines to
            # spaces, truncate to the terminal width, and drop strings that
            # are empty after stripping (so pure-whitespace writes, e.g. the
            # newline print() emits, produce no update).
            with swap_streams(self.sys_stdout):
                string = string.replace('\n', ' ')
                string = truncate_to_fit_terminal(string)
                if string.strip():
                    update(string)
        def flush(self):
            # Forward flushes to the real stdout as well.
            with swap_streams(self.sys_stdout):
                flush()
    # Install the condensing stream for the duration of the with-block.
    with swap_streams(CondensedStream()):
        yield
constant[
Keep all the output generated within a with-block on one line. Whenever a
new line would be printed, instead reset the cursor to the beginning of the
line and print the new line without a line break.
]
class class[CondensedStream, parameter[]] begin[:]
def function[__init__, parameter[self]]:
name[self].sys_stdout assign[=] name[sys].stdout
def function[write, parameter[self, string]]:
with call[name[swap_streams], parameter[name[self].sys_stdout]] begin[:]
variable[string] assign[=] call[name[string].replace, parameter[constant[
], constant[ ]]]
variable[string] assign[=] call[name[truncate_to_fit_terminal], parameter[name[string]]]
if call[name[string].strip, parameter[]] begin[:]
call[name[update], parameter[name[string]]]
def function[flush, parameter[self]]:
with call[name[swap_streams], parameter[name[self].sys_stdout]] begin[:]
call[name[flush], parameter[]]
with call[name[swap_streams], parameter[call[name[CondensedStream], parameter[]]]] begin[:]
<ast.Yield object at 0x7da18bc724d0> | keyword[def] identifier[keep_on_one_line] ():
literal[string]
keyword[class] identifier[CondensedStream] :
keyword[def] identifier[__init__] ( identifier[self] ):
identifier[self] . identifier[sys_stdout] = identifier[sys] . identifier[stdout]
keyword[def] identifier[write] ( identifier[self] , identifier[string] ):
keyword[with] identifier[swap_streams] ( identifier[self] . identifier[sys_stdout] ):
identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] )
identifier[string] = identifier[truncate_to_fit_terminal] ( identifier[string] )
keyword[if] identifier[string] . identifier[strip] ():
identifier[update] ( identifier[string] )
keyword[def] identifier[flush] ( identifier[self] ):
keyword[with] identifier[swap_streams] ( identifier[self] . identifier[sys_stdout] ):
identifier[flush] ()
keyword[with] identifier[swap_streams] ( identifier[CondensedStream] ()):
keyword[yield] | def keep_on_one_line():
"""
Keep all the output generated within a with-block on one line. Whenever a
new line would be printed, instead reset the cursor to the beginning of the
line and print the new line without a line break.
"""
class CondensedStream:
def __init__(self):
self.sys_stdout = sys.stdout
def write(self, string):
with swap_streams(self.sys_stdout):
string = string.replace('\n', ' ')
string = truncate_to_fit_terminal(string)
if string.strip():
update(string) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
def flush(self):
with swap_streams(self.sys_stdout):
flush() # depends on [control=['with'], data=[]]
with swap_streams(CondensedStream()):
yield # depends on [control=['with'], data=[]] |
def do_POST(self):
    """Handles the HTTP POST request.
    Attempts to interpret all HTTP POST requests as XML-RPC calls,
    which are forwarded to the server's _dispatch method for handling.

    On success writes a 200 response carrying the marshaled XML-RPC
    reply; any exception from dispatch is reported as a bare HTTP 500.
    """
    # Check that the path is legal
    if not self.is_rpc_path_valid():
        self.report_404()
        return
    try:
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            # read() may return fewer bytes than requested, so decrement by
            # what actually arrived rather than by chunk_size.
            size_remaining -= len(L[-1])
        data = ''.join(L)
        # In previous versions of SimpleXMLRPCServer, _dispatch
        # could be overridden in this class, instead of in
        # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
        # check to see if a subclass implements _dispatch and dispatch
        # using that method if present.
        response = self.server._marshaled_dispatch(
            data, getattr(self, '_dispatch', None)
        )
    except: # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        # NOTE(review): the bare except is deliberate here -- it shields the
        # serving loop from any dispatcher failure by turning it into a 500.
        self.send_response(500)
        self.end_headers()
    else:
        # got a valid XML RPC response
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
constant[Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
]
if <ast.UnaryOp object at 0x7da204621960> begin[:]
call[name[self].report_404, parameter[]]
return[None]
<ast.Try object at 0x7da204622110> | keyword[def] identifier[do_POST] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_rpc_path_valid] ():
identifier[self] . identifier[report_404] ()
keyword[return]
keyword[try] :
identifier[max_chunk_size] = literal[int] * literal[int] * literal[int]
identifier[size_remaining] = identifier[int] ( identifier[self] . identifier[headers] [ literal[string] ])
identifier[L] =[]
keyword[while] identifier[size_remaining] :
identifier[chunk_size] = identifier[min] ( identifier[size_remaining] , identifier[max_chunk_size] )
identifier[L] . identifier[append] ( identifier[self] . identifier[rfile] . identifier[read] ( identifier[chunk_size] ))
identifier[size_remaining] -= identifier[len] ( identifier[L] [- literal[int] ])
identifier[data] = literal[string] . identifier[join] ( identifier[L] )
identifier[response] = identifier[self] . identifier[server] . identifier[_marshaled_dispatch] (
identifier[data] , identifier[getattr] ( identifier[self] , literal[string] , keyword[None] )
)
keyword[except] :
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[end_headers] ()
keyword[else] :
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[send_header] ( literal[string] , identifier[str] ( identifier[len] ( identifier[response] )))
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( identifier[response] )
identifier[self] . identifier[wfile] . identifier[flush] ()
identifier[self] . identifier[connection] . identifier[shutdown] ( literal[int] ) | def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return # depends on [control=['if'], data=[]]
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers['content-length'])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1]) # depends on [control=['while'], data=[]]
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(data, getattr(self, '_dispatch', None)) # depends on [control=['try'], data=[]]
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers() # depends on [control=['except'], data=[]]
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1) |
def write(self, fp, space_around_delimiters=True):
    """Write an .ini-format representation of the configuration state.

    If `space_around_delimiters' is True (the default), delimiters
    between keys and values are surrounded by spaces.
    """
    delim = self._delimiters[0]
    if space_around_delimiters:
        delim = " {0} ".format(delim)
    # Defaults go out first, under the default section header.
    if self._defaults:
        self._write_section(fp, self.default_section, self._defaults.items(), delim)
    # Then every regular section, in stored order.
    for name in self._sections:
        self._write_section(fp, name, self._sections[name].items(), delim)
constant[Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
]
if name[space_around_delimiters] begin[:]
variable[d] assign[=] call[constant[ {0} ].format, parameter[call[name[self]._delimiters][constant[0]]]]
if name[self]._defaults begin[:]
call[name[self]._write_section, parameter[name[fp], name[self].default_section, call[name[self]._defaults.items, parameter[]], name[d]]]
for taget[name[section]] in starred[name[self]._sections] begin[:]
call[name[self]._write_section, parameter[name[fp], name[section], call[call[name[self]._sections][name[section]].items, parameter[]], name[d]]] | keyword[def] identifier[write] ( identifier[self] , identifier[fp] , identifier[space_around_delimiters] = keyword[True] ):
literal[string]
keyword[if] identifier[space_around_delimiters] :
identifier[d] = literal[string] . identifier[format] ( identifier[self] . identifier[_delimiters] [ literal[int] ])
keyword[else] :
identifier[d] = identifier[self] . identifier[_delimiters] [ literal[int] ]
keyword[if] identifier[self] . identifier[_defaults] :
identifier[self] . identifier[_write_section] ( identifier[fp] , identifier[self] . identifier[default_section] ,
identifier[self] . identifier[_defaults] . identifier[items] (), identifier[d] )
keyword[for] identifier[section] keyword[in] identifier[self] . identifier[_sections] :
identifier[self] . identifier[_write_section] ( identifier[fp] , identifier[section] ,
identifier[self] . identifier[_sections] [ identifier[section] ]. identifier[items] (), identifier[d] ) | def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = ' {0} '.format(self._delimiters[0]) # depends on [control=['if'], data=[]]
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section, self._defaults.items(), d) # depends on [control=['if'], data=[]]
for section in self._sections:
self._write_section(fp, section, self._sections[section].items(), d) # depends on [control=['for'], data=['section']] |
def add_collaboration(self, collaboration):
    """Add collaboration.

    :param collaboration: collaboration for the current document
    :type collaboration: string
    """
    # A single input string may normalize to several collaborations; each one
    # is appended as its own entry.
    for value in normalize_collaboration(collaboration):
        self._append_to('collaborations', {'value': value})
constant[Add collaboration.
:param collaboration: collaboration for the current document
:type collaboration: string
]
variable[collaborations] assign[=] call[name[normalize_collaboration], parameter[name[collaboration]]]
for taget[name[collaboration]] in starred[name[collaborations]] begin[:]
call[name[self]._append_to, parameter[constant[collaborations], dictionary[[<ast.Constant object at 0x7da1b24ad7b0>], [<ast.Name object at 0x7da1b24af910>]]]] | keyword[def] identifier[add_collaboration] ( identifier[self] , identifier[collaboration] ):
literal[string]
identifier[collaborations] = identifier[normalize_collaboration] ( identifier[collaboration] )
keyword[for] identifier[collaboration] keyword[in] identifier[collaborations] :
identifier[self] . identifier[_append_to] ( literal[string] ,{
literal[string] : identifier[collaboration]
}) | def add_collaboration(self, collaboration):
"""Add collaboration.
:param collaboration: collaboration for the current document
:type collaboration: string
"""
collaborations = normalize_collaboration(collaboration)
for collaboration in collaborations:
self._append_to('collaborations', {'value': collaboration}) # depends on [control=['for'], data=['collaboration']] |
def draw_widget(self, item):
    """Create a preview of the selected treeview item"""
    if not item:
        return
    self.filter_remove(remember=True)
    selected_id = self.treedata[item]['id']
    # The preview is always rendered from the item's toplevel ancestor.
    toplevel = self.get_toplevel_parent(item)
    top_info = self.treedata[toplevel]
    xmlnode = self.tree_node_to_xml('', toplevel)
    self.previewer.draw(toplevel, top_info['id'], xmlnode, top_info['class'])
    self.previewer.show_selected(toplevel, selected_id)
    self.filter_restore()
constant[Create a preview of the selected treeview item]
if name[item] begin[:]
call[name[self].filter_remove, parameter[]]
variable[selected_id] assign[=] call[call[name[self].treedata][name[item]]][constant[id]]
variable[item] assign[=] call[name[self].get_toplevel_parent, parameter[name[item]]]
variable[widget_id] assign[=] call[call[name[self].treedata][name[item]]][constant[id]]
variable[wclass] assign[=] call[call[name[self].treedata][name[item]]][constant[class]]
variable[xmlnode] assign[=] call[name[self].tree_node_to_xml, parameter[constant[], name[item]]]
call[name[self].previewer.draw, parameter[name[item], name[widget_id], name[xmlnode], name[wclass]]]
call[name[self].previewer.show_selected, parameter[name[item], name[selected_id]]]
call[name[self].filter_restore, parameter[]] | keyword[def] identifier[draw_widget] ( identifier[self] , identifier[item] ):
literal[string]
keyword[if] identifier[item] :
identifier[self] . identifier[filter_remove] ( identifier[remember] = keyword[True] )
identifier[selected_id] = identifier[self] . identifier[treedata] [ identifier[item] ][ literal[string] ]
identifier[item] = identifier[self] . identifier[get_toplevel_parent] ( identifier[item] )
identifier[widget_id] = identifier[self] . identifier[treedata] [ identifier[item] ][ literal[string] ]
identifier[wclass] = identifier[self] . identifier[treedata] [ identifier[item] ][ literal[string] ]
identifier[xmlnode] = identifier[self] . identifier[tree_node_to_xml] ( literal[string] , identifier[item] )
identifier[self] . identifier[previewer] . identifier[draw] ( identifier[item] , identifier[widget_id] , identifier[xmlnode] , identifier[wclass] )
identifier[self] . identifier[previewer] . identifier[show_selected] ( identifier[item] , identifier[selected_id] )
identifier[self] . identifier[filter_restore] () | def draw_widget(self, item):
"""Create a preview of the selected treeview item"""
if item:
self.filter_remove(remember=True)
selected_id = self.treedata[item]['id']
item = self.get_toplevel_parent(item)
widget_id = self.treedata[item]['id']
wclass = self.treedata[item]['class']
xmlnode = self.tree_node_to_xml('', item)
self.previewer.draw(item, widget_id, xmlnode, wclass)
self.previewer.show_selected(item, selected_id)
self.filter_restore() # depends on [control=['if'], data=[]] |
def find_charged(self, mol):
    """Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.

    :param mol: pybel molecule wrapping the protein (mol.OBMol is iterated)
    :return: list of 'pcharge' namedtuples, one per charged group, carrying
        the contributing atoms, their original indices, the charge type
        ('positive' or 'negative'), the geometric center of the group, and
        the residue name/number/chain.
    """
    data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain')
    a_set = []
    # Iterate through all residue, exclude those in chains defined as peptides
    for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]:
        if config.INTRA is not None:
            # Intra-chain mode: restrict the search to the selected chain.
            if res.GetChain() != config.INTRA:
                continue
        a_contributing = []
        a_contributing_orig_idx = []
        if res.GetName() in ('ARG', 'HIS', 'LYS'):  # Arginine, Histidine or Lysine have charged sidechains
            for a in pybel.ob.OBResidueAtomIter(res):
                # Collect nitrogen atoms flagged by GetAtomProperty(a, 8)
                # (presumably the Open Babel sidechain flag -- TODO confirm),
                # skipping atoms recorded as alternative conformations.
                if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) \
                        and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf:
                    a_contributing.append(pybel.Atom(a))
                    a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein'))
            if not len(a_contributing) == 0:
                # Record the positive group with its geometric center.
                a_set.append(data(atoms=a_contributing,
                                  atoms_orig_idx=a_contributing_orig_idx,
                                  type='positive',
                                  center=centroid([ac.coords for ac in a_contributing]),
                                  restype=res.GetName(),
                                  resnr=res.GetNum(),
                                  reschain=res.GetChain()))
        if res.GetName() in ('GLU', 'ASP'):  # Aspartic or Glutamic Acid
            for a in pybel.ob.OBResidueAtomIter(res):
                # Same filtering as above, but for sidechain oxygens, which
                # carry the negative charge in Asp/Glu.
                if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) \
                        and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf:
                    a_contributing.append(pybel.Atom(a))
                    a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein'))
            if not len(a_contributing) == 0:
                a_set.append(data(atoms=a_contributing,
                                  atoms_orig_idx=a_contributing_orig_idx,
                                  type='negative',
                                  center=centroid([ac.coords for ac in a_contributing]),
                                  restype=res.GetName(),
                                  resnr=res.GetNum(),
                                  reschain=res.GetChain()))
    return a_set
constant[Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.]
variable[data] assign[=] call[name[namedtuple], parameter[constant[pcharge], constant[atoms atoms_orig_idx type center restype resnr reschain]]]
variable[a_set] assign[=] list[[]]
for taget[name[res]] in starred[<ast.ListComp object at 0x7da2041db8b0>] begin[:]
if compare[name[config].INTRA is_not constant[None]] begin[:]
if compare[call[name[res].GetChain, parameter[]] not_equal[!=] name[config].INTRA] begin[:]
continue
variable[a_contributing] assign[=] list[[]]
variable[a_contributing_orig_idx] assign[=] list[[]]
if compare[call[name[res].GetName, parameter[]] in tuple[[<ast.Constant object at 0x7da18f00d630>, <ast.Constant object at 0x7da18f00feb0>, <ast.Constant object at 0x7da18f00e980>]]] begin[:]
for taget[name[a]] in starred[call[name[pybel].ob.OBResidueAtomIter, parameter[name[res]]]] begin[:]
if <ast.BoolOp object at 0x7da18f00cc70> begin[:]
call[name[a_contributing].append, parameter[call[name[pybel].Atom, parameter[name[a]]]]]
call[name[a_contributing_orig_idx].append, parameter[call[name[self].Mapper.mapid, parameter[call[name[a].GetIdx, parameter[]]]]]]
if <ast.UnaryOp object at 0x7da18f00d000> begin[:]
call[name[a_set].append, parameter[call[name[data], parameter[]]]]
if compare[call[name[res].GetName, parameter[]] in tuple[[<ast.Constant object at 0x7da1b26af850>, <ast.Constant object at 0x7da1b26ac460>]]] begin[:]
for taget[name[a]] in starred[call[name[pybel].ob.OBResidueAtomIter, parameter[name[res]]]] begin[:]
if <ast.BoolOp object at 0x7da1b26aca00> begin[:]
call[name[a_contributing].append, parameter[call[name[pybel].Atom, parameter[name[a]]]]]
call[name[a_contributing_orig_idx].append, parameter[call[name[self].Mapper.mapid, parameter[call[name[a].GetIdx, parameter[]]]]]]
if <ast.UnaryOp object at 0x7da1b26afdc0> begin[:]
call[name[a_set].append, parameter[call[name[data], parameter[]]]]
return[name[a_set]] | keyword[def] identifier[find_charged] ( identifier[self] , identifier[mol] ):
literal[string]
identifier[data] = identifier[namedtuple] ( literal[string] , literal[string] )
identifier[a_set] =[]
keyword[for] identifier[res] keyword[in] [ identifier[r] keyword[for] identifier[r] keyword[in] identifier[pybel] . identifier[ob] . identifier[OBResidueIter] ( identifier[mol] . identifier[OBMol] ) keyword[if] keyword[not] identifier[r] . identifier[GetChain] () keyword[in] identifier[config] . identifier[PEPTIDES] ]:
keyword[if] identifier[config] . identifier[INTRA] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[res] . identifier[GetChain] ()!= identifier[config] . identifier[INTRA] :
keyword[continue]
identifier[a_contributing] =[]
identifier[a_contributing_orig_idx] =[]
keyword[if] identifier[res] . identifier[GetName] () keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[for] identifier[a] keyword[in] identifier[pybel] . identifier[ob] . identifier[OBResidueAtomIter] ( identifier[res] ):
keyword[if] identifier[a] . identifier[GetType] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[res] . identifier[GetAtomProperty] ( identifier[a] , literal[int] ) keyword[and] keyword[not] identifier[self] . identifier[Mapper] . identifier[mapid] ( identifier[a] . identifier[GetIdx] (), identifier[mtype] = literal[string] ) keyword[in] identifier[self] . identifier[altconf] :
identifier[a_contributing] . identifier[append] ( identifier[pybel] . identifier[Atom] ( identifier[a] ))
identifier[a_contributing_orig_idx] . identifier[append] ( identifier[self] . identifier[Mapper] . identifier[mapid] ( identifier[a] . identifier[GetIdx] (), identifier[mtype] = literal[string] ))
keyword[if] keyword[not] identifier[len] ( identifier[a_contributing] )== literal[int] :
identifier[a_set] . identifier[append] ( identifier[data] ( identifier[atoms] = identifier[a_contributing] ,
identifier[atoms_orig_idx] = identifier[a_contributing_orig_idx] ,
identifier[type] = literal[string] ,
identifier[center] = identifier[centroid] ([ identifier[ac] . identifier[coords] keyword[for] identifier[ac] keyword[in] identifier[a_contributing] ]),
identifier[restype] = identifier[res] . identifier[GetName] (),
identifier[resnr] = identifier[res] . identifier[GetNum] (),
identifier[reschain] = identifier[res] . identifier[GetChain] ()))
keyword[if] identifier[res] . identifier[GetName] () keyword[in] ( literal[string] , literal[string] ):
keyword[for] identifier[a] keyword[in] identifier[pybel] . identifier[ob] . identifier[OBResidueAtomIter] ( identifier[res] ):
keyword[if] identifier[a] . identifier[GetType] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[res] . identifier[GetAtomProperty] ( identifier[a] , literal[int] ) keyword[and] keyword[not] identifier[self] . identifier[Mapper] . identifier[mapid] ( identifier[a] . identifier[GetIdx] (), identifier[mtype] = literal[string] ) keyword[in] identifier[self] . identifier[altconf] :
identifier[a_contributing] . identifier[append] ( identifier[pybel] . identifier[Atom] ( identifier[a] ))
identifier[a_contributing_orig_idx] . identifier[append] ( identifier[self] . identifier[Mapper] . identifier[mapid] ( identifier[a] . identifier[GetIdx] (), identifier[mtype] = literal[string] ))
keyword[if] keyword[not] identifier[len] ( identifier[a_contributing] )== literal[int] :
identifier[a_set] . identifier[append] ( identifier[data] ( identifier[atoms] = identifier[a_contributing] ,
identifier[atoms_orig_idx] = identifier[a_contributing_orig_idx] ,
identifier[type] = literal[string] ,
identifier[center] = identifier[centroid] ([ identifier[ac] . identifier[coords] keyword[for] identifier[ac] keyword[in] identifier[a_contributing] ]),
identifier[restype] = identifier[res] . identifier[GetName] (),
identifier[resnr] = identifier[res] . identifier[GetNum] (),
identifier[reschain] = identifier[res] . identifier[GetChain] ()))
keyword[return] identifier[a_set] | def find_charged(self, mol):
"""Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid."""
data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain')
a_set = []
# Iterate through all residue, exclude those in chains defined as peptides
for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]:
if config.INTRA is not None:
if res.GetChain() != config.INTRA:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
a_contributing = []
a_contributing_orig_idx = []
if res.GetName() in ('ARG', 'HIS', 'LYS'): # Arginine, Histidine or Lysine have charged sidechains
for a in pybel.ob.OBResidueAtomIter(res):
if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) and (not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf):
a_contributing.append(pybel.Atom(a))
a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
if not len(a_contributing) == 0:
a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='positive', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if res.GetName() in ('GLU', 'ASP'): # Aspartic or Glutamic Acid
for a in pybel.ob.OBResidueAtomIter(res):
if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) and (not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf):
a_contributing.append(pybel.Atom(a))
a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
if not len(a_contributing) == 0:
a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='negative', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['res']]
return a_set |
def daemon_mode(self, args, options):
    """
    Open a ControlWebSocket to the SushiBar server and listen for remote
    commands.

    Args:
        args (dict): chef command line arguments
        options (dict): additional compatibility mode options given on command line
    """
    control_socket = ControlWebSocket(self, args, options)
    control_socket.start()
    # When a local command socket was requested on the command line, run it
    # too and wait for it to finish before waiting on the websocket.
    if args.get('cmdsock'):
        local_socket = LocalControlSocket(self, args, options)
        local_socket.start()
        local_socket.join()
    control_socket.join()
constant[
Open a ControlWebSocket to SushiBar server and listend for remote commands.
Args:
args (dict): chef command line arguments
options (dict): additional compatibility mode options given on command line
]
variable[cws] assign[=] call[name[ControlWebSocket], parameter[name[self], name[args], name[options]]]
call[name[cws].start, parameter[]]
if <ast.BoolOp object at 0x7da1b0e6e350> begin[:]
variable[lcs] assign[=] call[name[LocalControlSocket], parameter[name[self], name[args], name[options]]]
call[name[lcs].start, parameter[]]
call[name[lcs].join, parameter[]]
call[name[cws].join, parameter[]] | keyword[def] identifier[daemon_mode] ( identifier[self] , identifier[args] , identifier[options] ):
literal[string]
identifier[cws] = identifier[ControlWebSocket] ( identifier[self] , identifier[args] , identifier[options] )
identifier[cws] . identifier[start] ()
keyword[if] literal[string] keyword[in] identifier[args] keyword[and] identifier[args] [ literal[string] ]:
identifier[lcs] = identifier[LocalControlSocket] ( identifier[self] , identifier[args] , identifier[options] )
identifier[lcs] . identifier[start] ()
identifier[lcs] . identifier[join] ()
identifier[cws] . identifier[join] () | def daemon_mode(self, args, options):
"""
Open a ControlWebSocket to SushiBar server and listend for remote commands.
Args:
args (dict): chef command line arguments
options (dict): additional compatibility mode options given on command line
"""
cws = ControlWebSocket(self, args, options)
cws.start()
if 'cmdsock' in args and args['cmdsock']:
lcs = LocalControlSocket(self, args, options)
lcs.start()
lcs.join() # depends on [control=['if'], data=[]]
cws.join() |
def authentication_ok(self):
    '''Checks the authentication and sets the username of the currently
    connected terminal.

    Prompts for a username and/or password only when the configured
    ``authCallback`` requires them.  The callback signals an authentication
    failure by raising.

    Returns True on success (or when no authentication is configured),
    False on failure.
    '''
    username = None
    password = None
    if self.authCallback:
        if self.authNeedUser:
            username = self.readline(prompt=self.PROMPT_USER, use_history=False)
        if self.authNeedPass:
            password = self.readline(echo=False, prompt=self.PROMPT_PASS, use_history=False)
            if self.DOECHO:
                self.write("\n")
        try:
            self.authCallback(username, password)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of being treated as a bad login.
            self.username = None
            return False
        else:
            # Successful authentication
            self.username = username
            return True
    else:
        # No authentication desired
        self.username = None
        return True
constant[Checks the authentication and sets the username of the currently connected terminal. Returns True or False]
variable[username] assign[=] constant[None]
variable[password] assign[=] constant[None]
if name[self].authCallback begin[:]
if name[self].authNeedUser begin[:]
variable[username] assign[=] call[name[self].readline, parameter[]]
if name[self].authNeedPass begin[:]
variable[password] assign[=] call[name[self].readline, parameter[]]
if name[self].DOECHO begin[:]
call[name[self].write, parameter[constant[
]]]
<ast.Try object at 0x7da20c990fd0> | keyword[def] identifier[authentication_ok] ( identifier[self] ):
literal[string]
identifier[username] = keyword[None]
identifier[password] = keyword[None]
keyword[if] identifier[self] . identifier[authCallback] :
keyword[if] identifier[self] . identifier[authNeedUser] :
identifier[username] = identifier[self] . identifier[readline] ( identifier[prompt] = identifier[self] . identifier[PROMPT_USER] , identifier[use_history] = keyword[False] )
keyword[if] identifier[self] . identifier[authNeedPass] :
identifier[password] = identifier[self] . identifier[readline] ( identifier[echo] = keyword[False] , identifier[prompt] = identifier[self] . identifier[PROMPT_PASS] , identifier[use_history] = keyword[False] )
keyword[if] identifier[self] . identifier[DOECHO] :
identifier[self] . identifier[write] ( literal[string] )
keyword[try] :
identifier[self] . identifier[authCallback] ( identifier[username] , identifier[password] )
keyword[except] :
identifier[self] . identifier[username] = keyword[None]
keyword[return] keyword[False]
keyword[else] :
identifier[self] . identifier[username] = identifier[username]
keyword[return] keyword[True]
keyword[else] :
identifier[self] . identifier[username] = keyword[None]
keyword[return] keyword[True] | def authentication_ok(self):
"""Checks the authentication and sets the username of the currently connected terminal. Returns True or False"""
username = None
password = None
if self.authCallback:
if self.authNeedUser:
username = self.readline(prompt=self.PROMPT_USER, use_history=False) # depends on [control=['if'], data=[]]
if self.authNeedPass:
password = self.readline(echo=False, prompt=self.PROMPT_PASS, use_history=False)
if self.DOECHO:
self.write('\n') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
self.authCallback(username, password) # depends on [control=['try'], data=[]]
except:
self.username = None
return False # depends on [control=['except'], data=[]]
else:
# Successful authentication
self.username = username
return True # depends on [control=['if'], data=[]]
else:
# No authentication desired
self.username = None
return True |
def name(self):
    """
    Read only name tag
    """
    # Prefer the inherited name; fall back to the raw data payload.
    inherited = super(Interface, self).name
    if inherited:
        return inherited
    return self.data.get('name')
constant[
Read only name tag
]
variable[name] assign[=] call[name[super], parameter[name[Interface], name[self]]].name
return[<ast.IfExp object at 0x7da1b1bc1b10>] | keyword[def] identifier[name] ( identifier[self] ):
literal[string]
identifier[name] = identifier[super] ( identifier[Interface] , identifier[self] ). identifier[name]
keyword[return] identifier[name] keyword[if] identifier[name] keyword[else] identifier[self] . identifier[data] . identifier[get] ( literal[string] ) | def name(self):
"""
Read only name tag
"""
name = super(Interface, self).name
return name if name else self.data.get('name') |
def _add_scope_decorations(self, block, start, end):
    """
    Show a scope decoration on the editor widget.

    Highlights the scope of ``block`` plus every enclosing parent scope
    (up to the global document scope) with progressively shifted shades
    of the base highlight color.

    :param block: text block whose fold scope is being decorated
    :param start: Start line
    :param end: End line
    """
    try:
        parent = FoldScope(block).parent()
    except ValueError:
        # block has no enclosing foldable scope
        parent = None
    if TextBlockHelper.is_fold_trigger(block):
        base_color = self._get_scope_highlight_color()
        factor_step = 5
        if base_color.lightness() < 128:
            # dark color scheme: use a larger step so successive parent
            # shades remain visually distinguishable
            factor_step = 10
            factor = 70
        else:
            factor = 100
        while parent:
            # highlight parent scope
            parent_start, parent_end = parent.get_range()
            self._add_scope_deco(
                start, end + 1, parent_start, parent_end,
                base_color, factor)
            # next parent scope: widen the already-decorated span and
            # shift the shade one more step
            start = parent_start
            end = parent_end
            parent = parent.parent()
            factor += factor_step
        # global scope
        parent_start = 0
        parent_end = self.editor.document().blockCount()
        self._add_scope_deco(
            start, end + 1, parent_start, parent_end, base_color,
            factor + factor_step)
    else:
        # not a fold trigger: remove any previous scope decorations
        self._clear_scope_decos()
constant[
Show a scope decoration on the editor widget
:param start: Start line
:param end: End line
]
<ast.Try object at 0x7da20c76ec20>
if call[name[TextBlockHelper].is_fold_trigger, parameter[name[block]]] begin[:]
variable[base_color] assign[=] call[name[self]._get_scope_highlight_color, parameter[]]
variable[factor_step] assign[=] constant[5]
if compare[call[name[base_color].lightness, parameter[]] less[<] constant[128]] begin[:]
variable[factor_step] assign[=] constant[10]
variable[factor] assign[=] constant[70]
while name[parent] begin[:]
<ast.Tuple object at 0x7da20c76cd30> assign[=] call[name[parent].get_range, parameter[]]
call[name[self]._add_scope_deco, parameter[name[start], binary_operation[name[end] + constant[1]], name[parent_start], name[parent_end], name[base_color], name[factor]]]
variable[start] assign[=] name[parent_start]
variable[end] assign[=] name[parent_end]
variable[parent] assign[=] call[name[parent].parent, parameter[]]
<ast.AugAssign object at 0x7da20c76c700>
variable[parent_start] assign[=] constant[0]
variable[parent_end] assign[=] call[call[name[self].editor.document, parameter[]].blockCount, parameter[]]
call[name[self]._add_scope_deco, parameter[name[start], binary_operation[name[end] + constant[1]], name[parent_start], name[parent_end], name[base_color], binary_operation[name[factor] + name[factor_step]]]] | keyword[def] identifier[_add_scope_decorations] ( identifier[self] , identifier[block] , identifier[start] , identifier[end] ):
literal[string]
keyword[try] :
identifier[parent] = identifier[FoldScope] ( identifier[block] ). identifier[parent] ()
keyword[except] identifier[ValueError] :
identifier[parent] = keyword[None]
keyword[if] identifier[TextBlockHelper] . identifier[is_fold_trigger] ( identifier[block] ):
identifier[base_color] = identifier[self] . identifier[_get_scope_highlight_color] ()
identifier[factor_step] = literal[int]
keyword[if] identifier[base_color] . identifier[lightness] ()< literal[int] :
identifier[factor_step] = literal[int]
identifier[factor] = literal[int]
keyword[else] :
identifier[factor] = literal[int]
keyword[while] identifier[parent] :
identifier[parent_start] , identifier[parent_end] = identifier[parent] . identifier[get_range] ()
identifier[self] . identifier[_add_scope_deco] (
identifier[start] , identifier[end] + literal[int] , identifier[parent_start] , identifier[parent_end] ,
identifier[base_color] , identifier[factor] )
identifier[start] = identifier[parent_start]
identifier[end] = identifier[parent_end]
identifier[parent] = identifier[parent] . identifier[parent] ()
identifier[factor] += identifier[factor_step]
identifier[parent_start] = literal[int]
identifier[parent_end] = identifier[self] . identifier[editor] . identifier[document] (). identifier[blockCount] ()
identifier[self] . identifier[_add_scope_deco] (
identifier[start] , identifier[end] + literal[int] , identifier[parent_start] , identifier[parent_end] , identifier[base_color] ,
identifier[factor] + identifier[factor_step] )
keyword[else] :
identifier[self] . identifier[_clear_scope_decos] () | def _add_scope_decorations(self, block, start, end):
"""
Show a scope decoration on the editor widget
:param start: Start line
:param end: End line
"""
try:
parent = FoldScope(block).parent() # depends on [control=['try'], data=[]]
except ValueError:
parent = None # depends on [control=['except'], data=[]]
if TextBlockHelper.is_fold_trigger(block):
base_color = self._get_scope_highlight_color()
factor_step = 5
if base_color.lightness() < 128:
factor_step = 10
factor = 70 # depends on [control=['if'], data=[]]
else:
factor = 100
while parent:
# highlight parent scope
(parent_start, parent_end) = parent.get_range()
self._add_scope_deco(start, end + 1, parent_start, parent_end, base_color, factor)
# next parent scope
start = parent_start
end = parent_end
parent = parent.parent()
factor += factor_step # depends on [control=['while'], data=[]]
# global scope
parent_start = 0
parent_end = self.editor.document().blockCount()
self._add_scope_deco(start, end + 1, parent_start, parent_end, base_color, factor + factor_step) # depends on [control=['if'], data=[]]
else:
self._clear_scope_decos() |
def find_blocks():
    """
    Auto-discover INSTALLED_APPS registered_blocks.py modules and fail
    silently when not present. This forces an import on them thereby
    registering their blocks.

    This is a near 1-to-1 copy of how django's admin application registers
    models.
    """
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Snapshot the registry *before* entering the try block, so the
        # rollback below can never use a stale snapshot from a previous
        # loop iteration.
        before_import_block_registry = copy.copy(block_registry._registry)
        # Attempt to import the app's registered_blocks module.
        try:
            import_module('{}.registered_blocks'.format(app))
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed.
            # Reset the block_registry to the state before the last
            # import as this import will have to reoccur on the next request
            # and this could raise NotRegistered and AlreadyRegistered
            # exceptions (see django ticket #8245).
            block_registry._registry = before_import_block_registry
            # Decide whether to bubble up this error. If the app just
            # doesn't have a registered_blocks module, we can ignore the
            # error attempting to import it, otherwise we want it to
            # bubble up.
            if module_has_submodule(mod, 'registered_blocks'):
                raise
constant[
Auto-discover INSTALLED_APPS registered_blocks.py modules and fail
silently when not present. This forces an import on them thereby
registering their blocks.
This is a near 1-to-1 copy of how django's admin application registers
models.
]
for taget[name[app]] in starred[name[settings].INSTALLED_APPS] begin[:]
variable[mod] assign[=] call[name[import_module], parameter[name[app]]]
<ast.Try object at 0x7da20c6a9ab0> | keyword[def] identifier[find_blocks] ():
literal[string]
keyword[for] identifier[app] keyword[in] identifier[settings] . identifier[INSTALLED_APPS] :
identifier[mod] = identifier[import_module] ( identifier[app] )
keyword[try] :
identifier[before_import_block_registry] = identifier[copy] . identifier[copy] (
identifier[block_registry] . identifier[_registry]
)
identifier[import_module] ( literal[string] . identifier[format] ( identifier[app] ))
keyword[except] :
identifier[block_registry] . identifier[_registry] = identifier[before_import_block_registry]
keyword[if] identifier[module_has_submodule] ( identifier[mod] , literal[string] ):
keyword[raise] | def find_blocks():
"""
Auto-discover INSTALLED_APPS registered_blocks.py modules and fail
silently when not present. This forces an import on them thereby
registering their blocks.
This is a near 1-to-1 copy of how django's admin application registers
models.
"""
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's sizedimage module.
try:
before_import_block_registry = copy.copy(block_registry._registry)
import_module('{}.registered_blocks'.format(app)) # depends on [control=['try'], data=[]]
except:
# Reset the block_registry to the state before the last
# import as this import will have to reoccur on the next request
# and this could raise NotRegistered and AlreadyRegistered
# exceptions (see django ticket #8245).
block_registry._registry = before_import_block_registry
# Decide whether to bubble up this error. If the app just
# doesn't have a stuff module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'registered_blocks'):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['app']] |
def _chunk_iter(self):
"""Iterator over the blob file."""
for chunk_offset in self._chunk_offsets():
yield self._download_chunk(chunk_offset=chunk_offset,
chunk_size=self._chunk_size) | def function[_chunk_iter, parameter[self]]:
constant[Iterator over the blob file.]
for taget[name[chunk_offset]] in starred[call[name[self]._chunk_offsets, parameter[]]] begin[:]
<ast.Yield object at 0x7da20c76d7e0> | keyword[def] identifier[_chunk_iter] ( identifier[self] ):
literal[string]
keyword[for] identifier[chunk_offset] keyword[in] identifier[self] . identifier[_chunk_offsets] ():
keyword[yield] identifier[self] . identifier[_download_chunk] ( identifier[chunk_offset] = identifier[chunk_offset] ,
identifier[chunk_size] = identifier[self] . identifier[_chunk_size] ) | def _chunk_iter(self):
"""Iterator over the blob file."""
for chunk_offset in self._chunk_offsets():
yield self._download_chunk(chunk_offset=chunk_offset, chunk_size=self._chunk_size) # depends on [control=['for'], data=['chunk_offset']] |
def create(self, resource, uri=None, timeout=-1, custom_headers=None, default_values=None):
    """
    Makes a POST request to create a resource when a request body is required.

    Args:
        resource:
            OneView resource dictionary.
        uri:
            Can be either the resource ID or the resource URI.
        timeout:
            Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
            in OneView; it just stops waiting for its completion.
        custom_headers:
            Allows set specific HTTP headers.
        default_values:
            Dictionary with default values grouped by OneView API version. This dictionary will be be merged with
            the resource dictionary only if the dictionary does not contain the keys.
            This argument is optional and the default value is an empty dictionary.
            Ex.:
                default_values = {
                    '200': {"type": "logical-switch-group"},
                    '300': {"type": "logical-switch-groupV300"}
                }

    Returns:
        Created resource.
    """
    if not resource:
        logger.exception(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)
        raise ValueError(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)
    if not uri:
        uri = self._uri
    # ``None`` sentinel instead of a mutable ``{}`` default: a shared dict
    # default would be reused (and potentially mutated) across calls.
    if default_values is None:
        default_values = {}
    logger.debug('Create (uri = %s, resource = %s)' %
                 (uri, str(resource)))
    resource = self.merge_default_values(resource, default_values)
    return self.__do_post(uri, resource, timeout, custom_headers)
constant[
Makes a POST request to create a resource when a request body is required.
Args:
resource:
OneView resource dictionary.
uri:
Can be either the resource ID or the resource URI.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers:
Allows set specific HTTP headers.
default_values:
Dictionary with default values grouped by OneView API version. This dictionary will be be merged with
the resource dictionary only if the dictionary does not contain the keys.
This argument is optional and the default value is an empty dictionary.
Ex.:
default_values = {
'200': {"type": "logical-switch-group"},
'300': {"type": "logical-switch-groupV300"}
}
Returns:
Created resource.
]
if <ast.UnaryOp object at 0x7da2041da3e0> begin[:]
call[name[logger].exception, parameter[name[RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED]]]
<ast.Raise object at 0x7da2041d87f0>
if <ast.UnaryOp object at 0x7da2041d8670> begin[:]
variable[uri] assign[=] name[self]._uri
call[name[logger].debug, parameter[binary_operation[constant[Create (uri = %s, resource = %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2041d94b0>, <ast.Call object at 0x7da2041d8e80>]]]]]
variable[resource] assign[=] call[name[self].merge_default_values, parameter[name[resource], name[default_values]]]
return[call[name[self].__do_post, parameter[name[uri], name[resource], name[timeout], name[custom_headers]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[resource] , identifier[uri] = keyword[None] , identifier[timeout] =- literal[int] , identifier[custom_headers] = keyword[None] , identifier[default_values] ={}):
literal[string]
keyword[if] keyword[not] identifier[resource] :
identifier[logger] . identifier[exception] ( identifier[RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED] )
keyword[raise] identifier[ValueError] ( identifier[RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED] )
keyword[if] keyword[not] identifier[uri] :
identifier[uri] = identifier[self] . identifier[_uri]
identifier[logger] . identifier[debug] ( literal[string] %
( identifier[uri] , identifier[str] ( identifier[resource] )))
identifier[resource] = identifier[self] . identifier[merge_default_values] ( identifier[resource] , identifier[default_values] )
keyword[return] identifier[self] . identifier[__do_post] ( identifier[uri] , identifier[resource] , identifier[timeout] , identifier[custom_headers] ) | def create(self, resource, uri=None, timeout=-1, custom_headers=None, default_values={}):
"""
Makes a POST request to create a resource when a request body is required.
Args:
resource:
OneView resource dictionary.
uri:
Can be either the resource ID or the resource URI.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers:
Allows set specific HTTP headers.
default_values:
Dictionary with default values grouped by OneView API version. This dictionary will be be merged with
the resource dictionary only if the dictionary does not contain the keys.
This argument is optional and the default value is an empty dictionary.
Ex.:
default_values = {
'200': {"type": "logical-switch-group"},
'300': {"type": "logical-switch-groupV300"}
}
Returns:
Created resource.
"""
if not resource:
logger.exception(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)
raise ValueError(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED) # depends on [control=['if'], data=[]]
if not uri:
uri = self._uri # depends on [control=['if'], data=[]]
logger.debug('Create (uri = %s, resource = %s)' % (uri, str(resource)))
resource = self.merge_default_values(resource, default_values)
return self.__do_post(uri, resource, timeout, custom_headers) |
def stack(nick, rest):
    """Manage short lists in pmxbot. See !stack help for more info.

    Parses ``rest`` as ``<subcommand> [topic][index] item`` and dispatches
    to the add / pop / show / shuffle / topics / help subcommands.  The
    topic defaults to the caller's ``nick`` when not given explicitly.
    """
    # Split off the subcommand; everything after the first space is payload.
    atoms = [atom.strip() for atom in rest.split(' ', 1) if atom.strip()]
    if len(atoms) == 0:
        subcommand = "show"
        rest = ""
    elif len(atoms) == 1:
        subcommand = atoms[0]
        rest = ""
    else:
        subcommand, rest = atoms
    # Detect an optional "topic[index]" prefix: a '[' ... ']' pair that
    # appears before the first space of the payload.
    start = rest.find("[")
    finish = rest.rfind("]")
    sp = rest.find(" ")
    if (
            start != -1 and finish != -1 and start < finish and
            (sp == -1 or start < sp)
    ):
        topic, index = [atom.strip() for atom in rest[:finish].split("[", 1)]
        if not topic:
            # "[n] item" with no topic: use the caller's own stack
            topic = nick
        new_item = rest[finish + 1:].strip()
    else:
        # No explicit topic/index; whole payload is the item.
        topic = nick
        index = None
        new_item = rest.strip()
    if subcommand == "topics" or subcommand == "list":
        # These subcommands operate on the list of topic names itself.
        items = Stack.store.get_topics()
        items.sort()
    else:
        items = Stack.store.get_items(topic)
    try:
        # Resolve the index expression against the current items.
        indices = parse_index(index, items)
    except ValueError:
        return helpdoc["index"]
    if debug:
        print("SUBCOMMAND", subcommand.ljust(8), "TOPIC", topic.ljust(8),
              "INDICES", str(indices).ljust(12), "ITEM", new_item)
    if subcommand == "add":
        if not new_item:
            return ('!stack add <topic[index]> item: '
                    'You must provide an item to add.')
        if not indices:
            # No index: push onto the top of the stack.
            items.insert(0, new_item)
        else:
            # Insert after each requested position; iterate in reverse so
            # earlier insertions do not shift later target indices.
            for i in reversed(sorted(set(indices))):
                if i >= len(items):
                    items.append(new_item)
                else:
                    items.insert(i + 1, new_item)
        Stack.store.save_items(topic, items)
    elif subcommand == "pop":
        if not indices:
            # Default: pop the top item.
            indices = [0]
        # Pop in reverse index order so positions stay valid as we remove.
        popped_items = [items.pop(i) for i in reversed(sorted(set(indices)))
                        if len(items) > i >= 0]
        Stack.store.save_items(topic, items)
        return output([("-", item) for item in reversed(popped_items)],
                      "(none popped)", pop=True)
    elif subcommand == "show":
        if new_item:
            # "show" takes no trailing item text.
            return helpdoc["show"]
        if not indices:
            indices = range(len(items))
        return output(
            [(i + 1, items[i]) for i in indices if len(items) > i >= 0]
        )
    elif subcommand == "shuffle":
        if not indices:
            # No indices given: randomize the whole stack in place.
            random.shuffle(items)
        else:
            # Indices given: reorder/select items in the stated order.
            items = [items[i] for i in indices if len(items) > i >= 0]
        Stack.store.save_items(topic, items)
        return output(enumerate(items, 1))
    elif subcommand == "topics" or subcommand == "list":
        if new_item:
            return helpdoc["topics"]
        if not indices:
            indices = range(len(items))
        return output(
            [(i + 1, items[i]) for i in indices if len(items) > i >= 0]
        )
    elif subcommand == "help":
        return helpdoc.get(new_item, helpdoc["help"])
    else:
        # Unknown subcommand: show the general usage text.
        return helpdoc["stack"]
constant[Manage short lists in pmxbot. See !stack help for more info]
variable[atoms] assign[=] <ast.ListComp object at 0x7da1b06cb880>
if compare[call[name[len], parameter[name[atoms]]] equal[==] constant[0]] begin[:]
variable[subcommand] assign[=] constant[show]
variable[rest] assign[=] constant[]
variable[start] assign[=] call[name[rest].find, parameter[constant[[]]]
variable[finish] assign[=] call[name[rest].rfind, parameter[constant[]]]]
variable[sp] assign[=] call[name[rest].find, parameter[constant[ ]]]
if <ast.BoolOp object at 0x7da1b03a0ca0> begin[:]
<ast.Tuple object at 0x7da1b03a1cc0> assign[=] <ast.ListComp object at 0x7da1b03a1570>
if <ast.UnaryOp object at 0x7da1b03a19c0> begin[:]
variable[topic] assign[=] name[nick]
variable[new_item] assign[=] call[call[name[rest]][<ast.Slice object at 0x7da1b03a0dc0>].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b03a0670> begin[:]
variable[items] assign[=] call[name[Stack].store.get_topics, parameter[]]
call[name[items].sort, parameter[]]
<ast.Try object at 0x7da1b03a1c30>
if name[debug] begin[:]
call[name[print], parameter[constant[SUBCOMMAND], call[name[subcommand].ljust, parameter[constant[8]]], constant[TOPIC], call[name[topic].ljust, parameter[constant[8]]], constant[INDICES], call[call[name[str], parameter[name[indices]]].ljust, parameter[constant[12]]], constant[ITEM], name[new_item]]]
if compare[name[subcommand] equal[==] constant[add]] begin[:]
if <ast.UnaryOp object at 0x7da1b03a07f0> begin[:]
return[constant[!stack add <topic[index]> item: You must provide an item to add.]]
if <ast.UnaryOp object at 0x7da1b03a1360> begin[:]
call[name[items].insert, parameter[constant[0], name[new_item]]]
call[name[Stack].store.save_items, parameter[name[topic], name[items]]] | keyword[def] identifier[stack] ( identifier[nick] , identifier[rest] ):
literal[string]
identifier[atoms] =[ identifier[atom] . identifier[strip] () keyword[for] identifier[atom] keyword[in] identifier[rest] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[atom] . identifier[strip] ()]
keyword[if] identifier[len] ( identifier[atoms] )== literal[int] :
identifier[subcommand] = literal[string]
identifier[rest] = literal[string]
keyword[elif] identifier[len] ( identifier[atoms] )== literal[int] :
identifier[subcommand] = identifier[atoms] [ literal[int] ]
identifier[rest] = literal[string]
keyword[else] :
identifier[subcommand] , identifier[rest] = identifier[atoms]
identifier[start] = identifier[rest] . identifier[find] ( literal[string] )
identifier[finish] = identifier[rest] . identifier[rfind] ( literal[string] )
identifier[sp] = identifier[rest] . identifier[find] ( literal[string] )
keyword[if] (
identifier[start] !=- literal[int] keyword[and] identifier[finish] !=- literal[int] keyword[and] identifier[start] < identifier[finish] keyword[and]
( identifier[sp] ==- literal[int] keyword[or] identifier[start] < identifier[sp] )
):
identifier[topic] , identifier[index] =[ identifier[atom] . identifier[strip] () keyword[for] identifier[atom] keyword[in] identifier[rest] [: identifier[finish] ]. identifier[split] ( literal[string] , literal[int] )]
keyword[if] keyword[not] identifier[topic] :
identifier[topic] = identifier[nick]
identifier[new_item] = identifier[rest] [ identifier[finish] + literal[int] :]. identifier[strip] ()
keyword[else] :
identifier[topic] = identifier[nick]
identifier[index] = keyword[None]
identifier[new_item] = identifier[rest] . identifier[strip] ()
keyword[if] identifier[subcommand] == literal[string] keyword[or] identifier[subcommand] == literal[string] :
identifier[items] = identifier[Stack] . identifier[store] . identifier[get_topics] ()
identifier[items] . identifier[sort] ()
keyword[else] :
identifier[items] = identifier[Stack] . identifier[store] . identifier[get_items] ( identifier[topic] )
keyword[try] :
identifier[indices] = identifier[parse_index] ( identifier[index] , identifier[items] )
keyword[except] identifier[ValueError] :
keyword[return] identifier[helpdoc] [ literal[string] ]
keyword[if] identifier[debug] :
identifier[print] ( literal[string] , identifier[subcommand] . identifier[ljust] ( literal[int] ), literal[string] , identifier[topic] . identifier[ljust] ( literal[int] ),
literal[string] , identifier[str] ( identifier[indices] ). identifier[ljust] ( literal[int] ), literal[string] , identifier[new_item] )
keyword[if] identifier[subcommand] == literal[string] :
keyword[if] keyword[not] identifier[new_item] :
keyword[return] ( literal[string]
literal[string] )
keyword[if] keyword[not] identifier[indices] :
identifier[items] . identifier[insert] ( literal[int] , identifier[new_item] )
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[sorted] ( identifier[set] ( identifier[indices] ))):
keyword[if] identifier[i] >= identifier[len] ( identifier[items] ):
identifier[items] . identifier[append] ( identifier[new_item] )
keyword[else] :
identifier[items] . identifier[insert] ( identifier[i] + literal[int] , identifier[new_item] )
identifier[Stack] . identifier[store] . identifier[save_items] ( identifier[topic] , identifier[items] )
keyword[elif] identifier[subcommand] == literal[string] :
keyword[if] keyword[not] identifier[indices] :
identifier[indices] =[ literal[int] ]
identifier[popped_items] =[ identifier[items] . identifier[pop] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[sorted] ( identifier[set] ( identifier[indices] )))
keyword[if] identifier[len] ( identifier[items] )> identifier[i] >= literal[int] ]
identifier[Stack] . identifier[store] . identifier[save_items] ( identifier[topic] , identifier[items] )
keyword[return] identifier[output] ([( literal[string] , identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[reversed] ( identifier[popped_items] )],
literal[string] , identifier[pop] = keyword[True] )
keyword[elif] identifier[subcommand] == literal[string] :
keyword[if] identifier[new_item] :
keyword[return] identifier[helpdoc] [ literal[string] ]
keyword[if] keyword[not] identifier[indices] :
identifier[indices] = identifier[range] ( identifier[len] ( identifier[items] ))
keyword[return] identifier[output] (
[( identifier[i] + literal[int] , identifier[items] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[indices] keyword[if] identifier[len] ( identifier[items] )> identifier[i] >= literal[int] ]
)
keyword[elif] identifier[subcommand] == literal[string] :
keyword[if] keyword[not] identifier[indices] :
identifier[random] . identifier[shuffle] ( identifier[items] )
keyword[else] :
identifier[items] =[ identifier[items] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[indices] keyword[if] identifier[len] ( identifier[items] )> identifier[i] >= literal[int] ]
identifier[Stack] . identifier[store] . identifier[save_items] ( identifier[topic] , identifier[items] )
keyword[return] identifier[output] ( identifier[enumerate] ( identifier[items] , literal[int] ))
keyword[elif] identifier[subcommand] == literal[string] keyword[or] identifier[subcommand] == literal[string] :
keyword[if] identifier[new_item] :
keyword[return] identifier[helpdoc] [ literal[string] ]
keyword[if] keyword[not] identifier[indices] :
identifier[indices] = identifier[range] ( identifier[len] ( identifier[items] ))
keyword[return] identifier[output] (
[( identifier[i] + literal[int] , identifier[items] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[indices] keyword[if] identifier[len] ( identifier[items] )> identifier[i] >= literal[int] ]
)
keyword[elif] identifier[subcommand] == literal[string] :
keyword[return] identifier[helpdoc] . identifier[get] ( identifier[new_item] , identifier[helpdoc] [ literal[string] ])
keyword[else] :
keyword[return] identifier[helpdoc] [ literal[string] ] | def stack(nick, rest):
"""Manage short lists in pmxbot. See !stack help for more info"""
atoms = [atom.strip() for atom in rest.split(' ', 1) if atom.strip()]
if len(atoms) == 0:
subcommand = 'show'
rest = '' # depends on [control=['if'], data=[]]
elif len(atoms) == 1:
subcommand = atoms[0]
rest = '' # depends on [control=['if'], data=[]]
else:
(subcommand, rest) = atoms
start = rest.find('[')
finish = rest.rfind(']')
sp = rest.find(' ')
if start != -1 and finish != -1 and (start < finish) and (sp == -1 or start < sp):
(topic, index) = [atom.strip() for atom in rest[:finish].split('[', 1)]
if not topic:
topic = nick # depends on [control=['if'], data=[]]
new_item = rest[finish + 1:].strip() # depends on [control=['if'], data=[]]
else:
topic = nick
index = None
new_item = rest.strip()
if subcommand == 'topics' or subcommand == 'list':
items = Stack.store.get_topics()
items.sort() # depends on [control=['if'], data=[]]
else:
items = Stack.store.get_items(topic)
try:
indices = parse_index(index, items) # depends on [control=['try'], data=[]]
except ValueError:
return helpdoc['index'] # depends on [control=['except'], data=[]]
if debug:
print('SUBCOMMAND', subcommand.ljust(8), 'TOPIC', topic.ljust(8), 'INDICES', str(indices).ljust(12), 'ITEM', new_item) # depends on [control=['if'], data=[]]
if subcommand == 'add':
if not new_item:
return '!stack add <topic[index]> item: You must provide an item to add.' # depends on [control=['if'], data=[]]
if not indices:
items.insert(0, new_item) # depends on [control=['if'], data=[]]
else:
for i in reversed(sorted(set(indices))):
if i >= len(items):
items.append(new_item) # depends on [control=['if'], data=[]]
else:
items.insert(i + 1, new_item) # depends on [control=['for'], data=['i']]
Stack.store.save_items(topic, items) # depends on [control=['if'], data=[]]
elif subcommand == 'pop':
if not indices:
indices = [0] # depends on [control=['if'], data=[]]
popped_items = [items.pop(i) for i in reversed(sorted(set(indices))) if len(items) > i >= 0]
Stack.store.save_items(topic, items)
return output([('-', item) for item in reversed(popped_items)], '(none popped)', pop=True) # depends on [control=['if'], data=[]]
elif subcommand == 'show':
if new_item:
return helpdoc['show'] # depends on [control=['if'], data=[]]
if not indices:
indices = range(len(items)) # depends on [control=['if'], data=[]]
return output([(i + 1, items[i]) for i in indices if len(items) > i >= 0]) # depends on [control=['if'], data=[]]
elif subcommand == 'shuffle':
if not indices:
random.shuffle(items) # depends on [control=['if'], data=[]]
else:
items = [items[i] for i in indices if len(items) > i >= 0]
Stack.store.save_items(topic, items)
return output(enumerate(items, 1)) # depends on [control=['if'], data=[]]
elif subcommand == 'topics' or subcommand == 'list':
if new_item:
return helpdoc['topics'] # depends on [control=['if'], data=[]]
if not indices:
indices = range(len(items)) # depends on [control=['if'], data=[]]
return output([(i + 1, items[i]) for i in indices if len(items) > i >= 0]) # depends on [control=['if'], data=[]]
elif subcommand == 'help':
return helpdoc.get(new_item, helpdoc['help']) # depends on [control=['if'], data=[]]
else:
return helpdoc['stack'] |
def currencies(self):
    """Return the codes of every currency whose amount is non-zero."""
    codes = []
    for money in self.monies():
        if money.amount:
            codes.append(money.currency.code)
    return codes
constant[Get all currencies with non-zero values]
return[<ast.ListComp object at 0x7da20c795c00>] | keyword[def] identifier[currencies] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[m] . identifier[currency] . identifier[code] keyword[for] identifier[m] keyword[in] identifier[self] . identifier[monies] () keyword[if] identifier[m] . identifier[amount] ] | def currencies(self):
"""Get all currencies with non-zero values"""
return [m.currency.code for m in self.monies() if m.amount] |
def count_processors():
    """How many cores does the current computer have ?

    Resolution order:
      1. ``SLURM_NTASKS`` — the task count requested from SLURM.
      2. ``SLURM_JOB_CPUS_PER_NODE`` — either a plain integer or a
         compressed form such as ``"16(x4)"`` meaning 16 CPUs on each of
         4 nodes, in which case the product is returned.
      3. Fallback: the local machine's CPU count.

    Returns
    -------
    int
        Number of processors available to this job/machine.
    """
    if 'SLURM_NTASKS' in os.environ: return int(os.environ['SLURM_NTASKS'])
    elif 'SLURM_JOB_CPUS_PER_NODE' in os.environ:
        text = os.environ['SLURM_JOB_CPUS_PER_NODE']
        if is_integer(text): return int(text)
        else:
            # BUGFIX: the character class must include '0' (\d, not [1-9]),
            # otherwise values like "10(x2)" or "2(x10)" fail to match and
            # the [0] lookup raises IndexError.
            # NOTE(review): on heterogeneous allocations SLURM may emit a
            # comma-separated list (e.g. "72(x2),36"); only the first
            # "n(xN)" group is used here, as before — confirm if totals
            # across groups are ever needed.
            n, N = re.findall(r"(\d+)\(x(\d+)\)", text)[0]
            return int(n) * int(N)
    else: return multiprocessing.cpu_count()
constant[How many cores does the current computer have ?]
if compare[constant[SLURM_NTASKS] in name[os].environ] begin[:]
return[call[name[int], parameter[call[name[os].environ][constant[SLURM_NTASKS]]]]] | keyword[def] identifier[count_processors] ():
literal[string]
keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] : keyword[return] identifier[int] ( identifier[os] . identifier[environ] [ literal[string] ])
keyword[elif] literal[string] keyword[in] identifier[os] . identifier[environ] :
identifier[text] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[is_integer] ( identifier[text] ): keyword[return] identifier[int] ( identifier[text] )
keyword[else] :
identifier[n] , identifier[N] = identifier[re] . identifier[findall] ( literal[string] , identifier[text] )[ literal[int] ]
keyword[return] identifier[int] ( identifier[n] )* identifier[int] ( identifier[N] )
keyword[else] : keyword[return] identifier[multiprocessing] . identifier[cpu_count] () | def count_processors():
"""How many cores does the current computer have ?"""
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) # depends on [control=['if'], data=[]]
elif 'SLURM_JOB_CPUS_PER_NODE' in os.environ:
text = os.environ['SLURM_JOB_CPUS_PER_NODE']
if is_integer(text):
return int(text) # depends on [control=['if'], data=[]]
else:
(n, N) = re.findall('([1-9]+)\\(x([1-9]+)\\)', text)[0]
return int(n) * int(N) # depends on [control=['if'], data=[]]
else:
return multiprocessing.cpu_count() |
def _finite_well_energy(P, n=1, atol=1e-6):
    '''
    Returns the nth bound-state energy for a finite-potential quantum well
    with the given well-strength parameter, `P`.

    Parameters
    ----------
    P : float
        Dimensionless well-strength parameter; it must admit at least
        ``n`` bound states (checked via `_finite_well_states`).
    n : int, optional
        1-based index of the bound state (default: ground state).
    atol : float, optional
        Absolute tolerance on the transcendental residual ``eta`` used
        as the convergence criterion.

    Returns
    -------
    E : float
        Bound-state energy in units of hbar**2 / (m * L**2).
    '''
    assert n > 0 and n <= _finite_well_states(P)
    pi_2 = pi / 2.
    # Initial guess for r (kept in (0, 1] so that arcsin(r) is defined).
    r = (1 / (P + pi_2)) * (n * pi_2)
    # eta is the residual of the transcendental bound-state equation;
    # the root eta(r) == 0 determines the nth level.
    eta = n * pi_2 - arcsin(r) - r * P
    w = 1 # relaxation parameter (for successive relaxation)
    while True:
        assert r <= 1
        # Converged once the residual is small enough.
        if abs(eta) < atol:
            break
        r2 = r ** 2.
        sqrt_1mr2 = sqrt(1. - r2)
        denom = (1. + P * sqrt_1mr2)
        # First-order correction, proportional to the residual eta.
        t1 = P * sqrt_1mr2 / denom * eta
        # t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2
        while True:
            # Under-relaxed update: w == 1 takes the full step r + t1.
            next_r = (1 - w) * r + w * (r + t1)
            # next_r = (1 - w) * r + w * (r + t1 + t2)
            next_eta = n * pi_2 - arcsin(next_r) - next_r * P
            # decrease w until eta is converging
            if abs(next_eta / eta) < 1:
                r = next_r
                eta = next_eta
                break
            else:
                w *= 0.5
    alpha = P * r
    E = 2 * (alpha) ** 2 # hbar**2 / (m * L**2)
    return E
constant[
Returns the nth bound-state energy for a finite-potential quantum well
with the given well-strength parameter, `P`.
]
assert[<ast.BoolOp object at 0x7da2041da500>]
variable[pi_2] assign[=] binary_operation[name[pi] / constant[2.0]]
variable[r] assign[=] binary_operation[binary_operation[constant[1] / binary_operation[name[P] + name[pi_2]]] * binary_operation[name[n] * name[pi_2]]]
variable[eta] assign[=] binary_operation[binary_operation[binary_operation[name[n] * name[pi_2]] - call[name[arcsin], parameter[name[r]]]] - binary_operation[name[r] * name[P]]]
variable[w] assign[=] constant[1]
while constant[True] begin[:]
assert[compare[name[r] less_or_equal[<=] constant[1]]]
if compare[call[name[abs], parameter[name[eta]]] less[<] name[atol]] begin[:]
break
variable[r2] assign[=] binary_operation[name[r] ** constant[2.0]]
variable[sqrt_1mr2] assign[=] call[name[sqrt], parameter[binary_operation[constant[1.0] - name[r2]]]]
variable[denom] assign[=] binary_operation[constant[1.0] + binary_operation[name[P] * name[sqrt_1mr2]]]
variable[t1] assign[=] binary_operation[binary_operation[binary_operation[name[P] * name[sqrt_1mr2]] / name[denom]] * name[eta]]
while constant[True] begin[:]
variable[next_r] assign[=] binary_operation[binary_operation[binary_operation[constant[1] - name[w]] * name[r]] + binary_operation[name[w] * binary_operation[name[r] + name[t1]]]]
variable[next_eta] assign[=] binary_operation[binary_operation[binary_operation[name[n] * name[pi_2]] - call[name[arcsin], parameter[name[next_r]]]] - binary_operation[name[next_r] * name[P]]]
if compare[call[name[abs], parameter[binary_operation[name[next_eta] / name[eta]]]] less[<] constant[1]] begin[:]
variable[r] assign[=] name[next_r]
variable[eta] assign[=] name[next_eta]
break
variable[alpha] assign[=] binary_operation[name[P] * name[r]]
variable[E] assign[=] binary_operation[constant[2] * binary_operation[name[alpha] ** constant[2]]]
return[name[E]] | keyword[def] identifier[_finite_well_energy] ( identifier[P] , identifier[n] = literal[int] , identifier[atol] = literal[int] ):
literal[string]
keyword[assert] identifier[n] > literal[int] keyword[and] identifier[n] <= identifier[_finite_well_states] ( identifier[P] )
identifier[pi_2] = identifier[pi] / literal[int]
identifier[r] =( literal[int] /( identifier[P] + identifier[pi_2] ))*( identifier[n] * identifier[pi_2] )
identifier[eta] = identifier[n] * identifier[pi_2] - identifier[arcsin] ( identifier[r] )- identifier[r] * identifier[P]
identifier[w] = literal[int]
keyword[while] keyword[True] :
keyword[assert] identifier[r] <= literal[int]
keyword[if] identifier[abs] ( identifier[eta] )< identifier[atol] :
keyword[break]
identifier[r2] = identifier[r] ** literal[int]
identifier[sqrt_1mr2] = identifier[sqrt] ( literal[int] - identifier[r2] )
identifier[denom] =( literal[int] + identifier[P] * identifier[sqrt_1mr2] )
identifier[t1] = identifier[P] * identifier[sqrt_1mr2] / identifier[denom] * identifier[eta]
keyword[while] keyword[True] :
identifier[next_r] =( literal[int] - identifier[w] )* identifier[r] + identifier[w] *( identifier[r] + identifier[t1] )
identifier[next_eta] = identifier[n] * identifier[pi_2] - identifier[arcsin] ( identifier[next_r] )- identifier[next_r] * identifier[P]
keyword[if] identifier[abs] ( identifier[next_eta] / identifier[eta] )< literal[int] :
identifier[r] = identifier[next_r]
identifier[eta] = identifier[next_eta]
keyword[break]
keyword[else] :
identifier[w] *= literal[int]
identifier[alpha] = identifier[P] * identifier[r]
identifier[E] = literal[int] *( identifier[alpha] )** literal[int]
keyword[return] identifier[E] | def _finite_well_energy(P, n=1, atol=1e-06):
"""
Returns the nth bound-state energy for a finite-potential quantum well
with the given well-strength parameter, `P`.
"""
assert n > 0 and n <= _finite_well_states(P)
pi_2 = pi / 2.0
r = 1 / (P + pi_2) * (n * pi_2)
eta = n * pi_2 - arcsin(r) - r * P
w = 1 # relaxation parameter (for succesive relaxation)
while True:
assert r <= 1
if abs(eta) < atol:
break # depends on [control=['if'], data=[]]
r2 = r ** 2.0
sqrt_1mr2 = sqrt(1.0 - r2)
denom = 1.0 + P * sqrt_1mr2
t1 = P * sqrt_1mr2 / denom * eta
# t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2
while True:
next_r = (1 - w) * r + w * (r + t1)
# next_r = (1 - w) * r + w * (r + t1 + t2)
next_eta = n * pi_2 - arcsin(next_r) - next_r * P
# decrease w until eta is converging
if abs(next_eta / eta) < 1:
r = next_r
eta = next_eta
break # depends on [control=['if'], data=[]]
else:
w *= 0.5 # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]]
alpha = P * r
E = 2 * alpha ** 2 # hbar**2 / (m * L**2)
return E |
def p_invoke(p):
    """
    invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
    | INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
    """
    # NOTE: the docstring above is the yacc grammar rule (read by PLY at
    # runtime) and must stay exactly as written.
    has_priority_block = len(p) > 5
    priority = int(p[8]) if has_priority_block else None
    p[0] = Trigger(p[2], p[4], priority)
constant[
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
]
variable[priority] assign[=] constant[None]
if compare[call[name[len], parameter[name[p]]] greater[>] constant[5]] begin[:]
variable[priority] assign[=] call[name[int], parameter[call[name[p]][constant[8]]]]
call[name[p]][constant[0]] assign[=] call[name[Trigger], parameter[call[name[p]][constant[2]], call[name[p]][constant[4]], name[priority]]] | keyword[def] identifier[p_invoke] ( identifier[p] ):
literal[string]
identifier[priority] = keyword[None]
keyword[if] identifier[len] ( identifier[p] )> literal[int] :
identifier[priority] = identifier[int] ( identifier[p] [ literal[int] ])
identifier[p] [ literal[int] ]= identifier[Trigger] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[priority] ) | def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8]) # depends on [control=['if'], data=[]]
p[0] = Trigger(p[2], p[4], priority) |
def html(dom: str):
    """
    Append a raw HTML snippet to the current report display.

    :param dom:
        The HTML string to add to the display.
    """
    report = _get_report()
    report.append_body(render.html(dom))
    report.stdout_interceptor.write_source('[ADDED] HTML\n')
constant[
A string containing a valid HTML snippet.
:param dom:
The HTML string to add to the display.
]
variable[r] assign[=] call[name[_get_report], parameter[]]
call[name[r].append_body, parameter[call[name[render].html, parameter[name[dom]]]]]
call[name[r].stdout_interceptor.write_source, parameter[constant[[ADDED] HTML
]]] | keyword[def] identifier[html] ( identifier[dom] : identifier[str] ):
literal[string]
identifier[r] = identifier[_get_report] ()
identifier[r] . identifier[append_body] ( identifier[render] . identifier[html] ( identifier[dom] ))
identifier[r] . identifier[stdout_interceptor] . identifier[write_source] ( literal[string] ) | def html(dom: str):
"""
A string containing a valid HTML snippet.
:param dom:
The HTML string to add to the display.
"""
r = _get_report()
r.append_body(render.html(dom))
r.stdout_interceptor.write_source('[ADDED] HTML\n') |
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
    """
    Given a tag sequence encoded with IOB1 labels, recode to BIOUL.
    In the IOB1 scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of span immediately following another
    span of the same type.
    In the BIO scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span.
    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
    encoding : `str`, optional, (default = ``IOB1``).
        The encoding type to convert from. Must be either "IOB1" or "BIO".
    Returns
    -------
    bioul_sequence: ``List[str]``
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    """
    # `encoding not in` is the idiomatic form of `not encoding in`.
    if encoding not in {"IOB1", "BIO"}:
        raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")
    # pylint: disable=len-as-condition
    def replace_label(full_label, new_label):
        # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
        parts = list(full_label.partition('-'))
        parts[0] = new_label
        return ''.join(parts)
    def pop_replace_append(in_stack, out_stack, new_label):
        # pop the last element from in_stack, replace the label, append
        # to out_stack
        tag = in_stack.pop()
        new_tag = replace_label(tag, new_label)
        out_stack.append(new_tag)
    def process_stack(stack, out_stack):
        # Recode a completed span (a stack of same-type labels) into BIOUL
        # and append it to out_stack.
        if len(stack) == 1:
            # just a U token
            pop_replace_append(stack, out_stack, 'U')
        else:
            # need to code as B, I..., L
            recoded_stack = []
            pop_replace_append(stack, recoded_stack, 'L')
            while len(stack) >= 2:
                pop_replace_append(stack, recoded_stack, 'I')
            pop_replace_append(stack, recoded_stack, 'B')
            recoded_stack.reverse()
            out_stack.extend(recoded_stack)
    # Process the tag_sequence one tag at a time, accumulating the current
    # span on a stack, then recode each completed span.
    bioul_sequence = []
    stack: List[str] = []
    for label in tag_sequence:
        if label == 'O' and len(stack) == 0:
            bioul_sequence.append(label)
        elif label == 'O' and len(stack) > 0:
            # an 'O' terminates the span on the stack; recode it first
            process_stack(stack, bioul_sequence)
            bioul_sequence.append(label)
        elif label[0] == 'I':
            # An I-tag continues the current span if the type matches;
            # in IOB1 it may also *start* a span (invalid in BIO).
            if len(stack) == 0:
                if encoding == "BIO":
                    raise InvalidTagSequence(tag_sequence)
                stack.append(label)
            else:
                # check if the previous type is the same as this one
                this_type = label.partition('-')[2]
                prev_type = stack[-1].partition('-')[2]
                if this_type == prev_type:
                    stack.append(label)
                else:
                    if encoding == "BIO":
                        raise InvalidTagSequence(tag_sequence)
                    # a new entity of a different type starts here
                    process_stack(stack, bioul_sequence)
                    stack.append(label)
        elif label[0] == 'B':
            # a B-tag always starts a new span, closing any open one
            if len(stack) > 0:
                process_stack(stack, bioul_sequence)
            stack.append(label)
        else:
            raise InvalidTagSequence(tag_sequence)
    # flush any span still open at the end of the sequence
    if len(stack) > 0:
        process_stack(stack, bioul_sequence)
    return bioul_sequence
constant[
Given a tag sequence encoded with IOB1 labels, recode to BIOUL.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of span immediately following another
span of the same type.
In the BIO scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span.
Parameters
----------
tag_sequence : ``List[str]``, required.
The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
encoding : `str`, optional, (default = ``IOB1``).
The encoding type to convert from. Must be either "IOB1" or "BIO".
Returns
-------
bioul_sequence: ``List[str]``
The tag sequence encoded in IOB1, e.g. ["B-PER", "L-PER", "O"].
]
if <ast.UnaryOp object at 0x7da20c6e4fd0> begin[:]
<ast.Raise object at 0x7da20c6e6350>
def function[replace_label, parameter[full_label, new_label]]:
variable[parts] assign[=] call[name[list], parameter[call[name[full_label].partition, parameter[constant[-]]]]]
call[name[parts]][constant[0]] assign[=] name[new_label]
return[call[constant[].join, parameter[name[parts]]]]
def function[pop_replace_append, parameter[in_stack, out_stack, new_label]]:
variable[tag] assign[=] call[name[in_stack].pop, parameter[]]
variable[new_tag] assign[=] call[name[replace_label], parameter[name[tag], name[new_label]]]
call[name[out_stack].append, parameter[name[new_tag]]]
def function[process_stack, parameter[stack, out_stack]]:
if compare[call[name[len], parameter[name[stack]]] equal[==] constant[1]] begin[:]
call[name[pop_replace_append], parameter[name[stack], name[out_stack], constant[U]]]
variable[bioul_sequence] assign[=] list[[]]
<ast.AnnAssign object at 0x7da20c993640>
for taget[name[label]] in starred[name[tag_sequence]] begin[:]
if <ast.BoolOp object at 0x7da20c993160> begin[:]
call[name[bioul_sequence].append, parameter[name[label]]]
if compare[call[name[len], parameter[name[stack]]] greater[>] constant[0]] begin[:]
call[name[process_stack], parameter[name[stack], name[bioul_sequence]]]
return[name[bioul_sequence]] | keyword[def] identifier[to_bioul] ( identifier[tag_sequence] : identifier[List] [ identifier[str] ], identifier[encoding] : identifier[str] = literal[string] )-> identifier[List] [ identifier[str] ]:
literal[string]
keyword[if] keyword[not] identifier[encoding] keyword[in] { literal[string] , literal[string] }:
keyword[raise] identifier[ConfigurationError] ( literal[string] )
keyword[def] identifier[replace_label] ( identifier[full_label] , identifier[new_label] ):
identifier[parts] = identifier[list] ( identifier[full_label] . identifier[partition] ( literal[string] ))
identifier[parts] [ literal[int] ]= identifier[new_label]
keyword[return] literal[string] . identifier[join] ( identifier[parts] )
keyword[def] identifier[pop_replace_append] ( identifier[in_stack] , identifier[out_stack] , identifier[new_label] ):
identifier[tag] = identifier[in_stack] . identifier[pop] ()
identifier[new_tag] = identifier[replace_label] ( identifier[tag] , identifier[new_label] )
identifier[out_stack] . identifier[append] ( identifier[new_tag] )
keyword[def] identifier[process_stack] ( identifier[stack] , identifier[out_stack] ):
keyword[if] identifier[len] ( identifier[stack] )== literal[int] :
identifier[pop_replace_append] ( identifier[stack] , identifier[out_stack] , literal[string] )
keyword[else] :
identifier[recoded_stack] =[]
identifier[pop_replace_append] ( identifier[stack] , identifier[recoded_stack] , literal[string] )
keyword[while] identifier[len] ( identifier[stack] )>= literal[int] :
identifier[pop_replace_append] ( identifier[stack] , identifier[recoded_stack] , literal[string] )
identifier[pop_replace_append] ( identifier[stack] , identifier[recoded_stack] , literal[string] )
identifier[recoded_stack] . identifier[reverse] ()
identifier[out_stack] . identifier[extend] ( identifier[recoded_stack] )
identifier[bioul_sequence] =[]
identifier[stack] : identifier[List] [ identifier[str] ]=[]
keyword[for] identifier[label] keyword[in] identifier[tag_sequence] :
keyword[if] identifier[label] == literal[string] keyword[and] identifier[len] ( identifier[stack] )== literal[int] :
identifier[bioul_sequence] . identifier[append] ( identifier[label] )
keyword[elif] identifier[label] == literal[string] keyword[and] identifier[len] ( identifier[stack] )> literal[int] :
identifier[process_stack] ( identifier[stack] , identifier[bioul_sequence] )
identifier[bioul_sequence] . identifier[append] ( identifier[label] )
keyword[elif] identifier[label] [ literal[int] ]== literal[string] :
keyword[if] identifier[len] ( identifier[stack] )== literal[int] :
keyword[if] identifier[encoding] == literal[string] :
keyword[raise] identifier[InvalidTagSequence] ( identifier[tag_sequence] )
identifier[stack] . identifier[append] ( identifier[label] )
keyword[else] :
identifier[this_type] = identifier[label] . identifier[partition] ( literal[string] )[ literal[int] ]
identifier[prev_type] = identifier[stack] [- literal[int] ]. identifier[partition] ( literal[string] )[ literal[int] ]
keyword[if] identifier[this_type] == identifier[prev_type] :
identifier[stack] . identifier[append] ( identifier[label] )
keyword[else] :
keyword[if] identifier[encoding] == literal[string] :
keyword[raise] identifier[InvalidTagSequence] ( identifier[tag_sequence] )
identifier[process_stack] ( identifier[stack] , identifier[bioul_sequence] )
identifier[stack] . identifier[append] ( identifier[label] )
keyword[elif] identifier[label] [ literal[int] ]== literal[string] :
keyword[if] identifier[len] ( identifier[stack] )> literal[int] :
identifier[process_stack] ( identifier[stack] , identifier[bioul_sequence] )
identifier[stack] . identifier[append] ( identifier[label] )
keyword[else] :
keyword[raise] identifier[InvalidTagSequence] ( identifier[tag_sequence] )
keyword[if] identifier[len] ( identifier[stack] )> literal[int] :
identifier[process_stack] ( identifier[stack] , identifier[bioul_sequence] )
keyword[return] identifier[bioul_sequence] | def to_bioul(tag_sequence: List[str], encoding: str='IOB1') -> List[str]:
"""
Given a tag sequence encoded with IOB1 labels, recode to BIOUL.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of span immediately following another
span of the same type.
In the BIO scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span.
Parameters
----------
tag_sequence : ``List[str]``, required.
The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
encoding : `str`, optional, (default = ``IOB1``).
The encoding type to convert from. Must be either "IOB1" or "BIO".
Returns
-------
bioul_sequence: ``List[str]``
The tag sequence encoded in IOB1, e.g. ["B-PER", "L-PER", "O"].
"""
if not encoding in {'IOB1', 'BIO'}:
raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.") # depends on [control=['if'], data=[]]
# pylint: disable=len-as-condition
def replace_label(full_label, new_label):
# example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
parts = list(full_label.partition('-'))
parts[0] = new_label
return ''.join(parts)
def pop_replace_append(in_stack, out_stack, new_label):
# pop the last element from in_stack, replace the label, append
# to out_stack
tag = in_stack.pop()
new_tag = replace_label(tag, new_label)
out_stack.append(new_tag)
def process_stack(stack, out_stack):
# process a stack of labels, add them to out_stack
if len(stack) == 1:
# just a U token
pop_replace_append(stack, out_stack, 'U') # depends on [control=['if'], data=[]]
else:
# need to code as BIL
recoded_stack = []
pop_replace_append(stack, recoded_stack, 'L')
while len(stack) >= 2:
pop_replace_append(stack, recoded_stack, 'I') # depends on [control=['while'], data=[]]
pop_replace_append(stack, recoded_stack, 'B')
recoded_stack.reverse()
out_stack.extend(recoded_stack)
# Process the tag_sequence one tag at a time, adding spans to a stack,
# then recode them.
bioul_sequence = []
stack: List[str] = []
for label in tag_sequence:
# need to make a dict like
# token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
# 'gold': 'I-PER'}
# where 'gold' is the raw value from the CoNLL data set
if label == 'O' and len(stack) == 0:
bioul_sequence.append(label) # depends on [control=['if'], data=[]]
elif label == 'O' and len(stack) > 0:
# need to process the entries on the stack plus this one
process_stack(stack, bioul_sequence)
bioul_sequence.append(label) # depends on [control=['if'], data=[]]
elif label[0] == 'I':
# check if the previous type is the same as this one
# if it is then append to stack
# otherwise this start a new entity if the type
# is different
if len(stack) == 0:
if encoding == 'BIO':
raise InvalidTagSequence(tag_sequence) # depends on [control=['if'], data=[]]
stack.append(label) # depends on [control=['if'], data=[]]
else:
# check if the previous type is the same as this one
this_type = label.partition('-')[2]
prev_type = stack[-1].partition('-')[2]
if this_type == prev_type:
stack.append(label) # depends on [control=['if'], data=[]]
else:
if encoding == 'BIO':
raise InvalidTagSequence(tag_sequence) # depends on [control=['if'], data=[]]
# a new entity
process_stack(stack, bioul_sequence)
stack.append(label) # depends on [control=['if'], data=[]]
elif label[0] == 'B':
if len(stack) > 0:
process_stack(stack, bioul_sequence) # depends on [control=['if'], data=[]]
stack.append(label) # depends on [control=['if'], data=[]]
else:
raise InvalidTagSequence(tag_sequence) # depends on [control=['for'], data=['label']]
# process the stack
if len(stack) > 0:
process_stack(stack, bioul_sequence) # depends on [control=['if'], data=[]]
return bioul_sequence |
def render(self):
    """
    Load the fragment content (from cache or by rendering the template)
    and return the resulting html.

    A ``TemplateSyntaxError`` always propagates. Any other exception is
    re-raised only when template debugging is activated; otherwise it is
    logged and an empty string is returned.

    {% nocache %} blocks are parsed and rendered only when we are not in
    "partial" mode and the raw-token marker is present in the content;
    otherwise the content is returned untouched (with any {% nocache %}
    block left unparsed).
    """
    try:
        self.load_content()
    except template.TemplateSyntaxError:
        # Syntax errors must always surface to the caller.
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    else:
        needs_nocache_pass = (not self.partial
                              and self.RAW_TOKEN_START in self.content)
        if needs_nocache_pass:
            return self.render_nocache()
        return self.content
constant[
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
]
<ast.Try object at 0x7da2044c2b00>
if <ast.BoolOp object at 0x7da2044c3400> begin[:]
return[name[self].content]
return[call[name[self].render_nocache, parameter[]]] | keyword[def] identifier[render] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[load_content] ()
keyword[except] identifier[template] . identifier[TemplateSyntaxError] :
keyword[raise]
keyword[except] identifier[Exception] :
keyword[if] identifier[is_template_debug_activated] ():
keyword[raise]
identifier[logger] . identifier[exception] ( literal[string] )
keyword[return] literal[string]
keyword[if] identifier[self] . identifier[partial] keyword[or] identifier[self] . identifier[RAW_TOKEN_START] keyword[not] keyword[in] identifier[self] . identifier[content] :
keyword[return] identifier[self] . identifier[content]
keyword[return] identifier[self] . identifier[render_nocache] () | def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content() # depends on [control=['try'], data=[]]
except template.TemplateSyntaxError:
raise # depends on [control=['except'], data=[]]
except Exception:
if is_template_debug_activated():
raise # depends on [control=['if'], data=[]]
logger.exception('Error when rendering template fragment')
return '' # depends on [control=['except'], data=[]]
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content # depends on [control=['if'], data=[]]
return self.render_nocache() |
def find_path(self, start, end, grid):
    """
    Search for a path from ``start`` to ``end`` on ``grid`` by repeatedly
    expanding neighbors of open nodes (see ``check_neighbors``).

    :param start: start node
    :param end: end node
    :param grid: grid that stores all possible steps/tiles as 2D-list
    :return: tuple ``(path, runs)``; ``path`` is an empty list when no
        path was found, ``runs`` is the number of iterations performed
    """
    self.start_time = time.time()  # used to enforce the execution-time limit
    self.runs = 0  # iteration counter
    start.opened = True
    open_nodes = [start]
    while open_nodes:
        self.runs += 1
        self.keep_running()
        found = self.check_neighbors(start, end, grid, open_nodes)
        if found:
            return found, self.runs
    # exhausted the open list without reaching the end node
    return [], self.runs
constant[
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
]
name[self].start_time assign[=] call[name[time].time, parameter[]]
name[self].runs assign[=] constant[0]
name[start].opened assign[=] constant[True]
variable[open_list] assign[=] list[[<ast.Name object at 0x7da1b07cba00>]]
while compare[call[name[len], parameter[name[open_list]]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0788eb0>
call[name[self].keep_running, parameter[]]
variable[path] assign[=] call[name[self].check_neighbors, parameter[name[start], name[end], name[grid], name[open_list]]]
if name[path] begin[:]
return[tuple[[<ast.Name object at 0x7da1b078ae60>, <ast.Attribute object at 0x7da1b07893f0>]]]
return[tuple[[<ast.List object at 0x7da2045645b0>, <ast.Attribute object at 0x7da204564040>]]] | keyword[def] identifier[find_path] ( identifier[self] , identifier[start] , identifier[end] , identifier[grid] ):
literal[string]
identifier[self] . identifier[start_time] = identifier[time] . identifier[time] ()
identifier[self] . identifier[runs] = literal[int]
identifier[start] . identifier[opened] = keyword[True]
identifier[open_list] =[ identifier[start] ]
keyword[while] identifier[len] ( identifier[open_list] )> literal[int] :
identifier[self] . identifier[runs] += literal[int]
identifier[self] . identifier[keep_running] ()
identifier[path] = identifier[self] . identifier[check_neighbors] ( identifier[start] , identifier[end] , identifier[grid] , identifier[open_list] )
keyword[if] identifier[path] :
keyword[return] identifier[path] , identifier[self] . identifier[runs]
keyword[return] [], identifier[self] . identifier[runs] | def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return (path, self.runs) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# failed to find path
return ([], self.runs) |
def create_readable_dir(entry, section, domain, output):
    """Create the human-readable directory to link the entry to if needed.

    The layout is ``output/human_readable/<section>/<domain>/...`` where
    non-viral domains nest genus/species/strain labels and the viral
    domain nests organism-name/strain instead.

    Returns the full path of the (now existing) directory.
    """
    if domain != 'viral':
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       get_genus_label(entry),
                                       get_species_label(entry),
                                       get_strain_label(entry))
    else:
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       entry['organism_name'].replace(' ', '_'),
                                       get_strain_label(entry, viral=True))
    # exist_ok=True tolerates an already-existing directory but still raises
    # if the path exists as a non-directory — the same semantics as the old
    # errno.EEXIST / os.path.isdir() dance, without the Python-2-era
    # boilerplate.
    os.makedirs(full_output_dir, exist_ok=True)
    return full_output_dir
constant[Create the a human-readable directory to link the entry to if needed.]
if compare[name[domain] not_equal[!=] constant[viral]] begin[:]
variable[full_output_dir] assign[=] call[name[os].path.join, parameter[name[output], constant[human_readable], name[section], name[domain], call[name[get_genus_label], parameter[name[entry]]], call[name[get_species_label], parameter[name[entry]]], call[name[get_strain_label], parameter[name[entry]]]]]
<ast.Try object at 0x7da1b26ae6b0>
return[name[full_output_dir]] | keyword[def] identifier[create_readable_dir] ( identifier[entry] , identifier[section] , identifier[domain] , identifier[output] ):
literal[string]
keyword[if] identifier[domain] != literal[string] :
identifier[full_output_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[output] , literal[string] , identifier[section] , identifier[domain] ,
identifier[get_genus_label] ( identifier[entry] ),
identifier[get_species_label] ( identifier[entry] ),
identifier[get_strain_label] ( identifier[entry] ))
keyword[else] :
identifier[full_output_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[output] , literal[string] , identifier[section] , identifier[domain] ,
identifier[entry] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ),
identifier[get_strain_label] ( identifier[entry] , identifier[viral] = keyword[True] ))
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[full_output_dir] )
keyword[except] identifier[OSError] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[errno] == identifier[errno] . identifier[EEXIST] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[full_output_dir] ):
keyword[pass]
keyword[else] :
keyword[raise]
keyword[return] identifier[full_output_dir] | def create_readable_dir(entry, section, domain, output):
"""Create the a human-readable directory to link the entry to if needed."""
if domain != 'viral':
full_output_dir = os.path.join(output, 'human_readable', section, domain, get_genus_label(entry), get_species_label(entry), get_strain_label(entry)) # depends on [control=['if'], data=['domain']]
else:
full_output_dir = os.path.join(output, 'human_readable', section, domain, entry['organism_name'].replace(' ', '_'), get_strain_label(entry, viral=True))
try:
os.makedirs(full_output_dir) # depends on [control=['try'], data=[]]
except OSError as err:
if err.errno == errno.EEXIST and os.path.isdir(full_output_dir):
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['err']]
return full_output_dir |
def LSL(value, amount, width):
    """
    The ARM LSL (logical left shift) operation.
    :param value: Value to shift
    :type value: int or long or BitVec
    :param int amount: How many bits to shift it.
    :param int width: Width of the value
    :return: Resultant value
    :rtype int or BitVec
    """
    # A zero-bit shift is the identity; skip the carry-producing helper.
    if amount != 0:
        shifted, _carry = LSL_C(value, amount, width)
        return shifted
    return value
constant[
The ARM LSL (logical left shift) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype int or BitVec
]
if compare[name[amount] equal[==] constant[0]] begin[:]
return[name[value]]
<ast.Tuple object at 0x7da2054a4640> assign[=] call[name[LSL_C], parameter[name[value], name[amount], name[width]]]
return[name[result]] | keyword[def] identifier[LSL] ( identifier[value] , identifier[amount] , identifier[width] ):
literal[string]
keyword[if] identifier[amount] == literal[int] :
keyword[return] identifier[value]
identifier[result] , identifier[_] = identifier[LSL_C] ( identifier[value] , identifier[amount] , identifier[width] )
keyword[return] identifier[result] | def LSL(value, amount, width):
"""
The ARM LSL (logical left shift) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype int or BitVec
"""
if amount == 0:
return value # depends on [control=['if'], data=[]]
(result, _) = LSL_C(value, amount, width)
return result |
def poll(self, timeout=0.0):
    """Modified version of poll() from asyncore module
    Runs one select()-based iteration over the dispatchers registered in
    ``self.sock_map``, plus the wakeup pipe ``self.pipe_r`` that lets
    another thread interrupt a blocking select().
    :param timeout: maximum time in seconds to block in select();
        0.0 polls without blocking.
    """
    if self.sock_map is None:
        Log.warning("Socket map is not registered to Gateway Looper")
    readable_lst = []
    writable_lst = []
    error_lst = []
    # Build the candidate fd lists from each dispatcher's readiness hooks.
    if self.sock_map is not None:
        for fd, obj in self.sock_map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                readable_lst.append(fd)
            # Listening sockets that are still accepting are excluded from
            # the writable set (mirrors asyncore.poll behavior).
            if is_w and not obj.accepting:
                writable_lst.append(fd)
            if is_r or is_w:
                error_lst.append(fd)
    # Add wakeup fd so a write to the pipe can break out of select().
    readable_lst.append(self.pipe_r)
    Log.debug("Will select() with timeout: " + str(timeout) + ", with map: " + str(self.sock_map))
    try:
        readable_lst, writable_lst, error_lst = \
            select.select(readable_lst, writable_lst, error_lst, timeout)
    except select.error as err:
        Log.debug("Trivial error: " + str(err))
        # EINTR only means select() was interrupted by a signal; any other
        # errno is a genuine failure and is re-raised.
        if err.args[0] != errno.EINTR:
            raise
        else:
            return
    Log.debug("Selected [r]: " + str(readable_lst) +
              " [w]: " + str(writable_lst) + " [e]: " + str(error_lst))
    if self.pipe_r in readable_lst:
        Log.debug("Read from pipe")
        # Drain the wakeup pipe so it does not stay permanently readable.
        os.read(self.pipe_r, 1024)
        readable_lst.remove(self.pipe_r)
    # Dispatch the ready fds back to their asyncore handlers.
    if self.sock_map is not None:
        for fd in readable_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            asyncore.read(obj)
        for fd in writable_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            asyncore.write(obj)
        for fd in error_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            # pylint: disable=W0212
            asyncore._exception(obj)
constant[Modified version of poll() from asyncore module]
if compare[name[self].sock_map is constant[None]] begin[:]
call[name[Log].warning, parameter[constant[Socket map is not registered to Gateway Looper]]]
variable[readable_lst] assign[=] list[[]]
variable[writable_lst] assign[=] list[[]]
variable[error_lst] assign[=] list[[]]
if compare[name[self].sock_map is_not constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2054a50c0>, <ast.Name object at 0x7da2054a45b0>]]] in starred[call[name[self].sock_map.items, parameter[]]] begin[:]
variable[is_r] assign[=] call[name[obj].readable, parameter[]]
variable[is_w] assign[=] call[name[obj].writable, parameter[]]
if name[is_r] begin[:]
call[name[readable_lst].append, parameter[name[fd]]]
if <ast.BoolOp object at 0x7da2054a4580> begin[:]
call[name[writable_lst].append, parameter[name[fd]]]
if <ast.BoolOp object at 0x7da2054a4fd0> begin[:]
call[name[error_lst].append, parameter[name[fd]]]
call[name[readable_lst].append, parameter[name[self].pipe_r]]
call[name[Log].debug, parameter[binary_operation[binary_operation[binary_operation[constant[Will select() with timeout: ] + call[name[str], parameter[name[timeout]]]] + constant[, with map: ]] + call[name[str], parameter[name[self].sock_map]]]]]
<ast.Try object at 0x7da2054a4040>
call[name[Log].debug, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Selected [r]: ] + call[name[str], parameter[name[readable_lst]]]] + constant[ [w]: ]] + call[name[str], parameter[name[writable_lst]]]] + constant[ [e]: ]] + call[name[str], parameter[name[error_lst]]]]]]
if compare[name[self].pipe_r in name[readable_lst]] begin[:]
call[name[Log].debug, parameter[constant[Read from pipe]]]
call[name[os].read, parameter[name[self].pipe_r, constant[1024]]]
call[name[readable_lst].remove, parameter[name[self].pipe_r]]
if compare[name[self].sock_map is_not constant[None]] begin[:]
for taget[name[fd]] in starred[name[readable_lst]] begin[:]
variable[obj] assign[=] call[name[self].sock_map.get, parameter[name[fd]]]
if compare[name[obj] is constant[None]] begin[:]
continue
call[name[asyncore].read, parameter[name[obj]]]
for taget[name[fd]] in starred[name[writable_lst]] begin[:]
variable[obj] assign[=] call[name[self].sock_map.get, parameter[name[fd]]]
if compare[name[obj] is constant[None]] begin[:]
continue
call[name[asyncore].write, parameter[name[obj]]]
for taget[name[fd]] in starred[name[error_lst]] begin[:]
variable[obj] assign[=] call[name[self].sock_map.get, parameter[name[fd]]]
if compare[name[obj] is constant[None]] begin[:]
continue
call[name[asyncore]._exception, parameter[name[obj]]] | keyword[def] identifier[poll] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[sock_map] keyword[is] keyword[None] :
identifier[Log] . identifier[warning] ( literal[string] )
identifier[readable_lst] =[]
identifier[writable_lst] =[]
identifier[error_lst] =[]
keyword[if] identifier[self] . identifier[sock_map] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[fd] , identifier[obj] keyword[in] identifier[self] . identifier[sock_map] . identifier[items] ():
identifier[is_r] = identifier[obj] . identifier[readable] ()
identifier[is_w] = identifier[obj] . identifier[writable] ()
keyword[if] identifier[is_r] :
identifier[readable_lst] . identifier[append] ( identifier[fd] )
keyword[if] identifier[is_w] keyword[and] keyword[not] identifier[obj] . identifier[accepting] :
identifier[writable_lst] . identifier[append] ( identifier[fd] )
keyword[if] identifier[is_r] keyword[or] identifier[is_w] :
identifier[error_lst] . identifier[append] ( identifier[fd] )
identifier[readable_lst] . identifier[append] ( identifier[self] . identifier[pipe_r] )
identifier[Log] . identifier[debug] ( literal[string] + identifier[str] ( identifier[timeout] )+ literal[string] + identifier[str] ( identifier[self] . identifier[sock_map] ))
keyword[try] :
identifier[readable_lst] , identifier[writable_lst] , identifier[error_lst] = identifier[select] . identifier[select] ( identifier[readable_lst] , identifier[writable_lst] , identifier[error_lst] , identifier[timeout] )
keyword[except] identifier[select] . identifier[error] keyword[as] identifier[err] :
identifier[Log] . identifier[debug] ( literal[string] + identifier[str] ( identifier[err] ))
keyword[if] identifier[err] . identifier[args] [ literal[int] ]!= identifier[errno] . identifier[EINTR] :
keyword[raise]
keyword[else] :
keyword[return]
identifier[Log] . identifier[debug] ( literal[string] + identifier[str] ( identifier[readable_lst] )+
literal[string] + identifier[str] ( identifier[writable_lst] )+ literal[string] + identifier[str] ( identifier[error_lst] ))
keyword[if] identifier[self] . identifier[pipe_r] keyword[in] identifier[readable_lst] :
identifier[Log] . identifier[debug] ( literal[string] )
identifier[os] . identifier[read] ( identifier[self] . identifier[pipe_r] , literal[int] )
identifier[readable_lst] . identifier[remove] ( identifier[self] . identifier[pipe_r] )
keyword[if] identifier[self] . identifier[sock_map] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[fd] keyword[in] identifier[readable_lst] :
identifier[obj] = identifier[self] . identifier[sock_map] . identifier[get] ( identifier[fd] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[continue]
identifier[asyncore] . identifier[read] ( identifier[obj] )
keyword[for] identifier[fd] keyword[in] identifier[writable_lst] :
identifier[obj] = identifier[self] . identifier[sock_map] . identifier[get] ( identifier[fd] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[continue]
identifier[asyncore] . identifier[write] ( identifier[obj] )
keyword[for] identifier[fd] keyword[in] identifier[error_lst] :
identifier[obj] = identifier[self] . identifier[sock_map] . identifier[get] ( identifier[fd] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[continue]
identifier[asyncore] . identifier[_exception] ( identifier[obj] ) | def poll(self, timeout=0.0):
"""Modified version of poll() from asyncore module"""
if self.sock_map is None:
Log.warning('Socket map is not registered to Gateway Looper') # depends on [control=['if'], data=[]]
readable_lst = []
writable_lst = []
error_lst = []
if self.sock_map is not None:
for (fd, obj) in self.sock_map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
readable_lst.append(fd) # depends on [control=['if'], data=[]]
if is_w and (not obj.accepting):
writable_lst.append(fd) # depends on [control=['if'], data=[]]
if is_r or is_w:
error_lst.append(fd) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Add wakeup fd
readable_lst.append(self.pipe_r)
Log.debug('Will select() with timeout: ' + str(timeout) + ', with map: ' + str(self.sock_map))
try:
(readable_lst, writable_lst, error_lst) = select.select(readable_lst, writable_lst, error_lst, timeout) # depends on [control=['try'], data=[]]
except select.error as err:
Log.debug('Trivial error: ' + str(err))
if err.args[0] != errno.EINTR:
raise # depends on [control=['if'], data=[]]
else:
return # depends on [control=['except'], data=['err']]
Log.debug('Selected [r]: ' + str(readable_lst) + ' [w]: ' + str(writable_lst) + ' [e]: ' + str(error_lst))
if self.pipe_r in readable_lst:
Log.debug('Read from pipe')
os.read(self.pipe_r, 1024)
readable_lst.remove(self.pipe_r) # depends on [control=['if'], data=['readable_lst']]
if self.sock_map is not None:
for fd in readable_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue # depends on [control=['if'], data=[]]
asyncore.read(obj) # depends on [control=['for'], data=['fd']]
for fd in writable_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue # depends on [control=['if'], data=[]]
asyncore.write(obj) # depends on [control=['for'], data=['fd']]
for fd in error_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue # depends on [control=['if'], data=[]]
# pylint: disable=W0212
asyncore._exception(obj) # depends on [control=['for'], data=['fd']] # depends on [control=['if'], data=[]] |
def representCleanOpenAPIPathItem(dumper, data):
    """
    Unpack operation key/values before representing an OpenAPIPathItem
    """
    mapping = _orderedCleanDict(data)
    if '_operations' in mapping:
        # Promote each operation to a top-level key, in sorted key order,
        # then drop the internal container.
        for key, operation in sorted(data._operations.items()):
            mapping[key] = operation
        del mapping['_operations']
    represent = dumper.yaml_representers[type(mapping)]
    return represent(dumper, mapping)
constant[
Unpack operation key/values before representing an OpenAPIPathItem
]
variable[dct] assign[=] call[name[_orderedCleanDict], parameter[name[data]]]
if compare[constant[_operations] in name[dct]] begin[:]
variable[items] assign[=] call[name[sorted], parameter[call[name[data]._operations.items, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da2054a4eb0>, <ast.Name object at 0x7da2054a7970>]]] in starred[name[items]] begin[:]
call[name[dct]][name[k]] assign[=] name[op]
<ast.Delete object at 0x7da2054a7670>
return[call[call[name[dumper].yaml_representers][call[name[type], parameter[name[dct]]]], parameter[name[dumper], name[dct]]]] | keyword[def] identifier[representCleanOpenAPIPathItem] ( identifier[dumper] , identifier[data] ):
literal[string]
identifier[dct] = identifier[_orderedCleanDict] ( identifier[data] )
keyword[if] literal[string] keyword[in] identifier[dct] :
identifier[items] = identifier[sorted] ( identifier[data] . identifier[_operations] . identifier[items] ())
keyword[for] identifier[k] , identifier[op] keyword[in] identifier[items] :
identifier[dct] [ identifier[k] ]= identifier[op]
keyword[del] identifier[dct] [ literal[string] ]
keyword[return] identifier[dumper] . identifier[yaml_representers] [ identifier[type] ( identifier[dct] )]( identifier[dumper] , identifier[dct] ) | def representCleanOpenAPIPathItem(dumper, data):
"""
Unpack operation key/values before representing an OpenAPIPathItem
"""
dct = _orderedCleanDict(data)
if '_operations' in dct:
items = sorted(data._operations.items())
for (k, op) in items:
dct[k] = op # depends on [control=['for'], data=[]]
del dct['_operations'] # depends on [control=['if'], data=['dct']]
return dumper.yaml_representers[type(dct)](dumper, dct) |
def wallace_reducer(wire_array_2, result_bitwidth, final_adder=kogge_stone):
    """
    The reduction and final adding part of a dada tree. Useful for adding many numbers together
    The use of single bitwidth wires is to allow for additional flexibility
    :param [[Wirevector]] wire_array_2: An array of arrays of single bitwidth
        wirevectors; index in the outer list is the bit weight of that column.
    :param int result_bitwidth: The bitwidth you want for the resulting wire.
        Used to eliminate unnessary wires.
    :param final_adder: The adder used for the final addition
    :return: wirevector of length result_wirevector
    """
    # verification that the wires are actually wirevectors of length 1
    for wire_set in wire_array_2:
        for a_wire in wire_set:
            if not isinstance(a_wire, pyrtl.WireVector) or len(a_wire) != 1:
                raise pyrtl.PyrtlError(
                    "The item {} is not a valid element for the wire_array_2. "
                    "It must be a WireVector of bitwidth 1".format(a_wire))
    # Wallace-tree reduction: repeat rounds of full/half adders until every
    # bit column holds at most two wires.
    while not all(len(i) <= 2 for i in wire_array_2):
        # One extra slot so a carry out of the top column has somewhere to go.
        deferred = [[] for weight in range(result_bitwidth + 1)]
        for i, w_array in enumerate(wire_array_2):  # Start with low weights and start reducing
            while len(w_array) >= 3:
                # Full adder: three wires in; sum stays at this weight,
                # carry moves to the next weight up.
                cout, sum = _one_bit_add_no_concat(*(w_array.pop(0) for j in range(3)))
                deferred[i].append(sum)
                deferred[i + 1].append(cout)
            if len(w_array) == 2:
                # Half adder consumes the remaining pair.
                cout, sum = half_adder(*w_array)
                deferred[i].append(sum)
                deferred[i + 1].append(cout)
            else:
                # Zero or one wire left: carry it forward unchanged.
                deferred[i].extend(w_array)
        # Discard any carry column beyond the requested result width.
        wire_array_2 = deferred[:result_bitwidth]
    # At this stage in the multiplication we have only 2 wire vectors left.
    # now we need to add them up
    result = _sparse_adder(wire_array_2, final_adder)
    if len(result) > result_bitwidth:
        return result[:result_bitwidth]
    else:
        return result
constant[
The reduction and final adding part of a dada tree. Useful for adding many numbers together
The use of single bitwidth wires is to allow for additional flexibility
:param [[Wirevector]] wire_array_2: An array of arrays of single bitwidth
wirevectors
:param int result_bitwidth: The bitwidth you want for the resulting wire.
Used to eliminate unnessary wires.
:param final_adder: The adder used for the final addition
:return: wirevector of length result_wirevector
]
for taget[name[wire_set]] in starred[name[wire_array_2]] begin[:]
for taget[name[a_wire]] in starred[name[wire_set]] begin[:]
if <ast.BoolOp object at 0x7da20c794730> begin[:]
<ast.Raise object at 0x7da20c7969b0>
while <ast.UnaryOp object at 0x7da20c795780> begin[:]
variable[deferred] assign[=] <ast.ListComp object at 0x7da20c795f00>
for taget[tuple[[<ast.Name object at 0x7da20c7942b0>, <ast.Name object at 0x7da20c796140>]]] in starred[call[name[enumerate], parameter[name[wire_array_2]]]] begin[:]
while compare[call[name[len], parameter[name[w_array]]] greater_or_equal[>=] constant[3]] begin[:]
<ast.Tuple object at 0x7da20c794520> assign[=] call[name[_one_bit_add_no_concat], parameter[<ast.Starred object at 0x7da20c7962c0>]]
call[call[name[deferred]][name[i]].append, parameter[name[sum]]]
call[call[name[deferred]][binary_operation[name[i] + constant[1]]].append, parameter[name[cout]]]
if compare[call[name[len], parameter[name[w_array]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da20c6e7220> assign[=] call[name[half_adder], parameter[<ast.Starred object at 0x7da20c6e5a80>]]
call[call[name[deferred]][name[i]].append, parameter[name[sum]]]
call[call[name[deferred]][binary_operation[name[i] + constant[1]]].append, parameter[name[cout]]]
variable[wire_array_2] assign[=] call[name[deferred]][<ast.Slice object at 0x7da18eb54940>]
variable[result] assign[=] call[name[_sparse_adder], parameter[name[wire_array_2], name[final_adder]]]
if compare[call[name[len], parameter[name[result]]] greater[>] name[result_bitwidth]] begin[:]
return[call[name[result]][<ast.Slice object at 0x7da18eb55870>]] | keyword[def] identifier[wallace_reducer] ( identifier[wire_array_2] , identifier[result_bitwidth] , identifier[final_adder] = identifier[kogge_stone] ):
literal[string]
keyword[for] identifier[wire_set] keyword[in] identifier[wire_array_2] :
keyword[for] identifier[a_wire] keyword[in] identifier[wire_set] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[a_wire] , identifier[pyrtl] . identifier[WireVector] ) keyword[or] identifier[len] ( identifier[a_wire] )!= literal[int] :
keyword[raise] identifier[pyrtl] . identifier[PyrtlError] (
literal[string]
literal[string] . identifier[format] ( identifier[a_wire] ))
keyword[while] keyword[not] identifier[all] ( identifier[len] ( identifier[i] )<= literal[int] keyword[for] identifier[i] keyword[in] identifier[wire_array_2] ):
identifier[deferred] =[[] keyword[for] identifier[weight] keyword[in] identifier[range] ( identifier[result_bitwidth] + literal[int] )]
keyword[for] identifier[i] , identifier[w_array] keyword[in] identifier[enumerate] ( identifier[wire_array_2] ):
keyword[while] identifier[len] ( identifier[w_array] )>= literal[int] :
identifier[cout] , identifier[sum] = identifier[_one_bit_add_no_concat] (*( identifier[w_array] . identifier[pop] ( literal[int] ) keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] )))
identifier[deferred] [ identifier[i] ]. identifier[append] ( identifier[sum] )
identifier[deferred] [ identifier[i] + literal[int] ]. identifier[append] ( identifier[cout] )
keyword[if] identifier[len] ( identifier[w_array] )== literal[int] :
identifier[cout] , identifier[sum] = identifier[half_adder] (* identifier[w_array] )
identifier[deferred] [ identifier[i] ]. identifier[append] ( identifier[sum] )
identifier[deferred] [ identifier[i] + literal[int] ]. identifier[append] ( identifier[cout] )
keyword[else] :
identifier[deferred] [ identifier[i] ]. identifier[extend] ( identifier[w_array] )
identifier[wire_array_2] = identifier[deferred] [: identifier[result_bitwidth] ]
identifier[result] = identifier[_sparse_adder] ( identifier[wire_array_2] , identifier[final_adder] )
keyword[if] identifier[len] ( identifier[result] )> identifier[result_bitwidth] :
keyword[return] identifier[result] [: identifier[result_bitwidth] ]
keyword[else] :
keyword[return] identifier[result] | def wallace_reducer(wire_array_2, result_bitwidth, final_adder=kogge_stone):
"""
The reduction and final adding part of a dada tree. Useful for adding many numbers together
The use of single bitwidth wires is to allow for additional flexibility
:param [[Wirevector]] wire_array_2: An array of arrays of single bitwidth
wirevectors
:param int result_bitwidth: The bitwidth you want for the resulting wire.
Used to eliminate unnessary wires.
:param final_adder: The adder used for the final addition
:return: wirevector of length result_wirevector
"""
# verification that the wires are actually wirevectors of length 1
for wire_set in wire_array_2:
for a_wire in wire_set:
if not isinstance(a_wire, pyrtl.WireVector) or len(a_wire) != 1:
raise pyrtl.PyrtlError('The item {} is not a valid element for the wire_array_2. It must be a WireVector of bitwidth 1'.format(a_wire)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a_wire']] # depends on [control=['for'], data=['wire_set']]
while not all((len(i) <= 2 for i in wire_array_2)):
deferred = [[] for weight in range(result_bitwidth + 1)]
for (i, w_array) in enumerate(wire_array_2): # Start with low weights and start reducing
while len(w_array) >= 3:
(cout, sum) = _one_bit_add_no_concat(*(w_array.pop(0) for j in range(3)))
deferred[i].append(sum)
deferred[i + 1].append(cout) # depends on [control=['while'], data=[]]
if len(w_array) == 2:
(cout, sum) = half_adder(*w_array)
deferred[i].append(sum)
deferred[i + 1].append(cout) # depends on [control=['if'], data=[]]
else:
deferred[i].extend(w_array) # depends on [control=['for'], data=[]]
wire_array_2 = deferred[:result_bitwidth] # depends on [control=['while'], data=[]]
# At this stage in the multiplication we have only 2 wire vectors left.
# now we need to add them up
result = _sparse_adder(wire_array_2, final_adder)
if len(result) > result_bitwidth:
return result[:result_bitwidth] # depends on [control=['if'], data=['result_bitwidth']]
else:
return result |
def make_data(n, m):
    """creates example data set"""
    coords_x, coords_y, weights = {}, {}, {}
    # Draw x, y, w per index in this exact order so the sequence of
    # random numbers consumed is well defined.
    for idx in range(1, n + 1):
        coords_x[idx] = random.randint(0, 100)
        coords_y[idx] = random.randint(0, 100)
        weights[idx] = random.randint(1, 5)
    return range(1, n + 1), range(1, m + 1), coords_x, coords_y, weights
constant[creates example data set]
variable[I] assign[=] call[name[range], parameter[constant[1], binary_operation[name[n] + constant[1]]]]
variable[J] assign[=] call[name[range], parameter[constant[1], binary_operation[name[m] + constant[1]]]]
<ast.Tuple object at 0x7da1b1701690> assign[=] tuple[[<ast.Dict object at 0x7da1b1701210>, <ast.Dict object at 0x7da1b1701a50>, <ast.Dict object at 0x7da1b1703af0>]]
for taget[name[i]] in starred[name[I]] begin[:]
call[name[x]][name[i]] assign[=] call[name[random].randint, parameter[constant[0], constant[100]]]
call[name[y]][name[i]] assign[=] call[name[random].randint, parameter[constant[0], constant[100]]]
call[name[w]][name[i]] assign[=] call[name[random].randint, parameter[constant[1], constant[5]]]
return[tuple[[<ast.Name object at 0x7da1b18e5f30>, <ast.Name object at 0x7da1b18e6680>, <ast.Name object at 0x7da1b18e6dd0>, <ast.Name object at 0x7da1b18e4ee0>, <ast.Name object at 0x7da1b18e4580>]]] | keyword[def] identifier[make_data] ( identifier[n] , identifier[m] ):
literal[string]
identifier[I] = identifier[range] ( literal[int] , identifier[n] + literal[int] )
identifier[J] = identifier[range] ( literal[int] , identifier[m] + literal[int] )
identifier[x] , identifier[y] , identifier[w] ={},{},{}
keyword[for] identifier[i] keyword[in] identifier[I] :
identifier[x] [ identifier[i] ]= identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[y] [ identifier[i] ]= identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[w] [ identifier[i] ]= identifier[random] . identifier[randint] ( literal[int] , literal[int] )
keyword[return] identifier[I] , identifier[J] , identifier[x] , identifier[y] , identifier[w] | def make_data(n, m):
"""creates example data set"""
I = range(1, n + 1)
J = range(1, m + 1)
(x, y, w) = ({}, {}, {})
for i in I:
x[i] = random.randint(0, 100)
y[i] = random.randint(0, 100)
w[i] = random.randint(1, 5) # depends on [control=['for'], data=['i']]
return (I, J, x, y, w) |
def encode(self, word, max_length=4):
    """Return the Statistics Canada code for a word.
    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The maximum length (default 4) of the code to return
    Returns
    -------
    str
        The Statistics Canada name code value
    Examples
    --------
    >>> pe = StatisticsCanada()
    >>> pe.encode('Christopher')
    'CHRS'
    >>> pe.encode('Niall')
    'NL'
    >>> pe.encode('Smith')
    'SMTH'
    >>> pe.encode('Schmidt')
    'SCHM'
    """
    # Uppercase, decompose accents, expand eszett, keep only A-Z.
    word = unicode_normalize('NFKD', text_type(word.upper()))
    word = word.replace('ß', 'SS')
    word = ''.join(ch for ch in word if ch in self._uc_set)
    if not word:
        return ''
    # Strip vowels (and Y) from everything after the first letter.
    tail = word[1:]
    for vowel in self._uc_vy_set:
        tail = tail.replace(vowel, '')
    code = self._delete_consecutive_repeats(word[0] + tail)
    return code.replace(' ', '')[:max_length]
constant[Return the Statistics Canada code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 4) of the code to return
Returns
-------
str
The Statistics Canada name code value
Examples
--------
>>> pe = StatisticsCanada()
>>> pe.encode('Christopher')
'CHRS'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SMTH'
>>> pe.encode('Schmidt')
'SCHM'
]
variable[word] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[word].upper, parameter[]]]]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[ß], constant[SS]]]
variable[word] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b00b0640>]]
if <ast.UnaryOp object at 0x7da1b00b0d30> begin[:]
return[constant[]]
variable[code] assign[=] call[name[word]][<ast.Slice object at 0x7da1b00b0ee0>]
for taget[name[vowel]] in starred[name[self]._uc_vy_set] begin[:]
variable[code] assign[=] call[name[code].replace, parameter[name[vowel], constant[]]]
variable[code] assign[=] binary_operation[call[name[word]][constant[0]] + name[code]]
variable[code] assign[=] call[name[self]._delete_consecutive_repeats, parameter[name[code]]]
variable[code] assign[=] call[name[code].replace, parameter[constant[ ], constant[]]]
return[call[name[code]][<ast.Slice object at 0x7da1b00b1090>]] | keyword[def] identifier[encode] ( identifier[self] , identifier[word] , identifier[max_length] = literal[int] ):
literal[string]
identifier[word] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[word] . identifier[upper] ()))
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[word] keyword[if] identifier[c] keyword[in] identifier[self] . identifier[_uc_set] )
keyword[if] keyword[not] identifier[word] :
keyword[return] literal[string]
identifier[code] = identifier[word] [ literal[int] :]
keyword[for] identifier[vowel] keyword[in] identifier[self] . identifier[_uc_vy_set] :
identifier[code] = identifier[code] . identifier[replace] ( identifier[vowel] , literal[string] )
identifier[code] = identifier[word] [ literal[int] ]+ identifier[code]
identifier[code] = identifier[self] . identifier[_delete_consecutive_repeats] ( identifier[code] )
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[code] [: identifier[max_length] ] | def encode(self, word, max_length=4):
"""Return the Statistics Canada code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 4) of the code to return
Returns
-------
str
The Statistics Canada name code value
Examples
--------
>>> pe = StatisticsCanada()
>>> pe.encode('Christopher')
'CHRS'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SMTH'
>>> pe.encode('Schmidt')
'SCHM'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join((c for c in word if c in self._uc_set))
if not word:
return '' # depends on [control=['if'], data=[]]
code = word[1:]
for vowel in self._uc_vy_set:
code = code.replace(vowel, '') # depends on [control=['for'], data=['vowel']]
code = word[0] + code
code = self._delete_consecutive_repeats(code)
code = code.replace(' ', '')
return code[:max_length] |
def get_pipeline_steps(pipeline, steps_group):
    """Get the steps attribute of module pipeline.
    If there is no steps sequence on the pipeline, return None. Guess you
    could theoretically want to run a pipeline with nothing in it.
    Args:
        pipeline: mapping parsed from the pipeline yaml.
        steps_group: key whose value is the steps sequence to retrieve.
    Returns:
        The steps sequence under steps_group, or None when the group is
        missing or empty.
    """
    logger.debug("starting")
    assert pipeline
    assert steps_group
    logger.debug(f"retrieving {steps_group} steps from pipeline")
    # Guard clause: nothing to do when the group isn't in the pipeline.
    if steps_group not in pipeline:
        logger.debug(
            f"pipeline doesn't have a {steps_group} collection. Add a "
            f"{steps_group}: sequence to the yaml if you want {steps_group} "
            "actually to do something.")
        logger.debug("done")
        return None
    steps = pipeline[steps_group]
    if steps is None:
        # logger.warn is a deprecated alias; warning is the supported name.
        logger.warning(
            f"{steps_group}: sequence has no elements. So it won't do "
            "anything.")
        logger.debug("done")
        return None
    logger.debug(f"{len(steps)} steps found under {steps_group} in "
                 "pipeline definition.")
    logger.debug("done")
    return steps
constant[Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it.
]
call[name[logger].debug, parameter[constant[starting]]]
assert[name[pipeline]]
assert[name[steps_group]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da20c992b60>]]
if compare[name[steps_group] in name[pipeline]] begin[:]
variable[steps] assign[=] call[name[pipeline]][name[steps_group]]
if compare[name[steps] is constant[None]] begin[:]
call[name[logger].warn, parameter[<ast.JoinedStr object at 0x7da20c6ab400>]]
call[name[logger].debug, parameter[constant[done]]]
return[constant[None]]
variable[steps_count] assign[=] call[name[len], parameter[name[steps]]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da207f99bd0>]]
call[name[logger].debug, parameter[constant[done]]]
return[name[steps]] | keyword[def] identifier[get_pipeline_steps] ( identifier[pipeline] , identifier[steps_group] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[assert] identifier[pipeline]
keyword[assert] identifier[steps_group]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[steps_group] keyword[in] identifier[pipeline] :
identifier[steps] = identifier[pipeline] [ identifier[steps_group] ]
keyword[if] identifier[steps] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] (
literal[string]
literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[None]
identifier[steps_count] = identifier[len] ( identifier[steps] )
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[steps]
keyword[else] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[None] | def get_pipeline_steps(pipeline, steps_group):
"""Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it.
"""
logger.debug('starting')
assert pipeline
assert steps_group
logger.debug(f'retrieving {steps_group} steps from pipeline')
if steps_group in pipeline:
steps = pipeline[steps_group]
if steps is None:
logger.warn(f"{steps_group}: sequence has no elements. So it won't do anything.")
logger.debug('done')
return None # depends on [control=['if'], data=[]]
steps_count = len(steps)
logger.debug(f'{steps_count} steps found under {steps_group} in pipeline definition.')
logger.debug('done')
return steps # depends on [control=['if'], data=['steps_group', 'pipeline']]
else:
logger.debug(f"pipeline doesn't have a {steps_group} collection. Add a {steps_group}: sequence to the yaml if you want {steps_group} actually to do something.")
logger.debug('done')
return None |
def set_data(self, data):
        '''Send data via TCP.

        Parameters
        ----------
        data : array
            Array of unsigned integers (32 bit).
        '''
        # Serialize the 32-bit words once, then re-read the payload byte by
        # byte so the transport layer receives an array of raw bytes.
        raw = struct.pack("{}I".format(len(data)), *data)
        data = array.array('B', struct.unpack("{}B".format(len(raw)), raw))
self._intf._send_tcp_data(data) | def function[set_data, parameter[self, data]]:
constant[ Sending data to via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
]
variable[data] assign[=] call[name[array].array, parameter[constant[B], call[name[struct].unpack, parameter[call[constant[{}B].format, parameter[binary_operation[call[name[len], parameter[name[data]]] * constant[4]]]], call[name[struct].pack, parameter[call[constant[{}I].format, parameter[call[name[len], parameter[name[data]]]]], <ast.Starred object at 0x7da1b0506590>]]]]]]
call[name[self]._intf._send_tcp_data, parameter[name[data]]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[array] . identifier[array] ( literal[string] , identifier[struct] . identifier[unpack] ( literal[string] . identifier[format] ( identifier[len] ( identifier[data] )* literal[int] ), identifier[struct] . identifier[pack] ( literal[string] . identifier[format] ( identifier[len] ( identifier[data] )),* identifier[data] )))
identifier[self] . identifier[_intf] . identifier[_send_tcp_data] ( identifier[data] ) | def set_data(self, data):
""" Sending data to via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
"""
data = array.array('B', struct.unpack('{}B'.format(len(data) * 4), struct.pack('{}I'.format(len(data)), *data)))
self._intf._send_tcp_data(data) |
def pretty(self, start, end, e, messages=None):
        """Pretties up the output error message so it is readable
        and designates where the error came from

        Args:
            start (int): first line number of the validated span in the YAML
                file
            end (int): last line number of the validated span
            e: a jsonschema-style validation error; ``e.context`` may carry
                sub-errors, and each error exposes ``validator``, ``message``,
                ``relative_path`` and ``instance``
            messages (list): list that human-readable error strings are
                appended to.  NOTE(review): defaults to None but is appended
                to unconditionally -- callers appear expected to pass a list.
        """
        log.debug("Displaying document from lines '%i' to '%i'", start, end)
        errorlist = []
        # A composite error carries its real errors in e.context; otherwise
        # report on e itself.
        if len(e.context) > 0:
            errorlist = e.context
        else:
            errorlist.append(e)
        for error in errorlist:
            validator = error.validator
            if validator == "required":
                # Handle required fields
                msg = error.message
                messages.append("Between lines %d - %d. %s" % (start, end, msg))
            elif validator == "additionalProperties":
                # Handle additional properties not allowed
                # Truncate overly long messages to roughly 256 characters.
                if len(error.message) > 256:
                    msg = error.message[:253] + "..."
                else:
                    msg = error.message
                messages.append("Between lines %d - %d. %s" % (start, end, msg))
            elif len(error.relative_path) > 0:
                # Handle other cases where we can loop through the lines
                # get the JSON path to traverse through the file
                jsonpath = error.relative_path
                array_index = 0
                current_start = start
                foundline = 0
                found = False
                # Rolling window of surrounding lines used to display context
                # around the offending line.
                context = collections.deque(maxlen=20)
                tag = " <<<<<<<<< Expects: %s <<<<<<<<<\n"""
                for cnt, path in enumerate(error.relative_path):
                    # Need to set the key we are looking, and then check the array count
                    # if it is an array, we have some interesting checks to do
                    if int(cnt) % 2 == 0:
                        # we know we have some array account
                        # array_index keeps track of the array count we are looking for or number
                        # of matches we need to skip over before we get to the one we care about
                        # check if previous array_index > 0. if so, then we know we need to use
                        # that one to track down the specific instance of this nested key.
                        # later on, we utilize this array_index loop through
                        # if array_index == 0:
                        array_index = jsonpath[cnt]
                        match_count = 0
                        continue
                    elif int(cnt) % 2 == 1:
                        # we know we have some key name
                        # current_key keeps track of the key we are looking for in the JSON Path
                        current_key = jsonpath[cnt]
                    # Scan the file for the line that holds current_key.
                    for linenum in range(current_start, end):
                        line = linecache.getline(self.ymlfile, linenum)
                        # Check if line contains the error
                        if ":" in line:
                            l = line.split(':')
                            key = l[0]
                            value = ':'.join(l[1:])
                            # TODO:
                            # Handle maxItems TBD
                            # Handle minItems TBD
                            # Handle in-order (bytes) TBD
                            # Handle uniqueness TBD
                            # Handle cases where key in yml file is hexadecimal
                            try:
                                key = int(key.strip(), 16)
                            except ValueError:
                                key = key.strip()
                            if str(key) == current_key:
                                # check if we are at our match_count and end of the path
                                if match_count == array_index:
                                    # check if we are at end of the jsonpath
                                    if cnt == len(jsonpath)-1:
                                        # we are at the end of path so let's stop here'
                                        if error.validator == "type":
                                            if value.strip() == str(error.instance):
                                                errormsg = "Value '%s' should be of type '%s'" % (error.instance, str(error.validator_value))
                                                line = line.replace("\n", (tag % errormsg))
                                                foundline = linenum
                                                found = True
                                            elif value.strip() == "" and error.instance is None:
                                                errormsg = "Missing value for %s." % key
                                                line = line.replace("\n", (tag % errormsg))
                                                foundline = linenum
                                                found = True
                                    elif not found:
                                        # print "EXTRA FOO"
                                        # print match_count
                                        # print array_index
                                        # print current_key
                                        # print line
                                        # otherwise change the start to the current line
                                        current_start = linenum
                                        break
                                match_count += 1
                        # for the context queue, we want to get the error to appear in
                        # the middle of the error output. to do so, we will only append
                        # to the queue in 2 cases:
                        #
                        # 1. before we find the error (found == False). we can
                        # just keep pushing on the queue until we find it in the YAML.
                        # 2. once we find the error (found == True), we just want to push
                        # onto the queue until the the line is in the middle
                        if not found or (found and context.maxlen > (linenum-foundline)*2):
                            context.append(line)
                        elif found and context.maxlen <= (linenum-foundline)*2:
                            break
                    # Loop through the queue and generate a readable msg output
                    out = ""
                    for line in context:
                        out += line
                    if foundline:
                        msg = "Error found on line %d in %s:\n\n%s" % (foundline, self.ymlfile, out)
                        messages.append(msg)
                        # reset the line it was found on and the context
                        foundline = 0
                        context.clear()
                    linecache.clearcache()
            else:
                # Fallback: no path/line information available; report the
                # raw validator message verbatim.
messages.append(error.message) | def function[pretty, parameter[self, start, end, e, messages]]:
constant[Pretties up the output error message so it is readable
and designates where the error came from]
call[name[log].debug, parameter[constant[Displaying document from lines '%i' to '%i'], name[start], name[end]]]
variable[errorlist] assign[=] list[[]]
if compare[call[name[len], parameter[name[e].context]] greater[>] constant[0]] begin[:]
variable[errorlist] assign[=] name[e].context
for taget[name[error]] in starred[name[errorlist]] begin[:]
variable[validator] assign[=] name[error].validator
if compare[name[validator] equal[==] constant[required]] begin[:]
variable[msg] assign[=] name[error].message
call[name[messages].append, parameter[binary_operation[constant[Between lines %d - %d. %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09d0c0>, <ast.Name object at 0x7da18f09f700>, <ast.Name object at 0x7da18f09d000>]]]]] | keyword[def] identifier[pretty] ( identifier[self] , identifier[start] , identifier[end] , identifier[e] , identifier[messages] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[start] , identifier[end] )
identifier[errorlist] =[]
keyword[if] identifier[len] ( identifier[e] . identifier[context] )> literal[int] :
identifier[errorlist] = identifier[e] . identifier[context]
keyword[else] :
identifier[errorlist] . identifier[append] ( identifier[e] )
keyword[for] identifier[error] keyword[in] identifier[errorlist] :
identifier[validator] = identifier[error] . identifier[validator]
keyword[if] identifier[validator] == literal[string] :
identifier[msg] = identifier[error] . identifier[message]
identifier[messages] . identifier[append] ( literal[string] %( identifier[start] , identifier[end] , identifier[msg] ))
keyword[elif] identifier[validator] == literal[string] :
keyword[if] identifier[len] ( identifier[error] . identifier[message] )> literal[int] :
identifier[msg] = identifier[error] . identifier[message] [: literal[int] ]+ literal[string]
keyword[else] :
identifier[msg] = identifier[error] . identifier[message]
identifier[messages] . identifier[append] ( literal[string] %( identifier[start] , identifier[end] , identifier[msg] ))
keyword[elif] identifier[len] ( identifier[error] . identifier[relative_path] )> literal[int] :
identifier[jsonpath] = identifier[error] . identifier[relative_path]
identifier[array_index] = literal[int]
identifier[current_start] = identifier[start]
identifier[foundline] = literal[int]
identifier[found] = keyword[False]
identifier[context] = identifier[collections] . identifier[deque] ( identifier[maxlen] = literal[int] )
identifier[tag] = literal[string] literal[string]
keyword[for] identifier[cnt] , identifier[path] keyword[in] identifier[enumerate] ( identifier[error] . identifier[relative_path] ):
keyword[if] identifier[int] ( identifier[cnt] )% literal[int] == literal[int] :
identifier[array_index] = identifier[jsonpath] [ identifier[cnt] ]
identifier[match_count] = literal[int]
keyword[continue]
keyword[elif] identifier[int] ( identifier[cnt] )% literal[int] == literal[int] :
identifier[current_key] = identifier[jsonpath] [ identifier[cnt] ]
keyword[for] identifier[linenum] keyword[in] identifier[range] ( identifier[current_start] , identifier[end] ):
identifier[line] = identifier[linecache] . identifier[getline] ( identifier[self] . identifier[ymlfile] , identifier[linenum] )
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[l] = identifier[line] . identifier[split] ( literal[string] )
identifier[key] = identifier[l] [ literal[int] ]
identifier[value] = literal[string] . identifier[join] ( identifier[l] [ literal[int] :])
keyword[try] :
identifier[key] = identifier[int] ( identifier[key] . identifier[strip] (), literal[int] )
keyword[except] identifier[ValueError] :
identifier[key] = identifier[key] . identifier[strip] ()
keyword[if] identifier[str] ( identifier[key] )== identifier[current_key] :
keyword[if] identifier[match_count] == identifier[array_index] :
keyword[if] identifier[cnt] == identifier[len] ( identifier[jsonpath] )- literal[int] :
keyword[if] identifier[error] . identifier[validator] == literal[string] :
keyword[if] identifier[value] . identifier[strip] ()== identifier[str] ( identifier[error] . identifier[instance] ):
identifier[errormsg] = literal[string] %( identifier[error] . identifier[instance] , identifier[str] ( identifier[error] . identifier[validator_value] ))
identifier[line] = identifier[line] . identifier[replace] ( literal[string] ,( identifier[tag] % identifier[errormsg] ))
identifier[foundline] = identifier[linenum]
identifier[found] = keyword[True]
keyword[elif] identifier[value] . identifier[strip] ()== literal[string] keyword[and] identifier[error] . identifier[instance] keyword[is] keyword[None] :
identifier[errormsg] = literal[string] % identifier[key]
identifier[line] = identifier[line] . identifier[replace] ( literal[string] ,( identifier[tag] % identifier[errormsg] ))
identifier[foundline] = identifier[linenum]
identifier[found] = keyword[True]
keyword[elif] keyword[not] identifier[found] :
identifier[current_start] = identifier[linenum]
keyword[break]
identifier[match_count] += literal[int]
keyword[if] keyword[not] identifier[found] keyword[or] ( identifier[found] keyword[and] identifier[context] . identifier[maxlen] >( identifier[linenum] - identifier[foundline] )* literal[int] ):
identifier[context] . identifier[append] ( identifier[line] )
keyword[elif] identifier[found] keyword[and] identifier[context] . identifier[maxlen] <=( identifier[linenum] - identifier[foundline] )* literal[int] :
keyword[break]
identifier[out] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[context] :
identifier[out] += identifier[line]
keyword[if] identifier[foundline] :
identifier[msg] = literal[string] %( identifier[foundline] , identifier[self] . identifier[ymlfile] , identifier[out] )
identifier[messages] . identifier[append] ( identifier[msg] )
identifier[foundline] = literal[int]
identifier[context] . identifier[clear] ()
identifier[linecache] . identifier[clearcache] ()
keyword[else] :
identifier[messages] . identifier[append] ( identifier[error] . identifier[message] ) | def pretty(self, start, end, e, messages=None):
"""Pretties up the output error message so it is readable
and designates where the error came from"""
log.debug("Displaying document from lines '%i' to '%i'", start, end)
errorlist = []
if len(e.context) > 0:
errorlist = e.context # depends on [control=['if'], data=[]]
else:
errorlist.append(e)
for error in errorlist:
validator = error.validator
if validator == 'required':
# Handle required fields
msg = error.message
messages.append('Between lines %d - %d. %s' % (start, end, msg)) # depends on [control=['if'], data=[]]
elif validator == 'additionalProperties':
# Handle additional properties not allowed
if len(error.message) > 256:
msg = error.message[:253] + '...' # depends on [control=['if'], data=[]]
else:
msg = error.message
messages.append('Between lines %d - %d. %s' % (start, end, msg)) # depends on [control=['if'], data=[]]
elif len(error.relative_path) > 0:
# Handle other cases where we can loop through the lines
# get the JSON path to traverse through the file
jsonpath = error.relative_path
array_index = 0
current_start = start
foundline = 0
found = False
context = collections.deque(maxlen=20)
tag = ' <<<<<<<<< Expects: %s <<<<<<<<<\n'
for (cnt, path) in enumerate(error.relative_path):
# Need to set the key we are looking, and then check the array count
# if it is an array, we have some interesting checks to do
if int(cnt) % 2 == 0:
# we know we have some array account
# array_index keeps track of the array count we are looking for or number
# of matches we need to skip over before we get to the one we care about
# check if previous array_index > 0. if so, then we know we need to use
# that one to track down the specific instance of this nested key.
# later on, we utilize this array_index loop through
# if array_index == 0:
array_index = jsonpath[cnt]
match_count = 0
continue # depends on [control=['if'], data=[]]
elif int(cnt) % 2 == 1:
# we know we have some key name
# current_key keeps track of the key we are looking for in the JSON Path
current_key = jsonpath[cnt] # depends on [control=['if'], data=[]]
for linenum in range(current_start, end):
line = linecache.getline(self.ymlfile, linenum)
# Check if line contains the error
if ':' in line:
l = line.split(':')
key = l[0]
value = ':'.join(l[1:])
# TODO:
# Handle maxItems TBD
# Handle minItems TBD
# Handle in-order (bytes) TBD
# Handle uniqueness TBD
# Handle cases where key in yml file is hexadecimal
try:
key = int(key.strip(), 16) # depends on [control=['try'], data=[]]
except ValueError:
key = key.strip() # depends on [control=['except'], data=[]]
if str(key) == current_key:
# check if we are at our match_count and end of the path
if match_count == array_index:
# check if we are at end of the jsonpath
if cnt == len(jsonpath) - 1:
# we are at the end of path so let's stop here'
if error.validator == 'type':
if value.strip() == str(error.instance):
errormsg = "Value '%s' should be of type '%s'" % (error.instance, str(error.validator_value))
line = line.replace('\n', tag % errormsg)
foundline = linenum
found = True # depends on [control=['if'], data=[]]
elif value.strip() == '' and error.instance is None:
errormsg = 'Missing value for %s.' % key
line = line.replace('\n', tag % errormsg)
foundline = linenum
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not found:
# print "EXTRA FOO"
# print match_count
# print array_index
# print current_key
# print line
# otherwise change the start to the current line
current_start = linenum
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
match_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['line']]
# for the context queue, we want to get the error to appear in
# the middle of the error output. to do so, we will only append
# to the queue in 2 cases:
#
# 1. before we find the error (found == False). we can
# just keep pushing on the queue until we find it in the YAML.
# 2. once we find the error (found == True), we just want to push
# onto the queue until the the line is in the middle
if not found or (found and context.maxlen > (linenum - foundline) * 2):
context.append(line) # depends on [control=['if'], data=[]]
elif found and context.maxlen <= (linenum - foundline) * 2:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['linenum']]
# Loop through the queue and generate a readable msg output
out = ''
for line in context:
out += line # depends on [control=['for'], data=['line']]
if foundline:
msg = 'Error found on line %d in %s:\n\n%s' % (foundline, self.ymlfile, out)
messages.append(msg)
# reset the line it was found on and the context
foundline = 0
context.clear() # depends on [control=['if'], data=[]]
linecache.clearcache() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
messages.append(error.message) # depends on [control=['for'], data=['error']] |
def toarray(self):
        """
        Returns the contents as a local array.
        Will likely cause memory problems for large objects.
        """
        # Records must come out key-ordered; sort first unless already ordered.
        if self._ordered:
            source = self._rdd
        else:
            source = self._rdd.sortByKey()
        x = source.values().collect()
return asarray(x).reshape(self.shape) | def function[toarray, parameter[self]]:
constant[
Returns the contents as a local array.
Will likely cause memory problems for large objects.
]
variable[rdd] assign[=] <ast.IfExp object at 0x7da1b01c2bf0>
variable[x] assign[=] call[call[name[rdd].values, parameter[]].collect, parameter[]]
return[call[call[name[asarray], parameter[name[x]]].reshape, parameter[name[self].shape]]] | keyword[def] identifier[toarray] ( identifier[self] ):
literal[string]
identifier[rdd] = identifier[self] . identifier[_rdd] keyword[if] identifier[self] . identifier[_ordered] keyword[else] identifier[self] . identifier[_rdd] . identifier[sortByKey] ()
identifier[x] = identifier[rdd] . identifier[values] (). identifier[collect] ()
keyword[return] identifier[asarray] ( identifier[x] ). identifier[reshape] ( identifier[self] . identifier[shape] ) | def toarray(self):
"""
Returns the contents as a local array.
Will likely cause memory problems for large objects.
"""
rdd = self._rdd if self._ordered else self._rdd.sortByKey()
x = rdd.values().collect()
return asarray(x).reshape(self.shape) |
def get_all_anonymous_mappings(self, struct1, struct2, niggli=True,
                                   include_dist=False):
        """
        Performs an anonymous fitting, which allows distinct species in one
        structure to map to another. Returns a dictionary of species
        substitutions that are within tolerance
        Args:
            struct1 (Structure): 1st structure
            struct2 (Structure): 2nd structure
            niggli (bool): Find niggli cell in preprocessing
            include_dist (bool): Return the maximin distance with each mapping
        Returns:
            list of species mappings that map struct1 to struct2.
            Falls through (returns None) when no match is found.
        """
        struct1, struct2 = self._process_species([struct1, struct2])
        struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
                                                              niggli)
        # break_on_match is simply the negation of include_dist here --
        # presumably so that matching is exhaustive whenever distances are
        # requested; confirm against _anonymous_match.
        matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
                                        break_on_match=not include_dist)
        if matches:
            if include_dist:
                # m[0] is the species mapping; m[1][0] looks like the best
                # distance for that mapping -- TODO confirm _anonymous_match's
                # return shape.
                return [(m[0], m[1][0]) for m in matches]
            else:
                # Distances omitted: return just the species mappings.
return [m[0] for m in matches] | def function[get_all_anonymous_mappings, parameter[self, struct1, struct2, niggli, include_dist]]:
constant[
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. Returns a dictionary of species
substitutions that are within tolerance
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
niggli (bool): Find niggli cell in preprocessing
include_dist (bool): Return the maximin distance with each mapping
Returns:
list of species mappings that map struct1 to struct2.
]
<ast.Tuple object at 0x7da1b1cd7ca0> assign[=] call[name[self]._process_species, parameter[list[[<ast.Name object at 0x7da1b1cd7d30>, <ast.Name object at 0x7da1b1cd7e20>]]]]
<ast.Tuple object at 0x7da1b1cd6e00> assign[=] call[name[self]._preprocess, parameter[name[struct1], name[struct2], name[niggli]]]
variable[matches] assign[=] call[name[self]._anonymous_match, parameter[name[struct1], name[struct2], name[fu], name[s1_supercell]]]
if name[matches] begin[:]
if name[include_dist] begin[:]
return[<ast.ListComp object at 0x7da1b1c96d10>] | keyword[def] identifier[get_all_anonymous_mappings] ( identifier[self] , identifier[struct1] , identifier[struct2] , identifier[niggli] = keyword[True] ,
identifier[include_dist] = keyword[False] ):
literal[string]
identifier[struct1] , identifier[struct2] = identifier[self] . identifier[_process_species] ([ identifier[struct1] , identifier[struct2] ])
identifier[struct1] , identifier[struct2] , identifier[fu] , identifier[s1_supercell] = identifier[self] . identifier[_preprocess] ( identifier[struct1] , identifier[struct2] ,
identifier[niggli] )
identifier[matches] = identifier[self] . identifier[_anonymous_match] ( identifier[struct1] , identifier[struct2] , identifier[fu] , identifier[s1_supercell] ,
identifier[break_on_match] = keyword[not] identifier[include_dist] )
keyword[if] identifier[matches] :
keyword[if] identifier[include_dist] :
keyword[return] [( identifier[m] [ literal[int] ], identifier[m] [ literal[int] ][ literal[int] ]) keyword[for] identifier[m] keyword[in] identifier[matches] ]
keyword[else] :
keyword[return] [ identifier[m] [ literal[int] ] keyword[for] identifier[m] keyword[in] identifier[matches] ] | def get_all_anonymous_mappings(self, struct1, struct2, niggli=True, include_dist=False):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. Returns a dictionary of species
substitutions that are within tolerance
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
niggli (bool): Find niggli cell in preprocessing
include_dist (bool): Return the maximin distance with each mapping
Returns:
list of species mappings that map struct1 to struct2.
"""
(struct1, struct2) = self._process_species([struct1, struct2])
(struct1, struct2, fu, s1_supercell) = self._preprocess(struct1, struct2, niggli)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, break_on_match=not include_dist)
if matches:
if include_dist:
return [(m[0], m[1][0]) for m in matches] # depends on [control=['if'], data=[]]
else:
return [m[0] for m in matches] # depends on [control=['if'], data=[]] |
def after_output(command_status):
    """
    Shell sequence to be run after the command output.
    The ``command_status`` should be in the range 0-255.
    """
    valid_statuses = range(256)
    if command_status not in valid_statuses:
        raise ValueError("command_status must be an integer in the range 0-255")
    sequence = AFTER_OUTPUT.format(command_status=command_status)
    sys.stdout.write(sequence)
    # Flush right away: the command timing feature may come to rely on
    # AFTER_OUTPUT being emitted promptly.
sys.stdout.flush() | def function[after_output, parameter[command_status]]:
constant[
Shell sequence to be run after the command output.
The ``command_status`` should be in the range 0-255.
]
if compare[name[command_status] <ast.NotIn object at 0x7da2590d7190> call[name[range], parameter[constant[256]]]] begin[:]
<ast.Raise object at 0x7da1b0f52d70>
call[name[sys].stdout.write, parameter[call[name[AFTER_OUTPUT].format, parameter[]]]]
call[name[sys].stdout.flush, parameter[]] | keyword[def] identifier[after_output] ( identifier[command_status] ):
literal[string]
keyword[if] identifier[command_status] keyword[not] keyword[in] identifier[range] ( literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( identifier[AFTER_OUTPUT] . identifier[format] ( identifier[command_status] = identifier[command_status] ))
identifier[sys] . identifier[stdout] . identifier[flush] () | def after_output(command_status):
"""
Shell sequence to be run after the command output.
The ``command_status`` should be in the range 0-255.
"""
if command_status not in range(256):
raise ValueError('command_status must be an integer in the range 0-255') # depends on [control=['if'], data=[]]
sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status))
# Flushing is important as the command timing feature maybe based on
# AFTER_OUTPUT in the future.
sys.stdout.flush() |
def create_timezone(tz, first_date=None, last_date=None):
    """
    create an icalendar vtimezone from a pytz.tzinfo object
    :param tz: the timezone
    :type tz: pytz.tzinfo
    :param first_date: the very first datetime that needs to be included in the
        transition times, typically the DTSTART value of the (first recurring)
        event
    :type first_date: datetime.datetime
    :param last_date: the last datetime that needs to included, typically the
        end of the (very last) event (of a recursion set)
    :returns: timezone information
    :rtype: icalendar.Timezone()
    we currently have a problem here:
       pytz.timezones only carry the absolute dates of time zone transitions,
       not their RRULEs. This will a) make for rather bloated VTIMEZONE
       components, especially for long recurring events, b) we'll need to
       specify for which time range this VTIMEZONE should be generated and c)
       will not be valid for recurring events that go into eternity.
    Possible Solutions:
    As this information is not provided by pytz at all, there is no
    easy solution, we'd really need to ship another version of the OLSON DB.
    """
    # Fixed-offset zones have no transitions and get a single-subcomponent
    # VTIMEZONE.
    if isinstance(tz, pytz.tzinfo.StaticTzInfo):
        return _create_timezone_static(tz)
    # TODO last_date = None, recurring to infinity
    # Default both bounds to "now"; strip tzinfo so comparisons against
    # pytz's naive UTC transition table work.
    first_date = dt.datetime.today() if not first_date else to_naive_utc(first_date)
    last_date = dt.datetime.today() if not last_date else to_naive_utc(last_date)
    timezone = icalendar.Timezone()
    timezone.add('TZID', tz)
    # This is not a reliable way of determining if a transition is for
    # daylight savings.
    # From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard
    # and GMT+12:00 (NZ Summer Time) as daylight savings time.
    # From 1941 GMT+12:00 (NZ Standard Time) became standard time.
    # So NZST (NZ Summer/Standard Time) can refer to standard or daylight
    # savings time. And this code depends on the random order the _tzinfos
    # are returned.
    # dst = {
    #     one[2]: 'DST' in two.__repr__()
    #     for one, two in iter(tz._tzinfos.items())
    # }
    # bst = {
    #     one[2]: 'BST' in two.__repr__()
    #     for one, two in iter(tz._tzinfos.items())
    # }
    # ...
    # if dst[name] or bst[name]:
    # looking for the first and last transition time we need to include
    # NOTE(review): this relies on pytz *private* attributes
    # (_utc_transition_times, _transition_info) -- fragile across pytz
    # versions.
    first_num, last_num = 0, len(tz._utc_transition_times) - 1
    first_tt = tz._utc_transition_times[0]
    last_tt = tz._utc_transition_times[-1]
    # Narrow [first_num, last_num] to the transitions bracketing the
    # requested date range.
    for num, transtime in enumerate(tz._utc_transition_times):
        if transtime > first_tt and transtime < first_date:
            first_num = num
            first_tt = transtime
        if transtime < last_tt and transtime > last_date:
            last_num = num
            last_tt = transtime
    # Map tzname -> STANDARD/DAYLIGHT subcomponent; repeated names are folded
    # into the existing subcomponent as extra RDATEs.
    timezones = dict()
    for num in range(first_num, last_num + 1):
        # _transition_info[num] appears to be (utcoffset, dst, tzname) --
        # TODO confirm against pytz internals.
        name = tz._transition_info[num][2]
        if name in timezones:
            ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
            if 'RDATE' in timezones[name]:
                timezones[name]['RDATE'].dts.append(
                    icalendar.prop.vDDDTypes(ttime))
            else:
                timezones[name].add('RDATE', ttime)
            continue
        if tz._transition_info[num][1]:
            subcomp = icalendar.TimezoneDaylight()
        else:
            subcomp = icalendar.TimezoneStandard()
        subcomp.add('TZNAME', tz._transition_info[num][2])
        subcomp.add(
            'DTSTART',
            tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
        subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
        # TZOFFSETFROM is the offset in effect *before* this transition.
        subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
        timezones[name] = subcomp
    for subcomp in timezones.values():
        timezone.add_component(subcomp)
return timezone | def function[create_timezone, parameter[tz, first_date, last_date]]:
constant[
create an icalendar vtimezone from a pytz.tzinfo object
:param tz: the timezone
:type tz: pytz.tzinfo
:param first_date: the very first datetime that needs to be included in the
transition times, typically the DTSTART value of the (first recurring)
event
:type first_date: datetime.datetime
:param last_date: the last datetime that needs to included, typically the
end of the (very last) event (of a recursion set)
:returns: timezone information
:rtype: icalendar.Timezone()
we currently have a problem here:
pytz.timezones only carry the absolute dates of time zone transitions,
not their RRULEs. This will a) make for rather bloated VTIMEZONE
components, especially for long recurring events, b) we'll need to
specify for which time range this VTIMEZONE should be generated and c)
will not be valid for recurring events that go into eternity.
Possible Solutions:
As this information is not provided by pytz at all, there is no
easy solution, we'd really need to ship another version of the OLSON DB.
]
if call[name[isinstance], parameter[name[tz], name[pytz].tzinfo.StaticTzInfo]] begin[:]
return[call[name[_create_timezone_static], parameter[name[tz]]]]
variable[first_date] assign[=] <ast.IfExp object at 0x7da18dc99240>
variable[last_date] assign[=] <ast.IfExp object at 0x7da18dc9b7f0>
variable[timezone] assign[=] call[name[icalendar].Timezone, parameter[]]
call[name[timezone].add, parameter[constant[TZID], name[tz]]]
<ast.Tuple object at 0x7da18dc9a9e0> assign[=] tuple[[<ast.Constant object at 0x7da18dc99ff0>, <ast.BinOp object at 0x7da18dc9aef0>]]
variable[first_tt] assign[=] call[name[tz]._utc_transition_times][constant[0]]
variable[last_tt] assign[=] call[name[tz]._utc_transition_times][<ast.UnaryOp object at 0x7da18dc9bfd0>]
for taget[tuple[[<ast.Name object at 0x7da18dc981c0>, <ast.Name object at 0x7da18dc9a230>]]] in starred[call[name[enumerate], parameter[name[tz]._utc_transition_times]]] begin[:]
if <ast.BoolOp object at 0x7da18dc9bc70> begin[:]
variable[first_num] assign[=] name[num]
variable[first_tt] assign[=] name[transtime]
if <ast.BoolOp object at 0x7da18dc99300> begin[:]
variable[last_num] assign[=] name[num]
variable[last_tt] assign[=] name[transtime]
variable[timezones] assign[=] call[name[dict], parameter[]]
for taget[name[num]] in starred[call[name[range], parameter[name[first_num], binary_operation[name[last_num] + constant[1]]]]] begin[:]
variable[name] assign[=] call[call[name[tz]._transition_info][name[num]]][constant[2]]
if compare[name[name] in name[timezones]] begin[:]
variable[ttime] assign[=] call[call[name[tz].fromutc, parameter[call[name[tz]._utc_transition_times][name[num]]]].replace, parameter[]]
if compare[constant[RDATE] in call[name[timezones]][name[name]]] begin[:]
call[call[call[name[timezones]][name[name]]][constant[RDATE]].dts.append, parameter[call[name[icalendar].prop.vDDDTypes, parameter[name[ttime]]]]]
continue
if call[call[name[tz]._transition_info][name[num]]][constant[1]] begin[:]
variable[subcomp] assign[=] call[name[icalendar].TimezoneDaylight, parameter[]]
call[name[subcomp].add, parameter[constant[TZNAME], call[call[name[tz]._transition_info][name[num]]][constant[2]]]]
call[name[subcomp].add, parameter[constant[DTSTART], call[call[name[tz].fromutc, parameter[call[name[tz]._utc_transition_times][name[num]]]].replace, parameter[]]]]
call[name[subcomp].add, parameter[constant[TZOFFSETTO], call[call[name[tz]._transition_info][name[num]]][constant[0]]]]
call[name[subcomp].add, parameter[constant[TZOFFSETFROM], call[call[name[tz]._transition_info][binary_operation[name[num] - constant[1]]]][constant[0]]]]
call[name[timezones]][name[name]] assign[=] name[subcomp]
for taget[name[subcomp]] in starred[call[name[timezones].values, parameter[]]] begin[:]
call[name[timezone].add_component, parameter[name[subcomp]]]
return[name[timezone]] | keyword[def] identifier[create_timezone] ( identifier[tz] , identifier[first_date] = keyword[None] , identifier[last_date] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[tz] , identifier[pytz] . identifier[tzinfo] . identifier[StaticTzInfo] ):
keyword[return] identifier[_create_timezone_static] ( identifier[tz] )
identifier[first_date] = identifier[dt] . identifier[datetime] . identifier[today] () keyword[if] keyword[not] identifier[first_date] keyword[else] identifier[to_naive_utc] ( identifier[first_date] )
identifier[last_date] = identifier[dt] . identifier[datetime] . identifier[today] () keyword[if] keyword[not] identifier[last_date] keyword[else] identifier[to_naive_utc] ( identifier[last_date] )
identifier[timezone] = identifier[icalendar] . identifier[Timezone] ()
identifier[timezone] . identifier[add] ( literal[string] , identifier[tz] )
identifier[first_num] , identifier[last_num] = literal[int] , identifier[len] ( identifier[tz] . identifier[_utc_transition_times] )- literal[int]
identifier[first_tt] = identifier[tz] . identifier[_utc_transition_times] [ literal[int] ]
identifier[last_tt] = identifier[tz] . identifier[_utc_transition_times] [- literal[int] ]
keyword[for] identifier[num] , identifier[transtime] keyword[in] identifier[enumerate] ( identifier[tz] . identifier[_utc_transition_times] ):
keyword[if] identifier[transtime] > identifier[first_tt] keyword[and] identifier[transtime] < identifier[first_date] :
identifier[first_num] = identifier[num]
identifier[first_tt] = identifier[transtime]
keyword[if] identifier[transtime] < identifier[last_tt] keyword[and] identifier[transtime] > identifier[last_date] :
identifier[last_num] = identifier[num]
identifier[last_tt] = identifier[transtime]
identifier[timezones] = identifier[dict] ()
keyword[for] identifier[num] keyword[in] identifier[range] ( identifier[first_num] , identifier[last_num] + literal[int] ):
identifier[name] = identifier[tz] . identifier[_transition_info] [ identifier[num] ][ literal[int] ]
keyword[if] identifier[name] keyword[in] identifier[timezones] :
identifier[ttime] = identifier[tz] . identifier[fromutc] ( identifier[tz] . identifier[_utc_transition_times] [ identifier[num] ]). identifier[replace] ( identifier[tzinfo] = keyword[None] )
keyword[if] literal[string] keyword[in] identifier[timezones] [ identifier[name] ]:
identifier[timezones] [ identifier[name] ][ literal[string] ]. identifier[dts] . identifier[append] (
identifier[icalendar] . identifier[prop] . identifier[vDDDTypes] ( identifier[ttime] ))
keyword[else] :
identifier[timezones] [ identifier[name] ]. identifier[add] ( literal[string] , identifier[ttime] )
keyword[continue]
keyword[if] identifier[tz] . identifier[_transition_info] [ identifier[num] ][ literal[int] ]:
identifier[subcomp] = identifier[icalendar] . identifier[TimezoneDaylight] ()
keyword[else] :
identifier[subcomp] = identifier[icalendar] . identifier[TimezoneStandard] ()
identifier[subcomp] . identifier[add] ( literal[string] , identifier[tz] . identifier[_transition_info] [ identifier[num] ][ literal[int] ])
identifier[subcomp] . identifier[add] (
literal[string] ,
identifier[tz] . identifier[fromutc] ( identifier[tz] . identifier[_utc_transition_times] [ identifier[num] ]). identifier[replace] ( identifier[tzinfo] = keyword[None] ))
identifier[subcomp] . identifier[add] ( literal[string] , identifier[tz] . identifier[_transition_info] [ identifier[num] ][ literal[int] ])
identifier[subcomp] . identifier[add] ( literal[string] , identifier[tz] . identifier[_transition_info] [ identifier[num] - literal[int] ][ literal[int] ])
identifier[timezones] [ identifier[name] ]= identifier[subcomp]
keyword[for] identifier[subcomp] keyword[in] identifier[timezones] . identifier[values] ():
identifier[timezone] . identifier[add_component] ( identifier[subcomp] )
keyword[return] identifier[timezone] | def create_timezone(tz, first_date=None, last_date=None):
"""
create an icalendar vtimezone from a pytz.tzinfo object
:param tz: the timezone
:type tz: pytz.tzinfo
:param first_date: the very first datetime that needs to be included in the
transition times, typically the DTSTART value of the (first recurring)
event
:type first_date: datetime.datetime
:param last_date: the last datetime that needs to included, typically the
end of the (very last) event (of a recursion set)
:returns: timezone information
:rtype: icalendar.Timezone()
we currently have a problem here:
pytz.timezones only carry the absolute dates of time zone transitions,
not their RRULEs. This will a) make for rather bloated VTIMEZONE
components, especially for long recurring events, b) we'll need to
specify for which time range this VTIMEZONE should be generated and c)
will not be valid for recurring events that go into eternity.
Possible Solutions:
As this information is not provided by pytz at all, there is no
easy solution, we'd really need to ship another version of the OLSON DB.
"""
if isinstance(tz, pytz.tzinfo.StaticTzInfo):
return _create_timezone_static(tz) # depends on [control=['if'], data=[]]
# TODO last_date = None, recurring to infinity
first_date = dt.datetime.today() if not first_date else to_naive_utc(first_date)
last_date = dt.datetime.today() if not last_date else to_naive_utc(last_date)
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
# This is not a reliable way of determining if a transition is for
# daylight savings.
# From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard
# and GMT+12:00 (NZ Summer Time) as daylight savings time.
# From 1941 GMT+12:00 (NZ Standard Time) became standard time.
# So NZST (NZ Summer/Standard Time) can refer to standard or daylight
# savings time. And this code depends on the random order the _tzinfos
# are returned.
# dst = {
# one[2]: 'DST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# bst = {
# one[2]: 'BST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# ...
# if dst[name] or bst[name]:
# looking for the first and last transition time we need to include
(first_num, last_num) = (0, len(tz._utc_transition_times) - 1)
first_tt = tz._utc_transition_times[0]
last_tt = tz._utc_transition_times[-1]
for (num, transtime) in enumerate(tz._utc_transition_times):
if transtime > first_tt and transtime < first_date:
first_num = num
first_tt = transtime # depends on [control=['if'], data=[]]
if transtime < last_tt and transtime > last_date:
last_num = num
last_tt = transtime # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
timezones = dict()
for num in range(first_num, last_num + 1):
name = tz._transition_info[num][2]
if name in timezones:
ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
if 'RDATE' in timezones[name]:
timezones[name]['RDATE'].dts.append(icalendar.prop.vDDDTypes(ttime)) # depends on [control=['if'], data=[]]
else:
timezones[name].add('RDATE', ttime)
continue # depends on [control=['if'], data=['name', 'timezones']]
if tz._transition_info[num][1]:
subcomp = icalendar.TimezoneDaylight() # depends on [control=['if'], data=[]]
else:
subcomp = icalendar.TimezoneStandard()
subcomp.add('TZNAME', tz._transition_info[num][2])
subcomp.add('DTSTART', tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
timezones[name] = subcomp # depends on [control=['for'], data=['num']]
for subcomp in timezones.values():
timezone.add_component(subcomp) # depends on [control=['for'], data=['subcomp']]
return timezone |
def select_splitfilejob_instance(curr_exe):
    """
    Return the Executable class appropriate for splitting an output file
    up within a workflow (e.g. splitbank).

    Parameters
    ----------
    curr_exe : string
        The name of the Executable that is being used.

    Returns
    --------
    exe class : sub-class of pycbc.workflow.core.Executable
        The class that holds the utility functions appropriate for the
        given Executable. This class **must** contain
        * exe_class.create_job()
        and the job returned by this **must** contain
        * job.create_node()
    """
    # Dispatch lazily on the executable name: each branch is only evaluated
    # when matched, so unknown names never touch the job classes.
    if curr_exe == 'pycbc_hdf5_splitbank':
        return PycbcSplitBankExecutable
    if curr_exe == 'pycbc_splitbank':
        return PycbcSplitBankXmlExecutable
    if curr_exe == 'pycbc_split_inspinj':
        return PycbcSplitInspinjExecutable
    # No default class exists, so fail loudly for unsupported executables.
    err_string = "No class exists for Executable %s" % (curr_exe,)
    raise NotImplementedError(err_string)
constant[
This function returns an instance of the class that is appropriate for
splitting an output file up within workflow (for e.g. splitbank).
Parameters
----------
curr_exe : string
The name of the Executable that is being used.
curr_section : string
The name of the section storing options for this executble
Returns
--------
exe class : sub-class of pycbc.workflow.core.Executable
The class that holds the utility functions appropriate
for the given Executable. This class **must** contain
* exe_class.create_job()
and the job returned by this **must** contain
* job.create_node()
]
if compare[name[curr_exe] equal[==] constant[pycbc_hdf5_splitbank]] begin[:]
variable[exe_class] assign[=] name[PycbcSplitBankExecutable]
return[name[exe_class]] | keyword[def] identifier[select_splitfilejob_instance] ( identifier[curr_exe] ):
literal[string]
keyword[if] identifier[curr_exe] == literal[string] :
identifier[exe_class] = identifier[PycbcSplitBankExecutable]
keyword[elif] identifier[curr_exe] == literal[string] :
identifier[exe_class] = identifier[PycbcSplitBankXmlExecutable]
keyword[elif] identifier[curr_exe] == literal[string] :
identifier[exe_class] = identifier[PycbcSplitInspinjExecutable]
keyword[else] :
identifier[err_string] = literal[string] %( identifier[curr_exe] ,)
keyword[raise] identifier[NotImplementedError] ( identifier[err_string] )
keyword[return] identifier[exe_class] | def select_splitfilejob_instance(curr_exe):
"""
This function returns an instance of the class that is appropriate for
splitting an output file up within workflow (for e.g. splitbank).
Parameters
----------
curr_exe : string
The name of the Executable that is being used.
curr_section : string
The name of the section storing options for this executble
Returns
--------
exe class : sub-class of pycbc.workflow.core.Executable
The class that holds the utility functions appropriate
for the given Executable. This class **must** contain
* exe_class.create_job()
and the job returned by this **must** contain
* job.create_node()
"""
if curr_exe == 'pycbc_hdf5_splitbank':
exe_class = PycbcSplitBankExecutable # depends on [control=['if'], data=[]]
elif curr_exe == 'pycbc_splitbank':
exe_class = PycbcSplitBankXmlExecutable # depends on [control=['if'], data=[]]
elif curr_exe == 'pycbc_split_inspinj':
exe_class = PycbcSplitInspinjExecutable # depends on [control=['if'], data=[]]
else:
# Should we try some sort of default class??
err_string = 'No class exists for Executable %s' % (curr_exe,)
raise NotImplementedError(err_string)
return exe_class |
def analyze(self):
    """Run the analysis: compute cache accesses and the peak performance.

    Stores the converted theoretical peak (clock * cores * FLOPs/cycle for
    the kernel's precision) in ``self.results['max_perf']``.
    """
    # 'double' kernels use the double-precision FLOP rate, otherwise single.
    if self.kernel.datatype == 'double':
        precision = 'DP'
    else:
        precision = 'SP'
    self.calculate_cache_access()
    flops_per_cycle = self.machine['FLOPs per cycle'][precision]['total']
    peak = self.machine['clock'] * self.cores * flops_per_cycle
    self.results['max_perf'] = self.conv_perf(peak)
constant[Run analysis.]
variable[precision] assign[=] <ast.IfExp object at 0x7da20c6ab970>
call[name[self].calculate_cache_access, parameter[]]
call[name[self].results][constant[max_perf]] assign[=] call[name[self].conv_perf, parameter[binary_operation[binary_operation[call[name[self].machine][constant[clock]] * name[self].cores] * call[call[call[name[self].machine][constant[FLOPs per cycle]]][name[precision]]][constant[total]]]]] | keyword[def] identifier[analyze] ( identifier[self] ):
literal[string]
identifier[precision] = literal[string] keyword[if] identifier[self] . identifier[kernel] . identifier[datatype] == literal[string] keyword[else] literal[string]
identifier[self] . identifier[calculate_cache_access] ()
identifier[self] . identifier[results] [ literal[string] ]= identifier[self] . identifier[conv_perf] ( identifier[self] . identifier[machine] [ literal[string] ]* identifier[self] . identifier[cores] * identifier[self] . identifier[machine] [ literal[string] ][ identifier[precision] ][ literal[string] ]) | def analyze(self):
"""Run analysis."""
precision = 'DP' if self.kernel.datatype == 'double' else 'SP'
self.calculate_cache_access()
self.results['max_perf'] = self.conv_perf(self.machine['clock'] * self.cores * self.machine['FLOPs per cycle'][precision]['total']) |
def run_projected_dos(self,
                      sigma=None,
                      freq_min=None,
                      freq_max=None,
                      freq_pitch=None,
                      use_tetrahedron_method=True,
                      direction=None,
                      xyz_projection=False):
    """Calculate projected DOS from phonons on the sampling mesh.

    Parameters
    ----------
    sigma : float, optional
        Smearing width for the smearing method. Default is None.
    freq_min, freq_max, freq_pitch : float, optional
        Minimum and maximum frequencies between which the DOS is computed
        with the specified interval (freq_pitch). Defaults are None, in
        which case they are determined automatically.
    use_tetrahedron_method : bool, optional
        Use the tetrahedron method when True. When sigma is set, the
        smearing method is used instead.
    direction : array_like, optional
        Specific projection direction, given as three values along the
        basis vectors of the primitive cell. Default is None (no
        directional projection).
    xyz_projection : bool, optional
        Whether to project along Cartesian directions. Default is False.
    """
    self._pdos = None
    # Projected DOS requires a completed, eigenvector-carrying, fully
    # symmetric-expanded mesh calculation; validate each requirement.
    if self._mesh is None:
        raise RuntimeError("run_mesh has to be done before PDOS calculation.")
    if not self._mesh.with_eigenvectors:
        raise RuntimeError("run_mesh has to be called with with_eigenvectors=True.")
    if np.prod(self._mesh.mesh_numbers) != len(self._mesh.ir_grid_points):
        raise RuntimeError("run_mesh has to be done with is_mesh_symmetry=False.")

    projection_direction = None
    if direction is not None:
        # Convert from fractional coordinates of the primitive-cell basis
        # to Cartesian coordinates.
        projection_direction = np.dot(direction, self._primitive.get_cell())

    self._pdos = PartialDos(self._mesh,
                            sigma=sigma,
                            use_tetrahedron_method=use_tetrahedron_method,
                            direction=projection_direction,
                            xyz_projection=xyz_projection)
    self._pdos.set_draw_area(freq_min, freq_max, freq_pitch)
    self._pdos.run()
constant[Calculate projected DOS from phonons on sampling mesh.
Parameters
----------
sigma : float, optional
Smearing width for smearing method. Default is None
freq_min, freq_max, freq_pitch : float, optional
Minimum and maximum frequencies in which range DOS is computed
with the specified interval (freq_pitch).
Defaults are None and they are automatically determined.
use_tetrahedron_method : float, optional
Use tetrahedron method when this is True. When sigma is set,
smearing method is used.
direction : array_like, optional
Specific projection direction. This is specified three values
along basis vectors or the primitive cell. Default is None,
i.e., no projection.
xyz_projection : bool, optional
This determines whether projected along Cartesian directions or
not. Default is False, i.e., no projection.
]
name[self]._pdos assign[=] constant[None]
if compare[name[self]._mesh is constant[None]] begin[:]
variable[msg] assign[=] constant[run_mesh has to be done before PDOS calculation.]
<ast.Raise object at 0x7da18fe92c50>
if <ast.UnaryOp object at 0x7da18fe91930> begin[:]
variable[msg] assign[=] constant[run_mesh has to be called with with_eigenvectors=True.]
<ast.Raise object at 0x7da18fe91c30>
if compare[call[name[np].prod, parameter[name[self]._mesh.mesh_numbers]] not_equal[!=] call[name[len], parameter[name[self]._mesh.ir_grid_points]]] begin[:]
variable[msg] assign[=] constant[run_mesh has to be done with is_mesh_symmetry=False.]
<ast.Raise object at 0x7da18fe91570>
if compare[name[direction] is_not constant[None]] begin[:]
variable[direction_cart] assign[=] call[name[np].dot, parameter[name[direction], call[name[self]._primitive.get_cell, parameter[]]]]
name[self]._pdos assign[=] call[name[PartialDos], parameter[name[self]._mesh]]
call[name[self]._pdos.set_draw_area, parameter[name[freq_min], name[freq_max], name[freq_pitch]]]
call[name[self]._pdos.run, parameter[]] | keyword[def] identifier[run_projected_dos] ( identifier[self] ,
identifier[sigma] = keyword[None] ,
identifier[freq_min] = keyword[None] ,
identifier[freq_max] = keyword[None] ,
identifier[freq_pitch] = keyword[None] ,
identifier[use_tetrahedron_method] = keyword[True] ,
identifier[direction] = keyword[None] ,
identifier[xyz_projection] = keyword[False] ):
literal[string]
identifier[self] . identifier[_pdos] = keyword[None]
keyword[if] identifier[self] . identifier[_mesh] keyword[is] keyword[None] :
identifier[msg] = literal[string]
keyword[raise] identifier[RuntimeError] ( identifier[msg] )
keyword[if] keyword[not] identifier[self] . identifier[_mesh] . identifier[with_eigenvectors] :
identifier[msg] = literal[string]
keyword[raise] identifier[RuntimeError] ( identifier[msg] )
keyword[if] identifier[np] . identifier[prod] ( identifier[self] . identifier[_mesh] . identifier[mesh_numbers] )!= identifier[len] ( identifier[self] . identifier[_mesh] . identifier[ir_grid_points] ):
identifier[msg] = literal[string]
keyword[raise] identifier[RuntimeError] ( identifier[msg] )
keyword[if] identifier[direction] keyword[is] keyword[not] keyword[None] :
identifier[direction_cart] = identifier[np] . identifier[dot] ( identifier[direction] , identifier[self] . identifier[_primitive] . identifier[get_cell] ())
keyword[else] :
identifier[direction_cart] = keyword[None]
identifier[self] . identifier[_pdos] = identifier[PartialDos] ( identifier[self] . identifier[_mesh] ,
identifier[sigma] = identifier[sigma] ,
identifier[use_tetrahedron_method] = identifier[use_tetrahedron_method] ,
identifier[direction] = identifier[direction_cart] ,
identifier[xyz_projection] = identifier[xyz_projection] )
identifier[self] . identifier[_pdos] . identifier[set_draw_area] ( identifier[freq_min] , identifier[freq_max] , identifier[freq_pitch] )
identifier[self] . identifier[_pdos] . identifier[run] () | def run_projected_dos(self, sigma=None, freq_min=None, freq_max=None, freq_pitch=None, use_tetrahedron_method=True, direction=None, xyz_projection=False):
"""Calculate projected DOS from phonons on sampling mesh.
Parameters
----------
sigma : float, optional
Smearing width for smearing method. Default is None
freq_min, freq_max, freq_pitch : float, optional
Minimum and maximum frequencies in which range DOS is computed
with the specified interval (freq_pitch).
Defaults are None and they are automatically determined.
use_tetrahedron_method : float, optional
Use tetrahedron method when this is True. When sigma is set,
smearing method is used.
direction : array_like, optional
Specific projection direction. This is specified three values
along basis vectors or the primitive cell. Default is None,
i.e., no projection.
xyz_projection : bool, optional
This determines whether projected along Cartesian directions or
not. Default is False, i.e., no projection.
"""
self._pdos = None
if self._mesh is None:
msg = 'run_mesh has to be done before PDOS calculation.'
raise RuntimeError(msg) # depends on [control=['if'], data=[]]
if not self._mesh.with_eigenvectors:
msg = 'run_mesh has to be called with with_eigenvectors=True.'
raise RuntimeError(msg) # depends on [control=['if'], data=[]]
if np.prod(self._mesh.mesh_numbers) != len(self._mesh.ir_grid_points):
msg = 'run_mesh has to be done with is_mesh_symmetry=False.'
raise RuntimeError(msg) # depends on [control=['if'], data=[]]
if direction is not None:
direction_cart = np.dot(direction, self._primitive.get_cell()) # depends on [control=['if'], data=['direction']]
else:
direction_cart = None
self._pdos = PartialDos(self._mesh, sigma=sigma, use_tetrahedron_method=use_tetrahedron_method, direction=direction_cart, xyz_projection=xyz_projection)
self._pdos.set_draw_area(freq_min, freq_max, freq_pitch)
self._pdos.run() |
def cancel_reservation(self):
    """
    Cancel this reservation and release every room it had assigned.

    Marks the reservation as cancelled, flags the per-room reservation-line
    records as unassigned before deleting them, and sets each reserved room
    back to available.

    @param self: The object pointer
    @return: True once all related records have been updated.
    """
    room_line_model = self.env['hotel.room.reservation.line']
    reservation_line_model = self.env['hotel_reservation.line']
    self.state = 'cancel'
    # Release the per-room schedule entries created for this reservation.
    room_lines = room_line_model.search([('reservation_id', 'in', self.ids)])
    room_lines.write({'state': 'unassigned'})
    room_lines.unlink()
    # Put every room attached to this reservation back on the market.
    for line in reservation_line_model.search([('line_id', 'in', self.ids)]):
        line.reserve.write({'isroom': True, 'status': 'available'})
    return True
constant[
This method cancel record set for hotel room reservation line
------------------------------------------------------------------
@param self: The object pointer
@return: cancel record set for hotel room reservation line.
]
variable[room_res_line_obj] assign[=] call[name[self].env][constant[hotel.room.reservation.line]]
variable[hotel_res_line_obj] assign[=] call[name[self].env][constant[hotel_reservation.line]]
name[self].state assign[=] constant[cancel]
variable[room_reservation_line] assign[=] call[name[room_res_line_obj].search, parameter[list[[<ast.Tuple object at 0x7da2047eb160>]]]]
call[name[room_reservation_line].write, parameter[dictionary[[<ast.Constant object at 0x7da20c7942e0>], [<ast.Constant object at 0x7da20c794a60>]]]]
call[name[room_reservation_line].unlink, parameter[]]
variable[reservation_lines] assign[=] call[name[hotel_res_line_obj].search, parameter[list[[<ast.Tuple object at 0x7da20c796530>]]]]
for taget[name[reservation_line]] in starred[name[reservation_lines]] begin[:]
call[name[reservation_line].reserve.write, parameter[dictionary[[<ast.Constant object at 0x7da20c794790>, <ast.Constant object at 0x7da20c795690>], [<ast.Constant object at 0x7da20c795e40>, <ast.Constant object at 0x7da20c7961a0>]]]]
return[constant[True]] | keyword[def] identifier[cancel_reservation] ( identifier[self] ):
literal[string]
identifier[room_res_line_obj] = identifier[self] . identifier[env] [ literal[string] ]
identifier[hotel_res_line_obj] = identifier[self] . identifier[env] [ literal[string] ]
identifier[self] . identifier[state] = literal[string]
identifier[room_reservation_line] = identifier[room_res_line_obj] . identifier[search] ([( literal[string] ,
literal[string] , identifier[self] . identifier[ids] )])
identifier[room_reservation_line] . identifier[write] ({ literal[string] : literal[string] })
identifier[room_reservation_line] . identifier[unlink] ()
identifier[reservation_lines] = identifier[hotel_res_line_obj] . identifier[search] ([( literal[string] ,
literal[string] , identifier[self] . identifier[ids] )])
keyword[for] identifier[reservation_line] keyword[in] identifier[reservation_lines] :
identifier[reservation_line] . identifier[reserve] . identifier[write] ({ literal[string] : keyword[True] ,
literal[string] : literal[string] })
keyword[return] keyword[True] | def cancel_reservation(self):
"""
This method cancel record set for hotel room reservation line
------------------------------------------------------------------
@param self: The object pointer
@return: cancel record set for hotel room reservation line.
"""
room_res_line_obj = self.env['hotel.room.reservation.line']
hotel_res_line_obj = self.env['hotel_reservation.line']
self.state = 'cancel'
room_reservation_line = room_res_line_obj.search([('reservation_id', 'in', self.ids)])
room_reservation_line.write({'state': 'unassigned'})
room_reservation_line.unlink()
reservation_lines = hotel_res_line_obj.search([('line_id', 'in', self.ids)])
for reservation_line in reservation_lines:
reservation_line.reserve.write({'isroom': True, 'status': 'available'}) # depends on [control=['for'], data=['reservation_line']]
return True |
def hash(self):
    """Return checksum to identify pages in the same series.

    Pages hash equally when they agree on shape, tiling, sample layout,
    and compression parameters, so the value can be used to group pages
    belonging to one series.
    """
    # Tuple order is arbitrary but must stay stable so existing groupings
    # remain comparable.
    key = self._shape + (
        self.tilewidth,
        self.tilelength,
        self.tiledepth,
        self.bitspersample,
        self.fillorder,
        self.predictor,
        self.extrasamples,
        self.photometric,
        self.compression,
        self.planarconfig,
    )
    return hash(key)
constant[Return checksum to identify pages in same series.]
return[call[name[hash], parameter[binary_operation[name[self]._shape + tuple[[<ast.Attribute object at 0x7da1b18ce740>, <ast.Attribute object at 0x7da1b18ce7a0>, <ast.Attribute object at 0x7da1b18ce800>, <ast.Attribute object at 0x7da1b18ce9b0>, <ast.Attribute object at 0x7da1b18cea10>, <ast.Attribute object at 0x7da1b18ce950>, <ast.Attribute object at 0x7da1b18ce8f0>, <ast.Attribute object at 0x7da1b185f940>, <ast.Attribute object at 0x7da1b185f9d0>, <ast.Attribute object at 0x7da1b185fb80>]]]]]] | keyword[def] identifier[hash] ( identifier[self] ):
literal[string]
keyword[return] identifier[hash] (
identifier[self] . identifier[_shape] +(
identifier[self] . identifier[tilewidth] , identifier[self] . identifier[tilelength] , identifier[self] . identifier[tiledepth] ,
identifier[self] . identifier[bitspersample] , identifier[self] . identifier[fillorder] , identifier[self] . identifier[predictor] ,
identifier[self] . identifier[extrasamples] , identifier[self] . identifier[photometric] , identifier[self] . identifier[compression] ,
identifier[self] . identifier[planarconfig] )) | def hash(self):
"""Return checksum to identify pages in same series."""
return hash(self._shape + (self.tilewidth, self.tilelength, self.tiledepth, self.bitspersample, self.fillorder, self.predictor, self.extrasamples, self.photometric, self.compression, self.planarconfig)) |
def get_user(self, user_id, password):
    """
    Retrieve a user record.

    :param user_id:
        the user ID
    :param password:
        password
    :return:
        A :class:`meteorpi_model.User` if everything is correct
    :raises:
        ValueError if the user is found but the password is incorrect, or
        if the user is not found.
    """
    self.con.execute('SELECT uid, pwHash FROM archive_users WHERE userId = %s;', (user_id,))
    rows = self.con.fetchall()
    if not rows:
        raise ValueError("No such user")
    user_row = rows[0]
    # Verify the supplied password against the stored bcrypt hash before
    # doing any further queries.
    if not passlib.hash.bcrypt.verify(password, user_row['pwHash']):
        raise ValueError("Incorrect password")
    # Collect the names of all roles granted to this user.
    self.con.execute('SELECT name FROM archive_roles r INNER JOIN archive_user_roles u ON u.roleId=r.uid '
                     'WHERE u.userId = %s;', (user_row['uid'],))
    roles = [row['name'] for row in self.con.fetchall()]
    return mp.User(user_id=user_id, roles=roles)
constant[
Retrieve a user record
:param user_id:
the user ID
:param password:
password
:return:
A :class:`meteorpi_model.User` if everything is correct
:raises:
ValueError if the user is found but password is incorrect or if the user is not found.
]
call[name[self].con.execute, parameter[constant[SELECT uid, pwHash FROM archive_users WHERE userId = %s;], tuple[[<ast.Name object at 0x7da1b0a3c250>]]]]
variable[results] assign[=] call[name[self].con.fetchall, parameter[]]
if compare[call[name[len], parameter[name[results]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0a3c070>
variable[pw_hash] assign[=] call[call[name[results]][constant[0]]][constant[pwHash]]
if <ast.UnaryOp object at 0x7da1b0a3d810> begin[:]
<ast.Raise object at 0x7da1b0a3c0d0>
call[name[self].con.execute, parameter[constant[SELECT name FROM archive_roles r INNER JOIN archive_user_roles u ON u.roleId=r.uid WHERE u.userId = %s;], tuple[[<ast.Subscript object at 0x7da1b0ab4880>]]]]
variable[role_list] assign[=] <ast.ListComp object at 0x7da1b0ab67d0>
return[call[name[mp].User, parameter[]]] | keyword[def] identifier[get_user] ( identifier[self] , identifier[user_id] , identifier[password] ):
literal[string]
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,( identifier[user_id] ,))
identifier[results] = identifier[self] . identifier[con] . identifier[fetchall] ()
keyword[if] identifier[len] ( identifier[results] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[pw_hash] = identifier[results] [ literal[int] ][ literal[string] ]
keyword[if] keyword[not] identifier[passlib] . identifier[hash] . identifier[bcrypt] . identifier[verify] ( identifier[password] , identifier[pw_hash] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[con] . identifier[execute] ( literal[string]
literal[string] ,( identifier[results] [ literal[int] ][ literal[string] ],))
identifier[role_list] =[ identifier[row] [ literal[string] ] keyword[for] identifier[row] keyword[in] identifier[self] . identifier[con] . identifier[fetchall] ()]
keyword[return] identifier[mp] . identifier[User] ( identifier[user_id] = identifier[user_id] , identifier[roles] = identifier[role_list] ) | def get_user(self, user_id, password):
"""
Retrieve a user record
:param user_id:
the user ID
:param password:
password
:return:
A :class:`meteorpi_model.User` if everything is correct
:raises:
ValueError if the user is found but password is incorrect or if the user is not found.
"""
self.con.execute('SELECT uid, pwHash FROM archive_users WHERE userId = %s;', (user_id,))
results = self.con.fetchall()
if len(results) == 0:
raise ValueError('No such user') # depends on [control=['if'], data=[]]
pw_hash = results[0]['pwHash']
# Check the password
if not passlib.hash.bcrypt.verify(password, pw_hash):
raise ValueError('Incorrect password') # depends on [control=['if'], data=[]]
# Fetch list of roles
self.con.execute('SELECT name FROM archive_roles r INNER JOIN archive_user_roles u ON u.roleId=r.uid WHERE u.userId = %s;', (results[0]['uid'],))
role_list = [row['name'] for row in self.con.fetchall()]
return mp.User(user_id=user_id, roles=role_list) |
def calcSubpopAvg(data, reference, cutoffs, weights=None):
    '''
    Calculates the average of (weighted) data between cutoff percentiles of a
    reference variable.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    reference : numpy.array
        A 1D array of float data of the same length as data.
    cutoffs : [(float,float)]
        A list of doubles with the lower and upper percentile bounds (should
        be in [0,1]).
    weights : numpy.array
        A weighting vector for the data.

    Returns
    -------
    slice_avg
        The (weighted) average of data that falls within the cutoff
        percentiles of reference.
    '''
    # Treat observations as equiprobable when no weights are supplied.
    if weights is None:
        weights = np.ones(data.size)

    # Order data and weights by the reference variable, then build the
    # normalized cumulative weight distribution over that ordering.
    sort_idx = np.argsort(reference)
    sorted_data = data[sort_idx]
    sorted_weights = weights[sort_idx]
    cum_dist = np.cumsum(sorted_weights) / np.sum(sorted_weights)

    # For each cutoff pair, take the weighted mean of the observations whose
    # cumulative weight lies between the lower and upper percentile bounds.
    slice_avg = []
    for bounds in cutoffs:
        bot = np.searchsorted(cum_dist, bounds[0])
        top = np.searchsorted(cum_dist, bounds[1])
        numer = np.sum(sorted_data[bot:top] * sorted_weights[bot:top])
        slice_avg.append(numer / np.sum(sorted_weights[bot:top]))
    return slice_avg
constant[
Calculates the average of (weighted) data between cutoff percentiles of a
reference variable.
Parameters
----------
data : numpy.array
A 1D array of float data.
reference : numpy.array
A 1D array of float data of the same length as data.
cutoffs : [(float,float)]
A list of doubles with the lower and upper percentile bounds (should be
in [0,1]).
weights : numpy.array
A weighting vector for the data.
Returns
-------
slice_avg
The (weighted) average of data that falls within the cutoff percentiles
of reference.
]
if compare[name[weights] is constant[None]] begin[:]
variable[weights] assign[=] call[name[np].ones, parameter[name[data].size]]
variable[order] assign[=] call[name[np].argsort, parameter[name[reference]]]
variable[data_sorted] assign[=] call[name[data]][name[order]]
variable[weights_sorted] assign[=] call[name[weights]][name[order]]
variable[cum_dist] assign[=] binary_operation[call[name[np].cumsum, parameter[name[weights_sorted]]] / call[name[np].sum, parameter[name[weights_sorted]]]]
variable[slice_avg] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[cutoffs]]]]]] begin[:]
variable[bot] assign[=] call[name[np].searchsorted, parameter[name[cum_dist], call[call[name[cutoffs]][name[j]]][constant[0]]]]
variable[top] assign[=] call[name[np].searchsorted, parameter[name[cum_dist], call[call[name[cutoffs]][name[j]]][constant[1]]]]
call[name[slice_avg].append, parameter[binary_operation[call[name[np].sum, parameter[binary_operation[call[name[data_sorted]][<ast.Slice object at 0x7da1b074fe50>] * call[name[weights_sorted]][<ast.Slice object at 0x7da1b074f250>]]]] / call[name[np].sum, parameter[call[name[weights_sorted]][<ast.Slice object at 0x7da1b074f730>]]]]]]
return[name[slice_avg]] | keyword[def] identifier[calcSubpopAvg] ( identifier[data] , identifier[reference] , identifier[cutoffs] , identifier[weights] = keyword[None] ):
literal[string]
keyword[if] identifier[weights] keyword[is] keyword[None] :
identifier[weights] = identifier[np] . identifier[ones] ( identifier[data] . identifier[size] )
identifier[order] = identifier[np] . identifier[argsort] ( identifier[reference] )
identifier[data_sorted] = identifier[data] [ identifier[order] ]
identifier[weights_sorted] = identifier[weights] [ identifier[order] ]
identifier[cum_dist] = identifier[np] . identifier[cumsum] ( identifier[weights_sorted] )/ identifier[np] . identifier[sum] ( identifier[weights_sorted] )
identifier[slice_avg] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[cutoffs] )):
identifier[bot] = identifier[np] . identifier[searchsorted] ( identifier[cum_dist] , identifier[cutoffs] [ identifier[j] ][ literal[int] ])
identifier[top] = identifier[np] . identifier[searchsorted] ( identifier[cum_dist] , identifier[cutoffs] [ identifier[j] ][ literal[int] ])
identifier[slice_avg] . identifier[append] ( identifier[np] . identifier[sum] ( identifier[data_sorted] [ identifier[bot] : identifier[top] ]* identifier[weights_sorted] [ identifier[bot] : identifier[top] ])/
identifier[np] . identifier[sum] ( identifier[weights_sorted] [ identifier[bot] : identifier[top] ]))
keyword[return] identifier[slice_avg] | def calcSubpopAvg(data, reference, cutoffs, weights=None):
"""
Calculates the average of (weighted) data between cutoff percentiles of a
reference variable.
Parameters
----------
data : numpy.array
A 1D array of float data.
reference : numpy.array
A 1D array of float data of the same length as data.
cutoffs : [(float,float)]
A list of doubles with the lower and upper percentile bounds (should be
in [0,1]).
weights : numpy.array
A weighting vector for the data.
Returns
-------
slice_avg
The (weighted) average of data that falls within the cutoff percentiles
of reference.
"""
if weights is None: # Set equiprobable weights if none were given
weights = np.ones(data.size) # depends on [control=['if'], data=['weights']]
# Sort the data and generate a cumulative distribution
order = np.argsort(reference)
data_sorted = data[order]
weights_sorted = weights[order]
cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)
# For each set of cutoffs, calculate the average of data that falls within
# the cutoff percentiles of reference
slice_avg = []
for j in range(len(cutoffs)):
bot = np.searchsorted(cum_dist, cutoffs[j][0])
top = np.searchsorted(cum_dist, cutoffs[j][1])
slice_avg.append(np.sum(data_sorted[bot:top] * weights_sorted[bot:top]) / np.sum(weights_sorted[bot:top])) # depends on [control=['for'], data=['j']]
return slice_avg |
def deploy_config(model, initial_instance_count, instance_type, endpoint_name=None, tags=None):
    """Build an Airflow deploy config from a SageMaker model.
    Args:
        model (sagemaker.model.Model): The SageMaker model to export the Airflow config from.
        initial_instance_count (int): The initial number of instances to run in the
            ``Endpoint`` created from this ``Model``.
        instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
        endpoint_name (str): The name of the endpoint to create (default: None).
            If not specified, a unique endpoint name will be created.
        tags (list[dict]): List of tags for labeling a training job. For more, see
            https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
    Returns:
        dict: Deploy config that can be directly used by SageMakerEndpointOperator in Airflow.
    """
    model_base_config = model_config(instance_type, model)
    variant = sagemaker.production_variant(model.name, instance_type, initial_instance_count)
    # The endpoint-config shares the model's name; tags are optional.
    endpoint_config = {
        'EndpointConfigName': model.name,
        'ProductionVariants': [variant],
    }
    if tags is not None:
        endpoint_config['Tags'] = tags
    endpoint = {
        'EndpointName': endpoint_name if endpoint_name else model.name,
        'EndpointConfigName': model.name,
    }
    config = {
        'Model': model_base_config,
        'EndpointConfig': endpoint_config,
        'Endpoint': endpoint,
    }
    # Any S3 operations attached to the model belong at the root of the config.
    s3_operations = model_base_config.pop('S3Operations', None)
    if s3_operations is not None:
        config['S3Operations'] = s3_operations
    return config
constant[Export Airflow deploy config from a SageMaker model
Args:
model (sagemaker.model.Model): The SageMaker model to export the Airflow config from.
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
initial_instance_count (int): The initial number of instances to run in the
``Endpoint`` created from this ``Model``.
endpoint_name (str): The name of the endpoint to create (default: None).
If not specified, a unique endpoint name will be created.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
Returns:
dict: Deploy config that can be directly used by SageMakerEndpointOperator in Airflow.
]
variable[model_base_config] assign[=] call[name[model_config], parameter[name[instance_type], name[model]]]
variable[production_variant] assign[=] call[name[sagemaker].production_variant, parameter[name[model].name, name[instance_type], name[initial_instance_count]]]
variable[name] assign[=] name[model].name
variable[config_options] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c78220>, <ast.Constant object at 0x7da1b1c7af80>], [<ast.Name object at 0x7da1b1c78670>, <ast.List object at 0x7da1b1c78610>]]
if compare[name[tags] is_not constant[None]] begin[:]
call[name[config_options]][constant[Tags]] assign[=] name[tags]
variable[endpoint_name] assign[=] <ast.BoolOp object at 0x7da1b1c6bf40>
variable[endpoint_base_config] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e1390>, <ast.Constant object at 0x7da1b21e1870>], [<ast.Name object at 0x7da1b21e3c40>, <ast.Name object at 0x7da1b21e2c50>]]
variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e0460>, <ast.Constant object at 0x7da1b21e3010>, <ast.Constant object at 0x7da1b21e1bd0>], [<ast.Name object at 0x7da1b21e3af0>, <ast.Name object at 0x7da1b21e3a30>, <ast.Name object at 0x7da1b21e0d30>]]
variable[s3_operations] assign[=] call[name[model_base_config].pop, parameter[constant[S3Operations], constant[None]]]
if compare[name[s3_operations] is_not constant[None]] begin[:]
call[name[config]][constant[S3Operations]] assign[=] name[s3_operations]
return[name[config]] | keyword[def] identifier[deploy_config] ( identifier[model] , identifier[initial_instance_count] , identifier[instance_type] , identifier[endpoint_name] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[model_base_config] = identifier[model_config] ( identifier[instance_type] , identifier[model] )
identifier[production_variant] = identifier[sagemaker] . identifier[production_variant] ( identifier[model] . identifier[name] , identifier[instance_type] , identifier[initial_instance_count] )
identifier[name] = identifier[model] . identifier[name]
identifier[config_options] ={ literal[string] : identifier[name] , literal[string] :[ identifier[production_variant] ]}
keyword[if] identifier[tags] keyword[is] keyword[not] keyword[None] :
identifier[config_options] [ literal[string] ]= identifier[tags]
identifier[endpoint_name] = identifier[endpoint_name] keyword[or] identifier[name]
identifier[endpoint_base_config] ={
literal[string] : identifier[endpoint_name] ,
literal[string] : identifier[name]
}
identifier[config] ={
literal[string] : identifier[model_base_config] ,
literal[string] : identifier[config_options] ,
literal[string] : identifier[endpoint_base_config]
}
identifier[s3_operations] = identifier[model_base_config] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[s3_operations] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ]= identifier[s3_operations]
keyword[return] identifier[config] | def deploy_config(model, initial_instance_count, instance_type, endpoint_name=None, tags=None):
"""Export Airflow deploy config from a SageMaker model
Args:
model (sagemaker.model.Model): The SageMaker model to export the Airflow config from.
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
initial_instance_count (int): The initial number of instances to run in the
``Endpoint`` created from this ``Model``.
endpoint_name (str): The name of the endpoint to create (default: None).
If not specified, a unique endpoint name will be created.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
Returns:
dict: Deploy config that can be directly used by SageMakerEndpointOperator in Airflow.
"""
model_base_config = model_config(instance_type, model)
production_variant = sagemaker.production_variant(model.name, instance_type, initial_instance_count)
name = model.name
config_options = {'EndpointConfigName': name, 'ProductionVariants': [production_variant]}
if tags is not None:
config_options['Tags'] = tags # depends on [control=['if'], data=['tags']]
endpoint_name = endpoint_name or name
endpoint_base_config = {'EndpointName': endpoint_name, 'EndpointConfigName': name}
config = {'Model': model_base_config, 'EndpointConfig': config_options, 'Endpoint': endpoint_base_config}
# if there is s3 operations needed for model, move it to root level of config
s3_operations = model_base_config.pop('S3Operations', None)
if s3_operations is not None:
config['S3Operations'] = s3_operations # depends on [control=['if'], data=['s3_operations']]
return config |
def copyNode(self, extended):
    """Do a copy of the node. """
    # Copy the underlying libxml2 node; `extended` selects the copy depth.
    handle = libxml2mod.xmlCopyNode(self._o, extended)
    if handle is None:
        raise treeError('xmlCopyNode() failed')
    return xmlNode(_obj=handle)
constant[Do a copy of the node. ]
variable[ret] assign[=] call[name[libxml2mod].xmlCopyNode, parameter[name[self]._o, name[extended]]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1fa58d0>
variable[__tmp] assign[=] call[name[xmlNode], parameter[]]
return[name[__tmp]] | keyword[def] identifier[copyNode] ( identifier[self] , identifier[extended] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlCopyNode] ( identifier[self] . identifier[_o] , identifier[extended] )
keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] )
identifier[__tmp] = identifier[xmlNode] ( identifier[_obj] = identifier[ret] )
keyword[return] identifier[__tmp] | def copyNode(self, extended):
"""Do a copy of the node. """
ret = libxml2mod.xmlCopyNode(self._o, extended)
if ret is None:
raise treeError('xmlCopyNode() failed') # depends on [control=['if'], data=[]]
__tmp = xmlNode(_obj=ret)
return __tmp |
def AddStopObject(self, stop, problem_reporter=None):
    """Add Stop object to this schedule if stop_id is non-blank."""
    # The stop must not already belong to a schedule.
    assert stop._schedule is None
    reporter = problem_reporter if problem_reporter else self.problem_reporter
    # Blank stop ids are silently ignored.
    if not stop.stop_id:
        return
    # Duplicate ids are reported but not added.
    if stop.stop_id in self.stops:
        reporter.DuplicateID('stop_id', stop.stop_id)
        return
    # Use a weak proxy so the stop does not keep the schedule alive.
    stop._schedule = weakref.proxy(self)
    self.AddTableColumns('stops', stop._ColumnNames())
    self.stops[stop.stop_id] = stop
    if getattr(stop, 'zone_id', None):
        self.fare_zones[stop.zone_id] = True
constant[Add Stop object to this schedule if stop_id is non-blank.]
assert[compare[name[stop]._schedule is constant[None]]]
if <ast.UnaryOp object at 0x7da1b160c4f0> begin[:]
variable[problem_reporter] assign[=] name[self].problem_reporter
if <ast.UnaryOp object at 0x7da1b160c3a0> begin[:]
return[None]
if compare[name[stop].stop_id in name[self].stops] begin[:]
call[name[problem_reporter].DuplicateID, parameter[constant[stop_id], name[stop].stop_id]]
return[None]
name[stop]._schedule assign[=] call[name[weakref].proxy, parameter[name[self]]]
call[name[self].AddTableColumns, parameter[constant[stops], call[name[stop]._ColumnNames, parameter[]]]]
call[name[self].stops][name[stop].stop_id] assign[=] name[stop]
if <ast.BoolOp object at 0x7da1b160faf0> begin[:]
call[name[self].fare_zones][name[stop].zone_id] assign[=] constant[True] | keyword[def] identifier[AddStopObject] ( identifier[self] , identifier[stop] , identifier[problem_reporter] = keyword[None] ):
literal[string]
keyword[assert] identifier[stop] . identifier[_schedule] keyword[is] keyword[None]
keyword[if] keyword[not] identifier[problem_reporter] :
identifier[problem_reporter] = identifier[self] . identifier[problem_reporter]
keyword[if] keyword[not] identifier[stop] . identifier[stop_id] :
keyword[return]
keyword[if] identifier[stop] . identifier[stop_id] keyword[in] identifier[self] . identifier[stops] :
identifier[problem_reporter] . identifier[DuplicateID] ( literal[string] , identifier[stop] . identifier[stop_id] )
keyword[return]
identifier[stop] . identifier[_schedule] = identifier[weakref] . identifier[proxy] ( identifier[self] )
identifier[self] . identifier[AddTableColumns] ( literal[string] , identifier[stop] . identifier[_ColumnNames] ())
identifier[self] . identifier[stops] [ identifier[stop] . identifier[stop_id] ]= identifier[stop]
keyword[if] identifier[hasattr] ( identifier[stop] , literal[string] ) keyword[and] identifier[stop] . identifier[zone_id] :
identifier[self] . identifier[fare_zones] [ identifier[stop] . identifier[zone_id] ]= keyword[True] | def AddStopObject(self, stop, problem_reporter=None):
"""Add Stop object to this schedule if stop_id is non-blank."""
assert stop._schedule is None
if not problem_reporter:
problem_reporter = self.problem_reporter # depends on [control=['if'], data=[]]
if not stop.stop_id:
return # depends on [control=['if'], data=[]]
if stop.stop_id in self.stops:
problem_reporter.DuplicateID('stop_id', stop.stop_id)
return # depends on [control=['if'], data=[]]
stop._schedule = weakref.proxy(self)
self.AddTableColumns('stops', stop._ColumnNames())
self.stops[stop.stop_id] = stop
if hasattr(stop, 'zone_id') and stop.zone_id:
self.fare_zones[stop.zone_id] = True # depends on [control=['if'], data=[]] |
def scan(self, table, scan_filter=None,
         attributes_to_get=None, request_limit=None, max_results=None,
         count=False, exclusive_start_key=None, item_class=Item):
    """
    Perform a scan of DynamoDB.
    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being scanned.
    :type scan_filter: A list of tuples
    :param scan_filter: A list of tuples where each tuple consists
        of an attribute name, a comparison operator, and either
        a scalar or tuple consisting of the values to compare
        the attribute to. Valid comparison operators are shown below
        along with the expected number of values that should be supplied.
        * EQ - equal (1)
        * NE - not equal (1)
        * LE - less than or equal (1)
        * LT - less than (1)
        * GE - greater than or equal (1)
        * GT - greater than (1)
        * NOT_NULL - attribute exists (0, use None)
        * NULL - attribute does not exist (0, use None)
        * CONTAINS - substring or value in list (1)
        * NOT_CONTAINS - absence of substring or value in list (1)
        * BEGINS_WITH - substring prefix (1)
        * IN - exact match in list (N)
        * BETWEEN - >= first value, <= second value (2)
    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.
    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table. The default behavior is to retrieve as many
        results as possible per request.
    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yeild 100 results max.
    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.
    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.
    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`
    :rtype: generator
    """
    sf = self.dynamize_scan_filter(scan_filter)
    # `True` is a sentinel meaning "no page fetched yet".
    response = True
    n = 0
    while response:
        if response is True:
            pass
        elif 'LastEvaluatedKey' in response:
            # More pages remain: continue the scan from where the
            # previous page stopped.  (`dict.has_key` was removed in
            # Python 3; `in` works on both 2 and 3.)
            exclusive_start_key = response['LastEvaluatedKey']
        else:
            break
        response = self.layer1.scan(table.name, sf,
                                    attributes_to_get, request_limit,
                                    count, exclusive_start_key,
                                    object_hook=item_object_hook)
        if response:
            for item in response['Items']:
                if max_results and n == max_results:
                    # Stop the generator outright.  A plain `break`
                    # here only exited the current page, so the outer
                    # loop kept issuing scan requests whose items were
                    # all discarded.
                    return
                yield item_class(table, attrs=item)
                n += 1
constant[
Perform a scan of DynamoDB.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being scanned.
:type scan_filter: A list of tuples
:param scan_filter: A list of tuples where each tuple consists
of an attribute name, a comparison operator, and either
a scalar or tuple consisting of the values to compare
the attribute to. Valid comparison operators are shown below
along with the expected number of values that should be supplied.
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yeild 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
]
variable[sf] assign[=] call[name[self].dynamize_scan_filter, parameter[name[scan_filter]]]
variable[response] assign[=] constant[True]
variable[n] assign[=] constant[0]
while name[response] begin[:]
if compare[name[response] is constant[True]] begin[:]
pass
variable[response] assign[=] call[name[self].layer1.scan, parameter[name[table].name, name[sf], name[attributes_to_get], name[request_limit], name[count], name[exclusive_start_key]]]
if name[response] begin[:]
for taget[name[item]] in starred[call[name[response]][constant[Items]]] begin[:]
if <ast.BoolOp object at 0x7da1b269d6c0> begin[:]
break
<ast.Yield object at 0x7da1b269f7f0>
<ast.AugAssign object at 0x7da1b269f190> | keyword[def] identifier[scan] ( identifier[self] , identifier[table] , identifier[scan_filter] = keyword[None] ,
identifier[attributes_to_get] = keyword[None] , identifier[request_limit] = keyword[None] , identifier[max_results] = keyword[None] ,
identifier[count] = keyword[False] , identifier[exclusive_start_key] = keyword[None] , identifier[item_class] = identifier[Item] ):
literal[string]
identifier[sf] = identifier[self] . identifier[dynamize_scan_filter] ( identifier[scan_filter] )
identifier[response] = keyword[True]
identifier[n] = literal[int]
keyword[while] identifier[response] :
keyword[if] identifier[response] keyword[is] keyword[True] :
keyword[pass]
keyword[elif] identifier[response] . identifier[has_key] ( literal[string] ):
identifier[exclusive_start_key] = identifier[response] [ literal[string] ]
keyword[else] :
keyword[break]
identifier[response] = identifier[self] . identifier[layer1] . identifier[scan] ( identifier[table] . identifier[name] , identifier[sf] ,
identifier[attributes_to_get] , identifier[request_limit] ,
identifier[count] , identifier[exclusive_start_key] ,
identifier[object_hook] = identifier[item_object_hook] )
keyword[if] identifier[response] :
keyword[for] identifier[item] keyword[in] identifier[response] [ literal[string] ]:
keyword[if] identifier[max_results] keyword[and] identifier[n] == identifier[max_results] :
keyword[break]
keyword[yield] identifier[item_class] ( identifier[table] , identifier[attrs] = identifier[item] )
identifier[n] += literal[int] | def scan(self, table, scan_filter=None, attributes_to_get=None, request_limit=None, max_results=None, count=False, exclusive_start_key=None, item_class=Item):
"""
Perform a scan of DynamoDB.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being scanned.
:type scan_filter: A list of tuples
:param scan_filter: A list of tuples where each tuple consists
of an attribute name, a comparison operator, and either
a scalar or tuple consisting of the values to compare
the attribute to. Valid comparison operators are shown below
along with the expected number of values that should be supplied.
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yeild 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
"""
sf = self.dynamize_scan_filter(scan_filter)
response = True
n = 0
while response:
if response is True:
pass # depends on [control=['if'], data=[]]
elif response.has_key('LastEvaluatedKey'):
exclusive_start_key = response['LastEvaluatedKey'] # depends on [control=['if'], data=[]]
else:
break
response = self.layer1.scan(table.name, sf, attributes_to_get, request_limit, count, exclusive_start_key, object_hook=item_object_hook)
if response:
for item in response['Items']:
if max_results and n == max_results:
break # depends on [control=['if'], data=[]]
yield item_class(table, attrs=item)
n += 1 # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def pkgconfig(*packages, **kw):
    """
    Query pkg-config for library compile and linking options. Return configuration in distutils
    Extension format.
    Usage:
    pkgconfig('opencv')
    pkgconfig('opencv', 'libavformat')
    pkgconfig('opencv', optional='--static')
    pkgconfig('opencv', config=c)
    returns e.g.
    {'extra_compile_args': [],
    'extra_link_args': [],
    'include_dirs': ['/usr/include/ffmpeg'],
    'libraries': ['avformat'],
    'library_dirs': []}
    Intended use:
    distutils.core.Extension('pyextension', sources=['source.cpp'], **c)
    Set PKG_CONFIG_PATH environment variable for nonstandard library locations.
    based on work of Micah Dowty (http://code.activestate.com/recipes/502261-python-distutils-pkg-config/)
    """
    config = kw.setdefault('config', {})
    optional_args = kw.setdefault('optional', '')
    # { <distutils Extension arg>: [<pkg config option>, <prefix length to strip>], ...}
    flag_map = {'include_dirs': ['--cflags-only-I', 2],
                'library_dirs': ['--libs-only-L', 2],
                'libraries': ['--libs-only-l', 2],
                'extra_compile_args': ['--cflags-only-other', 0],
                'extra_link_args': ['--libs-only-other', 0],
                }
    # Split the optional option string instead of passing it as a single argv
    # entry: the default '' would otherwise inject an empty-string argument
    # (which pkg-config rejects as a bogus package name), and multiple options
    # such as '--static --define-prefix' would be passed as one invalid token.
    extra_args = optional_args.split()
    for package in packages:
        for distutils_key, (pkg_option, n) in flag_map.items():
            cmd = ['pkg-config'] + extra_args + [pkg_option, package]
            items = subprocess.check_output(cmd).decode('utf8').split()
            # Strip the option prefix (-I, -L, -l) where applicable.
            config.setdefault(distutils_key, []).extend([i[n:] for i in items])
    return config
constant[
Query pkg-config for library compile and linking options. Return configuration in distutils
Extension format.
Usage:
pkgconfig('opencv')
pkgconfig('opencv', 'libavformat')
pkgconfig('opencv', optional='--static')
pkgconfig('opencv', config=c)
returns e.g.
{'extra_compile_args': [],
'extra_link_args': [],
'include_dirs': ['/usr/include/ffmpeg'],
'libraries': ['avformat'],
'library_dirs': []}
Intended use:
distutils.core.Extension('pyextension', sources=['source.cpp'], **c)
Set PKG_CONFIG_PATH environment variable for nonstandard library locations.
based on work of Micah Dowty (http://code.activestate.com/recipes/502261-python-distutils-pkg-config/)
]
variable[config] assign[=] call[name[kw].setdefault, parameter[constant[config], dictionary[[], []]]]
variable[optional_args] assign[=] call[name[kw].setdefault, parameter[constant[optional], constant[]]]
variable[flag_map] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5ae0>, <ast.Constant object at 0x7da2054a6470>, <ast.Constant object at 0x7da2054a6920>, <ast.Constant object at 0x7da2054a65c0>, <ast.Constant object at 0x7da2054a4070>], [<ast.List object at 0x7da2054a7370>, <ast.List object at 0x7da2054a7d00>, <ast.List object at 0x7da2054a68c0>, <ast.List object at 0x7da2054a4490>, <ast.List object at 0x7da2054a63b0>]]
for taget[name[package]] in starred[name[packages]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2054a6050>, <ast.Tuple object at 0x7da2054a6e00>]]] in starred[call[name[flag_map].items, parameter[]]] begin[:]
variable[items] assign[=] call[call[call[name[subprocess].check_output, parameter[list[[<ast.Constant object at 0x7da2054a5ab0>, <ast.Name object at 0x7da2054a4ca0>, <ast.Name object at 0x7da2054a6d10>, <ast.Name object at 0x7da2054a5f00>]]]].decode, parameter[constant[utf8]]].split, parameter[]]
call[call[name[config].setdefault, parameter[name[distutils_key], list[[]]]].extend, parameter[<ast.ListComp object at 0x7da2054a41c0>]]
return[name[config]] | keyword[def] identifier[pkgconfig] (* identifier[packages] ,** identifier[kw] ):
literal[string]
identifier[config] = identifier[kw] . identifier[setdefault] ( literal[string] ,{})
identifier[optional_args] = identifier[kw] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[flag_map] ={ literal[string] :[ literal[string] , literal[int] ],
literal[string] :[ literal[string] , literal[int] ],
literal[string] :[ literal[string] , literal[int] ],
literal[string] :[ literal[string] , literal[int] ],
literal[string] :[ literal[string] , literal[int] ],
}
keyword[for] identifier[package] keyword[in] identifier[packages] :
keyword[for] identifier[distutils_key] ,( identifier[pkg_option] , identifier[n] ) keyword[in] identifier[flag_map] . identifier[items] ():
identifier[items] = identifier[subprocess] . identifier[check_output] ([ literal[string] , identifier[optional_args] , identifier[pkg_option] , identifier[package] ]). identifier[decode] ( literal[string] ). identifier[split] ()
identifier[config] . identifier[setdefault] ( identifier[distutils_key] ,[]). identifier[extend] ([ identifier[i] [ identifier[n] :] keyword[for] identifier[i] keyword[in] identifier[items] ])
keyword[return] identifier[config] | def pkgconfig(*packages, **kw):
"""
Query pkg-config for library compile and linking options. Return configuration in distutils
Extension format.
Usage:
pkgconfig('opencv')
pkgconfig('opencv', 'libavformat')
pkgconfig('opencv', optional='--static')
pkgconfig('opencv', config=c)
returns e.g.
{'extra_compile_args': [],
'extra_link_args': [],
'include_dirs': ['/usr/include/ffmpeg'],
'libraries': ['avformat'],
'library_dirs': []}
Intended use:
distutils.core.Extension('pyextension', sources=['source.cpp'], **c)
Set PKG_CONFIG_PATH environment variable for nonstandard library locations.
based on work of Micah Dowty (http://code.activestate.com/recipes/502261-python-distutils-pkg-config/)
"""
config = kw.setdefault('config', {})
optional_args = kw.setdefault('optional', '')
# { <distutils Extension arg>: [<pkg config option>, <prefix length to strip>], ...}
flag_map = {'include_dirs': ['--cflags-only-I', 2], 'library_dirs': ['--libs-only-L', 2], 'libraries': ['--libs-only-l', 2], 'extra_compile_args': ['--cflags-only-other', 0], 'extra_link_args': ['--libs-only-other', 0]}
for package in packages:
for (distutils_key, (pkg_option, n)) in flag_map.items():
items = subprocess.check_output(['pkg-config', optional_args, pkg_option, package]).decode('utf8').split()
config.setdefault(distutils_key, []).extend([i[n:] for i in items]) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['package']]
return config |
def SETNBE(cpu, dest):
    """
    Sets byte if not below or equal.
    :param cpu: current CPU.
    :param dest: destination operand.
    """
    # "Not below or equal" (above) means CF == 0 and ZF == 0.
    # `== False` (rather than `not`) keeps the expression symbolic-friendly.
    above = Operators.AND(cpu.CF == False, cpu.ZF == False)
    dest.write(Operators.ITEBV(dest.size, above, 1, 0))
constant[
Sets byte if not below or equal.
:param cpu: current CPU.
:param dest: destination operand.
]
call[name[dest].write, parameter[call[name[Operators].ITEBV, parameter[name[dest].size, call[name[Operators].AND, parameter[compare[name[cpu].CF equal[==] constant[False]], compare[name[cpu].ZF equal[==] constant[False]]]], constant[1], constant[0]]]]] | keyword[def] identifier[SETNBE] ( identifier[cpu] , identifier[dest] ):
literal[string]
identifier[dest] . identifier[write] ( identifier[Operators] . identifier[ITEBV] ( identifier[dest] . identifier[size] , identifier[Operators] . identifier[AND] ( identifier[cpu] . identifier[CF] == keyword[False] , identifier[cpu] . identifier[ZF] == keyword[False] ), literal[int] , literal[int] )) | def SETNBE(cpu, dest):
"""
Sets byte if not below or equal.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, Operators.AND(cpu.CF == False, cpu.ZF == False), 1, 0)) |
def watch(self, keys, on_watch, filters=None, start_revision=None, return_previous=None):
    """
    Watch one or more keys or key sets and invoke a callback.
    Watch watches for events happening or that have happened. The entire event history
    can be watched starting from the last compaction revision.
    :param keys: Watch these keys / key sets.
    :type keys: list of bytes or list of instance of :class:`txaioetcd.KeySet`
    :param on_watch: The callback to invoke upon receiving
        a watch event.
    :type on_watch: callable
    :param filters: Any filters to apply.
    :param start_revision: start_revision is an optional
        revision to watch from (inclusive). No start_revision is "now".
    :type start_revision: int
    :param return_previous: Flag to request returning previous values.
    :returns: A deferred that just fires when watching has started successfully,
        or which fires with an error in case the watching could not be started.
    :rtype: twisted.internet.Deferred
    """
    watching_d = self._start_watching(keys, on_watch, filters, start_revision, return_previous)

    # ODD: the errback must take *args; a named parameter errors out as soon
    # as it is accessed.
    def _log_unexpected_failure(*args):
        failure = args[0]
        # Cancellation / connection teardown are expected ways for a watch
        # to end; only warn about anything else.
        if failure.type not in [CancelledError, ResponseFailed]:
            self.log.warn('etcd watch terminated with "{error}"', error=failure.type)
        return failure

    watching_d.addErrback(_log_unexpected_failure)
    return watching_d
constant[
Watch one or more keys or key sets and invoke a callback.
Watch watches for events happening or that have happened. The entire event history
can be watched starting from the last compaction revision.
:param keys: Watch these keys / key sets.
:type keys: list of bytes or list of instance of :class:`txaioetcd.KeySet`
:param on_watch: The callback to invoke upon receiving
a watch event.
:type on_watch: callable
:param filters: Any filters to apply.
:param start_revision: start_revision is an optional
revision to watch from (inclusive). No start_revision is "now".
:type start_revision: int
:param return_previous: Flag to request returning previous values.
:returns: A deferred that just fires when watching has started successfully,
or which fires with an error in case the watching could not be started.
:rtype: twisted.internet.Deferred
]
variable[d] assign[=] call[name[self]._start_watching, parameter[name[keys], name[on_watch], name[filters], name[start_revision], name[return_previous]]]
def function[on_err, parameter[]]:
if compare[call[name[args]][constant[0]].type <ast.NotIn object at 0x7da2590d7190> list[[<ast.Name object at 0x7da20c6aa7a0>, <ast.Name object at 0x7da20c6ab9a0>]]] begin[:]
call[name[self].log.warn, parameter[constant[etcd watch terminated with "{error}"]]]
return[call[name[args]][constant[0]]]
call[name[d].addErrback, parameter[name[on_err]]]
return[name[d]] | keyword[def] identifier[watch] ( identifier[self] , identifier[keys] , identifier[on_watch] , identifier[filters] = keyword[None] , identifier[start_revision] = keyword[None] , identifier[return_previous] = keyword[None] ):
literal[string]
identifier[d] = identifier[self] . identifier[_start_watching] ( identifier[keys] , identifier[on_watch] , identifier[filters] , identifier[start_revision] , identifier[return_previous] )
keyword[def] identifier[on_err] (* identifier[args] ):
keyword[if] identifier[args] [ literal[int] ]. identifier[type] keyword[not] keyword[in] [ identifier[CancelledError] , identifier[ResponseFailed] ]:
identifier[self] . identifier[log] . identifier[warn] ( literal[string] , identifier[error] = identifier[args] [ literal[int] ]. identifier[type] )
keyword[return] identifier[args] [ literal[int] ]
identifier[d] . identifier[addErrback] ( identifier[on_err] )
keyword[return] identifier[d] | def watch(self, keys, on_watch, filters=None, start_revision=None, return_previous=None):
"""
Watch one or more keys or key sets and invoke a callback.
Watch watches for events happening or that have happened. The entire event history
can be watched starting from the last compaction revision.
:param keys: Watch these keys / key sets.
:type keys: list of bytes or list of instance of :class:`txaioetcd.KeySet`
:param on_watch: The callback to invoke upon receiving
a watch event.
:type on_watch: callable
:param filters: Any filters to apply.
:param start_revision: start_revision is an optional
revision to watch from (inclusive). No start_revision is "now".
:type start_revision: int
:param return_previous: Flag to request returning previous values.
:returns: A deferred that just fires when watching has started successfully,
or which fires with an error in case the watching could not be started.
:rtype: twisted.internet.Deferred
"""
d = self._start_watching(keys, on_watch, filters, start_revision, return_previous)
#
# ODD: Trying to use a parameter instead of *args errors out as soon as the
# parameter is accessed.
#
def on_err(*args):
if args[0].type not in [CancelledError, ResponseFailed]:
self.log.warn('etcd watch terminated with "{error}"', error=args[0].type)
return args[0] # depends on [control=['if'], data=[]]
d.addErrback(on_err)
return d |
def read_config(
        config_filepath,
        logger=logging.getLogger('ProsperCommon'),
):
    """Fetch and parse an INI-style config file.

    Args:
        config_filepath (str): path to config file. abspath > relpath
        logger (:obj:`logging.Logger`): logger to catch error msgs

    Returns:
        configparser.ConfigParser: parser populated from the file

    Raises:
        OSError: if the file cannot be opened
        configparser.Error: if the file cannot be parsed

    """
    config_parser = configparser.ConfigParser(
        interpolation=ExtendedInterpolation(),
        allow_no_value=True,            # bare keys (no '=') are kept with value None
        delimiters=('=',),              # was ('='): a plain string, not a tuple
        inline_comment_prefixes=('#',)  # was ('#'): same redundant-parens bug
    )
    logger.debug('config_filepath=%s', config_filepath)
    with open(config_filepath, 'r') as filehandle:
        config_parser.read_file(filehandle)
    return config_parser
constant[fetch and parse config file
Args:
config_filepath (str): path to config file. abspath > relpath
logger (:obj:`logging.Logger`): logger to catch error msgs
]
variable[config_parser] assign[=] call[name[configparser].ConfigParser, parameter[]]
call[name[logger].debug, parameter[constant[config_filepath=%s], name[config_filepath]]]
with call[name[open], parameter[name[config_filepath], constant[r]]] begin[:]
call[name[config_parser].read_file, parameter[name[filehandle]]]
return[name[config_parser]] | keyword[def] identifier[read_config] (
identifier[config_filepath] ,
identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] ),
):
literal[string]
identifier[config_parser] = identifier[configparser] . identifier[ConfigParser] (
identifier[interpolation] = identifier[ExtendedInterpolation] (),
identifier[allow_no_value] = keyword[True] ,
identifier[delimiters] =( literal[string] ),
identifier[inline_comment_prefixes] =( literal[string] )
)
identifier[logger] . identifier[debug] ( literal[string] , identifier[config_filepath] )
keyword[with] identifier[open] ( identifier[config_filepath] , literal[string] ) keyword[as] identifier[filehandle] :
identifier[config_parser] . identifier[read_file] ( identifier[filehandle] )
keyword[return] identifier[config_parser] | def read_config(config_filepath, logger=logging.getLogger('ProsperCommon')):
"""fetch and parse config file
Args:
config_filepath (str): path to config file. abspath > relpath
logger (:obj:`logging.Logger`): logger to catch error msgs
"""
config_parser = configparser.ConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True, delimiters='=', inline_comment_prefixes='#')
logger.debug('config_filepath=%s', config_filepath)
with open(config_filepath, 'r') as filehandle:
config_parser.read_file(filehandle) # depends on [control=['with'], data=['filehandle']]
return config_parser |
def serialize_instance(obj):
    """Recursively serialize an object instance into a dict.

    The resulting dict stores the class name under ``'__classname__'`` plus
    every instance attribute; attribute values that are not one of the
    common data types are serialized recursively the same way.

    :param:
        * obj: (object) object instance to serialize
    :return:
        * obj_dict: (dict) serialized representation of ``obj``

    Example::

        class Obj(object):
            def __init__(self, a, b):
                self.a = a
                self.b = b

        class ObjB(object):
            def __init__(self, x, y):
                self.x = x
                self.y = y

        b = ObjB('string', [item for item in range(10)])
        print(serialize_instance(Obj(1, b)))
        # {'__classname__': 'Obj', 'a': 1,
        #  'b': {'__classname__': 'ObjB', 'x': 'string',
        #        'y': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}

    .. note:: cyclic references are not detected and would recurse forever.
    """
    obj_dict = {'__classname__': type(obj).__name__}
    # Fill the mapping attribute by attribute instead of mutating obj_dict
    # while iterating over its own items() (the original pattern only worked
    # because no keys were ever added or removed mid-iteration).
    for key, value in vars(obj).items():
        if isinstance(value, commonDataType):
            obj_dict[key] = value
        else:
            # Non-primitive attribute: serialize it recursively.
            obj_dict[key] = serialize_instance(value)
    return obj_dict
constant[
对象序列化
:param:
* obj: (object) 对象实例
:return:
* obj_dict: (dict) 对象序列化字典
举例如下::
print('--- serialize_instance demo ---')
# 定义两个对象
class Obj(object):
def __init__(self, a, b):
self.a = a
self.b = b
class ObjB(object):
def __init__(self, x, y):
self.x = x
self.y = y
# 对象序列化
b = ObjB('string', [item for item in range(10)])
obj_ = Obj(1, b)
print(serialize_instance(obj_))
print('---')
执行结果::
--- serialize_instance demo ---
{'__classname__': 'Obj', 'a': 1,
'b': {'__classname__': 'ObjB', 'x': 'string', 'y': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}
---
]
variable[obj_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18ede6ec0>], [<ast.Attribute object at 0x7da18ede7be0>]]
call[name[obj_dict].update, parameter[name[obj].__dict__]]
for taget[tuple[[<ast.Name object at 0x7da18ede75b0>, <ast.Name object at 0x7da18ede4dc0>]]] in starred[call[name[obj_dict].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18ede4a60> begin[:]
variable[sub_dict] assign[=] call[name[serialize_instance], parameter[name[value]]]
call[name[obj_dict].update, parameter[dictionary[[<ast.Name object at 0x7da18ede49a0>], [<ast.Name object at 0x7da18ede68f0>]]]]
return[name[obj_dict]] | keyword[def] identifier[serialize_instance] ( identifier[obj] ):
literal[string]
identifier[obj_dict] ={ literal[string] : identifier[type] ( identifier[obj] ). identifier[__name__] }
identifier[obj_dict] . identifier[update] ( identifier[obj] . identifier[__dict__] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj_dict] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[commonDataType] ):
identifier[sub_dict] = identifier[serialize_instance] ( identifier[value] )
identifier[obj_dict] . identifier[update] ({ identifier[key] : identifier[sub_dict] })
keyword[else] :
keyword[continue]
keyword[return] identifier[obj_dict] | def serialize_instance(obj):
"""
对象序列化
:param:
* obj: (object) 对象实例
:return:
* obj_dict: (dict) 对象序列化字典
举例如下::
print('--- serialize_instance demo ---')
# 定义两个对象
class Obj(object):
def __init__(self, a, b):
self.a = a
self.b = b
class ObjB(object):
def __init__(self, x, y):
self.x = x
self.y = y
# 对象序列化
b = ObjB('string', [item for item in range(10)])
obj_ = Obj(1, b)
print(serialize_instance(obj_))
print('---')
执行结果::
--- serialize_instance demo ---
{'__classname__': 'Obj', 'a': 1,
'b': {'__classname__': 'ObjB', 'x': 'string', 'y': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}
---
"""
obj_dict = {'__classname__': type(obj).__name__}
obj_dict.update(obj.__dict__)
for (key, value) in obj_dict.items():
if not isinstance(value, commonDataType):
sub_dict = serialize_instance(value)
obj_dict.update({key: sub_dict}) # depends on [control=['if'], data=[]]
else:
continue # depends on [control=['for'], data=[]]
return obj_dict |
def check(self, var):
    """Return True if the variable matches the specified type."""
    if not isinstance(var, _num_type):
        return False
    if self._lower_bound is not None and var < self._lower_bound:
        return False
    if self._upper_bound is not None and var > self._upper_bound:
        return False
    return True
constant[Return True if the variable matches the specified type.]
return[<ast.BoolOp object at 0x7da1b03ba620>] | keyword[def] identifier[check] ( identifier[self] , identifier[var] ):
literal[string]
keyword[return] ( identifier[isinstance] ( identifier[var] , identifier[_num_type] ) keyword[and]
( identifier[self] . identifier[_lower_bound] keyword[is] keyword[None] keyword[or] identifier[var] >= identifier[self] . identifier[_lower_bound] ) keyword[and]
( identifier[self] . identifier[_upper_bound] keyword[is] keyword[None] keyword[or] identifier[var] <= identifier[self] . identifier[_upper_bound] )) | def check(self, var):
"""Return True if the variable matches the specified type."""
return isinstance(var, _num_type) and (self._lower_bound is None or var >= self._lower_bound) and (self._upper_bound is None or var <= self._upper_bound) |
def get_gradebook_columns_by_search(self, gradebook_column_query, gradebook_column_search):
    """Pass through to provider GradebookColumnSearchSession.get_gradebook_columns_by_search"""
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    if self._can('search'):
        return self._provider_session.get_gradebook_columns_by_search(
            gradebook_column_query,
            gradebook_column_search)
    raise PermissionDenied()
constant[Pass through to provider GradebookColumnSearchSession.get_gradebook_columns_by_search]
if <ast.UnaryOp object at 0x7da18bc70070> begin[:]
<ast.Raise object at 0x7da18bc70b80>
return[call[name[self]._provider_session.get_gradebook_columns_by_search, parameter[name[gradebook_column_query], name[gradebook_column_search]]]] | keyword[def] identifier[get_gradebook_columns_by_search] ( identifier[self] , identifier[gradebook_column_query] , identifier[gradebook_column_search] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_can] ( literal[string] ):
keyword[raise] identifier[PermissionDenied] ()
keyword[return] identifier[self] . identifier[_provider_session] . identifier[get_gradebook_columns_by_search] ( identifier[gradebook_column_query] , identifier[gradebook_column_search] ) | def get_gradebook_columns_by_search(self, gradebook_column_query, gradebook_column_search):
"""Pass through to provider GradebookColumnSearchSession.get_gradebook_columns_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied() # depends on [control=['if'], data=[]]
return self._provider_session.get_gradebook_columns_by_search(gradebook_column_query, gradebook_column_search) |
def get(self, name):
    """Get a device model property.

    Args:
        name (str): The name of the property to get
    """
    key = str(name)
    if key in self._properties:
        return self._properties[key]
    raise ArgumentError("Unknown property in DeviceModel", name=key)
constant[Get a device model property.
Args:
name (str): The name of the property to get
]
variable[name] assign[=] call[name[str], parameter[name[name]]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._properties] begin[:]
<ast.Raise object at 0x7da1b26afeb0>
return[call[name[self]._properties][name[name]]] | keyword[def] identifier[get] ( identifier[self] , identifier[name] ):
literal[string]
identifier[name] = identifier[str] ( identifier[name] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_properties] :
keyword[raise] identifier[ArgumentError] ( literal[string] , identifier[name] = identifier[name] )
keyword[return] identifier[self] . identifier[_properties] [ identifier[name] ] | def get(self, name):
"""Get a device model property.
Args:
name (str): The name of the property to get
"""
name = str(name)
if name not in self._properties:
raise ArgumentError('Unknown property in DeviceModel', name=name) # depends on [control=['if'], data=['name']]
return self._properties[name] |
def _module_to_generators(pb_module):
'''
Convert a protobuf module to a dict of generators.
This is typically used with modules that contain multiple type definitions.
'''
if not pb_module:
return None
message_types = pb_module.DESCRIPTOR.message_types_by_name
return {k: ProtobufGenerator(v) for k, v in message_types.items()} | def function[_module_to_generators, parameter[pb_module]]:
constant[
Convert a protobuf module to a dict of generators.
This is typically used with modules that contain multiple type definitions.
]
if <ast.UnaryOp object at 0x7da1b1721e40> begin[:]
return[constant[None]]
variable[message_types] assign[=] name[pb_module].DESCRIPTOR.message_types_by_name
return[<ast.DictComp object at 0x7da1b17f8310>] | keyword[def] identifier[_module_to_generators] ( identifier[pb_module] ):
literal[string]
keyword[if] keyword[not] identifier[pb_module] :
keyword[return] keyword[None]
identifier[message_types] = identifier[pb_module] . identifier[DESCRIPTOR] . identifier[message_types_by_name]
keyword[return] { identifier[k] : identifier[ProtobufGenerator] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[message_types] . identifier[items] ()} | def _module_to_generators(pb_module):
"""
Convert a protobuf module to a dict of generators.
This is typically used with modules that contain multiple type definitions.
"""
if not pb_module:
return None # depends on [control=['if'], data=[]]
message_types = pb_module.DESCRIPTOR.message_types_by_name
return {k: ProtobufGenerator(v) for (k, v) in message_types.items()} |
def infer_complexes(stmts):
    """Return inferred Complex from Statements implying physical interaction.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of Statements to infer Complexes from.

    Returns
    -------
    linked_stmts : list[indra.mechlinker.LinkedStatement]
        A list of LinkedStatements representing the inferred Statements.
    """
    # Every Modification with a known enzyme implies enzyme-substrate binding.
    modifications = _get_statements_by_type(stmts, Modification)
    return [
        Complex([stmt.enz, stmt.sub], evidence=stmt.evidence)
        for stmt in modifications
        if stmt.enz is not None
    ]
constant[Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
]
variable[interact_stmts] assign[=] call[name[_get_statements_by_type], parameter[name[stmts], name[Modification]]]
variable[linked_stmts] assign[=] list[[]]
for taget[name[mstmt]] in starred[name[interact_stmts]] begin[:]
if compare[name[mstmt].enz is constant[None]] begin[:]
continue
variable[st] assign[=] call[name[Complex], parameter[list[[<ast.Attribute object at 0x7da18bccab90>, <ast.Attribute object at 0x7da18bccbc10>]]]]
call[name[linked_stmts].append, parameter[name[st]]]
return[name[linked_stmts]] | keyword[def] identifier[infer_complexes] ( identifier[stmts] ):
literal[string]
identifier[interact_stmts] = identifier[_get_statements_by_type] ( identifier[stmts] , identifier[Modification] )
identifier[linked_stmts] =[]
keyword[for] identifier[mstmt] keyword[in] identifier[interact_stmts] :
keyword[if] identifier[mstmt] . identifier[enz] keyword[is] keyword[None] :
keyword[continue]
identifier[st] = identifier[Complex] ([ identifier[mstmt] . identifier[enz] , identifier[mstmt] . identifier[sub] ], identifier[evidence] = identifier[mstmt] . identifier[evidence] )
identifier[linked_stmts] . identifier[append] ( identifier[st] )
keyword[return] identifier[linked_stmts] | def infer_complexes(stmts):
"""Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
interact_stmts = _get_statements_by_type(stmts, Modification)
linked_stmts = []
for mstmt in interact_stmts:
if mstmt.enz is None:
continue # depends on [control=['if'], data=[]]
st = Complex([mstmt.enz, mstmt.sub], evidence=mstmt.evidence)
linked_stmts.append(st) # depends on [control=['for'], data=['mstmt']]
return linked_stmts |
def register_action(action):
    """
    Adds an action to the parser cli.

    Registers the action's subcommand on the module-level subparsers and
    stores the action class in the ``_actions`` registry keyed by its
    command name.

    :param action(BaseAction): a subclass of the BaseAction class
    """
    cmd = action.meta('cmd')  # computed once instead of three lookups
    sub = _subparsers.add_parser(cmd, help=action.meta('help'))
    sub.set_defaults(cmd=cmd)
    # Only the argument descriptors matter here, not their property names.
    for arg in action.props().values():
        sub.add_argument(arg.name, arg.flag, **arg.options)
    _actions[cmd] = action
constant[
Adds an action to the parser cli.
:param action(BaseAction): a subclass of the BaseAction class
]
variable[sub] assign[=] call[name[_subparsers].add_parser, parameter[call[name[action].meta, parameter[constant[cmd]]]]]
call[name[sub].set_defaults, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c992350>, <ast.Name object at 0x7da20c9912a0>]]] in starred[call[call[name[action].props, parameter[]].items, parameter[]]] begin[:]
call[name[sub].add_argument, parameter[name[arg].name, name[arg].flag]]
call[name[_actions]][call[name[action].meta, parameter[constant[cmd]]]] assign[=] name[action] | keyword[def] identifier[register_action] ( identifier[action] ):
literal[string]
identifier[sub] = identifier[_subparsers] . identifier[add_parser] ( identifier[action] . identifier[meta] ( literal[string] ), identifier[help] = identifier[action] . identifier[meta] ( literal[string] ))
identifier[sub] . identifier[set_defaults] ( identifier[cmd] = identifier[action] . identifier[meta] ( literal[string] ))
keyword[for] ( identifier[name] , identifier[arg] ) keyword[in] identifier[action] . identifier[props] (). identifier[items] ():
identifier[sub] . identifier[add_argument] ( identifier[arg] . identifier[name] , identifier[arg] . identifier[flag] ,** identifier[arg] . identifier[options] )
identifier[_actions] [ identifier[action] . identifier[meta] ( literal[string] )]= identifier[action] | def register_action(action):
"""
Adds an action to the parser cli.
:param action(BaseAction): a subclass of the BaseAction class
"""
sub = _subparsers.add_parser(action.meta('cmd'), help=action.meta('help'))
sub.set_defaults(cmd=action.meta('cmd'))
for (name, arg) in action.props().items():
sub.add_argument(arg.name, arg.flag, **arg.options)
_actions[action.meta('cmd')] = action # depends on [control=['for'], data=[]] |
def greenlet_admin(self):
    """ This greenlet is used to get status information about the worker
        when --admin_port was given
    """
    if self.config["processes"] > 1:
        # One admin socket cannot be shared across forked processes, so the
        # server is only available in single-process mode.
        self.log.debug(
            "Admin server disabled because of multiple processes.")
        return

    class Devnull(object):
        # File-like sink that discards pywsgi's access-log output.
        def write(self, *_):
            pass

    from gevent import pywsgi

    def admin_routes(env, start_response):
        # Minimal WSGI app exposing worker status endpoints.
        path = env["PATH_INFO"]
        status = "200 OK"
        # PEP 3333: the response body iterable must yield bytes, even when
        # empty — was res = "" (a str), which is invalid for the 404 path.
        res = b""
        if path in ["/", "/report", "/report_mem"]:
            report = self.get_worker_report(with_memory=(path == "/report_mem"))
            res = bytes(json_stdlib.dumps(report, cls=MongoJSONEncoder), 'utf-8')
        elif path == "/wait_for_idle":
            # Blocks until the worker has no more queued or running jobs.
            self.wait_for_idle()
            res = bytes("idle", "utf-8")
        else:
            status = "404 Not Found"
        start_response(status, [('Content-Type', 'application/json')])
        return [res]

    server = pywsgi.WSGIServer((self.config["admin_ip"], self.config["admin_port"]), admin_routes, log=Devnull())

    try:
        self.log.debug("Starting admin server on port %s" % self.config["admin_port"])
        server.serve_forever()
    except Exception as e:  # pylint: disable=broad-except
        self.log.debug("Error in admin server : %s" % e)
constant[ This greenlet is used to get status information about the worker
when --admin_port was given
]
if compare[call[name[self].config][constant[processes]] greater[>] constant[1]] begin[:]
call[name[self].log.debug, parameter[constant[Admin server disabled because of multiple processes.]]]
return[None]
class class[Devnull, parameter[]] begin[:]
def function[write, parameter[self]]:
pass
from relative_module[gevent] import module[pywsgi]
def function[admin_routes, parameter[env, start_response]]:
variable[path] assign[=] call[name[env]][constant[PATH_INFO]]
variable[status] assign[=] constant[200 OK]
variable[res] assign[=] constant[]
if compare[name[path] in list[[<ast.Constant object at 0x7da1b07cd720>, <ast.Constant object at 0x7da1b07cd6f0>, <ast.Constant object at 0x7da1b07cd6c0>]]] begin[:]
variable[report] assign[=] call[name[self].get_worker_report, parameter[]]
variable[res] assign[=] call[name[bytes], parameter[call[name[json_stdlib].dumps, parameter[name[report]]], constant[utf-8]]]
call[name[start_response], parameter[name[status], list[[<ast.Tuple object at 0x7da1b07cc580>]]]]
return[list[[<ast.Name object at 0x7da1b07cc460>]]]
variable[server] assign[=] call[name[pywsgi].WSGIServer, parameter[tuple[[<ast.Subscript object at 0x7da1b07ce1d0>, <ast.Subscript object at 0x7da1b07ce290>]], name[admin_routes]]]
<ast.Try object at 0x7da1b07ce410> | keyword[def] identifier[greenlet_admin] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[config] [ literal[string] ]> literal[int] :
identifier[self] . identifier[log] . identifier[debug] (
literal[string] )
keyword[return]
keyword[class] identifier[Devnull] ( identifier[object] ):
keyword[def] identifier[write] ( identifier[self] ,* identifier[_] ):
keyword[pass]
keyword[from] identifier[gevent] keyword[import] identifier[pywsgi]
keyword[def] identifier[admin_routes] ( identifier[env] , identifier[start_response] ):
identifier[path] = identifier[env] [ literal[string] ]
identifier[status] = literal[string]
identifier[res] = literal[string]
keyword[if] identifier[path] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[report] = identifier[self] . identifier[get_worker_report] ( identifier[with_memory] =( identifier[path] == literal[string] ))
identifier[res] = identifier[bytes] ( identifier[json_stdlib] . identifier[dumps] ( identifier[report] , identifier[cls] = identifier[MongoJSONEncoder] ), literal[string] )
keyword[elif] identifier[path] == literal[string] :
identifier[self] . identifier[wait_for_idle] ()
identifier[res] = identifier[bytes] ( literal[string] , literal[string] )
keyword[else] :
identifier[status] = literal[string]
identifier[start_response] ( identifier[status] ,[( literal[string] , literal[string] )])
keyword[return] [ identifier[res] ]
identifier[server] = identifier[pywsgi] . identifier[WSGIServer] (( identifier[self] . identifier[config] [ literal[string] ], identifier[self] . identifier[config] [ literal[string] ]), identifier[admin_routes] , identifier[log] = identifier[Devnull] ())
keyword[try] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[config] [ literal[string] ])
identifier[server] . identifier[serve_forever] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] % identifier[e] ) | def greenlet_admin(self):
""" This greenlet is used to get status information about the worker
when --admin_port was given
"""
if self.config['processes'] > 1:
self.log.debug('Admin server disabled because of multiple processes.')
return # depends on [control=['if'], data=[]]
class Devnull(object):
def write(self, *_):
pass
from gevent import pywsgi
def admin_routes(env, start_response):
path = env['PATH_INFO']
status = '200 OK'
res = ''
if path in ['/', '/report', '/report_mem']:
report = self.get_worker_report(with_memory=path == '/report_mem')
res = bytes(json_stdlib.dumps(report, cls=MongoJSONEncoder), 'utf-8') # depends on [control=['if'], data=['path']]
elif path == '/wait_for_idle':
self.wait_for_idle()
res = bytes('idle', 'utf-8') # depends on [control=['if'], data=[]]
else:
status = '404 Not Found'
start_response(status, [('Content-Type', 'application/json')])
return [res]
server = pywsgi.WSGIServer((self.config['admin_ip'], self.config['admin_port']), admin_routes, log=Devnull())
try:
self.log.debug('Starting admin server on port %s' % self.config['admin_port'])
server.serve_forever() # depends on [control=['try'], data=[]]
except Exception as e: # pylint: disable=broad-except
self.log.debug('Error in admin server : %s' % e) # depends on [control=['except'], data=['e']] |
def list_grad(self):
    """Returns gradient buffers on all contexts, in the same order
    as :py:meth:`values`."""
    if self._data is not None and self._grad is None:
        # Data exists but no gradient buffer was allocated: grad_req='null'.
        message = ("Cannot get gradient array for Parameter '%s' "
                   "because grad_req='null'" % self.name)
        raise RuntimeError(message)
    return self._check_and_get(self._grad, list)
constant[Returns gradient buffers on all contexts, in the same order
as :py:meth:`values`.]
if <ast.BoolOp object at 0x7da1b200b880> begin[:]
<ast.Raise object at 0x7da1b200bf40>
return[call[name[self]._check_and_get, parameter[name[self]._grad, name[list]]]] | keyword[def] identifier[list_grad] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_data] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_grad] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] (
literal[string] literal[string] %( identifier[self] . identifier[name] ))
keyword[return] identifier[self] . identifier[_check_and_get] ( identifier[self] . identifier[_grad] , identifier[list] ) | def list_grad(self):
"""Returns gradient buffers on all contexts, in the same order
as :py:meth:`values`."""
if self._data is not None and self._grad is None:
raise RuntimeError("Cannot get gradient array for Parameter '%s' because grad_req='null'" % self.name) # depends on [control=['if'], data=[]]
return self._check_and_get(self._grad, list) |
def predictOnValues(self, dstream):
    """
    Make predictions on a keyed dstream.

    Returns a transformed dstream object.
    """
    self._validate(dstream)

    def _predict_value(value):
        # Reads self._model on every call, matching the original lambda's
        # late binding (the model attribute may be replaced between calls).
        return self._model.predict(value)

    return dstream.mapValues(_predict_value)
constant[
Make predictions on a keyed dstream.
Returns a transformed dstream object.
]
call[name[self]._validate, parameter[name[dstream]]]
return[call[name[dstream].mapValues, parameter[<ast.Lambda object at 0x7da204960b50>]]] | keyword[def] identifier[predictOnValues] ( identifier[self] , identifier[dstream] ):
literal[string]
identifier[self] . identifier[_validate] ( identifier[dstream] )
keyword[return] identifier[dstream] . identifier[mapValues] ( keyword[lambda] identifier[x] : identifier[self] . identifier[_model] . identifier[predict] ( identifier[x] )) | def predictOnValues(self, dstream):
"""
Make predictions on a keyed dstream.
Returns a transformed dstream object.
"""
self._validate(dstream)
return dstream.mapValues(lambda x: self._model.predict(x)) |
def generate_oauth2_headers(self):
    """Generates header for oauth2
    """
    credentials = '{0}:{1}'.format(self.consumer_key, self.consumer_secret)
    basic_token = base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
    return {
        'Authorization': 'Basic {0}'.format(basic_token),
        'Content-Type': 'application/x-www-form-urlencoded',
    }
constant[Generates header for oauth2
]
variable[encoded_credentials] assign[=] call[name[base64].b64encode, parameter[call[call[constant[{0}:{1}].format, parameter[name[self].consumer_key, name[self].consumer_secret]].encode, parameter[constant[utf-8]]]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1afea8f40>, <ast.Constant object at 0x7da1afeabf70>], [<ast.Call object at 0x7da1afea81f0>, <ast.Constant object at 0x7da1afeab580>]]
return[name[headers]] | keyword[def] identifier[generate_oauth2_headers] ( identifier[self] ):
literal[string]
identifier[encoded_credentials] = identifier[base64] . identifier[b64encode] (( literal[string] . identifier[format] ( identifier[self] . identifier[consumer_key] , identifier[self] . identifier[consumer_secret] )). identifier[encode] ( literal[string] ))
identifier[headers] ={
literal[string] : literal[string] . identifier[format] ( identifier[encoded_credentials] . identifier[decode] ( literal[string] )),
literal[string] : literal[string]
}
keyword[return] identifier[headers] | def generate_oauth2_headers(self):
"""Generates header for oauth2
"""
encoded_credentials = base64.b64encode('{0}:{1}'.format(self.consumer_key, self.consumer_secret).encode('utf-8'))
headers = {'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')), 'Content-Type': 'application/x-www-form-urlencoded'}
return headers |
def forward(self, x):
    """
    Args:
        x (:class:`torch.FloatTensor` [batch size, sequence length, rnn hidden size]): Input to
            apply dropout too.
    """
    # No-op outside training or when the dropout probability is zero.
    if not self.training or not self.p:
        return x
    keep_prob = 1 - self.p
    out = x.clone()
    # One mask per (seq, hidden) position, shared across the batch dimension.
    drop_mask = out.new_empty(1, out.size(1), out.size(2), requires_grad=False).bernoulli_(keep_prob)
    drop_mask = drop_mask.div_(keep_prob)
    drop_mask = drop_mask.expand_as(out)
    return out * drop_mask
constant[
Args:
x (:class:`torch.FloatTensor` [batch size, sequence length, rnn hidden size]): Input to
apply dropout too.
]
if <ast.BoolOp object at 0x7da1b1b441c0> begin[:]
return[name[x]]
variable[x] assign[=] call[name[x].clone, parameter[]]
variable[mask] assign[=] call[call[name[x].new_empty, parameter[constant[1], call[name[x].size, parameter[constant[1]]], call[name[x].size, parameter[constant[2]]]]].bernoulli_, parameter[binary_operation[constant[1] - name[self].p]]]
variable[mask] assign[=] call[name[mask].div_, parameter[binary_operation[constant[1] - name[self].p]]]
variable[mask] assign[=] call[name[mask].expand_as, parameter[name[x]]]
return[binary_operation[name[x] * name[mask]]] | keyword[def] identifier[forward] ( identifier[self] , identifier[x] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[training] keyword[or] keyword[not] identifier[self] . identifier[p] :
keyword[return] identifier[x]
identifier[x] = identifier[x] . identifier[clone] ()
identifier[mask] = identifier[x] . identifier[new_empty] ( literal[int] , identifier[x] . identifier[size] ( literal[int] ), identifier[x] . identifier[size] ( literal[int] ), identifier[requires_grad] = keyword[False] ). identifier[bernoulli_] ( literal[int] - identifier[self] . identifier[p] )
identifier[mask] = identifier[mask] . identifier[div_] ( literal[int] - identifier[self] . identifier[p] )
identifier[mask] = identifier[mask] . identifier[expand_as] ( identifier[x] )
keyword[return] identifier[x] * identifier[mask] | def forward(self, x):
"""
Args:
x (:class:`torch.FloatTensor` [batch size, sequence length, rnn hidden size]): Input to
apply dropout too.
"""
if not self.training or not self.p:
return x # depends on [control=['if'], data=[]]
x = x.clone()
mask = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.p)
mask = mask.div_(1 - self.p)
mask = mask.expand_as(x)
return x * mask |
def learn_sfa(self, mma=None):
    """Run the high-level learning loop and return an SFA hypothesis.

    The observation table is repeatedly closed, a conjecture machine is
    built from it, and an equivalence query either confirms the
    hypothesis or yields a counterexample that refines the table.

    Args:
        mma: Optional automaton used to seed the observation table;
            when falsy the table is initialized empty.
    Returns:
        tuple: an empty string paired with the learned SFA conjecture.
    """
    logging.info('Initializing learning procedure.')
    # Seed the observation table, optionally from an existing automaton.
    if mma:
        self._init_table_from_dfa(mma)
    else:
        self._init_table()
    logging.info('Generating a closed and consistent observation table.')
    while True:
        # Keep closing the table until no unclosed row remains.
        while True:
            logging.debug('Checking if table is closed.')
            is_closed, row = self.observation_table.is_closed()
            if is_closed:
                logging.debug('Table closed.')
                break
            logging.debug('Closing table.')
            self._ot_make_closed(row)
        # Build a conjecture machine from the closed table.
        conjecture = self.get_sfa_conjecture()
        logging.info('Generated conjecture machine with %d states.',
                     len(list(conjecture.states)))
        # Ask the teacher whether the conjecture is correct.
        logging.debug('Running equivalence query.')
        equivalent, counter_example = self._equivalence_query(conjecture)
        if equivalent:
            logging.info('No counterexample found. Hypothesis is correct!')
            logging.info('Learning complete.')
            return '', conjecture
        # Feed the counterexample back into the table and iterate again.
        logging.info(
            'Processing counterexample %s with length %d.',
            counter_example,
            len(counter_example))
        self._process_counter_example(conjecture, counter_example)
constant[
Implements the high level loop of the algorithm for learning a
Mealy machine.
Args:
mma:
Returns:
MealyMachine: A model for the Mealy machine to be learned.
]
call[name[logging].info, parameter[constant[Initializing learning procedure.]]]
if name[mma] begin[:]
call[name[self]._init_table_from_dfa, parameter[name[mma]]]
call[name[logging].info, parameter[constant[Generating a closed and consistent observation table.]]]
while constant[True] begin[:]
variable[closed] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da18bcc8520> begin[:]
call[name[logging].debug, parameter[constant[Checking if table is closed.]]]
<ast.Tuple object at 0x7da18bcc9cf0> assign[=] call[name[self].observation_table.is_closed, parameter[]]
if <ast.UnaryOp object at 0x7da18bccb640> begin[:]
call[name[logging].debug, parameter[constant[Closing table.]]]
call[name[self]._ot_make_closed, parameter[name[s]]]
variable[sfa] assign[=] call[name[self].get_sfa_conjecture, parameter[]]
call[name[logging].info, parameter[constant[Generated conjecture machine with %d states.], call[name[len], parameter[call[name[list], parameter[name[sfa].states]]]]]]
call[name[logging].debug, parameter[constant[Running equivalence query.]]]
<ast.Tuple object at 0x7da18bccb0d0> assign[=] call[name[self]._equivalence_query, parameter[name[sfa]]]
if name[found] begin[:]
call[name[logging].info, parameter[constant[No counterexample found. Hypothesis is correct!]]]
break
call[name[logging].info, parameter[constant[Processing counterexample %s with length %d.], name[counter_example], call[name[len], parameter[name[counter_example]]]]]
call[name[self]._process_counter_example, parameter[name[sfa], name[counter_example]]]
call[name[logging].info, parameter[constant[Learning complete.]]]
return[tuple[[<ast.Constant object at 0x7da18bccbf40>, <ast.Name object at 0x7da18bcc95d0>]]] | keyword[def] identifier[learn_sfa] ( identifier[self] , identifier[mma] = keyword[None] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
keyword[if] identifier[mma] :
identifier[self] . identifier[_init_table_from_dfa] ( identifier[mma] )
keyword[else] :
identifier[self] . identifier[_init_table] ()
identifier[logging] . identifier[info] ( literal[string] )
keyword[while] keyword[True] :
identifier[closed] = keyword[False]
keyword[while] keyword[not] identifier[closed] :
identifier[logging] . identifier[debug] ( literal[string] )
identifier[closed] , identifier[s] = identifier[self] . identifier[observation_table] . identifier[is_closed] ()
keyword[if] keyword[not] identifier[closed] :
identifier[logging] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_ot_make_closed] ( identifier[s] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] )
identifier[sfa] = identifier[self] . identifier[get_sfa_conjecture] ()
identifier[logging] . identifier[info] ( literal[string] ,
identifier[len] ( identifier[list] ( identifier[sfa] . identifier[states] )))
identifier[logging] . identifier[debug] ( literal[string] )
identifier[found] , identifier[counter_example] = identifier[self] . identifier[_equivalence_query] ( identifier[sfa] )
keyword[if] identifier[found] :
identifier[logging] . identifier[info] ( literal[string] )
keyword[break]
identifier[logging] . identifier[info] (
literal[string] ,
identifier[counter_example] ,
identifier[len] ( identifier[counter_example] ))
identifier[self] . identifier[_process_counter_example] ( identifier[sfa] , identifier[counter_example] )
identifier[logging] . identifier[info] ( literal[string] )
keyword[return] literal[string] , identifier[sfa] | def learn_sfa(self, mma=None):
"""
Implements the high level loop of the algorithm for learning a
Mealy machine.
Args:
mma:
Returns:
MealyMachine: A model for the Mealy machine to be learned.
"""
logging.info('Initializing learning procedure.')
if mma:
self._init_table_from_dfa(mma) # depends on [control=['if'], data=[]]
else:
self._init_table()
logging.info('Generating a closed and consistent observation table.')
while True:
closed = False
# Make sure that the table is closed
while not closed:
logging.debug('Checking if table is closed.')
(closed, s) = self.observation_table.is_closed()
if not closed:
logging.debug('Closing table.')
self._ot_make_closed(s) # depends on [control=['if'], data=[]]
else:
logging.debug('Table closed.') # depends on [control=['while'], data=[]]
# Create conjecture
sfa = self.get_sfa_conjecture()
logging.info('Generated conjecture machine with %d states.', len(list(sfa.states)))
# _check correctness
logging.debug('Running equivalence query.')
(found, counter_example) = self._equivalence_query(sfa)
# Are we done?
if found:
logging.info('No counterexample found. Hypothesis is correct!')
break # depends on [control=['if'], data=[]]
# Add the new experiments into the table to reiterate the
# learning loop
logging.info('Processing counterexample %s with length %d.', counter_example, len(counter_example))
self._process_counter_example(sfa, counter_example) # depends on [control=['while'], data=[]]
logging.info('Learning complete.')
return ('', sfa) |
def can_vote_in_poll(self, poll, user):
    """ Given a poll, checks whether the user can answer to it. """
    # A poll with a duration is closed once its end date has passed.
    if poll.duration:
        if poll.created + dt.timedelta(days=poll.duration) < now():
            return False
    # Voting requires the forum-level permission and an unlocked topic
    # (permission check evaluated first, matching the original order).
    permitted = self._perform_basic_permission_check(
        poll.topic.forum, user, 'can_vote_in_polls')
    can_vote = permitted and not poll.topic.is_locked
    # Collect any votes this user already cast in the considered poll.
    votes = TopicPollVote.objects.filter(poll_option__poll=poll)
    if user.is_anonymous:
        forum_key = get_anonymous_user_forum_key(user)
        if forum_key:
            votes = votes.filter(anonymous_key=forum_key)
        else:
            # Without a forum key the anonymous user cannot be identified,
            # so they may not vote in this poll at all.
            votes = votes.none()
            can_vote = False
    else:
        votes = votes.filter(voter=user)
    # A second vote is only allowed when the poll permits vote changes.
    if votes.exists() and can_vote:
        can_vote = poll.user_changes
    return can_vote
constant[ Given a poll, checks whether the user can answer to it. ]
if name[poll].duration begin[:]
variable[poll_dtend] assign[=] binary_operation[name[poll].created + call[name[dt].timedelta, parameter[]]]
if compare[name[poll_dtend] less[<] call[name[now], parameter[]]] begin[:]
return[constant[False]]
variable[can_vote] assign[=] <ast.BoolOp object at 0x7da207f98dc0>
variable[user_votes] assign[=] call[name[TopicPollVote].objects.filter, parameter[]]
if name[user].is_anonymous begin[:]
variable[forum_key] assign[=] call[name[get_anonymous_user_forum_key], parameter[name[user]]]
if name[forum_key] begin[:]
variable[user_votes] assign[=] call[name[user_votes].filter, parameter[]]
if <ast.BoolOp object at 0x7da207f9bd90> begin[:]
variable[can_vote] assign[=] name[poll].user_changes
return[name[can_vote]] | keyword[def] identifier[can_vote_in_poll] ( identifier[self] , identifier[poll] , identifier[user] ):
literal[string]
keyword[if] identifier[poll] . identifier[duration] :
identifier[poll_dtend] = identifier[poll] . identifier[created] + identifier[dt] . identifier[timedelta] ( identifier[days] = identifier[poll] . identifier[duration] )
keyword[if] identifier[poll_dtend] < identifier[now] ():
keyword[return] keyword[False]
identifier[can_vote] =(
identifier[self] . identifier[_perform_basic_permission_check] ( identifier[poll] . identifier[topic] . identifier[forum] , identifier[user] , literal[string] ) keyword[and]
keyword[not] identifier[poll] . identifier[topic] . identifier[is_locked]
)
identifier[user_votes] = identifier[TopicPollVote] . identifier[objects] . identifier[filter] ( identifier[poll_option__poll] = identifier[poll] )
keyword[if] identifier[user] . identifier[is_anonymous] :
identifier[forum_key] = identifier[get_anonymous_user_forum_key] ( identifier[user] )
keyword[if] identifier[forum_key] :
identifier[user_votes] = identifier[user_votes] . identifier[filter] ( identifier[anonymous_key] = identifier[forum_key] )
keyword[else] :
identifier[user_votes] = identifier[user_votes] . identifier[none] ()
identifier[can_vote] = keyword[False]
keyword[else] :
identifier[user_votes] = identifier[user_votes] . identifier[filter] ( identifier[voter] = identifier[user] )
keyword[if] identifier[user_votes] . identifier[exists] () keyword[and] identifier[can_vote] :
identifier[can_vote] = identifier[poll] . identifier[user_changes]
keyword[return] identifier[can_vote] | def can_vote_in_poll(self, poll, user):
""" Given a poll, checks whether the user can answer to it. """
# First we have to check if the poll is curently open
if poll.duration:
poll_dtend = poll.created + dt.timedelta(days=poll.duration)
if poll_dtend < now():
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Is this user allowed to vote in polls in the current forum?
can_vote = self._perform_basic_permission_check(poll.topic.forum, user, 'can_vote_in_polls') and (not poll.topic.is_locked)
# Retrieve the user votes for the considered poll
user_votes = TopicPollVote.objects.filter(poll_option__poll=poll)
if user.is_anonymous:
forum_key = get_anonymous_user_forum_key(user)
if forum_key:
user_votes = user_votes.filter(anonymous_key=forum_key) # depends on [control=['if'], data=[]]
else:
# If the forum key of the anonymous user cannot be retrieved, the user should not be
# allowed to vote in the considered poll.
user_votes = user_votes.none()
can_vote = False # depends on [control=['if'], data=[]]
else:
user_votes = user_votes.filter(voter=user)
# If the user has already voted, they can vote again if the vote changes are allowed
if user_votes.exists() and can_vote:
can_vote = poll.user_changes # depends on [control=['if'], data=[]]
return can_vote |
def pi (self, data):
    """
    Write an HTML processing instruction built from the tag data.
    @param data: the tag data
    @type data: string
    @return: None
    """
    # NOTE(review): str.encode() yields bytes on Python 3, so the
    # %-format below would embed a bytes repr -- this presumably
    # targets Python 2; confirm before porting.
    encoded = data.encode(self.encoding, "ignore")
    self.fd.write("<?%s?>" % encoded)
constant[
Print HTML pi.
@param data: the tag data
@type data: string
@return: None
]
variable[data] assign[=] call[name[data].encode, parameter[name[self].encoding, constant[ignore]]]
call[name[self].fd.write, parameter[binary_operation[constant[<?%s?>] <ast.Mod object at 0x7da2590d6920> name[data]]]] | keyword[def] identifier[pi] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[encode] ( identifier[self] . identifier[encoding] , literal[string] )
identifier[self] . identifier[fd] . identifier[write] ( literal[string] % identifier[data] ) | def pi(self, data):
"""
Print HTML pi.
@param data: the tag data
@type data: string
@return: None
"""
data = data.encode(self.encoding, 'ignore')
self.fd.write('<?%s?>' % data) |
def parse_balanced_image(self, markup):
    """ Corrects Wikipedia image markup.
    Images have a description inside their link markup that
    can contain link markup itself, make sure the outer "[" and "]" brackets
    delimiting the image are balanced correctly (e.g. no [[ ]] ]]).
    Called from parse_images().
    """
    # Track bracket nesting depth; the image span ends at the first
    # position where every opened "[" has been matched by a "]".
    depth = 0
    for index, character in enumerate(markup):
        if character == "[":
            depth += 1
        elif character == "]":
            depth -= 1
        if depth == 0:
            return markup[:index + 1]
    # Unbalanced markup: return it unchanged.
    return markup
constant[ Corrects Wikipedia image markup.
Images have a description inside their link markup that
can contain link markup itself, make sure the outer "[" and "]" brackets
delimiting the image are balanced correctly (e.g. no [[ ]] ]]).
Called from parse_images().
]
variable[opened] assign[=] constant[0]
variable[closed] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[markup]]]]]] begin[:]
if compare[call[name[markup]][name[i]] equal[==] constant[[]] begin[:]
<ast.AugAssign object at 0x7da1b00f7f40>
if compare[call[name[markup]][name[i]] equal[==] constant[]]] begin[:]
<ast.AugAssign object at 0x7da1b00f6e30>
if compare[name[opened] equal[==] name[closed]] begin[:]
return[call[name[markup]][<ast.Slice object at 0x7da1b00f51b0>]]
return[name[markup]] | keyword[def] identifier[parse_balanced_image] ( identifier[self] , identifier[markup] ):
literal[string]
identifier[opened] = literal[int]
identifier[closed] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[markup] )):
keyword[if] identifier[markup] [ identifier[i] ]== literal[string] : identifier[opened] += literal[int]
keyword[if] identifier[markup] [ identifier[i] ]== literal[string] : identifier[closed] += literal[int]
keyword[if] identifier[opened] == identifier[closed] :
keyword[return] identifier[markup] [: identifier[i] + literal[int] ]
keyword[return] identifier[markup] | def parse_balanced_image(self, markup):
""" Corrects Wikipedia image markup.
Images have a description inside their link markup that
can contain link markup itself, make sure the outer "[" and "]" brackets
delimiting the image are balanced correctly (e.g. no [[ ]] ]]).
Called from parse_images().
"""
opened = 0
closed = 0
for i in range(len(markup)):
if markup[i] == '[':
opened += 1 # depends on [control=['if'], data=[]]
if markup[i] == ']':
closed += 1 # depends on [control=['if'], data=[]]
if opened == closed:
return markup[:i + 1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return markup |
def find_file_in_load_dirs(relpath):
    """Return the absolute path of *relpath* inside a DevAssistant load dir.

    Args:
        relpath: a relative path, e.g. "assistants/crt/test.yaml"
    Returns:
        absolute path of the file in the first load directory that
        contains it, e.g. "/home/x/.devassistant/assistants/crt/test.yaml",
        or None if the file is not found in any load directory
    """
    # A leading separator would defeat os.path.join, so strip it off
    # (lstrip is a no-op when there is none).
    stripped = relpath.lstrip(os.path.sep)
    for data_dir in settings.DATA_DIRECTORIES:
        candidate = os.path.join(data_dir, stripped)
        if os.path.exists(candidate):
            return candidate
    return None
constant[If given relative path exists in one of DevAssistant load paths,
return its full path.
Args:
relpath: a relative path, e.g. "assitants/crt/test.yaml"
Returns:
absolute path of the file, e.g. "/home/x/.devassistant/assistanta/crt/test.yaml
or None if file is not found
]
if call[name[relpath].startswith, parameter[name[os].path.sep]] begin[:]
variable[relpath] assign[=] call[name[relpath].lstrip, parameter[name[os].path.sep]]
for taget[name[ld]] in starred[name[settings].DATA_DIRECTORIES] begin[:]
variable[possible_path] assign[=] call[name[os].path.join, parameter[name[ld], name[relpath]]]
if call[name[os].path.exists, parameter[name[possible_path]]] begin[:]
return[name[possible_path]] | keyword[def] identifier[find_file_in_load_dirs] ( identifier[relpath] ):
literal[string]
keyword[if] identifier[relpath] . identifier[startswith] ( identifier[os] . identifier[path] . identifier[sep] ):
identifier[relpath] = identifier[relpath] . identifier[lstrip] ( identifier[os] . identifier[path] . identifier[sep] )
keyword[for] identifier[ld] keyword[in] identifier[settings] . identifier[DATA_DIRECTORIES] :
identifier[possible_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[ld] , identifier[relpath] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[possible_path] ):
keyword[return] identifier[possible_path] | def find_file_in_load_dirs(relpath):
"""If given relative path exists in one of DevAssistant load paths,
return its full path.
Args:
relpath: a relative path, e.g. "assitants/crt/test.yaml"
Returns:
absolute path of the file, e.g. "/home/x/.devassistant/assistanta/crt/test.yaml
or None if file is not found
"""
if relpath.startswith(os.path.sep):
relpath = relpath.lstrip(os.path.sep) # depends on [control=['if'], data=[]]
for ld in settings.DATA_DIRECTORIES:
possible_path = os.path.join(ld, relpath)
if os.path.exists(possible_path):
return possible_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ld']] |
def dropbox_post_factory(request):
    """receives a UUID via the request and returns either a fresh or an existing dropbox
    for it"""
    settings = request.registry.settings
    # Missing or malformed setting: fall back to a five minute window.
    try:
        max_age = int(settings.get('post_token_max_age_seconds'))
    except Exception:
        max_age = 300
    # Order matters: report expiry specifically, hide any other failure
    # behind a generic 404.
    try:
        drop_id = parse_post_token(
            token=request.matchdict['token'],
            secret=settings['post_secret'],
            max_age=max_age)
    except SignatureExpired:
        raise HTTPGone('dropbox expired')
    except Exception:  # don't be too specific on the reason for the error
        raise HTTPNotFound('no such dropbox')
    dropbox = settings['dropbox_container'].get_dropbox(drop_id)
    if dropbox.status_int >= 20:
        raise HTTPGone('dropbox already in processing, no longer accepts data')
    return dropbox
constant[receives a UUID via the request and returns either a fresh or an existing dropbox
for it]
<ast.Try object at 0x7da1affc1ea0>
<ast.Try object at 0x7da1affc1600>
variable[dropbox] assign[=] call[call[name[request].registry.settings][constant[dropbox_container]].get_dropbox, parameter[name[drop_id]]]
if compare[name[dropbox].status_int greater_or_equal[>=] constant[20]] begin[:]
<ast.Raise object at 0x7da1affc0670>
return[name[dropbox]] | keyword[def] identifier[dropbox_post_factory] ( identifier[request] ):
literal[string]
keyword[try] :
identifier[max_age] = identifier[int] ( identifier[request] . identifier[registry] . identifier[settings] . identifier[get] ( literal[string] ))
keyword[except] identifier[Exception] :
identifier[max_age] = literal[int]
keyword[try] :
identifier[drop_id] = identifier[parse_post_token] (
identifier[token] = identifier[request] . identifier[matchdict] [ literal[string] ],
identifier[secret] = identifier[request] . identifier[registry] . identifier[settings] [ literal[string] ],
identifier[max_age] = identifier[max_age] )
keyword[except] identifier[SignatureExpired] :
keyword[raise] identifier[HTTPGone] ( literal[string] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[HTTPNotFound] ( literal[string] )
identifier[dropbox] = identifier[request] . identifier[registry] . identifier[settings] [ literal[string] ]. identifier[get_dropbox] ( identifier[drop_id] )
keyword[if] identifier[dropbox] . identifier[status_int] >= literal[int] :
keyword[raise] identifier[HTTPGone] ( literal[string] )
keyword[return] identifier[dropbox] | def dropbox_post_factory(request):
"""receives a UUID via the request and returns either a fresh or an existing dropbox
for it"""
try:
max_age = int(request.registry.settings.get('post_token_max_age_seconds')) # depends on [control=['try'], data=[]]
except Exception:
max_age = 300 # depends on [control=['except'], data=[]]
try:
drop_id = parse_post_token(token=request.matchdict['token'], secret=request.registry.settings['post_secret'], max_age=max_age) # depends on [control=['try'], data=[]]
except SignatureExpired:
raise HTTPGone('dropbox expired') # depends on [control=['except'], data=[]]
except Exception: # don't be too specific on the reason for the error
raise HTTPNotFound('no such dropbox') # depends on [control=['except'], data=[]]
dropbox = request.registry.settings['dropbox_container'].get_dropbox(drop_id)
if dropbox.status_int >= 20:
raise HTTPGone('dropbox already in processing, no longer accepts data') # depends on [control=['if'], data=[]]
return dropbox |
def remove(self, docid):
    """
    Remove a document from the database.
    """
    # Coerce the id so string ids from web/CLI callers work too.
    self.store.executeSQL(self.removeSQL, (int(docid),))
constant[
Remove a document from the database.
]
variable[docid] assign[=] call[name[int], parameter[name[docid]]]
call[name[self].store.executeSQL, parameter[name[self].removeSQL, tuple[[<ast.Name object at 0x7da1b0a4f430>]]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[docid] ):
literal[string]
identifier[docid] = identifier[int] ( identifier[docid] )
identifier[self] . identifier[store] . identifier[executeSQL] ( identifier[self] . identifier[removeSQL] ,( identifier[docid] ,)) | def remove(self, docid):
"""
Remove a document from the database.
"""
docid = int(docid)
self.store.executeSQL(self.removeSQL, (docid,)) |
def retrieve_in(self, key, *paths, **kwargs):
    """Atomically fetch one or more paths from a document.

    Convenience wrapper around :meth:`lookup_in` that accepts plain
    path strings instead of pre-built subdocument specs, so that::

        rv = cb.retrieve_in(key, 'email', 'name', 'friends.therock')
        email, name, friend = rv

    is equivalent to calling :meth:`lookup_in` with ``SD.get(path)``
    for each path.

    .. seealso:: :meth:`lookup_in`
    """
    import couchbase.subdocument as SD
    # Build one SD.get spec per requested path, then delegate.
    specs = tuple(SD.get(path) for path in paths)
    return self.lookup_in(key, *specs, **kwargs)
constant[Atomically fetch one or more paths from a document.
Convenience method for retrieval operations. This functions
identically to :meth:`lookup_in`. As such, the following two
forms are equivalent:
.. code-block:: python
import couchbase.subdocument as SD
rv = cb.lookup_in(key,
SD.get('email'),
SD.get('name'),
SD.get('friends.therock')
email, name, friend = rv
.. code-block:: python
rv = cb.retrieve_in(key, 'email', 'name', 'friends.therock')
email, name, friend = rv
.. seealso:: :meth:`lookup_in`
]
import module[couchbase.subdocument] as alias[SD]
return[call[name[self].lookup_in, parameter[name[key], <ast.Starred object at 0x7da18c4cf730>]]] | keyword[def] identifier[retrieve_in] ( identifier[self] , identifier[key] ,* identifier[paths] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[couchbase] . identifier[subdocument] keyword[as] identifier[SD]
keyword[return] identifier[self] . identifier[lookup_in] ( identifier[key] ,* identifier[tuple] ( identifier[SD] . identifier[get] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[paths] ),** identifier[kwargs] ) | def retrieve_in(self, key, *paths, **kwargs):
"""Atomically fetch one or more paths from a document.
Convenience method for retrieval operations. This functions
identically to :meth:`lookup_in`. As such, the following two
forms are equivalent:
.. code-block:: python
import couchbase.subdocument as SD
rv = cb.lookup_in(key,
SD.get('email'),
SD.get('name'),
SD.get('friends.therock')
email, name, friend = rv
.. code-block:: python
rv = cb.retrieve_in(key, 'email', 'name', 'friends.therock')
email, name, friend = rv
.. seealso:: :meth:`lookup_in`
"""
import couchbase.subdocument as SD
return self.lookup_in(key, *tuple((SD.get(x) for x in paths)), **kwargs) |
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        # Compile lazily and cache the pattern in the module global.
        import re
        _portprog = re.compile('^(.*):([0-9]+)$')
    match = _portprog.match(host)
    if match is None:
        # No numeric port suffix present.
        return host, None
    return match.group(1, 2)
constant[splitport('host:port') --> 'host', 'port'.]
<ast.Global object at 0x7da20c992440>
if compare[name[_portprog] is constant[None]] begin[:]
import module[re]
variable[_portprog] assign[=] call[name[re].compile, parameter[constant[^(.*):([0-9]+)$]]]
variable[match] assign[=] call[name[_portprog].match, parameter[name[host]]]
if name[match] begin[:]
return[call[name[match].group, parameter[constant[1], constant[2]]]]
return[tuple[[<ast.Name object at 0x7da18bcc91b0>, <ast.Constant object at 0x7da18bcc9a80>]]] | keyword[def] identifier[splitport] ( identifier[host] ):
literal[string]
keyword[global] identifier[_portprog]
keyword[if] identifier[_portprog] keyword[is] keyword[None] :
keyword[import] identifier[re]
identifier[_portprog] = identifier[re] . identifier[compile] ( literal[string] )
identifier[match] = identifier[_portprog] . identifier[match] ( identifier[host] )
keyword[if] identifier[match] : keyword[return] identifier[match] . identifier[group] ( literal[int] , literal[int] )
keyword[return] identifier[host] , keyword[None] | def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]+)$') # depends on [control=['if'], data=['_portprog']]
match = _portprog.match(host)
if match:
return match.group(1, 2) # depends on [control=['if'], data=[]]
return (host, None) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.