code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def update(self, series_list):
    """Rebuild the series notebook tabs from series_list.

    Parameters
    ----------
    series_list: List of dict
        One dict of attributes per series.  When empty, only the
        "+" tab is appended.
    """
    notebook = self.series_notebook
    if not series_list:
        # Nothing to display -- just make sure the "+" tab exists.
        notebook.AddPage(wx.Panel(self, -1), _("+"))
        return
    self.updating = True
    # Drop every existing tab before rebuilding from scratch.
    notebook.DeleteAllPages()
    page = 0
    for attrdict in series_list:
        panel = SeriesPanel(self.grid, attrdict)
        notebook.InsertPage(page, panel, "Series")
        page += 1
    # Trailing "+" tab lets the user append a new series.
    notebook.AddPage(wx.Panel(self, -1), _("+"))
    self.updating = False
|
def function[update, parameter[self, series_list]]:
constant[Updates widget content from series_list
Parameters
----------
series_list: List of dict
List of dicts with data from all series
]
if <ast.UnaryOp object at 0x7da1b2347ac0> begin[:]
call[name[self].series_notebook.AddPage, parameter[call[name[wx].Panel, parameter[name[self], <ast.UnaryOp object at 0x7da1b2345390>]], call[name[_], parameter[constant[+]]]]]
return[None]
name[self].updating assign[=] constant[True]
call[name[self].series_notebook.DeleteAllPages, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b160b7f0>, <ast.Name object at 0x7da1b160b6d0>]]] in starred[call[name[enumerate], parameter[name[series_list]]]] begin[:]
variable[series_panel] assign[=] call[name[SeriesPanel], parameter[name[self].grid, name[attrdict]]]
variable[name] assign[=] constant[Series]
call[name[self].series_notebook.InsertPage, parameter[name[page], name[series_panel], name[name]]]
call[name[self].series_notebook.AddPage, parameter[call[name[wx].Panel, parameter[name[self], <ast.UnaryOp object at 0x7da1b17f8190>]], call[name[_], parameter[constant[+]]]]]
name[self].updating assign[=] constant[False]
|
keyword[def] identifier[update] ( identifier[self] , identifier[series_list] ):
literal[string]
keyword[if] keyword[not] identifier[series_list] :
identifier[self] . identifier[series_notebook] . identifier[AddPage] ( identifier[wx] . identifier[Panel] ( identifier[self] ,- literal[int] ), identifier[_] ( literal[string] ))
keyword[return]
identifier[self] . identifier[updating] = keyword[True]
identifier[self] . identifier[series_notebook] . identifier[DeleteAllPages] ()
keyword[for] identifier[page] , identifier[attrdict] keyword[in] identifier[enumerate] ( identifier[series_list] ):
identifier[series_panel] = identifier[SeriesPanel] ( identifier[self] . identifier[grid] , identifier[attrdict] )
identifier[name] = literal[string]
identifier[self] . identifier[series_notebook] . identifier[InsertPage] ( identifier[page] , identifier[series_panel] , identifier[name] )
identifier[self] . identifier[series_notebook] . identifier[AddPage] ( identifier[wx] . identifier[Panel] ( identifier[self] ,- literal[int] ), identifier[_] ( literal[string] ))
identifier[self] . identifier[updating] = keyword[False]
|
def update(self, series_list):
"""Updates widget content from series_list
Parameters
----------
series_list: List of dict
List of dicts with data from all series
"""
if not series_list:
self.series_notebook.AddPage(wx.Panel(self, -1), _('+'))
return # depends on [control=['if'], data=[]]
self.updating = True
# Delete all tabs in the notebook
self.series_notebook.DeleteAllPages()
# Add as many tabs as there are series in code
for (page, attrdict) in enumerate(series_list):
series_panel = SeriesPanel(self.grid, attrdict)
name = 'Series'
self.series_notebook.InsertPage(page, series_panel, name) # depends on [control=['for'], data=[]]
self.series_notebook.AddPage(wx.Panel(self, -1), _('+'))
self.updating = False
|
def unbind_key(pymux, variables):
    """
    Remove a custom key binding.
    """
    # The '-n' flag means "no prefix required", so the prefix
    # requirement is its negation.
    pymux.key_bindings_manager.remove_custom_binding(
        variables['<key>'], needs_prefix=not variables['-n'])
|
def function[unbind_key, parameter[pymux, variables]]:
constant[
Remove key binding.
]
variable[key] assign[=] call[name[variables]][constant[<key>]]
variable[needs_prefix] assign[=] <ast.UnaryOp object at 0x7da20e954c70>
call[name[pymux].key_bindings_manager.remove_custom_binding, parameter[name[key]]]
|
keyword[def] identifier[unbind_key] ( identifier[pymux] , identifier[variables] ):
literal[string]
identifier[key] = identifier[variables] [ literal[string] ]
identifier[needs_prefix] = keyword[not] identifier[variables] [ literal[string] ]
identifier[pymux] . identifier[key_bindings_manager] . identifier[remove_custom_binding] (
identifier[key] , identifier[needs_prefix] = identifier[needs_prefix] )
|
def unbind_key(pymux, variables):
"""
Remove key binding.
"""
key = variables['<key>']
needs_prefix = not variables['-n']
pymux.key_bindings_manager.remove_custom_binding(key, needs_prefix=needs_prefix)
|
def list_custom_images(call=None):
    '''
    Return a dict of all custom VM images on the cloud provider.

    call
        Must be ``'function'`` (i.e. invoked with ``-f``/``--function``);
        any other value raises ``SaltCloudSystemExit``.

    Only image template groups that carry a ``globalIdentifier`` are
    returned; each entry maps the image name to its id, name, global
    identifier and, when present, note.
    '''
    if call != 'function':
        # Fixed copy-paste bug: the message previously named 'list_vlans'.
        raise SaltCloudSystemExit(
            'The list_custom_images function must be called with -f or --function.'
        )
    ret = {}
    conn = get_conn('SoftLayer_Account')
    response = conn.getBlockDeviceTemplateGroups()
    for image in response:
        # Groups without a global identifier are not listed.
        if 'globalIdentifier' not in image:
            continue
        ret[image['name']] = {
            'id': image['id'],
            'name': image['name'],
            'globalIdentifier': image['globalIdentifier'],
        }
        if 'note' in image:
            ret[image['name']]['note'] = image['note']
    return ret
|
def function[list_custom_images, parameter[call]]:
constant[
Return a dict of all custom VM images on the cloud provider.
]
if compare[name[call] not_equal[!=] constant[function]] begin[:]
<ast.Raise object at 0x7da1b1c23340>
variable[ret] assign[=] dictionary[[], []]
variable[conn] assign[=] call[name[get_conn], parameter[constant[SoftLayer_Account]]]
variable[response] assign[=] call[name[conn].getBlockDeviceTemplateGroups, parameter[]]
for taget[name[image]] in starred[name[response]] begin[:]
if compare[constant[globalIdentifier] <ast.NotIn object at 0x7da2590d7190> name[image]] begin[:]
continue
call[name[ret]][call[name[image]][constant[name]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c20430>, <ast.Constant object at 0x7da1b1c204c0>, <ast.Constant object at 0x7da1b1c23e80>], [<ast.Subscript object at 0x7da1b1c20730>, <ast.Subscript object at 0x7da1b1c23760>, <ast.Subscript object at 0x7da1b1c23a90>]]
if compare[constant[note] in name[image]] begin[:]
call[call[name[ret]][call[name[image]][constant[name]]]][constant[note]] assign[=] call[name[image]][constant[note]]
return[name[ret]]
|
keyword[def] identifier[list_custom_images] ( identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
identifier[ret] ={}
identifier[conn] = identifier[get_conn] ( literal[string] )
identifier[response] = identifier[conn] . identifier[getBlockDeviceTemplateGroups] ()
keyword[for] identifier[image] keyword[in] identifier[response] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[image] :
keyword[continue]
identifier[ret] [ identifier[image] [ literal[string] ]]={
literal[string] : identifier[image] [ literal[string] ],
literal[string] : identifier[image] [ literal[string] ],
literal[string] : identifier[image] [ literal[string] ],
}
keyword[if] literal[string] keyword[in] identifier[image] :
identifier[ret] [ identifier[image] [ literal[string] ]][ literal[string] ]= identifier[image] [ literal[string] ]
keyword[return] identifier[ret]
|
def list_custom_images(call=None):
"""
Return a dict of all custom VM images on the cloud provider.
"""
if call != 'function':
raise SaltCloudSystemExit('The list_vlans function must be called with -f or --function.') # depends on [control=['if'], data=[]]
ret = {}
conn = get_conn('SoftLayer_Account')
response = conn.getBlockDeviceTemplateGroups()
for image in response:
if 'globalIdentifier' not in image:
continue # depends on [control=['if'], data=[]]
ret[image['name']] = {'id': image['id'], 'name': image['name'], 'globalIdentifier': image['globalIdentifier']}
if 'note' in image:
ret[image['name']]['note'] = image['note'] # depends on [control=['if'], data=['image']] # depends on [control=['for'], data=['image']]
return ret
|
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Commands for creating ACL (one dynamic ACL per direction)."""
    in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
    cmds = []
    # Emit the ingress ACL first, then the egress ACL, each wrapped in
    # its access-list header and a closing "exit".
    for direction, rules in ((n_const.INGRESS_DIRECTION, in_rules),
                             (n_const.EGRESS_DIRECTION, eg_rules)):
        cmds.append("ip access-list %s dynamic" %
                    self._acl_name(sg_id, direction))
        cmds.extend(rules)
        cmds.append("exit")
    return cmds
|
def function[get_create_security_group_commands, parameter[self, sg_id, sg_rules]]:
constant[Commands for creating ACL]
variable[cmds] assign[=] list[[]]
<ast.Tuple object at 0x7da1b1952410> assign[=] call[name[self]._format_rules_for_eos, parameter[name[sg_rules]]]
call[name[cmds].append, parameter[binary_operation[constant[ip access-list %s dynamic] <ast.Mod object at 0x7da2590d6920> call[name[self]._acl_name, parameter[name[sg_id], name[n_const].INGRESS_DIRECTION]]]]]
for taget[name[in_rule]] in starred[name[in_rules]] begin[:]
call[name[cmds].append, parameter[name[in_rule]]]
call[name[cmds].append, parameter[constant[exit]]]
call[name[cmds].append, parameter[binary_operation[constant[ip access-list %s dynamic] <ast.Mod object at 0x7da2590d6920> call[name[self]._acl_name, parameter[name[sg_id], name[n_const].EGRESS_DIRECTION]]]]]
for taget[name[eg_rule]] in starred[name[eg_rules]] begin[:]
call[name[cmds].append, parameter[name[eg_rule]]]
call[name[cmds].append, parameter[constant[exit]]]
return[name[cmds]]
|
keyword[def] identifier[get_create_security_group_commands] ( identifier[self] , identifier[sg_id] , identifier[sg_rules] ):
literal[string]
identifier[cmds] =[]
identifier[in_rules] , identifier[eg_rules] = identifier[self] . identifier[_format_rules_for_eos] ( identifier[sg_rules] )
identifier[cmds] . identifier[append] ( literal[string] %
identifier[self] . identifier[_acl_name] ( identifier[sg_id] , identifier[n_const] . identifier[INGRESS_DIRECTION] ))
keyword[for] identifier[in_rule] keyword[in] identifier[in_rules] :
identifier[cmds] . identifier[append] ( identifier[in_rule] )
identifier[cmds] . identifier[append] ( literal[string] )
identifier[cmds] . identifier[append] ( literal[string] %
identifier[self] . identifier[_acl_name] ( identifier[sg_id] , identifier[n_const] . identifier[EGRESS_DIRECTION] ))
keyword[for] identifier[eg_rule] keyword[in] identifier[eg_rules] :
identifier[cmds] . identifier[append] ( identifier[eg_rule] )
identifier[cmds] . identifier[append] ( literal[string] )
keyword[return] identifier[cmds]
|
def get_create_security_group_commands(self, sg_id, sg_rules):
"""Commands for creating ACL"""
cmds = []
(in_rules, eg_rules) = self._format_rules_for_eos(sg_rules)
cmds.append('ip access-list %s dynamic' % self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(in_rule) # depends on [control=['for'], data=['in_rule']]
cmds.append('exit')
cmds.append('ip access-list %s dynamic' % self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(eg_rule) # depends on [control=['for'], data=['eg_rule']]
cmds.append('exit')
return cmds
|
def forward(self, x, **kwargs):
    """
    Perform a forward pass through the network.

    The forward pass in the recursive som blends the distance to the
    regular weights (current time-step input) with the distance to the
    context weights (previous time-step activation).

    Parameters
    ----------
    x : numpy array
        The input data.
    prev_activation : numpy array.
        The activation of the network in the previous time-step.

    Returns
    -------
    activations : tuple of activations and differences
        A tuple containing the activation of each unit, the differences
        between the weights and input and the differences between the
        context input and context weights.
    """
    prev_activation = kwargs['prev_activation']
    # Each distance call also yields the component-wise differences
    # between the input and the weight vectors.
    dist_x, diff_x = self.distance_function(x, self.weights)
    dist_y, diff_y = self.distance_function(prev_activation,
                                            self.context_weights)
    # alpha/beta weigh the input distance against the context distance.
    combined = dist_x * self.alpha + dist_y * self.beta
    activation = np.exp(-combined)
    return activation, diff_x, diff_y
|
def function[forward, parameter[self, x]]:
constant[
Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights.
]
variable[prev] assign[=] call[name[kwargs]][constant[prev_activation]]
<ast.Tuple object at 0x7da1b27eea70> assign[=] call[name[self].distance_function, parameter[name[x], name[self].weights]]
<ast.Tuple object at 0x7da1b27ee9e0> assign[=] call[name[self].distance_function, parameter[name[prev], name[self].context_weights]]
variable[x_] assign[=] binary_operation[name[distance_x] * name[self].alpha]
variable[y_] assign[=] binary_operation[name[distance_y] * name[self].beta]
variable[activation] assign[=] call[name[np].exp, parameter[<ast.UnaryOp object at 0x7da1b27ee890>]]
return[tuple[[<ast.Name object at 0x7da1b27ec100>, <ast.Name object at 0x7da1b2727e80>, <ast.Name object at 0x7da1b2726500>]]]
|
keyword[def] identifier[forward] ( identifier[self] , identifier[x] ,** identifier[kwargs] ):
literal[string]
identifier[prev] = identifier[kwargs] [ literal[string] ]
identifier[distance_x] , identifier[diff_x] = identifier[self] . identifier[distance_function] ( identifier[x] , identifier[self] . identifier[weights] )
identifier[distance_y] , identifier[diff_y] = identifier[self] . identifier[distance_function] ( identifier[prev] , identifier[self] . identifier[context_weights] )
identifier[x_] = identifier[distance_x] * identifier[self] . identifier[alpha]
identifier[y_] = identifier[distance_y] * identifier[self] . identifier[beta]
identifier[activation] = identifier[np] . identifier[exp] (-( identifier[x_] + identifier[y_] ))
keyword[return] identifier[activation] , identifier[diff_x] , identifier[diff_y]
|
def forward(self, x, **kwargs):
"""
Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights.
"""
prev = kwargs['prev_activation']
# Differences is the components of the weights subtracted from
# the weight vector.
(distance_x, diff_x) = self.distance_function(x, self.weights)
(distance_y, diff_y) = self.distance_function(prev, self.context_weights)
x_ = distance_x * self.alpha
y_ = distance_y * self.beta
activation = np.exp(-(x_ + y_))
return (activation, diff_x, diff_y)
|
def get_op_or_tensor_by_name(name):
    """
    Get either tf.Operation or tf.Tensor from names.

    Args:
        name (list[str] or str): names of operations or tensors.

    Raises:
        KeyError, if the name doesn't exist
    """
    graph = tfv1.get_default_graph()

    def lookup(item):
        # A tensor name ends in ':<output-index>', e.g. 'foo:0';
        # anything else is treated as an operation name.
        if len(item) >= 3 and item[-2] == ':':
            return graph.get_tensor_by_name(item)
        return graph.get_operation_by_name(item)

    if isinstance(name, list):
        return [lookup(item) for item in name]
    return lookup(name)
|
def function[get_op_or_tensor_by_name, parameter[name]]:
constant[
Get either tf.Operation of tf.Tensor from names.
Args:
name (list[str] or str): names of operations or tensors.
Raises:
KeyError, if the name doesn't exist
]
variable[G] assign[=] call[name[tfv1].get_default_graph, parameter[]]
def function[f, parameter[n]]:
if <ast.BoolOp object at 0x7da18f09cf10> begin[:]
return[call[name[G].get_tensor_by_name, parameter[name[n]]]]
if <ast.UnaryOp object at 0x7da18f09e560> begin[:]
return[call[name[f], parameter[name[name]]]]
|
keyword[def] identifier[get_op_or_tensor_by_name] ( identifier[name] ):
literal[string]
identifier[G] = identifier[tfv1] . identifier[get_default_graph] ()
keyword[def] identifier[f] ( identifier[n] ):
keyword[if] identifier[len] ( identifier[n] )>= literal[int] keyword[and] identifier[n] [- literal[int] ]== literal[string] :
keyword[return] identifier[G] . identifier[get_tensor_by_name] ( identifier[n] )
keyword[else] :
keyword[return] identifier[G] . identifier[get_operation_by_name] ( identifier[n] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[list] ):
keyword[return] identifier[f] ( identifier[name] )
keyword[else] :
keyword[return] identifier[list] ( identifier[map] ( identifier[f] , identifier[name] ))
|
def get_op_or_tensor_by_name(name):
"""
Get either tf.Operation of tf.Tensor from names.
Args:
name (list[str] or str): names of operations or tensors.
Raises:
KeyError, if the name doesn't exist
"""
G = tfv1.get_default_graph()
def f(n):
if len(n) >= 3 and n[-2] == ':':
return G.get_tensor_by_name(n) # depends on [control=['if'], data=[]]
else:
return G.get_operation_by_name(n)
if not isinstance(name, list):
return f(name) # depends on [control=['if'], data=[]]
else:
return list(map(f, name))
|
def init_copy(self, connection):
    """
    Perform pre-copy sql - such as creating table, truncating, or removing data older than x.

    Each step below is independently guarded and optional; the order is
    significant (schema before table, then metadata columns, truncation
    and pruning).

    Parameters
    ----------
    connection :
        Open database connection passed through to every step.
    """
    # Create the schema first so the table-creation step can target it.
    if not self.does_schema_exist(connection):
        logger.info("Creating schema for %s", self.table)
        self.create_schema(connection)
    if not self.does_table_exist(connection):
        logger.info("Creating table %s", self.table)
        self.create_table(connection)
    # Optional feature flag: add bookkeeping columns to the table.
    if self.enable_metadata_columns:
        self._add_metadata_columns(connection)
    if self.do_truncate_table:
        logger.info("Truncating table %s", self.table)
        self.truncate_table(connection)
    # Prune rows older than self.prune_date when pruning is enabled.
    if self.do_prune():
        logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table)
        self.prune(connection)
|
def function[init_copy, parameter[self, connection]]:
constant[
Perform pre-copy sql - such as creating table, truncating, or removing data older than x.
]
if <ast.UnaryOp object at 0x7da18f723700> begin[:]
call[name[logger].info, parameter[constant[Creating schema for %s], name[self].table]]
call[name[self].create_schema, parameter[name[connection]]]
if <ast.UnaryOp object at 0x7da18f7220e0> begin[:]
call[name[logger].info, parameter[constant[Creating table %s], name[self].table]]
call[name[self].create_table, parameter[name[connection]]]
if name[self].enable_metadata_columns begin[:]
call[name[self]._add_metadata_columns, parameter[name[connection]]]
if name[self].do_truncate_table begin[:]
call[name[logger].info, parameter[constant[Truncating table %s], name[self].table]]
call[name[self].truncate_table, parameter[name[connection]]]
if call[name[self].do_prune, parameter[]] begin[:]
call[name[logger].info, parameter[constant[Removing %s older than %s from %s], name[self].prune_column, name[self].prune_date, name[self].prune_table]]
call[name[self].prune, parameter[name[connection]]]
|
keyword[def] identifier[init_copy] ( identifier[self] , identifier[connection] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[does_schema_exist] ( identifier[connection] ):
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[table] )
identifier[self] . identifier[create_schema] ( identifier[connection] )
keyword[if] keyword[not] identifier[self] . identifier[does_table_exist] ( identifier[connection] ):
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[table] )
identifier[self] . identifier[create_table] ( identifier[connection] )
keyword[if] identifier[self] . identifier[enable_metadata_columns] :
identifier[self] . identifier[_add_metadata_columns] ( identifier[connection] )
keyword[if] identifier[self] . identifier[do_truncate_table] :
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[table] )
identifier[self] . identifier[truncate_table] ( identifier[connection] )
keyword[if] identifier[self] . identifier[do_prune] ():
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[prune_column] , identifier[self] . identifier[prune_date] , identifier[self] . identifier[prune_table] )
identifier[self] . identifier[prune] ( identifier[connection] )
|
def init_copy(self, connection):
"""
Perform pre-copy sql - such as creating table, truncating, or removing data older than x.
"""
if not self.does_schema_exist(connection):
logger.info('Creating schema for %s', self.table)
self.create_schema(connection) # depends on [control=['if'], data=[]]
if not self.does_table_exist(connection):
logger.info('Creating table %s', self.table)
self.create_table(connection) # depends on [control=['if'], data=[]]
if self.enable_metadata_columns:
self._add_metadata_columns(connection) # depends on [control=['if'], data=[]]
if self.do_truncate_table:
logger.info('Truncating table %s', self.table)
self.truncate_table(connection) # depends on [control=['if'], data=[]]
if self.do_prune():
logger.info('Removing %s older than %s from %s', self.prune_column, self.prune_date, self.prune_table)
self.prune(connection) # depends on [control=['if'], data=[]]
|
def update_safety_check(first_dict: MutableMapping[K, V],
                        second_dict: Mapping[K, V],
                        compat: Callable[[V, V], bool] = equivalent) -> None:
    """Check the safety of updating one dictionary with another.

    Every item of second_dict whose key already exists in first_dict is
    compared against the existing value; a ValueError is raised for the
    first pair that is neither identical nor `compat`-compatible.

    Parameters
    ----------
    first_dict, second_dict : dict-like
        All items in the second dictionary are checked against for conflicts
        against items in the first dictionary.
    compat : function, optional
        Binary operator to determine if two values are compatible. By default,
        checks for equivalence.
    """
    for key, new_value in second_dict.items():
        if key not in first_dict:
            continue
        if not compat(new_value, first_dict[key]):
            raise ValueError('unsafe to merge dictionaries without '
                             'overriding values; conflicting key %r' % key)
|
def function[update_safety_check, parameter[first_dict, second_dict, compat]]:
constant[Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked against for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
]
for taget[tuple[[<ast.Name object at 0x7da207f99ed0>, <ast.Name object at 0x7da207f9b640>]]] in starred[call[name[second_dict].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da207f98340> begin[:]
<ast.Raise object at 0x7da207f99690>
|
keyword[def] identifier[update_safety_check] ( identifier[first_dict] : identifier[MutableMapping] [ identifier[K] , identifier[V] ],
identifier[second_dict] : identifier[Mapping] [ identifier[K] , identifier[V] ],
identifier[compat] : identifier[Callable] [[ identifier[V] , identifier[V] ], identifier[bool] ]= identifier[equivalent] )-> keyword[None] :
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[second_dict] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[first_dict] keyword[and] keyword[not] identifier[compat] ( identifier[v] , identifier[first_dict] [ identifier[k] ]):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[k] )
|
def update_safety_check(first_dict: MutableMapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool]=equivalent) -> None:
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked against for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for (k, v) in second_dict.items():
if k in first_dict and (not compat(v, first_dict[k])):
raise ValueError('unsafe to merge dictionaries without overriding values; conflicting key %r' % k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def setM1Coast(self, device=DEFAULT_DEVICE_ID):
    """
    Set motor 1 to coast.

    :Keywords:
      device : `int`
        The device is the integer number of the hardware devices ID and
        is only used with the Pololu Protocol. Defaults to the hardware's
        default value.

    :Exceptions:
      * `SerialTimeoutException`
        If the low level serial package times out.
      * `SerialException`
        IO error when the port is not open.
    """
    # Look up the protocol byte for "motor 1 coast" and send it.
    self._writeData(self._COMMAND.get('m1-coast'), device)
|
def function[setM1Coast, parameter[self, device]]:
constant[
Set motor 1 to coast.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
]
variable[cmd] assign[=] call[name[self]._COMMAND.get, parameter[constant[m1-coast]]]
call[name[self]._writeData, parameter[name[cmd], name[device]]]
|
keyword[def] identifier[setM1Coast] ( identifier[self] , identifier[device] = identifier[DEFAULT_DEVICE_ID] ):
literal[string]
identifier[cmd] = identifier[self] . identifier[_COMMAND] . identifier[get] ( literal[string] )
identifier[self] . identifier[_writeData] ( identifier[cmd] , identifier[device] )
|
def setM1Coast(self, device=DEFAULT_DEVICE_ID):
"""
Set motor 1 to coast.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
cmd = self._COMMAND.get('m1-coast')
self._writeData(cmd, device)
|
def propagate(self, assumptions=None, phase_saving=0):
    """
    Propagate a given set of assumption literals.

    Parameters
    ----------
    assumptions : list of int, optional
        Assumption literals to propagate (defaults to no assumptions).
    phase_saving : int
        Phase-saving mode forwarded to the solver.

    Returns
    -------
    tuple(bool, list) or None
        ``(status, propagated_literals)``; ``None`` when no solver
        object is attached.
    """
    if not self.maplesat:
        return None
    if assumptions is None:
        # Avoid the shared mutable-default-argument pitfall.
        assumptions = []
    if self.use_timer:
        # time.clock() was removed in Python 3.8; process_time() is
        # its documented CPU-time replacement.
        start_time = time.process_time()
    # Temporarily restore the default SIGINT handler so Ctrl-C can
    # interrupt the C-level propagation, then put the old one back.
    def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
    st, props = pysolvers.maplechrono_propagate(self.maplesat,
                                                assumptions, phase_saving)
    signal.signal(signal.SIGINT, def_sigint_handler)
    if self.use_timer:
        self.call_time = time.process_time() - start_time
        self.accu_time += self.call_time
    return bool(st), props if props is not None else []
|
def function[propagate, parameter[self, assumptions, phase_saving]]:
constant[
Propagate a given set of assumption literals.
]
if name[self].maplesat begin[:]
if name[self].use_timer begin[:]
variable[start_time] assign[=] call[name[time].clock, parameter[]]
variable[def_sigint_handler] assign[=] call[name[signal].signal, parameter[name[signal].SIGINT, name[signal].SIG_DFL]]
<ast.Tuple object at 0x7da1b11d52d0> assign[=] call[name[pysolvers].maplechrono_propagate, parameter[name[self].maplesat, name[assumptions], name[phase_saving]]]
variable[def_sigint_handler] assign[=] call[name[signal].signal, parameter[name[signal].SIGINT, name[def_sigint_handler]]]
if name[self].use_timer begin[:]
name[self].call_time assign[=] binary_operation[call[name[time].clock, parameter[]] - name[start_time]]
<ast.AugAssign object at 0x7da1b11d6b60>
return[tuple[[<ast.Call object at 0x7da1b11d76d0>, <ast.IfExp object at 0x7da1b11d7ac0>]]]
|
keyword[def] identifier[propagate] ( identifier[self] , identifier[assumptions] =[], identifier[phase_saving] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[maplesat] :
keyword[if] identifier[self] . identifier[use_timer] :
identifier[start_time] = identifier[time] . identifier[clock] ()
identifier[def_sigint_handler] = identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[signal] . identifier[SIG_DFL] )
identifier[st] , identifier[props] = identifier[pysolvers] . identifier[maplechrono_propagate] ( identifier[self] . identifier[maplesat] , identifier[assumptions] , identifier[phase_saving] )
identifier[def_sigint_handler] = identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[def_sigint_handler] )
keyword[if] identifier[self] . identifier[use_timer] :
identifier[self] . identifier[call_time] = identifier[time] . identifier[clock] ()- identifier[start_time]
identifier[self] . identifier[accu_time] += identifier[self] . identifier[call_time]
keyword[return] identifier[bool] ( identifier[st] ), identifier[props] keyword[if] identifier[props] != keyword[None] keyword[else] []
|
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = time.clock() # depends on [control=['if'], data=[]]
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
(st, props) = pysolvers.maplechrono_propagate(self.maplesat, assumptions, phase_saving)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if self.use_timer:
self.call_time = time.clock() - start_time
self.accu_time += self.call_time # depends on [control=['if'], data=[]]
return (bool(st), props if props != None else []) # depends on [control=['if'], data=[]]
|
def destroy(self):
    """
    Delete the page. May delete the whole document if it's actually the
    last page.
    """
    logger.info("Destroying page: %s" % self)
    doc = self.doc
    if doc.nb_pages <= 1:
        # Last remaining page: drop the whole document instead.
        doc.destroy()
        return
    pages_snapshot = doc.pages[:]
    total_pages = doc.nb_pages
    # Remove every file backing this page (box, image, thumbnail).
    for path in (self.__get_box_path(),
                 self.__get_img_path(),
                 self._get_thumb_path()):
        if self.fs.exists(path):
            self.fs.unlink(path)
    # Shift every later page down by one index to close the gap.
    for later_nb in range(self.page_nb + 1, total_pages):
        pages_snapshot[later_nb].change_index(offset=-1)
|
def function[destroy, parameter[self]]:
constant[
Delete the page. May delete the whole document if it's actually the
last page.
]
call[name[logger].info, parameter[binary_operation[constant[Destroying page: %s] <ast.Mod object at 0x7da2590d6920> name[self]]]]
if compare[name[self].doc.nb_pages less_or_equal[<=] constant[1]] begin[:]
call[name[self].doc.destroy, parameter[]]
return[None]
variable[doc_pages] assign[=] call[name[self].doc.pages][<ast.Slice object at 0x7da18f810310>]
variable[current_doc_nb_pages] assign[=] name[self].doc.nb_pages
variable[paths] assign[=] list[[<ast.Call object at 0x7da18f811f00>, <ast.Call object at 0x7da18f813610>, <ast.Call object at 0x7da18f813820>]]
for taget[name[path]] in starred[name[paths]] begin[:]
if call[name[self].fs.exists, parameter[name[path]]] begin[:]
call[name[self].fs.unlink, parameter[name[path]]]
for taget[name[page_nb]] in starred[call[name[range], parameter[binary_operation[name[self].page_nb + constant[1]], name[current_doc_nb_pages]]]] begin[:]
variable[page] assign[=] call[name[doc_pages]][name[page_nb]]
call[name[page].change_index, parameter[]]
|
keyword[def] identifier[destroy] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] % identifier[self] )
keyword[if] identifier[self] . identifier[doc] . identifier[nb_pages] <= literal[int] :
identifier[self] . identifier[doc] . identifier[destroy] ()
keyword[return]
identifier[doc_pages] = identifier[self] . identifier[doc] . identifier[pages] [:]
identifier[current_doc_nb_pages] = identifier[self] . identifier[doc] . identifier[nb_pages]
identifier[paths] =[
identifier[self] . identifier[__get_box_path] (),
identifier[self] . identifier[__get_img_path] (),
identifier[self] . identifier[_get_thumb_path] (),
]
keyword[for] identifier[path] keyword[in] identifier[paths] :
keyword[if] identifier[self] . identifier[fs] . identifier[exists] ( identifier[path] ):
identifier[self] . identifier[fs] . identifier[unlink] ( identifier[path] )
keyword[for] identifier[page_nb] keyword[in] identifier[range] ( identifier[self] . identifier[page_nb] + literal[int] , identifier[current_doc_nb_pages] ):
identifier[page] = identifier[doc_pages] [ identifier[page_nb] ]
identifier[page] . identifier[change_index] ( identifier[offset] =- literal[int] )
|
def destroy(self):
"""
Delete the page. May delete the whole document if it's actually the
last page.
"""
logger.info('Destroying page: %s' % self)
if self.doc.nb_pages <= 1:
self.doc.destroy()
return # depends on [control=['if'], data=[]]
doc_pages = self.doc.pages[:]
current_doc_nb_pages = self.doc.nb_pages
paths = [self.__get_box_path(), self.__get_img_path(), self._get_thumb_path()]
for path in paths:
if self.fs.exists(path):
self.fs.unlink(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
for page_nb in range(self.page_nb + 1, current_doc_nb_pages):
page = doc_pages[page_nb]
page.change_index(offset=-1) # depends on [control=['for'], data=['page_nb']]
|
def read(self):
    """Yield every parsed JSON object from the underlying input (generator).

    Each raw line obtained from ``self.io`` is handed to
    ``self.parse_line``, whose context manager yields the parsed value.
    """
    raw_lines = self.io.read()
    for raw_line in raw_lines:
        parser = self.parse_line(raw_line)
        with parser as parsed:
            yield parsed
|
def function[read, parameter[self]]:
constant[Iterate over all JSON input (Generator)]
for taget[name[line]] in starred[call[name[self].io.read, parameter[]]] begin[:]
with call[name[self].parse_line, parameter[name[line]]] begin[:]
<ast.Yield object at 0x7da204347b50>
|
keyword[def] identifier[read] ( identifier[self] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[io] . identifier[read] ():
keyword[with] identifier[self] . identifier[parse_line] ( identifier[line] ) keyword[as] identifier[j] :
keyword[yield] identifier[j]
|
def read(self):
"""Iterate over all JSON input (Generator)"""
for line in self.io.read():
with self.parse_line(line) as j:
yield j # depends on [control=['with'], data=['j']] # depends on [control=['for'], data=['line']]
|
def put(self, key, data):
    """Implementation of :meth:`~simplekv.KeyValueStore.put`.

    Delegates the write to the backing store. Whether or not the write
    succeeds, *key* is afterwards dropped from the cache so a stale value
    can never be served.
    """
    try:
        result = self._dstore.put(key, data)
    finally:
        # Invalidate even on failure: after an error the backing store's
        # state is unknown, so any cached value must not survive.
        self.cache.delete(key)
    return result
|
def function[put, parameter[self, key, data]]:
constant[Implementation of :meth:`~simplekv.KeyValueStore.put`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it.
]
<ast.Try object at 0x7da1b1a96c80>
|
keyword[def] identifier[put] ( identifier[self] , identifier[key] , identifier[data] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_dstore] . identifier[put] ( identifier[key] , identifier[data] )
keyword[finally] :
identifier[self] . identifier[cache] . identifier[delete] ( identifier[key] )
|
def put(self, key, data):
"""Implementation of :meth:`~simplekv.KeyValueStore.put`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it.
"""
try:
return self._dstore.put(key, data) # depends on [control=['try'], data=[]]
finally:
self.cache.delete(key)
|
def error(cls, name, message, *args):
    """
    Convenience function to log a message at the ERROR level.
    :param name: The name of the logger instance in the VSG namespace (VSG.<name>)
    :param message: A message format string.
    :param args: The arguments that are merged into the message using the
        string formatting operator.
    :..note: The native logger's `kwargs` are not used in this function.
    """
    logger = cls.getLogger(name)
    logger.error(message, *args)
|
def function[error, parameter[cls, name, message]]:
constant[
Convenience function to log a message at the ERROR level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
]
call[call[name[cls].getLogger, parameter[name[name]]].error, parameter[name[message], <ast.Starred object at 0x7da18f00cfd0>]]
|
keyword[def] identifier[error] ( identifier[cls] , identifier[name] , identifier[message] ,* identifier[args] ):
literal[string]
identifier[cls] . identifier[getLogger] ( identifier[name] ). identifier[error] ( identifier[message] ,* identifier[args] )
|
def error(cls, name, message, *args):
"""
Convenience function to log a message at the ERROR level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).error(message, *args)
|
async def turn_on(self, switch=None):
    """Turn on relay.

    When *switch* is given it is a hex string naming one relay; otherwise
    the broadcast "all on" command is sent. Returns the relay states
    reported back by the device.
    """
    if switch is None:
        payload = b"\x0a"
    else:
        relay = codecs.decode(switch.rjust(2, '0'), 'hex')
        payload = b"\x10" + relay + b"\x01"
    packet = self.protocol.format_packet(payload)
    return await self._send(packet)
|
<ast.AsyncFunctionDef object at 0x7da1b276b160>
|
keyword[async] keyword[def] identifier[turn_on] ( identifier[self] , identifier[switch] = keyword[None] ):
literal[string]
keyword[if] identifier[switch] keyword[is] keyword[not] keyword[None] :
identifier[switch] = identifier[codecs] . identifier[decode] ( identifier[switch] . identifier[rjust] ( literal[int] , literal[string] ), literal[string] )
identifier[packet] = identifier[self] . identifier[protocol] . identifier[format_packet] ( literal[string] + identifier[switch] + literal[string] )
keyword[else] :
identifier[packet] = identifier[self] . identifier[protocol] . identifier[format_packet] ( literal[string] )
identifier[states] = keyword[await] identifier[self] . identifier[_send] ( identifier[packet] )
keyword[return] identifier[states]
|
async def turn_on(self, switch=None):
"""Turn on relay."""
if switch is not None:
switch = codecs.decode(switch.rjust(2, '0'), 'hex')
packet = self.protocol.format_packet(b'\x10' + switch + b'\x01') # depends on [control=['if'], data=['switch']]
else:
packet = self.protocol.format_packet(b'\n')
states = await self._send(packet)
return states
|
def read_core_register(self, reg):
    """
    Read a CPU register given its name (or index).

    Raw values read from floating point registers are reinterpreted as
    Python floats; all other registers come back as plain integers.
    """
    index = register_name_to_index(reg)
    raw_value = self.read_core_register_raw(index)
    # Reinterpret the raw bit pattern for FP registers.
    if is_single_float_register(index):
        return conversion.u32_to_float32(raw_value)
    if is_double_float_register(index):
        return conversion.u64_to_float64(raw_value)
    return raw_value
|
def function[read_core_register, parameter[self, reg]]:
constant[
read CPU register
Unpack floating point register values
]
variable[regIndex] assign[=] call[name[register_name_to_index], parameter[name[reg]]]
variable[regValue] assign[=] call[name[self].read_core_register_raw, parameter[name[regIndex]]]
if call[name[is_single_float_register], parameter[name[regIndex]]] begin[:]
variable[regValue] assign[=] call[name[conversion].u32_to_float32, parameter[name[regValue]]]
return[name[regValue]]
|
keyword[def] identifier[read_core_register] ( identifier[self] , identifier[reg] ):
literal[string]
identifier[regIndex] = identifier[register_name_to_index] ( identifier[reg] )
identifier[regValue] = identifier[self] . identifier[read_core_register_raw] ( identifier[regIndex] )
keyword[if] identifier[is_single_float_register] ( identifier[regIndex] ):
identifier[regValue] = identifier[conversion] . identifier[u32_to_float32] ( identifier[regValue] )
keyword[elif] identifier[is_double_float_register] ( identifier[regIndex] ):
identifier[regValue] = identifier[conversion] . identifier[u64_to_float64] ( identifier[regValue] )
keyword[return] identifier[regValue]
|
def read_core_register(self, reg):
"""
read CPU register
Unpack floating point register values
"""
regIndex = register_name_to_index(reg)
regValue = self.read_core_register_raw(regIndex)
# Convert int to float.
if is_single_float_register(regIndex):
regValue = conversion.u32_to_float32(regValue) # depends on [control=['if'], data=[]]
elif is_double_float_register(regIndex):
regValue = conversion.u64_to_float64(regValue) # depends on [control=['if'], data=[]]
return regValue
|
def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False,
                    draw=False, remove=False, priority=None):
    """
    Initialize the plot for a data array

    Parameters
    ----------
    data: InteractiveArray or ArrayList, optional
        Data object that shall be visualized.

        - If not None and `plot` is True, the given data is visualized.
        - If None and the :attr:`data` attribute is not None, the data in
          the :attr:`data` attribute is visualized
        - If both are None, nothing is done.
    %(Plotter.parameters.ax|make_plot|clear)s
    %(InteractiveBase.start_update.parameters.draw)s
    remove: bool
        If True, old effects by the formatoptions in this plotter are
        undone first
    priority: int
        If given, initialize only the formatoption with the given priority.
        This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
        :data:`END`
    """
    # Fall back on the stored data when no new data is supplied;
    # otherwise remember the new data on the plotter.
    if data is None and self.data is not None:
        data = self.data
    else:
        self.data = data
    self.ax = ax
    if data is None:  # nothing to do if no data is given
        return
    # Auto-updating is suppressed if either this plotter or the data
    # object requests suppression.
    self.no_auto_update = not (
        not self.no_auto_update or not data.psy.no_auto_update)
    # Attach this plotter to the data so later updates find it.
    data.psy.plotter = self
    if not make_plot:  # stop here if we shall not plot
        return
    self.logger.debug("Initializing plot...")
    if remove:
        self.logger.debug("    Removing old formatoptions...")
        # Best-effort removal: a formatoption that fails to remove its
        # old effects must not abort the whole initialization.
        for fmto in self._fmtos:
            try:
                fmto.remove()
            except Exception:
                self.logger.debug(
                    "Could not remove %s while initializing", fmto.key,
                    exc_info=True)
    if clear:
        self.logger.debug("    Clearing axes...")
        self.ax.clear()
        self.cleared = True
    # get the formatoptions. We sort them here by key to make sure that the
    # order always stays the same (easier for debugging)
    fmto_groups = self._grouped_fmtos(self._sorted_by_priority(
        sorted(self._fmtos, key=lambda fmto: fmto.key)))
    self.plot_data = self.data
    self._updating = True
    # Apply the formatoptions priority group by priority group; a given
    # *priority* restricts the work to that single group.
    for fmto_priority, grouper in fmto_groups:
        if priority is None or fmto_priority == priority:
            self._plot_by_priority(fmto_priority, grouper,
                                   initializing=True)
    self._release_all(True)  # finish the update
    # Reset the transient state flags now that initialization succeeded.
    self.cleared = False
    self.replot = False
    self._initialized = True
    self._updating = False
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        self.draw()
        # NOTE: auto_show is only consulted when a draw actually happened.
        if rcParams['auto_show']:
            self.show()
|
def function[initialize_plot, parameter[self, data, ax, make_plot, clear, draw, remove, priority]]:
constant[
Initialize the plot for a data array
Parameters
----------
data: InteractiveArray or ArrayList, optional
Data object that shall be visualized.
- If not None and `plot` is True, the given data is visualized.
- If None and the :attr:`data` attribute is not None, the data in
the :attr:`data` attribute is visualized
- If both are None, nothing is done.
%(Plotter.parameters.ax|make_plot|clear)s
%(InteractiveBase.start_update.parameters.draw)s
remove: bool
If True, old effects by the formatoptions in this plotter are
undone first
priority: int
If given, initialize only the formatoption with the given priority.
This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
:data:`END`
]
if <ast.BoolOp object at 0x7da18f00f940> begin[:]
variable[data] assign[=] name[self].data
name[self].ax assign[=] name[ax]
if compare[name[data] is constant[None]] begin[:]
return[None]
name[self].no_auto_update assign[=] <ast.UnaryOp object at 0x7da18f00e590>
name[data].psy.plotter assign[=] name[self]
if <ast.UnaryOp object at 0x7da18f00ffd0> begin[:]
return[None]
call[name[self].logger.debug, parameter[constant[Initializing plot...]]]
if name[remove] begin[:]
call[name[self].logger.debug, parameter[constant[ Removing old formatoptions...]]]
for taget[name[fmto]] in starred[name[self]._fmtos] begin[:]
<ast.Try object at 0x7da18f00eb60>
if name[clear] begin[:]
call[name[self].logger.debug, parameter[constant[ Clearing axes...]]]
call[name[self].ax.clear, parameter[]]
name[self].cleared assign[=] constant[True]
variable[fmto_groups] assign[=] call[name[self]._grouped_fmtos, parameter[call[name[self]._sorted_by_priority, parameter[call[name[sorted], parameter[name[self]._fmtos]]]]]]
name[self].plot_data assign[=] name[self].data
name[self]._updating assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da18f00d390>, <ast.Name object at 0x7da18f00ed40>]]] in starred[name[fmto_groups]] begin[:]
if <ast.BoolOp object at 0x7da18f00df60> begin[:]
call[name[self]._plot_by_priority, parameter[name[fmto_priority], name[grouper]]]
call[name[self]._release_all, parameter[constant[True]]]
name[self].cleared assign[=] constant[False]
name[self].replot assign[=] constant[False]
name[self]._initialized assign[=] constant[True]
name[self]._updating assign[=] constant[False]
if compare[name[draw] is constant[None]] begin[:]
variable[draw] assign[=] call[name[rcParams]][constant[auto_draw]]
if name[draw] begin[:]
call[name[self].draw, parameter[]]
if call[name[rcParams]][constant[auto_show]] begin[:]
call[name[self].show, parameter[]]
|
keyword[def] identifier[initialize_plot] ( identifier[self] , identifier[data] = keyword[None] , identifier[ax] = keyword[None] , identifier[make_plot] = keyword[True] , identifier[clear] = keyword[False] ,
identifier[draw] = keyword[False] , identifier[remove] = keyword[False] , identifier[priority] = keyword[None] ):
literal[string]
keyword[if] identifier[data] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[data] = identifier[self] . identifier[data]
keyword[else] :
identifier[self] . identifier[data] = identifier[data]
identifier[self] . identifier[ax] = identifier[ax]
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return]
identifier[self] . identifier[no_auto_update] = keyword[not] (
keyword[not] identifier[self] . identifier[no_auto_update] keyword[or] keyword[not] identifier[data] . identifier[psy] . identifier[no_auto_update] )
identifier[data] . identifier[psy] . identifier[plotter] = identifier[self]
keyword[if] keyword[not] identifier[make_plot] :
keyword[return]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[remove] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[for] identifier[fmto] keyword[in] identifier[self] . identifier[_fmtos] :
keyword[try] :
identifier[fmto] . identifier[remove] ()
keyword[except] identifier[Exception] :
identifier[self] . identifier[logger] . identifier[debug] (
literal[string] , identifier[fmto] . identifier[key] ,
identifier[exc_info] = keyword[True] )
keyword[if] identifier[clear] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[ax] . identifier[clear] ()
identifier[self] . identifier[cleared] = keyword[True]
identifier[fmto_groups] = identifier[self] . identifier[_grouped_fmtos] ( identifier[self] . identifier[_sorted_by_priority] (
identifier[sorted] ( identifier[self] . identifier[_fmtos] , identifier[key] = keyword[lambda] identifier[fmto] : identifier[fmto] . identifier[key] )))
identifier[self] . identifier[plot_data] = identifier[self] . identifier[data]
identifier[self] . identifier[_updating] = keyword[True]
keyword[for] identifier[fmto_priority] , identifier[grouper] keyword[in] identifier[fmto_groups] :
keyword[if] identifier[priority] keyword[is] keyword[None] keyword[or] identifier[fmto_priority] == identifier[priority] :
identifier[self] . identifier[_plot_by_priority] ( identifier[fmto_priority] , identifier[grouper] ,
identifier[initializing] = keyword[True] )
identifier[self] . identifier[_release_all] ( keyword[True] )
identifier[self] . identifier[cleared] = keyword[False]
identifier[self] . identifier[replot] = keyword[False]
identifier[self] . identifier[_initialized] = keyword[True]
identifier[self] . identifier[_updating] = keyword[False]
keyword[if] identifier[draw] keyword[is] keyword[None] :
identifier[draw] = identifier[rcParams] [ literal[string] ]
keyword[if] identifier[draw] :
identifier[self] . identifier[draw] ()
keyword[if] identifier[rcParams] [ literal[string] ]:
identifier[self] . identifier[show] ()
|
def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, draw=False, remove=False, priority=None):
"""
Initialize the plot for a data array
Parameters
----------
data: InteractiveArray or ArrayList, optional
Data object that shall be visualized.
- If not None and `plot` is True, the given data is visualized.
- If None and the :attr:`data` attribute is not None, the data in
the :attr:`data` attribute is visualized
- If both are None, nothing is done.
%(Plotter.parameters.ax|make_plot|clear)s
%(InteractiveBase.start_update.parameters.draw)s
remove: bool
If True, old effects by the formatoptions in this plotter are
undone first
priority: int
If given, initialize only the formatoption with the given priority.
This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
:data:`END`
"""
if data is None and self.data is not None:
data = self.data # depends on [control=['if'], data=[]]
else:
self.data = data
self.ax = ax
if data is None: # nothing to do if no data is given
return # depends on [control=['if'], data=[]]
self.no_auto_update = not (not self.no_auto_update or not data.psy.no_auto_update)
data.psy.plotter = self
if not make_plot: # stop here if we shall not plot
return # depends on [control=['if'], data=[]]
self.logger.debug('Initializing plot...')
if remove:
self.logger.debug(' Removing old formatoptions...')
for fmto in self._fmtos:
try:
fmto.remove() # depends on [control=['try'], data=[]]
except Exception:
self.logger.debug('Could not remove %s while initializing', fmto.key, exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['fmto']] # depends on [control=['if'], data=[]]
if clear:
self.logger.debug(' Clearing axes...')
self.ax.clear()
self.cleared = True # depends on [control=['if'], data=[]]
# get the formatoptions. We sort them here by key to make sure that the
# order always stays the same (easier for debugging)
fmto_groups = self._grouped_fmtos(self._sorted_by_priority(sorted(self._fmtos, key=lambda fmto: fmto.key)))
self.plot_data = self.data
self._updating = True
for (fmto_priority, grouper) in fmto_groups:
if priority is None or fmto_priority == priority:
self._plot_by_priority(fmto_priority, grouper, initializing=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self._release_all(True) # finish the update
self.cleared = False
self.replot = False
self._initialized = True
self._updating = False
if draw is None:
draw = rcParams['auto_draw'] # depends on [control=['if'], data=['draw']]
if draw:
self.draw()
if rcParams['auto_show']:
self.show() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def extract(archive, directory, suffix=None, unpack_single_dir=False,
            check_extract_file=None, progress_callback=None, default_mode='755'):
    """
    Extract the contents of *archive* to the specified *directory*. This
    function ensures that no file is extracted outside of the target directory
    (which can theoretically happen if the arcname is not relative or points
    to a parent directory).

    # Parameters
    archive (str, archive-like): The filename of an archive or an already
      opened archive.
    directory (str): Path to the directory to unpack the contents to.
    suffix (str): Optional suffix hint forwarded to :func:`open` when
      *archive* is a filename.
    unpack_single_dir (bool): If this is True and if the archive contains only
      a single top-level directory, its contents will be placed directly into
      the target *directory*.
    check_extract_file (callable): Optional predicate taking a member name;
      members for which it returns a falsy value are skipped.
    progress_callback (callable): Optional ``(index, total, name)`` callback;
      called with ``(-1, 0, None)`` before and ``(total, total, None)`` after.
    default_mode (str, int): File mode applied when the archive member does
      not carry one (octal string, or already-parsed int).
    """
    if isinstance(archive, str):
        # Filename given: open it and recurse with the archive object.
        with open(archive, suffix=suffix) as archive:
            return extract(archive, directory, None, unpack_single_dir,
                           check_extract_file, progress_callback, default_mode)
    if isinstance(default_mode, str):
        default_mode = int(default_mode, 8)
    if progress_callback:
        progress_callback(-1, 0, None)
    names = archive.getnames()
    # Find out if we have only one top-level directory.
    toplevel_dirs = set()
    for name in names:
        parts = name.split('/')
        if len(parts) > 1:
            toplevel_dirs.add(parts[0])
    if unpack_single_dir and len(toplevel_dirs) == 1:
        stripdir = next(iter(toplevel_dirs)) + '/'
    else:
        stripdir = None
    # Resolved extraction root, used below to reject any member whose
    # destination would fall outside of *directory* (zip-slip protection).
    root = os.path.abspath(directory)
    for index, name in enumerate(names):
        if progress_callback:
            progress_callback(index + 1, len(names), name)
        # Cheap prefix checks for obviously unsafe or absolute names.
        if name.startswith('..') or name.startswith('/') or os.path.isabs(name):
            continue
        if check_extract_file and not check_extract_file(name):
            continue
        if name.endswith('/'):  # plain directory entry, nothing to write
            continue
        if stripdir:
            filename = name[len(stripdir):]
            if not filename:
                continue
        else:
            filename = name
        filename = os.path.join(root, filename)
        # BUGFIX: the prefix checks above miss embedded '..' components
        # (e.g. "pkg/../../evil"), which would escape *directory*. Resolve
        # the destination and require it to stay under the target root.
        if os.path.commonpath([root, os.path.abspath(filename)]) != root:
            continue
        info = archive.getmember(name)
        src = archive.extractfile(name)
        if not src:
            continue
        try:
            dirname = os.path.dirname(filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with builtins.open(filename, 'wb') as dst:
                shutil.copyfileobj(src, dst)
            # Preserve the member's mode (falling back to default_mode)
            # and its modification time; atime is left at -1 as before.
            os.chmod(filename, info.mode or default_mode)
            os.utime(filename, (-1, info.mtime))
        finally:
            src.close()
    if progress_callback:
        progress_callback(len(names), len(names), None)
|
def function[extract, parameter[archive, directory, suffix, unpack_single_dir, check_extract_file, progress_callback, default_mode]]:
constant[
Extract the contents of *archive* to the specified *directory*. This
function ensures that no file is extracted outside of the target directory
(which can theoretically happen if the arcname is not relative or points
to a parent directory).
# Parameters
archive (str, archive-like): The filename of an archive or an already
opened archive.
directory (str): Path to the directory to unpack the contents to.
unpack_single_dir (bool): If this is True and if the archive contains only
a single top-level directory, its contents will be placed directly into
the target *directory*.
]
if call[name[isinstance], parameter[name[archive], name[str]]] begin[:]
with call[name[open], parameter[name[archive]]] begin[:]
return[call[name[extract], parameter[name[archive], name[directory], constant[None], name[unpack_single_dir], name[check_extract_file], name[progress_callback], name[default_mode]]]]
if call[name[isinstance], parameter[name[default_mode], name[str]]] begin[:]
variable[default_mode] assign[=] call[name[int], parameter[name[default_mode], constant[8]]]
if name[progress_callback] begin[:]
call[name[progress_callback], parameter[<ast.UnaryOp object at 0x7da20e9b3f70>, constant[0], constant[None]]]
variable[names] assign[=] call[name[archive].getnames, parameter[]]
variable[toplevel_dirs] assign[=] call[name[set], parameter[]]
for taget[name[name]] in starred[name[names]] begin[:]
variable[parts] assign[=] call[name[name].split, parameter[constant[/]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:]
call[name[toplevel_dirs].add, parameter[call[name[parts]][constant[0]]]]
if <ast.BoolOp object at 0x7da20e9b2710> begin[:]
variable[stripdir] assign[=] binary_operation[call[name[next], parameter[call[name[iter], parameter[name[toplevel_dirs]]]]] + constant[/]]
for taget[tuple[[<ast.Name object at 0x7da18bc70ac0>, <ast.Name object at 0x7da18bc71210>]]] in starred[call[name[enumerate], parameter[name[names]]]] begin[:]
if name[progress_callback] begin[:]
call[name[progress_callback], parameter[binary_operation[name[index] + constant[1]], call[name[len], parameter[name[names]]], name[name]]]
if <ast.BoolOp object at 0x7da18bc707f0> begin[:]
continue
if <ast.BoolOp object at 0x7da18bc70e20> begin[:]
continue
if call[name[name].endswith, parameter[constant[/]]] begin[:]
continue
if name[stripdir] begin[:]
variable[filename] assign[=] call[name[name]][<ast.Slice object at 0x7da18bc71570>]
if <ast.UnaryOp object at 0x7da18bc70e80> begin[:]
continue
variable[info] assign[=] call[name[archive].getmember, parameter[name[name]]]
variable[src] assign[=] call[name[archive].extractfile, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da18bc722f0> begin[:]
continue
<ast.Try object at 0x7da18bc73ee0>
if name[progress_callback] begin[:]
call[name[progress_callback], parameter[call[name[len], parameter[name[names]]], call[name[len], parameter[name[names]]], constant[None]]]
|
keyword[def] identifier[extract] ( identifier[archive] , identifier[directory] , identifier[suffix] = keyword[None] , identifier[unpack_single_dir] = keyword[False] ,
identifier[check_extract_file] = keyword[None] , identifier[progress_callback] = keyword[None] , identifier[default_mode] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[archive] , identifier[str] ):
keyword[with] identifier[open] ( identifier[archive] , identifier[suffix] = identifier[suffix] ) keyword[as] identifier[archive] :
keyword[return] identifier[extract] ( identifier[archive] , identifier[directory] , keyword[None] , identifier[unpack_single_dir] ,
identifier[check_extract_file] , identifier[progress_callback] , identifier[default_mode] )
keyword[if] identifier[isinstance] ( identifier[default_mode] , identifier[str] ):
identifier[default_mode] = identifier[int] ( identifier[default_mode] , literal[int] )
keyword[if] identifier[progress_callback] :
identifier[progress_callback] (- literal[int] , literal[int] , keyword[None] )
identifier[names] = identifier[archive] . identifier[getnames] ()
identifier[toplevel_dirs] = identifier[set] ()
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[parts] = identifier[name] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
identifier[toplevel_dirs] . identifier[add] ( identifier[parts] [ literal[int] ])
keyword[if] identifier[unpack_single_dir] keyword[and] identifier[len] ( identifier[toplevel_dirs] )== literal[int] :
identifier[stripdir] = identifier[next] ( identifier[iter] ( identifier[toplevel_dirs] ))+ literal[string]
keyword[else] :
identifier[stripdir] = keyword[None]
keyword[for] identifier[index] , identifier[name] keyword[in] identifier[enumerate] ( identifier[names] ):
keyword[if] identifier[progress_callback] :
identifier[progress_callback] ( identifier[index] + literal[int] , identifier[len] ( identifier[names] ), identifier[name] )
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[os] . identifier[path] . identifier[isabs] ( identifier[name] ):
keyword[continue]
keyword[if] identifier[check_extract_file] keyword[and] keyword[not] identifier[check_extract_file] ( identifier[name] ):
keyword[continue]
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[stripdir] :
identifier[filename] = identifier[name] [ identifier[len] ( identifier[stripdir] ):]
keyword[if] keyword[not] identifier[filename] :
keyword[continue]
keyword[else] :
identifier[filename] = identifier[name]
identifier[info] = identifier[archive] . identifier[getmember] ( identifier[name] )
identifier[src] = identifier[archive] . identifier[extractfile] ( identifier[name] )
keyword[if] keyword[not] identifier[src] :
keyword[continue]
keyword[try] :
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] )
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dirname] ):
identifier[os] . identifier[makedirs] ( identifier[dirname] )
keyword[with] identifier[builtins] . identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[dst] :
identifier[shutil] . identifier[copyfileobj] ( identifier[src] , identifier[dst] )
identifier[os] . identifier[chmod] ( identifier[filename] , identifier[info] . identifier[mode] keyword[or] identifier[default_mode] )
identifier[os] . identifier[utime] ( identifier[filename] ,(- literal[int] , identifier[info] . identifier[mtime] ))
keyword[finally] :
identifier[src] . identifier[close] ()
keyword[if] identifier[progress_callback] :
identifier[progress_callback] ( identifier[len] ( identifier[names] ), identifier[len] ( identifier[names] ), keyword[None] )
|
def extract(archive, directory, suffix=None, unpack_single_dir=False, check_extract_file=None, progress_callback=None, default_mode='755'):
"""
Extract the contents of *archive* to the specified *directory*. This
function ensures that no file is extracted outside of the target directory
(which can theoretically happen if the arcname is not relative or points
to a parent directory).
# Parameters
archive (str, archive-like): The filename of an archive or an already
opened archive.
directory (str): Path to the directory to unpack the contents to.
unpack_single_dir (bool): If this is True and if the archive contains only
a single top-level directory, its contents will be placed directly into
the target *directory*.
"""
if isinstance(archive, str):
with open(archive, suffix=suffix) as archive:
return extract(archive, directory, None, unpack_single_dir, check_extract_file, progress_callback, default_mode) # depends on [control=['with'], data=['archive']] # depends on [control=['if'], data=[]]
if isinstance(default_mode, str):
default_mode = int(default_mode, 8) # depends on [control=['if'], data=[]]
if progress_callback:
progress_callback(-1, 0, None) # depends on [control=['if'], data=[]]
names = archive.getnames()
# Find out if we have only one top-level directory.
toplevel_dirs = set()
for name in names:
parts = name.split('/')
if len(parts) > 1:
toplevel_dirs.add(parts[0]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
if unpack_single_dir and len(toplevel_dirs) == 1:
stripdir = next(iter(toplevel_dirs)) + '/' # depends on [control=['if'], data=[]]
else:
stripdir = None
for (index, name) in enumerate(names):
if progress_callback:
progress_callback(index + 1, len(names), name) # depends on [control=['if'], data=[]]
if name.startswith('..') or name.startswith('/') or os.path.isabs(name):
continue # depends on [control=['if'], data=[]]
if check_extract_file and (not check_extract_file(name)):
continue # depends on [control=['if'], data=[]]
if name.endswith('/'):
continue # depends on [control=['if'], data=[]]
if stripdir:
filename = name[len(stripdir):]
if not filename:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
filename = name
info = archive.getmember(name)
src = archive.extractfile(name)
if not src:
continue # depends on [control=['if'], data=[]]
try:
filename = os.path.join(directory, filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname) # depends on [control=['if'], data=[]]
with builtins.open(filename, 'wb') as dst:
shutil.copyfileobj(src, dst) # depends on [control=['with'], data=['dst']]
os.chmod(filename, info.mode or default_mode)
os.utime(filename, (-1, info.mtime)) # depends on [control=['try'], data=[]]
finally:
src.close() # depends on [control=['for'], data=[]]
if progress_callback:
progress_callback(len(names), len(names), None) # depends on [control=['if'], data=[]]
|
def _populateFromVariantFile(self, varFile, dataUrl, indexFile):
    """
    Fill in this VariantSet's instance variables from the given pysam
    VariantFile object.
    """
    if varFile.index is None:
        raise exceptions.NotIndexedException(dataUrl)
    # Unlike Tabix indices, CSI indices include every contig declared in
    # the BCF header, so each one must be probed for actual records or it
    # may trigger spurious overlapping errors.
    for rawChrom in varFile.index:
        chrom, _, _ = self.sanitizeVariantFileFetch(rawChrom)
        if isEmptyIter(varFile.fetch(chrom)):
            continue
        if chrom in self._chromFileMap:
            raise exceptions.OverlappingVcfException(dataUrl, chrom)
        self._chromFileMap[chrom] = (dataUrl, indexFile)
    self._updateMetadata(varFile)
    self._updateCallSetIds(varFile)
    self._updateVariantAnnotationSets(varFile, dataUrl)
|
def function[_populateFromVariantFile, parameter[self, varFile, dataUrl, indexFile]]:
constant[
Populates the instance variables of this VariantSet from the specified
pysam VariantFile object.
]
if compare[name[varFile].index is constant[None]] begin[:]
<ast.Raise object at 0x7da18f58f160>
for taget[name[chrom]] in starred[name[varFile].index] begin[:]
<ast.Tuple object at 0x7da18f58ee90> assign[=] call[name[self].sanitizeVariantFileFetch, parameter[name[chrom]]]
if <ast.UnaryOp object at 0x7da18f58d5a0> begin[:]
if compare[name[chrom] in name[self]._chromFileMap] begin[:]
<ast.Raise object at 0x7da18f811420>
call[name[self]._chromFileMap][name[chrom]] assign[=] tuple[[<ast.Name object at 0x7da18f810e20>, <ast.Name object at 0x7da18f8134f0>]]
call[name[self]._updateMetadata, parameter[name[varFile]]]
call[name[self]._updateCallSetIds, parameter[name[varFile]]]
call[name[self]._updateVariantAnnotationSets, parameter[name[varFile], name[dataUrl]]]
|
keyword[def] identifier[_populateFromVariantFile] ( identifier[self] , identifier[varFile] , identifier[dataUrl] , identifier[indexFile] ):
literal[string]
keyword[if] identifier[varFile] . identifier[index] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[NotIndexedException] ( identifier[dataUrl] )
keyword[for] identifier[chrom] keyword[in] identifier[varFile] . identifier[index] :
identifier[chrom] , identifier[_] , identifier[_] = identifier[self] . identifier[sanitizeVariantFileFetch] ( identifier[chrom] )
keyword[if] keyword[not] identifier[isEmptyIter] ( identifier[varFile] . identifier[fetch] ( identifier[chrom] )):
keyword[if] identifier[chrom] keyword[in] identifier[self] . identifier[_chromFileMap] :
keyword[raise] identifier[exceptions] . identifier[OverlappingVcfException] ( identifier[dataUrl] , identifier[chrom] )
identifier[self] . identifier[_chromFileMap] [ identifier[chrom] ]= identifier[dataUrl] , identifier[indexFile]
identifier[self] . identifier[_updateMetadata] ( identifier[varFile] )
identifier[self] . identifier[_updateCallSetIds] ( identifier[varFile] )
identifier[self] . identifier[_updateVariantAnnotationSets] ( identifier[varFile] , identifier[dataUrl] )
|
def _populateFromVariantFile(self, varFile, dataUrl, indexFile):
"""
Populates the instance variables of this VariantSet from the specified
pysam VariantFile object.
"""
if varFile.index is None:
raise exceptions.NotIndexedException(dataUrl) # depends on [control=['if'], data=[]]
for chrom in varFile.index:
# Unlike Tabix indices, CSI indices include all contigs defined
# in the BCF header. Thus we must test each one to see if
# records exist or else they are likely to trigger spurious
# overlapping errors.
(chrom, _, _) = self.sanitizeVariantFileFetch(chrom)
if not isEmptyIter(varFile.fetch(chrom)):
if chrom in self._chromFileMap:
raise exceptions.OverlappingVcfException(dataUrl, chrom) # depends on [control=['if'], data=['chrom']] # depends on [control=['if'], data=[]]
self._chromFileMap[chrom] = (dataUrl, indexFile) # depends on [control=['for'], data=['chrom']]
self._updateMetadata(varFile)
self._updateCallSetIds(varFile)
self._updateVariantAnnotationSets(varFile, dataUrl)
|
def _assign_clusters(self):
"""Assign the samples to the closest centroids to create clusters
"""
self.clusters = np.array([self._closest_centroid(x) for x in self._X])
|
def function[_assign_clusters, parameter[self]]:
constant[Assign the samples to the closest centroids to create clusters
]
name[self].clusters assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18fe92e60>]]
|
keyword[def] identifier[_assign_clusters] ( identifier[self] ):
literal[string]
identifier[self] . identifier[clusters] = identifier[np] . identifier[array] ([ identifier[self] . identifier[_closest_centroid] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[_X] ])
|
def _assign_clusters(self):
"""Assign the samples to the closest centroids to create clusters
"""
self.clusters = np.array([self._closest_centroid(x) for x in self._X])
|
def cytherize(args, file):
    """
    Used by core to integrate all the pieces of information, and to interface
    with the user. Compiles and cleans up.

    Args:
        args (dict): Parsed command-line options (flags such as 'watch',
            'timestamp', 'execute', 'timer', 'error', 'concise', plus the
            'filenames' list and the running 'watch_stats' counters).
        file (dict): Per-file state, including 'file_path' and the
            'stamp_if_error' timestamp used for change detection.
    """
    # Decide whether to compile: an out-of-date file is compiled only if it
    # was updated since the last error; an up-to-date file is skipped when
    # timestamp checking is enabled.
    if isOutDated(file):
        if isUpdated(file):
            response = initiateCompilation(args, file)
        else:
            response = {'returncode': WAIT_FOR_FIX, 'output': ''}
    else:
        if args['timestamp']:
            response = {'returncode': SKIPPED_COMPILATION, 'output': ''}
        else:
            response = initiateCompilation(args, file)
    ###########################################################################
    time.sleep(INTERVAL)
    # Build the user-facing message for this compile attempt.
    # BUGFIX: the continued string literals below previously joined without a
    # space ("it isfixed", "beforeit", "sincelast"); spaces added.
    if response['returncode'] == ERROR_PASSOFF:
        file['stamp_if_error'] = time.time()
        if args['watch']:
            if len(args['filenames']) > 1:
                output = "Error in file: '{}'; Cyther will wait until it is " \
                         "fixed...\n".format(file['file_path'])
            else:
                output = "Cyther will wait for you to fix this error before " \
                         "it tries to compile again...\n"
        else:
            output = "Error in source file, see above\n"
    elif response['returncode'] == SKIPPED_COMPILATION:
        if not args['watch']:
            output = 'Skipping compilation: source file not updated since ' \
                     'last compile\n'
        else:
            output = ''
    elif response['returncode'] == WAIT_FOR_FIX:
        output = ''
    elif response['returncode'] == FINE:
        if args['watch']:
            if len(args['filenames']) > 1:
                output = "Compiled the file '{}'\n".format(file['file_path'])
            else:
                output = 'Compiled the file\n'
        else:
            if not args['concise']:
                output = 'Compilation complete\n'
            else:
                output = ''
    else:
        raise CytherError("Unrecognized return value '{}'"
                          "".format(response['returncode']))
    response['output'] += output
    ###########################################################################
    # NOTE(review): operator precedence makes the test below
    # ((execute or timer) and FINE) or condition -- i.e. a skipped compile in
    # non-watch mode always triggers a run, regardless of the execute/timer
    # flags. Confirm that is intentional before restructuring.
    condition = response['returncode'] == SKIPPED_COMPILATION and not args[
        'watch']
    if (args['execute'] or args['timer']) and response[
            'returncode'] == FINE or condition:
        ret = cueExtractAndRun(args, file)
        response['output'] += ret['output']
    ###########################################################################
    # Keep per-session statistics up to date while in watch mode.
    if args['watch']:
        if response['returncode'] == FINE or response[
                'returncode'] == ERROR_PASSOFF:
            if response['returncode'] == FINE:
                args['watch_stats']['compiles'] += 1
            else:
                args['watch_stats']['errors'] += 1
            args['watch_stats']['counter'] += 1
            response['output'] += \
                WATCH_STATS_TEMPLATE.format(args['watch_stats']['counter'],
                                            args['watch_stats']['compiles'],
                                            args['watch_stats']['errors'],
                                            args['watch_stats']['polls'])
        else:
            args['watch_stats']['polls'] += 1
    ###########################################################################
    # Report the outcome: in watch mode errors are only printed so the loop
    # keeps running; otherwise the 'error' flag controls raise vs. print.
    if args['watch']:
        if response['returncode'] == 1:
            print(response['output'] + '\n')
        else:
            if response['output']:
                print(response['output'])
    else:
        if response['returncode'] == 1:
            if args['error']:
                raise CytherError(response['output'])
            else:
                print(response['output'])
        else:
            print(response['output'])
|
def function[cytherize, parameter[args, file]]:
constant[
Used by core to integrate all the pieces of information, and to interface
with the user. Compiles and cleans up.
]
if call[name[isOutDated], parameter[name[file]]] begin[:]
if call[name[isUpdated], parameter[name[file]]] begin[:]
variable[response] assign[=] call[name[initiateCompilation], parameter[name[args], name[file]]]
call[name[time].sleep, parameter[name[INTERVAL]]]
if compare[call[name[response]][constant[returncode]] equal[==] name[ERROR_PASSOFF]] begin[:]
call[name[file]][constant[stamp_if_error]] assign[=] call[name[time].time, parameter[]]
if call[name[args]][constant[watch]] begin[:]
if compare[call[name[len], parameter[call[name[args]][constant[filenames]]]] greater[>] constant[1]] begin[:]
variable[output] assign[=] call[constant[Error in file: '{}'; Cyther will wait until it isfixed...
].format, parameter[call[name[file]][constant[file_path]]]]
<ast.AugAssign object at 0x7da204623700>
variable[condition] assign[=] <ast.BoolOp object at 0x7da204622a40>
if <ast.BoolOp object at 0x7da2046229e0> begin[:]
variable[ret] assign[=] call[name[cueExtractAndRun], parameter[name[args], name[file]]]
<ast.AugAssign object at 0x7da2044c0ca0>
if call[name[args]][constant[watch]] begin[:]
if <ast.BoolOp object at 0x7da2044c3010> begin[:]
if compare[call[name[response]][constant[returncode]] equal[==] name[FINE]] begin[:]
<ast.AugAssign object at 0x7da2044c27d0>
<ast.AugAssign object at 0x7da2044c37f0>
<ast.AugAssign object at 0x7da18fe93c10>
if call[name[args]][constant[watch]] begin[:]
if compare[call[name[response]][constant[returncode]] equal[==] constant[1]] begin[:]
call[name[print], parameter[binary_operation[call[name[response]][constant[output]] + constant[
]]]]
|
keyword[def] identifier[cytherize] ( identifier[args] , identifier[file] ):
literal[string]
keyword[if] identifier[isOutDated] ( identifier[file] ):
keyword[if] identifier[isUpdated] ( identifier[file] ):
identifier[response] = identifier[initiateCompilation] ( identifier[args] , identifier[file] )
keyword[else] :
identifier[response] ={ literal[string] : identifier[WAIT_FOR_FIX] , literal[string] : literal[string] }
keyword[else] :
keyword[if] identifier[args] [ literal[string] ]:
identifier[response] ={ literal[string] : identifier[SKIPPED_COMPILATION] , literal[string] : literal[string] }
keyword[else] :
identifier[response] = identifier[initiateCompilation] ( identifier[args] , identifier[file] )
identifier[time] . identifier[sleep] ( identifier[INTERVAL] )
keyword[if] identifier[response] [ literal[string] ]== identifier[ERROR_PASSOFF] :
identifier[file] [ literal[string] ]= identifier[time] . identifier[time] ()
keyword[if] identifier[args] [ literal[string] ]:
keyword[if] identifier[len] ( identifier[args] [ literal[string] ])> literal[int] :
identifier[output] = literal[string] literal[string] . identifier[format] ( identifier[file] [ literal[string] ])
keyword[else] :
identifier[output] = literal[string] literal[string]
keyword[else] :
identifier[output] = literal[string]
keyword[elif] identifier[response] [ literal[string] ]== identifier[SKIPPED_COMPILATION] :
keyword[if] keyword[not] identifier[args] [ literal[string] ]:
identifier[output] = literal[string] literal[string]
keyword[else] :
identifier[output] = literal[string]
keyword[elif] identifier[response] [ literal[string] ]== identifier[WAIT_FOR_FIX] :
identifier[output] = literal[string]
keyword[elif] identifier[response] [ literal[string] ]== identifier[FINE] :
keyword[if] identifier[args] [ literal[string] ]:
keyword[if] identifier[len] ( identifier[args] [ literal[string] ])> literal[int] :
identifier[output] = literal[string] . identifier[format] ( identifier[file] [ literal[string] ])
keyword[else] :
identifier[output] = literal[string]
keyword[else] :
keyword[if] keyword[not] identifier[args] [ literal[string] ]:
identifier[output] = literal[string]
keyword[else] :
identifier[output] = literal[string]
keyword[else] :
keyword[raise] identifier[CytherError] ( literal[string]
literal[string] . identifier[format] ( identifier[response] [ literal[string] ]))
identifier[response] [ literal[string] ]+= identifier[output]
identifier[condition] = identifier[response] [ literal[string] ]== identifier[SKIPPED_COMPILATION] keyword[and] keyword[not] identifier[args] [
literal[string] ]
keyword[if] ( identifier[args] [ literal[string] ] keyword[or] identifier[args] [ literal[string] ]) keyword[and] identifier[response] [
literal[string] ]== identifier[FINE] keyword[or] identifier[condition] :
identifier[ret] = identifier[cueExtractAndRun] ( identifier[args] , identifier[file] )
identifier[response] [ literal[string] ]+= identifier[ret] [ literal[string] ]
keyword[if] identifier[args] [ literal[string] ]:
keyword[if] identifier[response] [ literal[string] ]== identifier[FINE] keyword[or] identifier[response] [
literal[string] ]== identifier[ERROR_PASSOFF] :
keyword[if] identifier[response] [ literal[string] ]== identifier[FINE] :
identifier[args] [ literal[string] ][ literal[string] ]+= literal[int]
keyword[else] :
identifier[args] [ literal[string] ][ literal[string] ]+= literal[int]
identifier[args] [ literal[string] ][ literal[string] ]+= literal[int]
identifier[response] [ literal[string] ]+= identifier[WATCH_STATS_TEMPLATE] . identifier[format] ( identifier[args] [ literal[string] ][ literal[string] ],
identifier[args] [ literal[string] ][ literal[string] ],
identifier[args] [ literal[string] ][ literal[string] ],
identifier[args] [ literal[string] ][ literal[string] ])
keyword[else] :
identifier[args] [ literal[string] ][ literal[string] ]+= literal[int]
keyword[if] identifier[args] [ literal[string] ]:
keyword[if] identifier[response] [ literal[string] ]== literal[int] :
identifier[print] ( identifier[response] [ literal[string] ]+ literal[string] )
keyword[else] :
keyword[if] identifier[response] [ literal[string] ]:
identifier[print] ( identifier[response] [ literal[string] ])
keyword[else] :
keyword[if] identifier[response] [ literal[string] ]== literal[int] :
keyword[if] identifier[args] [ literal[string] ]:
keyword[raise] identifier[CytherError] ( identifier[response] [ literal[string] ])
keyword[else] :
identifier[print] ( identifier[response] [ literal[string] ])
keyword[else] :
identifier[print] ( identifier[response] [ literal[string] ])
|
def cytherize(args, file):
"""
Used by core to integrate all the pieces of information, and to interface
with the user. Compiles and cleans up.
"""
if isOutDated(file):
if isUpdated(file):
response = initiateCompilation(args, file) # depends on [control=['if'], data=[]]
else:
response = {'returncode': WAIT_FOR_FIX, 'output': ''} # depends on [control=['if'], data=[]]
elif args['timestamp']:
response = {'returncode': SKIPPED_COMPILATION, 'output': ''} # depends on [control=['if'], data=[]]
else:
response = initiateCompilation(args, file)
###########################################################################
time.sleep(INTERVAL)
if response['returncode'] == ERROR_PASSOFF:
file['stamp_if_error'] = time.time()
if args['watch']:
if len(args['filenames']) > 1:
output = "Error in file: '{}'; Cyther will wait until it isfixed...\n".format(file['file_path']) # depends on [control=['if'], data=[]]
else:
output = 'Cyther will wait for you to fix this error beforeit tries to compile again...\n' # depends on [control=['if'], data=[]]
else:
output = 'Error in source file, see above\n' # depends on [control=['if'], data=[]]
elif response['returncode'] == SKIPPED_COMPILATION:
if not args['watch']:
output = 'Skipping compilation: source file not updated sincelast compile\n' # depends on [control=['if'], data=[]]
else:
output = '' # depends on [control=['if'], data=[]]
elif response['returncode'] == WAIT_FOR_FIX:
output = '' # depends on [control=['if'], data=[]]
elif response['returncode'] == FINE:
if args['watch']:
if len(args['filenames']) > 1:
output = "Compiled the file '{}'\n".format(file['file_path']) # depends on [control=['if'], data=[]]
else:
output = 'Compiled the file\n' # depends on [control=['if'], data=[]]
elif not args['concise']:
output = 'Compilation complete\n' # depends on [control=['if'], data=[]]
else:
output = '' # depends on [control=['if'], data=[]]
else:
raise CytherError("Unrecognized return value '{}'".format(response['returncode']))
response['output'] += output
###########################################################################
condition = response['returncode'] == SKIPPED_COMPILATION and (not args['watch'])
if (args['execute'] or args['timer']) and response['returncode'] == FINE or condition:
ret = cueExtractAndRun(args, file)
response['output'] += ret['output'] # depends on [control=['if'], data=[]]
###########################################################################
if args['watch']:
if response['returncode'] == FINE or response['returncode'] == ERROR_PASSOFF:
if response['returncode'] == FINE:
args['watch_stats']['compiles'] += 1 # depends on [control=['if'], data=[]]
else:
args['watch_stats']['errors'] += 1
args['watch_stats']['counter'] += 1
response['output'] += WATCH_STATS_TEMPLATE.format(args['watch_stats']['counter'], args['watch_stats']['compiles'], args['watch_stats']['errors'], args['watch_stats']['polls']) # depends on [control=['if'], data=[]]
else:
args['watch_stats']['polls'] += 1 # depends on [control=['if'], data=[]]
###########################################################################
if args['watch']:
if response['returncode'] == 1:
print(response['output'] + '\n') # depends on [control=['if'], data=[]]
elif response['output']:
print(response['output']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif response['returncode'] == 1:
if args['error']:
raise CytherError(response['output']) # depends on [control=['if'], data=[]]
else:
print(response['output']) # depends on [control=['if'], data=[]]
else:
print(response['output'])
|
def _apply_orthogonal_view(self):
    """Apply an orthogonal projection for the current aspect ratio."""
    viewport = self.get_view_coordinates()  # (left, right, bottom, top)
    glOrtho(*viewport, -10, 0)
|
def function[_apply_orthogonal_view, parameter[self]]:
constant[Orthogonal view with respect to current aspect ratio
]
<ast.Tuple object at 0x7da2041daef0> assign[=] call[name[self].get_view_coordinates, parameter[]]
call[name[glOrtho], parameter[name[left], name[right], name[bottom], name[top], <ast.UnaryOp object at 0x7da2041da4d0>, constant[0]]]
|
keyword[def] identifier[_apply_orthogonal_view] ( identifier[self] ):
literal[string]
identifier[left] , identifier[right] , identifier[bottom] , identifier[top] = identifier[self] . identifier[get_view_coordinates] ()
identifier[glOrtho] ( identifier[left] , identifier[right] , identifier[bottom] , identifier[top] ,- literal[int] , literal[int] )
|
def _apply_orthogonal_view(self):
"""Orthogonal view with respect to current aspect ratio
"""
(left, right, bottom, top) = self.get_view_coordinates()
glOrtho(left, right, bottom, top, -10, 0)
|
async def cache_instruments(self,
                            require: Dict[top_types.Mount, str] = None):
    """
    - Get the attached instrument on each mount and
    - Cache their pipette configs from pipette-config.json

    If specified, ``require`` should be a dict of mounts to instrument
    models describing the instruments expected to be present. It is passed
    through to the backend, which serves as the hook for the hardware
    simulator to decide what is attached.
    """
    # Treat a missing requirement mapping as "no expectations".
    checked_require = require or {}
    self._log.info("Updating instrument model cache")
    # The backend decides what is attached (real scan or simulated).
    found = self._backend.get_attached_instruments(checked_require)
    for mount, instrument_data in found.items():
        model = instrument_data.get('model')
        if model is not None:
            # A pipette is present: wrap its config, applying the per-mount
            # instrument offset from the robot config.
            p = Pipette(model,
                        self._config.instrument_offset[mount.name.lower()],
                        instrument_data['id'])
            self._attached_instruments[mount] = p
        else:
            # Nothing attached on this mount; clear any stale cache entry.
            self._attached_instruments[mount] = None
    mod_log.info("Instruments found: {}".format(
        self._attached_instruments))
|
<ast.AsyncFunctionDef object at 0x7da1b086ec50>
|
keyword[async] keyword[def] identifier[cache_instruments] ( identifier[self] ,
identifier[require] : identifier[Dict] [ identifier[top_types] . identifier[Mount] , identifier[str] ]= keyword[None] ):
literal[string]
identifier[checked_require] = identifier[require] keyword[or] {}
identifier[self] . identifier[_log] . identifier[info] ( literal[string] )
identifier[found] = identifier[self] . identifier[_backend] . identifier[get_attached_instruments] ( identifier[checked_require] )
keyword[for] identifier[mount] , identifier[instrument_data] keyword[in] identifier[found] . identifier[items] ():
identifier[model] = identifier[instrument_data] . identifier[get] ( literal[string] )
keyword[if] identifier[model] keyword[is] keyword[not] keyword[None] :
identifier[p] = identifier[Pipette] ( identifier[model] ,
identifier[self] . identifier[_config] . identifier[instrument_offset] [ identifier[mount] . identifier[name] . identifier[lower] ()],
identifier[instrument_data] [ literal[string] ])
identifier[self] . identifier[_attached_instruments] [ identifier[mount] ]= identifier[p]
keyword[else] :
identifier[self] . identifier[_attached_instruments] [ identifier[mount] ]= keyword[None]
identifier[mod_log] . identifier[info] ( literal[string] . identifier[format] (
identifier[self] . identifier[_attached_instruments] ))
|
async def cache_instruments(self, require: Dict[top_types.Mount, str]=None):
"""
- Get the attached instrument on each mount and
- Cache their pipette configs from pipette-config.json
If specified, the require element should be a dict of mounts to
instrument models describing the instruments expected to be present.
This can save a subsequent of :py:attr:`attached_instruments` and also
serves as the hook for the hardware simulator to decide what is
attached.
"""
checked_require = require or {}
self._log.info('Updating instrument model cache')
found = self._backend.get_attached_instruments(checked_require)
for (mount, instrument_data) in found.items():
model = instrument_data.get('model')
if model is not None:
p = Pipette(model, self._config.instrument_offset[mount.name.lower()], instrument_data['id'])
self._attached_instruments[mount] = p # depends on [control=['if'], data=['model']]
else:
self._attached_instruments[mount] = None # depends on [control=['for'], data=[]]
mod_log.info('Instruments found: {}'.format(self._attached_instruments))
|
def qc_data(self, tests, alias=None):
    """
    Run a series of tests against the data and return the corresponding
    results.

    Args:
        tests (list): a list of functions.
        alias: optional alias mapping forwarded to each curve's
            ``quality`` call.

    Returns:
        dict: per-mnemonic results (booleans for pass, or ints), merged
        with the curve-group results where available.
    """
    # Collect per-curve results first.
    per_curve = {}
    for mnemonic, curve in self.data.items():
        per_curve[mnemonic] = curve.quality(tests, alias)
    # Then fold in the group-level results for curves that have them.
    group_results = self.qc_curve_group(tests, alias=alias)
    for mnemonic, curve_results in per_curve.items():
        if mnemonic in group_results:
            curve_results.update(group_results[mnemonic])
    return per_curve
|
def function[qc_data, parameter[self, tests, alias]]:
constant[
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of functions.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
]
variable[r] assign[=] <ast.DictComp object at 0x7da1b23637f0>
variable[s] assign[=] call[name[self].qc_curve_group, parameter[name[tests]]]
for taget[tuple[[<ast.Name object at 0x7da1b2362f20>, <ast.Name object at 0x7da1b23629b0>]]] in starred[call[name[r].items, parameter[]]] begin[:]
if compare[name[m] in name[s]] begin[:]
call[name[results].update, parameter[call[name[s]][name[m]]]]
return[name[r]]
|
keyword[def] identifier[qc_data] ( identifier[self] , identifier[tests] , identifier[alias] = keyword[None] ):
literal[string]
identifier[r] ={ identifier[m] : identifier[c] . identifier[quality] ( identifier[tests] , identifier[alias] ) keyword[for] identifier[m] , identifier[c] keyword[in] identifier[self] . identifier[data] . identifier[items] ()}
identifier[s] = identifier[self] . identifier[qc_curve_group] ( identifier[tests] , identifier[alias] = identifier[alias] )
keyword[for] identifier[m] , identifier[results] keyword[in] identifier[r] . identifier[items] ():
keyword[if] identifier[m] keyword[in] identifier[s] :
identifier[results] . identifier[update] ( identifier[s] [ identifier[m] ])
keyword[return] identifier[r]
|
def qc_data(self, tests, alias=None):
"""
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of functions.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
# We'll get a result for each curve here.
r = {m: c.quality(tests, alias) for (m, c) in self.data.items()}
s = self.qc_curve_group(tests, alias=alias)
for (m, results) in r.items():
if m in s:
results.update(s[m]) # depends on [control=['if'], data=['m', 's']] # depends on [control=['for'], data=[]]
return r
|
def stream_subsegments(self):
    """
    Stream all closed subsegments to the daemon
    and remove reference to the parent segment.
    No-op for a not sampled segment.
    """
    active_segment = self.current_segment()
    # Bail out early when the segment is not eligible for streaming.
    if not self.streaming.is_eligible(active_segment):
        return
    self.streaming.stream(active_segment, self._stream_subsegment_out)
|
def function[stream_subsegments, parameter[self]]:
constant[
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
]
variable[segment] assign[=] call[name[self].current_segment, parameter[]]
if call[name[self].streaming.is_eligible, parameter[name[segment]]] begin[:]
call[name[self].streaming.stream, parameter[name[segment], name[self]._stream_subsegment_out]]
|
keyword[def] identifier[stream_subsegments] ( identifier[self] ):
literal[string]
identifier[segment] = identifier[self] . identifier[current_segment] ()
keyword[if] identifier[self] . identifier[streaming] . identifier[is_eligible] ( identifier[segment] ):
identifier[self] . identifier[streaming] . identifier[stream] ( identifier[segment] , identifier[self] . identifier[_stream_subsegment_out] )
|
def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) # depends on [control=['if'], data=[]]
|
def workspace_from_nothing(self, directory, mets_basename='mets.xml', clobber_mets=False):
    """
    Create an empty workspace.

    Args:
        directory (str or None): Target directory. If None, a fresh
            temporary directory (prefixed with TMP_PREFIX) is created.
        mets_basename (str): Filename of the METS file to create.
        clobber_mets (bool): Overwrite an existing METS file instead of
            raising.

    Returns:
        Workspace: a workspace backed by the newly written, empty METS.

    Raises:
        Exception: if the METS file already exists and ``clobber_mets``
            is False.
    """
    if directory is None:
        directory = tempfile.mkdtemp(prefix=TMP_PREFIX)
    if not exists(directory):
        makedirs(directory)
    mets_fpath = join(directory, mets_basename)
    # Refuse to overwrite an existing METS unless explicitly asked to.
    if not clobber_mets and exists(mets_fpath):
        raise Exception("Not clobbering existing mets.xml in '%s'." % directory)
    mets = OcrdMets.empty_mets()
    with open(mets_fpath, 'wb') as fmets:
        log.info("Writing %s", mets_fpath)
        # NOTE(review): xmllint=True presumably normalizes/pretty-prints the
        # serialized XML -- confirm against OcrdMets.to_xml.
        fmets.write(mets.to_xml(xmllint=True))
    return Workspace(self, directory, mets)
|
def function[workspace_from_nothing, parameter[self, directory, mets_basename, clobber_mets]]:
constant[
Create an empty workspace.
]
if compare[name[directory] is constant[None]] begin[:]
variable[directory] assign[=] call[name[tempfile].mkdtemp, parameter[]]
if <ast.UnaryOp object at 0x7da18f58c6a0> begin[:]
call[name[makedirs], parameter[name[directory]]]
variable[mets_fpath] assign[=] call[name[join], parameter[name[directory], name[mets_basename]]]
if <ast.BoolOp object at 0x7da1b0383640> begin[:]
<ast.Raise object at 0x7da1b0380c40>
variable[mets] assign[=] call[name[OcrdMets].empty_mets, parameter[]]
with call[name[open], parameter[name[mets_fpath], constant[wb]]] begin[:]
call[name[log].info, parameter[constant[Writing %s], name[mets_fpath]]]
call[name[fmets].write, parameter[call[name[mets].to_xml, parameter[]]]]
return[call[name[Workspace], parameter[name[self], name[directory], name[mets]]]]
|
keyword[def] identifier[workspace_from_nothing] ( identifier[self] , identifier[directory] , identifier[mets_basename] = literal[string] , identifier[clobber_mets] = keyword[False] ):
literal[string]
keyword[if] identifier[directory] keyword[is] keyword[None] :
identifier[directory] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = identifier[TMP_PREFIX] )
keyword[if] keyword[not] identifier[exists] ( identifier[directory] ):
identifier[makedirs] ( identifier[directory] )
identifier[mets_fpath] = identifier[join] ( identifier[directory] , identifier[mets_basename] )
keyword[if] keyword[not] identifier[clobber_mets] keyword[and] identifier[exists] ( identifier[mets_fpath] ):
keyword[raise] identifier[Exception] ( literal[string] % identifier[directory] )
identifier[mets] = identifier[OcrdMets] . identifier[empty_mets] ()
keyword[with] identifier[open] ( identifier[mets_fpath] , literal[string] ) keyword[as] identifier[fmets] :
identifier[log] . identifier[info] ( literal[string] , identifier[mets_fpath] )
identifier[fmets] . identifier[write] ( identifier[mets] . identifier[to_xml] ( identifier[xmllint] = keyword[True] ))
keyword[return] identifier[Workspace] ( identifier[self] , identifier[directory] , identifier[mets] )
|
def workspace_from_nothing(self, directory, mets_basename='mets.xml', clobber_mets=False):
"""
Create an empty workspace.
"""
if directory is None:
directory = tempfile.mkdtemp(prefix=TMP_PREFIX) # depends on [control=['if'], data=['directory']]
if not exists(directory):
makedirs(directory) # depends on [control=['if'], data=[]]
mets_fpath = join(directory, mets_basename)
if not clobber_mets and exists(mets_fpath):
raise Exception("Not clobbering existing mets.xml in '%s'." % directory) # depends on [control=['if'], data=[]]
mets = OcrdMets.empty_mets()
with open(mets_fpath, 'wb') as fmets:
log.info('Writing %s', mets_fpath)
fmets.write(mets.to_xml(xmllint=True)) # depends on [control=['with'], data=['fmets']]
return Workspace(self, directory, mets)
|
def ipvoid_check(ip):
    """Query IPVoid.com for reputation info on an IPv4 address.

    Returns None for non-IPv4 input or when the scan page shows a clean
    ("success") result; otherwise returns a dict mapping each detecting
    blacklist's name to its reference URL (empty if no danger label found).
    """
    if not is_IPv4Address(ip):
        return None
    response = requests.get('http://ipvoid.com/scan/%s/' % ip,
                            headers={'User-Agent': useragent})
    soup = BeautifulSoup(response.text)
    detections = {}
    # A green "success" label means the IP is not listed anywhere.
    if soup.findAll('span', attrs={'class': 'label label-success'}):
        return None
    # A red "danger" label means at least one blacklist flagged the IP;
    # each alert row carries the blacklist name and a reference link.
    if soup.findAll('span', attrs={'class': 'label label-danger'}):
        for alert_img in soup.findAll('img', alt='Alert'):
            site_name = alert_img.parent.parent.td.text.lstrip()
            detections[site_name] = alert_img.parent.a['href']
    return detections
|
def function[ipvoid_check, parameter[ip]]:
constant[Checks IPVoid.com for info on an IP address]
if <ast.UnaryOp object at 0x7da1b28ae8f0> begin[:]
return[constant[None]]
variable[return_dict] assign[=] dictionary[[], []]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b28ae9b0>], [<ast.Name object at 0x7da1b28ad960>]]
variable[url] assign[=] binary_operation[constant[http://ipvoid.com/scan/%s/] <ast.Mod object at 0x7da2590d6920> name[ip]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
variable[data] assign[=] call[name[BeautifulSoup], parameter[name[response].text]]
if call[name[data].findAll, parameter[constant[span]]] begin[:]
return[constant[None]]
return[name[return_dict]]
|
keyword[def] identifier[ipvoid_check] ( identifier[ip] ):
literal[string]
keyword[if] keyword[not] identifier[is_IPv4Address] ( identifier[ip] ):
keyword[return] keyword[None]
identifier[return_dict] ={}
identifier[headers] ={ literal[string] : identifier[useragent] }
identifier[url] = literal[string] % identifier[ip]
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] )
identifier[data] = identifier[BeautifulSoup] ( identifier[response] . identifier[text] )
keyword[if] identifier[data] . identifier[findAll] ( literal[string] , identifier[attrs] ={ literal[string] : literal[string] }):
keyword[return] keyword[None]
keyword[elif] identifier[data] . identifier[findAll] ( literal[string] , identifier[attrs] ={ literal[string] : literal[string] }):
keyword[for] identifier[each] keyword[in] identifier[data] . identifier[findAll] ( literal[string] , identifier[alt] = literal[string] ):
identifier[detect_site] = identifier[each] . identifier[parent] . identifier[parent] . identifier[td] . identifier[text] . identifier[lstrip] ()
identifier[detect_url] = identifier[each] . identifier[parent] . identifier[a] [ literal[string] ]
identifier[return_dict] [ identifier[detect_site] ]= identifier[detect_url]
keyword[return] identifier[return_dict]
|
def ipvoid_check(ip):
"""Checks IPVoid.com for info on an IP address"""
if not is_IPv4Address(ip):
return None # depends on [control=['if'], data=[]]
return_dict = {}
headers = {'User-Agent': useragent}
url = 'http://ipvoid.com/scan/%s/' % ip
response = requests.get(url, headers=headers)
data = BeautifulSoup(response.text)
if data.findAll('span', attrs={'class': 'label label-success'}):
return None # depends on [control=['if'], data=[]]
elif data.findAll('span', attrs={'class': 'label label-danger'}):
for each in data.findAll('img', alt='Alert'):
detect_site = each.parent.parent.td.text.lstrip()
detect_url = each.parent.a['href']
return_dict[detect_site] = detect_url # depends on [control=['for'], data=['each']] # depends on [control=['if'], data=[]]
return return_dict
|
def _set_zfcp_config_files(self, fcp, target_wwpn, target_lun):
"""rhel6 set WWPN and LUN in configuration files"""
device = '0.0.%s' % fcp
set_zfcp_conf = 'echo "%(device)s %(wwpn)s %(lun)s" >> /etc/zfcp.conf'\
% {'device': device, 'wwpn': target_wwpn,
'lun': target_lun}
trigger_uevent = 'echo "add" >> /sys/bus/ccw/devices/%s/uevent\n'\
% device
return '\n'.join((set_zfcp_conf,
trigger_uevent))
|
def function[_set_zfcp_config_files, parameter[self, fcp, target_wwpn, target_lun]]:
constant[rhel6 set WWPN and LUN in configuration files]
variable[device] assign[=] binary_operation[constant[0.0.%s] <ast.Mod object at 0x7da2590d6920> name[fcp]]
variable[set_zfcp_conf] assign[=] binary_operation[constant[echo "%(device)s %(wwpn)s %(lun)s" >> /etc/zfcp.conf] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da20e955270>, <ast.Constant object at 0x7da20e957c70>, <ast.Constant object at 0x7da20e955d80>], [<ast.Name object at 0x7da20e9547f0>, <ast.Name object at 0x7da20e956230>, <ast.Name object at 0x7da20e9578e0>]]]
variable[trigger_uevent] assign[=] binary_operation[constant[echo "add" >> /sys/bus/ccw/devices/%s/uevent
] <ast.Mod object at 0x7da2590d6920> name[device]]
return[call[constant[
].join, parameter[tuple[[<ast.Name object at 0x7da20e955630>, <ast.Name object at 0x7da20e955ab0>]]]]]
|
keyword[def] identifier[_set_zfcp_config_files] ( identifier[self] , identifier[fcp] , identifier[target_wwpn] , identifier[target_lun] ):
literal[string]
identifier[device] = literal[string] % identifier[fcp]
identifier[set_zfcp_conf] = literal[string] %{ literal[string] : identifier[device] , literal[string] : identifier[target_wwpn] ,
literal[string] : identifier[target_lun] }
identifier[trigger_uevent] = literal[string] % identifier[device]
keyword[return] literal[string] . identifier[join] (( identifier[set_zfcp_conf] ,
identifier[trigger_uevent] ))
|
def _set_zfcp_config_files(self, fcp, target_wwpn, target_lun):
"""rhel6 set WWPN and LUN in configuration files"""
device = '0.0.%s' % fcp
set_zfcp_conf = 'echo "%(device)s %(wwpn)s %(lun)s" >> /etc/zfcp.conf' % {'device': device, 'wwpn': target_wwpn, 'lun': target_lun}
trigger_uevent = 'echo "add" >> /sys/bus/ccw/devices/%s/uevent\n' % device
return '\n'.join((set_zfcp_conf, trigger_uevent))
|
def nanfill(a, f_a, *args, **kwargs):
    """Fill masked areas with np.nan
    Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
    This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
    """
    masked = checkma(a)
    fill_value = masked.fill_value
    # np.nan is a float, so this requires a float-typed input array.
    filtered = f_a(masked.filled(np.nan), *args, **kwargs)
    # Re-mask NaNs produced by the filter; fix_invalid's own fill_value
    # parameter is unreliable, so the fill value is restored explicitly.
    result = np.ma.fix_invalid(filtered, copy=False)
    result.set_fill_value(fill_value)
    return result
|
def function[nanfill, parameter[a, f_a]]:
constant[Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
]
variable[a] assign[=] call[name[checkma], parameter[name[a]]]
variable[ndv] assign[=] name[a].fill_value
variable[b] assign[=] call[name[f_a], parameter[call[name[a].filled, parameter[name[np].nan]], <ast.Starred object at 0x7da1b06b7cd0>]]
variable[out] assign[=] call[name[np].ma.fix_invalid, parameter[name[b]]]
call[name[out].set_fill_value, parameter[name[ndv]]]
return[name[out]]
|
keyword[def] identifier[nanfill] ( identifier[a] , identifier[f_a] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[a] = identifier[checkma] ( identifier[a] )
identifier[ndv] = identifier[a] . identifier[fill_value]
identifier[b] = identifier[f_a] ( identifier[a] . identifier[filled] ( identifier[np] . identifier[nan] ),* identifier[args] ,** identifier[kwargs] )
identifier[out] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[b] , identifier[copy] = keyword[False] )
identifier[out] . identifier[set_fill_value] ( identifier[ndv] )
keyword[return] identifier[out]
|
def nanfill(a, f_a, *args, **kwargs):
"""Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
"""
a = checkma(a)
ndv = a.fill_value
#Note: The following fails for arrays that are not float (np.nan is float)
b = f_a(a.filled(np.nan), *args, **kwargs)
#the fix_invalid fill_value parameter doesn't seem to work
out = np.ma.fix_invalid(b, copy=False)
out.set_fill_value(ndv)
return out
|
def camel_to_snake(name):
    """Converts CamelCase to snake_case.
    Args:
        name (string): The name to convert from CamelCase to snake_case.
    Returns:
        string: Converted string.
    """
    # Two passes: first split a capitalized word from whatever precedes it
    # (handles acronyms like "HTTPResponse"), then split any remaining
    # lowercase/digit-to-uppercase boundary. Finally lowercase everything.
    for boundary in (r"(.)([A-Z][a-z]+)", r"([a-z0-9])([A-Z])"):
        name = re.sub(boundary, r"\1_\2", name)
    return name.lower()
|
def function[camel_to_snake, parameter[name]]:
constant[Converts CamelCase to snake_case.
Args:
name (string): The name to convert from CamelCase to snake_case.
Returns:
string: Converted string.
]
variable[s1] assign[=] call[name[re].sub, parameter[constant[(.)([A-Z][a-z]+)], constant[\1_\2], name[name]]]
return[call[call[name[re].sub, parameter[constant[([a-z0-9])([A-Z])], constant[\1_\2], name[s1]]].lower, parameter[]]]
|
keyword[def] identifier[camel_to_snake] ( identifier[name] ):
literal[string]
identifier[s1] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[name] )
keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[s1] ). identifier[lower] ()
|
def camel_to_snake(name):
"""Converts CamelCase to snake_case.
Args:
name (string): The name to convert from CamelCase to snake_case.
Returns:
string: Converted string.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', name)
return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', s1).lower()
|
def cursor_after(self):
    """Return the cursor after the current item.
    You must pass a QueryOptions object with produce_cursors=True
    for this to work.
    If there is no cursor or no current item, raise BadArgumentError.
    Before next() has returned there is no cursor. Once the loop is
    exhausted, this returns the cursor after the last item.
    """
    # A stored exception means cursor retrieval failed earlier; replay it.
    cursor = self._cursor_after
    if isinstance(cursor, BaseException):
        raise cursor
    return cursor
|
def function[cursor_after, parameter[self]]:
constant[Return the cursor after the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
]
if call[name[isinstance], parameter[name[self]._cursor_after, name[BaseException]]] begin[:]
<ast.Raise object at 0x7da18f58c730>
return[name[self]._cursor_after]
|
keyword[def] identifier[cursor_after] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_cursor_after] , identifier[BaseException] ):
keyword[raise] identifier[self] . identifier[_cursor_after]
keyword[return] identifier[self] . identifier[_cursor_after]
|
def cursor_after(self):
"""Return the cursor after the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
"""
if isinstance(self._cursor_after, BaseException):
raise self._cursor_after # depends on [control=['if'], data=[]]
return self._cursor_after
|
def delete(self, ids):
    """Undeploy the pools identified by ``ids``.

    :param ids: Identifiers of deployed pools
    :return: Empty Dict
    """
    # Expand the id list into the deploy URI, then delegate to the parent
    # class's DELETE implementation.
    return super(ApiPoolDeploy, self).delete(
        build_uri_with_ids('api/v3/pool/deploy/%s/', ids))
|
def function[delete, parameter[self, ids]]:
constant[
Method to undeploy pool's by their ids
:param ids: Identifiers of deployed pool's
:return: Empty Dict
]
variable[url] assign[=] call[name[build_uri_with_ids], parameter[constant[api/v3/pool/deploy/%s/], name[ids]]]
return[call[call[name[super], parameter[name[ApiPoolDeploy], name[self]]].delete, parameter[name[url]]]]
|
keyword[def] identifier[delete] ( identifier[self] , identifier[ids] ):
literal[string]
identifier[url] = identifier[build_uri_with_ids] ( literal[string] , identifier[ids] )
keyword[return] identifier[super] ( identifier[ApiPoolDeploy] , identifier[self] ). identifier[delete] ( identifier[url] )
|
def delete(self, ids):
"""
Method to undeploy pool's by their ids
:param ids: Identifiers of deployed pool's
:return: Empty Dict
"""
url = build_uri_with_ids('api/v3/pool/deploy/%s/', ids)
return super(ApiPoolDeploy, self).delete(url)
|
def create_threadpool_executed_func(original_func):
    """
    Returns a function wrapper that defers function calls execute inside gevent's threadpool but keeps any exception
    or backtrace in the caller's context.
    :param original_func: function to wrap
    :returns: wrapper function
    """
    def wrapped_func(*args, **kwargs):
        # Runs inside a threadpool worker. Outcome is encoded as a
        # (status, payload) tuple so the exception info can be transported
        # back and re-raised in the calling greenlet with its traceback.
        try:
            result = original_func(*args, **kwargs)
            return True, result
        # NOTE(review): bare `except:` also captures BaseException subclasses
        # (KeyboardInterrupt, SystemExit) raised in the worker -- confirm
        # that is intended.
        except:
            return False, sys.exc_info()
    def new_func(*args, **kwargs):
        # Blocks only the current greenlet (not the hub) until the
        # threadpool call completes.
        status, result = gevent.get_hub().threadpool.apply(wrapped_func, args, kwargs)
        if status:
            return result
        else:
            # Re-raise the worker's exception with its original traceback.
            six.reraise(*result)
    # Preserve the wrapped function's name and mark its docstring as
    # gevent-friendly (functools.wraps is deliberately not used so the
    # docstring can be rewritten).
    new_func.__name__ = original_func.__name__
    new_func.__doc__ = "(gevent-friendly)" + (" " + original_func.__doc__ if original_func.__doc__ is not None else "")
    return new_func
|
def function[create_threadpool_executed_func, parameter[original_func]]:
constant[
Returns a function wrapper that defers function calls execute inside gevent's threadpool but keeps any exception
or backtrace in the caller's context.
:param original_func: function to wrap
:returns: wrapper function
]
def function[wrapped_func, parameter[]]:
<ast.Try object at 0x7da1aff6c2b0>
def function[new_func, parameter[]]:
<ast.Tuple object at 0x7da1aff6c790> assign[=] call[call[name[gevent].get_hub, parameter[]].threadpool.apply, parameter[name[wrapped_func], name[args], name[kwargs]]]
if name[status] begin[:]
return[name[result]]
name[new_func].__name__ assign[=] name[original_func].__name__
name[new_func].__doc__ assign[=] binary_operation[constant[(gevent-friendly)] + <ast.IfExp object at 0x7da1affd7310>]
return[name[new_func]]
|
keyword[def] identifier[create_threadpool_executed_func] ( identifier[original_func] ):
literal[string]
keyword[def] identifier[wrapped_func] (* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
identifier[result] = identifier[original_func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] keyword[True] , identifier[result]
keyword[except] :
keyword[return] keyword[False] , identifier[sys] . identifier[exc_info] ()
keyword[def] identifier[new_func] (* identifier[args] ,** identifier[kwargs] ):
identifier[status] , identifier[result] = identifier[gevent] . identifier[get_hub] (). identifier[threadpool] . identifier[apply] ( identifier[wrapped_func] , identifier[args] , identifier[kwargs] )
keyword[if] identifier[status] :
keyword[return] identifier[result]
keyword[else] :
identifier[six] . identifier[reraise] (* identifier[result] )
identifier[new_func] . identifier[__name__] = identifier[original_func] . identifier[__name__]
identifier[new_func] . identifier[__doc__] = literal[string] +( literal[string] + identifier[original_func] . identifier[__doc__] keyword[if] identifier[original_func] . identifier[__doc__] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] )
keyword[return] identifier[new_func]
|
def create_threadpool_executed_func(original_func):
"""
Returns a function wrapper that defers function calls execute inside gevent's threadpool but keeps any exception
or backtrace in the caller's context.
:param original_func: function to wrap
:returns: wrapper function
"""
def wrapped_func(*args, **kwargs):
try:
result = original_func(*args, **kwargs)
return (True, result) # depends on [control=['try'], data=[]]
except:
return (False, sys.exc_info()) # depends on [control=['except'], data=[]]
def new_func(*args, **kwargs):
(status, result) = gevent.get_hub().threadpool.apply(wrapped_func, args, kwargs)
if status:
return result # depends on [control=['if'], data=[]]
else:
six.reraise(*result)
new_func.__name__ = original_func.__name__
new_func.__doc__ = '(gevent-friendly)' + (' ' + original_func.__doc__ if original_func.__doc__ is not None else '')
return new_func
|
async def generate_license(self, title, avatar, badges=None, widgets=None):
    """Generate a license.
    This function is a coroutine.
    Parameters:
        title: str - title of the license
        avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image
        badges: list - list of 1-3 direct image urls. Same requirements as avatar (optional)
        widgets: list - list of 1-3 strings to fill the three boxes with (optional)
    Raises:
        TypeError - if any argument has the wrong type
        Exception - carrying the API's error message on a non-200 response
    Return Type: image data"""
    # Validate argument types up front so the API is never hit with bad input.
    if not isinstance(title, str):
        raise TypeError("type of 'title' must be str.")
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    if badges and not isinstance(badges, list):
        raise TypeError("type of 'badges' must be list.")
    if widgets and not isinstance(widgets, list):
        raise TypeError("type of 'widgets' must be list.")
    data = {"title": title, "avatar": avatar}
    # NOTE(review): lists with more than 3 entries are silently dropped
    # rather than rejected -- confirm this is intended.
    if badges and len(badges) <= 3:
        data['badges'] = badges
    if widgets and len(widgets) <= 3:
        data['widgets'] = widgets
    # self.__headers is a name-mangled class-private attribute on the
    # original client class; presumably it carries the API token -- verify.
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/license", headers=self.__headers, data=data) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
|
<ast.AsyncFunctionDef object at 0x7da1b26ae9b0>
|
keyword[async] keyword[def] identifier[generate_license] ( identifier[self] , identifier[title] , identifier[avatar] , identifier[badges] = keyword[None] , identifier[widgets] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[title] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[avatar] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[badges] keyword[and] keyword[not] identifier[isinstance] ( identifier[badges] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[widgets] keyword[and] keyword[not] identifier[isinstance] ( identifier[widgets] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[data] ={ literal[string] : identifier[title] , literal[string] : identifier[avatar] }
keyword[if] identifier[badges] keyword[and] identifier[len] ( identifier[badges] )<= literal[int] :
identifier[data] [ literal[string] ]= identifier[badges]
keyword[if] identifier[widgets] keyword[and] identifier[len] ( identifier[widgets] )<= literal[int] :
identifier[data] [ literal[string] ]= identifier[widgets]
keyword[async] keyword[with] identifier[aiohttp] . identifier[ClientSession] () keyword[as] identifier[session] :
keyword[async] keyword[with] identifier[session] . identifier[post] ( literal[string] , identifier[headers] = identifier[self] . identifier[__headers] , identifier[data] = identifier[data] ) keyword[as] identifier[resp] :
keyword[if] identifier[resp] . identifier[status] == literal[int] :
keyword[return] keyword[await] identifier[resp] . identifier[read] ()
keyword[else] :
keyword[raise] identifier[Exception] (( keyword[await] identifier[resp] . identifier[json] ())[ literal[string] ])
|
async def generate_license(self, title, avatar, badges=None, widgets=None):
"""Generate a license.
This function is a coroutine.
Parameters:
title: str - title of the license
avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image
badges: list - list of 1-3 direct image urls. Same requirements as avatar (optional)
widgets: list - list of 1-3 strings to fill the three boxes with (optional)
Return Type: image data"""
if not isinstance(title, str):
raise TypeError("type of 'title' must be str.") # depends on [control=['if'], data=[]]
if not isinstance(avatar, str):
raise TypeError("type of 'avatar' must be str.") # depends on [control=['if'], data=[]]
if badges and (not isinstance(badges, list)):
raise TypeError("type of 'badges' must be list.") # depends on [control=['if'], data=[]]
if widgets and (not isinstance(widgets, list)):
raise TypeError("type of 'widgets' must be list.") # depends on [control=['if'], data=[]]
data = {'title': title, 'avatar': avatar}
if badges and len(badges) <= 3:
data['badges'] = badges # depends on [control=['if'], data=[]]
if widgets and len(widgets) <= 3:
data['widgets'] = widgets # depends on [control=['if'], data=[]]
async with aiohttp.ClientSession() as session:
async with session.post('https://api.weeb.sh/auto-image/license', headers=self.__headers, data=data) as resp:
if resp.status == 200:
return await resp.read() # depends on [control=['if'], data=[]]
else:
raise Exception((await resp.json())['message'])
|
def directionality(image, min_distance = 4, threshold = 0.1, voxelspacing = None, mask = slice(None)):
    r"""
    Takes a simple or multi-spectral image and returns the directionality of the image texture.
    It is just a value representing the strength of directionality, not the specific direction.

    An edge detection is applied on the image. Then the edge strength and directional angle between
    the image axis are computed. A histogram of the directional angles is than used to calculate a
    qualitative value for directionality in ONE image layer. Note that there are n choose 2 layers
    in a n dimensional image.

    Warning
    -------
    Experimental. There are still issues with finding the right maxs and mins in histogram and
    predefining the number of bins for the histogram.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image or a slice object
    min_distance : int
        minimal Distance between 2 local minima or maxima in the histogram. Default is 4.
    threshold : float
        Defines a threshold between 0 and 1. It is used to ignore angles of low edge strength
        in the histogram. Default is 0.1.

    Returns
    -------
    directionality : array
        Fdir is a value between 0 and 1. 1 represents a high directionality.
        Returns the directionality of an image in relation to one special image layer.
        There are always n choose k axis relations; n=image.ndim, k=2 (2 axis in every image layer).
    """
    image = numpy.asarray(image)
    ndim = image.ndim

    # Apply the mask. Slice objects (the default) select everything; an
    # explicit index is coerced to a boolean mask first unless it is already
    # a sequence of slices.
    if not isinstance(mask, slice):
        if not isinstance(mask[0], slice):
            # BUGFIX: the original tested `not type(mask[0] is slice)`, which
            # evaluates `type(...)` on a bool and is therefore always False,
            # so the boolean conversion never ran.
            mask = numpy.asarray(mask, dtype=bool)
        image = image[mask]

    # Default voxel spacing: isotropic unit voxels.
    if voxelspacing is None:
        voxelspacing = (1.0,) * ndim
    if len(voxelspacing) != ndim:
        print("Voxel spacing and image dimensions do not fit.")
        return None

    # Number of 2-axis layers (n choose 2). Integer division: `n` sizes an
    # array and bounds a range, both of which require an int under Python 3.
    n = factorial(ndim) // (2 * factorial(ndim - 2))
    pi1_2 = numpy.pi / 2.0
    r = 1.0 / pi1_2 ** 2
    # Subsampling slices derived from the voxel spacing. int() is required
    # because a numpy float is not a valid slice step; the tuple is required
    # because NumPy no longer accepts a *list* of slices as an index.
    vs = tuple(slice(None, None, int(numpy.rint(spacing))) for spacing in voxelspacing)

    Fdir = numpy.empty(n)

    # Directional derivatives via Sobel filtering, one per image axis.
    # (Maybe other filter kernels like Prewitt would do a better job.)
    E = [sobel(image, axis=ndim - 1 - i) for i in range(ndim)]

    # Edge strength e(x,y): mean absolute derivative, clipped to the
    # 1st..99th percentile and rescaled; used for thresholding below.
    e = sum(E) / float(ndim)
    border = [numpy.percentile(e, 1), numpy.percentile(e, 99)]
    e[e < border[0]] = 0
    e[e > border[1]] = border[1]
    e -= border[0]
    e /= border[1]
    em = e > threshold

    for i in range(n):
        # Directional angle between the two axes of this layer, in [0, pi/2].
        # BUGFIX: `(ndim + i) // ndim` -- the original used `/`, which is a
        # float under Python 3 and cannot index a list.
        A = numpy.arctan(E[(i + (ndim + i) // ndim) % ndim][vs]
                         / (E[i % ndim][vs] + numpy.spacing(1)))
        A = A[em[vs]]

        # Calculate number of bins for the histogram. Watch out, this is just a work around!
        # @TODO: Write a more stable code to prevent for minimum and maximum repetition when
        # the same value in the histogram appears multiple times in a row.
        bins = numpy.unique(A).size + min_distance
        H = numpy.histogram(A, bins=bins, density=True)[0]  # [0, 1]
        H[H < numpy.percentile(H, 1)] = 0.0
        H_peaks, H_valleys, H_range = find_valley_range(H)

        # Weighted squared angular distance of each histogram bin from its
        # nearest peak; high concentration around peaks => high directionality.
        summe = 0.0
        for idx_ap in range(len(H_peaks)):
            for range_idx in range(H_valleys[idx_ap], H_valleys[idx_ap] + H_range[idx_ap]):
                a = range_idx % len(H)
                summe += ((pi1_2 * a) / bins - (pi1_2 * H_peaks[idx_ap]) / bins) ** 2 * H[a]
        Fdir[i] = 1.0 - r * summe

    return Fdir
|
def function[directionality, parameter[image, min_distance, threshold, voxelspacing, mask]]:
constant[
Takes a simple or multi-spectral image and returns the directionality of the image texture.
It is just a value representing the strength of directionality, not the specific direction.
An edge detection is applied on the image. Then the edge strength and directional angle between
the image axis are computed. A histogram of the directional angles is than used to calculate a
qualitative value for directionality in ONE image layer. Note that there are n choose 2 layers
in a n dimensional image.
Warning
-------
Experimental. There are still issues with finding the right maxs and mins in histogram and
predefining the number of bins for the histogram.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image or a slice object
min_distance : int
minimal Distance between 2 local minima or maxima in the histogram. Default is 4.
threshold : float
Defines a threshold between 0 and 1. It is used to ignore angles of low edge strength
in the histogram. Default is 0.1.
Returns
-------
directionality : array
Fdir is a value between 0 and 1. 1 represents a high directionality.
Returns the directionality of an image in relation to one special image layer.
The returned values are sorted like this. The axis are named v,w,x,y,z
for a five dimensional image:
w x y z v x y z v w
arctan(delta)| delta = ---,---,---,---,---, ---,---,---,---,---
v w x y z v w x y z
There are always n choose k axis relations; n=image.ndim, k=2 (2 axis in every image layer).
See Also
--------
]
variable[image] assign[=] call[name[numpy].asarray, parameter[name[image]]]
variable[ndim] assign[=] name[image].ndim
if <ast.UnaryOp object at 0x7da1b110a560> begin[:]
if <ast.UnaryOp object at 0x7da1b110a800> begin[:]
variable[mask] assign[=] call[name[numpy].array, parameter[name[mask]]]
variable[image] assign[=] call[name[image]][name[mask]]
if compare[constant[None] equal[==] name[voxelspacing]] begin[:]
variable[voxelspacing] assign[=] call[name[tuple], parameter[binary_operation[list[[<ast.Constant object at 0x7da1b1108e80>]] * name[ndim]]]]
if compare[call[name[len], parameter[name[voxelspacing]]] not_equal[!=] name[ndim]] begin[:]
call[name[print], parameter[constant[Voxel spacing and image dimensions do not fit.]]]
return[constant[None]]
variable[n] assign[=] binary_operation[call[name[factorial], parameter[name[ndim]]] / binary_operation[constant[2] * call[name[factorial], parameter[binary_operation[name[ndim] - constant[2]]]]]]
variable[pi1_2] assign[=] binary_operation[name[numpy].pi / constant[2.0]]
variable[r] assign[=] binary_operation[constant[1.0] / binary_operation[name[pi1_2] ** constant[2]]]
variable[vs] assign[=] <ast.ListComp object at 0x7da1b1109930>
variable[Fdir] assign[=] call[name[numpy].empty, parameter[name[n]]]
variable[E] assign[=] <ast.ListComp object at 0x7da1b1109480>
variable[e] assign[=] binary_operation[call[name[sum], parameter[name[E]]] / call[name[float], parameter[name[ndim]]]]
variable[border] assign[=] list[[<ast.Call object at 0x7da1b1109f30>, <ast.Call object at 0x7da1b1108b80>]]
call[name[e]][compare[name[e] less[<] call[name[border]][constant[0]]]] assign[=] constant[0]
call[name[e]][compare[name[e] greater[>] call[name[border]][constant[1]]]] assign[=] call[name[border]][constant[1]]
<ast.AugAssign object at 0x7da1b1108790>
<ast.AugAssign object at 0x7da1b11085b0>
variable[em] assign[=] compare[name[e] greater[>] name[threshold]]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
variable[A] assign[=] call[name[numpy].arctan, parameter[binary_operation[call[call[name[E]][binary_operation[binary_operation[name[i] + binary_operation[binary_operation[name[ndim] + name[i]] / name[ndim]]] <ast.Mod object at 0x7da2590d6920> name[ndim]]]][name[vs]] / binary_operation[call[call[name[E]][binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> name[ndim]]]][name[vs]] + call[name[numpy].spacing, parameter[constant[1]]]]]]]
variable[A] assign[=] call[name[A]][call[name[em]][name[vs]]]
variable[bins] assign[=] binary_operation[call[name[numpy].unique, parameter[name[A]]].size + name[min_distance]]
variable[H] assign[=] call[call[name[numpy].histogram, parameter[name[A]]]][constant[0]]
call[name[H]][compare[name[H] less[<] call[name[numpy].percentile, parameter[name[H], constant[1]]]]] assign[=] constant[0.0]
<ast.Tuple object at 0x7da2044c3760> assign[=] call[name[find_valley_range], parameter[name[H]]]
variable[summe] assign[=] constant[0.0]
for taget[name[idx_ap]] in starred[call[name[range], parameter[call[name[len], parameter[name[H_peaks]]]]]] begin[:]
for taget[name[range_idx]] in starred[call[name[range], parameter[call[name[H_valleys]][name[idx_ap]], binary_operation[call[name[H_valleys]][name[idx_ap]] + call[name[H_range]][name[idx_ap]]]]]] begin[:]
variable[a] assign[=] binary_operation[name[range_idx] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[H]]]]
<ast.AugAssign object at 0x7da2044c2410>
call[name[Fdir]][name[i]] assign[=] binary_operation[constant[1.0] - binary_operation[name[r] * name[summe]]]
return[name[Fdir]]
|
keyword[def] identifier[directionality] ( identifier[image] , identifier[min_distance] = literal[int] , identifier[threshold] = literal[int] , identifier[voxelspacing] = keyword[None] , identifier[mask] = identifier[slice] ( keyword[None] )):
literal[string]
identifier[image] = identifier[numpy] . identifier[asarray] ( identifier[image] )
identifier[ndim] = identifier[image] . identifier[ndim]
keyword[if] keyword[not] identifier[type] ( identifier[mask] ) keyword[is] identifier[slice] :
keyword[if] keyword[not] identifier[type] ( identifier[mask] [ literal[int] ] keyword[is] identifier[slice] ):
identifier[mask] = identifier[numpy] . identifier[array] ( identifier[mask] , identifier[copy] = keyword[False] , identifier[dtype] = identifier[numpy] . identifier[bool] )
identifier[image] = identifier[image] [ identifier[mask] ]
keyword[if] keyword[None] == identifier[voxelspacing] :
identifier[voxelspacing] = identifier[tuple] ([ literal[int] ]* identifier[ndim] )
keyword[if] identifier[len] ( identifier[voxelspacing] )!= identifier[ndim] :
identifier[print] ( literal[string] )
keyword[return] keyword[None]
identifier[n] =( identifier[factorial] ( identifier[ndim] )/( literal[int] * identifier[factorial] ( identifier[ndim] - literal[int] )))
identifier[pi1_2] = identifier[numpy] . identifier[pi] / literal[int]
identifier[r] = literal[int] /( identifier[pi1_2] ** literal[int] )
identifier[vs] =[ identifier[slice] ( keyword[None] , keyword[None] , identifier[numpy] . identifier[rint] ( identifier[ii] )) keyword[for] identifier[ii] keyword[in] identifier[voxelspacing] ]
identifier[Fdir] = identifier[numpy] . identifier[empty] ( identifier[n] )
identifier[E] =[ identifier[sobel] ( identifier[image] , identifier[axis] = identifier[ndim] - literal[int] - identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ndim] )]
identifier[e] = identifier[sum] ( identifier[E] )/ identifier[float] ( identifier[ndim] )
identifier[border] =[ identifier[numpy] . identifier[percentile] ( identifier[e] , literal[int] ), identifier[numpy] . identifier[percentile] ( identifier[e] , literal[int] )]
identifier[e] [ identifier[e] < identifier[border] [ literal[int] ]]= literal[int]
identifier[e] [ identifier[e] > identifier[border] [ literal[int] ]]= identifier[border] [ literal[int] ]
identifier[e] -= identifier[border] [ literal[int] ]
identifier[e] /= identifier[border] [ literal[int] ]
identifier[em] = identifier[e] > identifier[threshold]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[A] = identifier[numpy] . identifier[arctan] (( identifier[E] [( identifier[i] +( identifier[ndim] + identifier[i] )/ identifier[ndim] )% identifier[ndim] ][ identifier[vs] ])/( identifier[E] [ identifier[i] % identifier[ndim] ][ identifier[vs] ]+ identifier[numpy] . identifier[spacing] ( literal[int] )))
identifier[A] = identifier[A] [ identifier[em] [ identifier[vs] ]]
identifier[bins] = identifier[numpy] . identifier[unique] ( identifier[A] ). identifier[size] + identifier[min_distance]
identifier[H] = identifier[numpy] . identifier[histogram] ( identifier[A] , identifier[bins] = identifier[bins] , identifier[density] = keyword[True] )[ literal[int] ]
identifier[H] [ identifier[H] < identifier[numpy] . identifier[percentile] ( identifier[H] , literal[int] )]= literal[int]
identifier[H_peaks] , identifier[H_valleys] , identifier[H_range] = identifier[find_valley_range] ( identifier[H] )
identifier[summe] = literal[int]
keyword[for] identifier[idx_ap] keyword[in] identifier[range] ( identifier[len] ( identifier[H_peaks] )):
keyword[for] identifier[range_idx] keyword[in] identifier[range] ( identifier[H_valleys] [ identifier[idx_ap] ], identifier[H_valleys] [ identifier[idx_ap] ]+ identifier[H_range] [ identifier[idx_ap] ]):
identifier[a] = identifier[range_idx] % identifier[len] ( identifier[H] )
identifier[summe] +=((( identifier[pi1_2] * identifier[a] )/ identifier[bins] -( identifier[pi1_2] * identifier[H_peaks] [ identifier[idx_ap] ])/ identifier[bins] )** literal[int] )* identifier[H] [ identifier[a] ]
identifier[Fdir] [ identifier[i] ]= literal[int] - identifier[r] * identifier[summe]
keyword[return] identifier[Fdir]
|
def directionality(image, min_distance=4, threshold=0.1, voxelspacing=None, mask=slice(None)):
"""
Takes a simple or multi-spectral image and returns the directionality of the image texture.
It is just a value representing the strength of directionality, not the specific direction.
An edge detection is applied on the image. Then the edge strength and directional angle between
the image axis are computed. A histogram of the directional angles is than used to calculate a
qualitative value for directionality in ONE image layer. Note that there are n choose 2 layers
in a n dimensional image.
Warning
-------
Experimental. There are still issues with finding the right maxs and mins in histogram and
predefining the number of bins for the histogram.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image or a slice object
min_distance : int
minimal Distance between 2 local minima or maxima in the histogram. Default is 4.
threshold : float
Defines a threshold between 0 and 1. It is used to ignore angles of low edge strength
in the histogram. Default is 0.1.
Returns
-------
directionality : array
Fdir is a value between 0 and 1. 1 represents a high directionality.
Returns the directionality of an image in relation to one special image layer.
The returned values are sorted like this. The axis are named v,w,x,y,z
for a five dimensional image:
w x y z v x y z v w
arctan(delta)| delta = ---,---,---,---,---, ---,---,---,---,---
v w x y z v w x y z
There are always n choose k axis relations; n=image.ndim, k=2 (2 axis in every image layer).
See Also
--------
"""
image = numpy.asarray(image)
ndim = image.ndim
# set default mask or apply given mask
if not type(mask) is slice:
if not type(mask[0] is slice):
mask = numpy.array(mask, copy=False, dtype=numpy.bool) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
image = image[mask]
# set default voxel spacing if not suppliec
if None == voxelspacing:
voxelspacing = tuple([1.0] * ndim) # depends on [control=['if'], data=['voxelspacing']]
if len(voxelspacing) != ndim:
print('Voxel spacing and image dimensions do not fit.')
return None # depends on [control=['if'], data=[]] # Calculate amount of combinations: n choose k, normalizing factor r and voxel spacing.
n = factorial(ndim) / (2 * factorial(ndim - 2))
pi1_2 = numpy.pi / 2.0
r = 1.0 / pi1_2 ** 2
vs = [slice(None, None, numpy.rint(ii)) for ii in voxelspacing]
# Allocate memory, define constants
Fdir = numpy.empty(n)
# calculate differences by using Sobel-filter. (Maybe other filter kernel like Prewitt will do a better job)
E = [sobel(image, axis=ndim - 1 - i) for i in range(ndim)]
# The edge strength e(x,y) is used for thresholding.
e = sum(E) / float(ndim)
border = [numpy.percentile(e, 1), numpy.percentile(e, 99)]
e[e < border[0]] = 0
e[e > border[1]] = border[1]
e -= border[0]
e /= border[1]
em = e > threshold
for i in range(n):
A = numpy.arctan(E[(i + (ndim + i) / ndim) % ndim][vs] / (E[i % ndim][vs] + numpy.spacing(1))) # [0 , pi/2]
A = A[em[vs]] # Calculate number of bins for the histogram. Watch out, this is just a work around!
# @TODO: Write a more stable code to prevent for minimum and maximum repetition when the same value in the Histogram appears multiple times in a row. Example: image = numpy.zeros([10,10]), image[:,::3] = 1
bins = numpy.unique(A).size + min_distance
H = numpy.histogram(A, bins=bins, density=True)[0] # [0 , 1]
H[H < numpy.percentile(H, 1)] = 0.0
(H_peaks, H_valleys, H_range) = find_valley_range(H)
summe = 0.0
for idx_ap in range(len(H_peaks)):
for range_idx in range(H_valleys[idx_ap], H_valleys[idx_ap] + H_range[idx_ap]):
a = range_idx % len(H)
summe += (pi1_2 * a / bins - pi1_2 * H_peaks[idx_ap] / bins) ** 2 * H[a] # depends on [control=['for'], data=['range_idx']] # depends on [control=['for'], data=['idx_ap']]
Fdir[i] = 1.0 - r * summe # depends on [control=['for'], data=['i']]
return Fdir
|
def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
"""
Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.
:param args: A list of argument storage locations, as SimFunctionArguments.
:param ret_val: The return value storage location, as a SimFunctionArgument.
:param sp_delta: Does this even matter??
:param func_ty: The prototype for the given function, as a SimType or a C-style function declaration that
can be parsed into a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
this `cc` object.
For stack arguments, offsets are relative to the stack pointer on function entry.
"""
return self._default_cc(arch=self.project.arch,
args=args,
ret_val=ret_val,
sp_delta=sp_delta,
func_ty=func_ty)
|
def function[cc, parameter[self, args, ret_val, sp_delta, func_ty]]:
constant[
Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.
:param args: A list of argument storage locations, as SimFunctionArguments.
:param ret_val: The return value storage location, as a SimFunctionArgument.
:param sp_delta: Does this even matter??
:param func_ty: The prototype for the given function, as a SimType or a C-style function declaration that
can be parsed into a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
this `cc` object.
For stack arguments, offsets are relative to the stack pointer on function entry.
]
return[call[name[self]._default_cc, parameter[]]]
|
keyword[def] identifier[cc] ( identifier[self] , identifier[args] = keyword[None] , identifier[ret_val] = keyword[None] , identifier[sp_delta] = keyword[None] , identifier[func_ty] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_default_cc] ( identifier[arch] = identifier[self] . identifier[project] . identifier[arch] ,
identifier[args] = identifier[args] ,
identifier[ret_val] = identifier[ret_val] ,
identifier[sp_delta] = identifier[sp_delta] ,
identifier[func_ty] = identifier[func_ty] )
|
def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
"""
Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.
:param args: A list of argument storage locations, as SimFunctionArguments.
:param ret_val: The return value storage location, as a SimFunctionArgument.
:param sp_delta: Does this even matter??
:param func_ty: The prototype for the given function, as a SimType or a C-style function declaration that
can be parsed into a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
this `cc` object.
For stack arguments, offsets are relative to the stack pointer on function entry.
"""
return self._default_cc(arch=self.project.arch, args=args, ret_val=ret_val, sp_delta=sp_delta, func_ty=func_ty)
|
def build_columns(self, X, term=-1, verbose=False):
"""construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
"""
if term == -1:
term = range(len(self._terms))
term = list(np.atleast_1d(term))
columns = []
for term_id in term:
columns.append(self._terms[term_id].build_columns(X, verbose=verbose))
return sp.sparse.hstack(columns, format='csc')
|
def function[build_columns, parameter[self, X, term, verbose]]:
constant[construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
]
if compare[name[term] equal[==] <ast.UnaryOp object at 0x7da18f00c8e0>] begin[:]
variable[term] assign[=] call[name[range], parameter[call[name[len], parameter[name[self]._terms]]]]
variable[term] assign[=] call[name[list], parameter[call[name[np].atleast_1d, parameter[name[term]]]]]
variable[columns] assign[=] list[[]]
for taget[name[term_id]] in starred[name[term]] begin[:]
call[name[columns].append, parameter[call[call[name[self]._terms][name[term_id]].build_columns, parameter[name[X]]]]]
return[call[name[sp].sparse.hstack, parameter[name[columns]]]]
|
keyword[def] identifier[build_columns] ( identifier[self] , identifier[X] , identifier[term] =- literal[int] , identifier[verbose] = keyword[False] ):
literal[string]
keyword[if] identifier[term] ==- literal[int] :
identifier[term] = identifier[range] ( identifier[len] ( identifier[self] . identifier[_terms] ))
identifier[term] = identifier[list] ( identifier[np] . identifier[atleast_1d] ( identifier[term] ))
identifier[columns] =[]
keyword[for] identifier[term_id] keyword[in] identifier[term] :
identifier[columns] . identifier[append] ( identifier[self] . identifier[_terms] [ identifier[term_id] ]. identifier[build_columns] ( identifier[X] , identifier[verbose] = identifier[verbose] ))
keyword[return] identifier[sp] . identifier[sparse] . identifier[hstack] ( identifier[columns] , identifier[format] = literal[string] )
|
def build_columns(self, X, term=-1, verbose=False):
"""construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
"""
if term == -1:
term = range(len(self._terms)) # depends on [control=['if'], data=['term']]
term = list(np.atleast_1d(term))
columns = []
for term_id in term:
columns.append(self._terms[term_id].build_columns(X, verbose=verbose)) # depends on [control=['for'], data=['term_id']]
return sp.sparse.hstack(columns, format='csc')
|
def detect_complex_func(func):
"""Detect the cyclomatic complexity of the contract functions
shouldn't be greater than 7
"""
result = []
code_complexity = compute_cyclomatic_complexity(func)
if code_complexity > ComplexFunction.MAX_CYCLOMATIC_COMPLEXITY:
result.append({
"func": func,
"cause": ComplexFunction.CAUSE_CYCLOMATIC
})
"""Detect the number of external calls in the func
shouldn't be greater than 5
"""
count = 0
for node in func.nodes:
for ir in node.irs:
if isinstance(ir, (HighLevelCall, LowLevelCall, LibraryCall)):
count += 1
if count > ComplexFunction.MAX_EXTERNAL_CALLS:
result.append({
"func": func,
"cause": ComplexFunction.CAUSE_EXTERNAL_CALL
})
"""Checks the number of the state variables written
shouldn't be greater than 10
"""
if len(func.state_variables_written) > ComplexFunction.MAX_STATE_VARIABLES:
result.append({
"func": func,
"cause": ComplexFunction.CAUSE_STATE_VARS
})
return result
|
def function[detect_complex_func, parameter[func]]:
constant[Detect the cyclomatic complexity of the contract functions
shouldn't be greater than 7
]
variable[result] assign[=] list[[]]
variable[code_complexity] assign[=] call[name[compute_cyclomatic_complexity], parameter[name[func]]]
if compare[name[code_complexity] greater[>] name[ComplexFunction].MAX_CYCLOMATIC_COMPLEXITY] begin[:]
call[name[result].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6e61d0>, <ast.Constant object at 0x7da20c6e6ef0>], [<ast.Name object at 0x7da20c6e7d30>, <ast.Attribute object at 0x7da20c6e6a40>]]]]
constant[Detect the number of external calls in the func
shouldn't be greater than 5
]
variable[count] assign[=] constant[0]
for taget[name[node]] in starred[name[func].nodes] begin[:]
for taget[name[ir]] in starred[name[node].irs] begin[:]
if call[name[isinstance], parameter[name[ir], tuple[[<ast.Name object at 0x7da20c6e5ba0>, <ast.Name object at 0x7da20c6e5000>, <ast.Name object at 0x7da20c6e6ce0>]]]] begin[:]
<ast.AugAssign object at 0x7da20c6e6d40>
if compare[name[count] greater[>] name[ComplexFunction].MAX_EXTERNAL_CALLS] begin[:]
call[name[result].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6e6320>, <ast.Constant object at 0x7da20c6e7fd0>], [<ast.Name object at 0x7da20c6e5c00>, <ast.Attribute object at 0x7da20c6e5990>]]]]
constant[Checks the number of the state variables written
shouldn't be greater than 10
]
if compare[call[name[len], parameter[name[func].state_variables_written]] greater[>] name[ComplexFunction].MAX_STATE_VARIABLES] begin[:]
call[name[result].append, parameter[dictionary[[<ast.Constant object at 0x7da18c4cdf00>, <ast.Constant object at 0x7da18c4ce500>], [<ast.Name object at 0x7da18c4cce80>, <ast.Attribute object at 0x7da18c4cdf30>]]]]
return[name[result]]
|
keyword[def] identifier[detect_complex_func] ( identifier[func] ):
literal[string]
identifier[result] =[]
identifier[code_complexity] = identifier[compute_cyclomatic_complexity] ( identifier[func] )
keyword[if] identifier[code_complexity] > identifier[ComplexFunction] . identifier[MAX_CYCLOMATIC_COMPLEXITY] :
identifier[result] . identifier[append] ({
literal[string] : identifier[func] ,
literal[string] : identifier[ComplexFunction] . identifier[CAUSE_CYCLOMATIC]
})
literal[string]
identifier[count] = literal[int]
keyword[for] identifier[node] keyword[in] identifier[func] . identifier[nodes] :
keyword[for] identifier[ir] keyword[in] identifier[node] . identifier[irs] :
keyword[if] identifier[isinstance] ( identifier[ir] ,( identifier[HighLevelCall] , identifier[LowLevelCall] , identifier[LibraryCall] )):
identifier[count] += literal[int]
keyword[if] identifier[count] > identifier[ComplexFunction] . identifier[MAX_EXTERNAL_CALLS] :
identifier[result] . identifier[append] ({
literal[string] : identifier[func] ,
literal[string] : identifier[ComplexFunction] . identifier[CAUSE_EXTERNAL_CALL]
})
literal[string]
keyword[if] identifier[len] ( identifier[func] . identifier[state_variables_written] )> identifier[ComplexFunction] . identifier[MAX_STATE_VARIABLES] :
identifier[result] . identifier[append] ({
literal[string] : identifier[func] ,
literal[string] : identifier[ComplexFunction] . identifier[CAUSE_STATE_VARS]
})
keyword[return] identifier[result]
|
def detect_complex_func(func):
"""Detect the cyclomatic complexity of the contract functions
shouldn't be greater than 7
"""
result = []
code_complexity = compute_cyclomatic_complexity(func)
if code_complexity > ComplexFunction.MAX_CYCLOMATIC_COMPLEXITY:
result.append({'func': func, 'cause': ComplexFunction.CAUSE_CYCLOMATIC}) # depends on [control=['if'], data=[]]
"Detect the number of external calls in the func\n shouldn't be greater than 5\n "
count = 0
for node in func.nodes:
for ir in node.irs:
if isinstance(ir, (HighLevelCall, LowLevelCall, LibraryCall)):
count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ir']] # depends on [control=['for'], data=['node']]
if count > ComplexFunction.MAX_EXTERNAL_CALLS:
result.append({'func': func, 'cause': ComplexFunction.CAUSE_EXTERNAL_CALL}) # depends on [control=['if'], data=[]]
"Checks the number of the state variables written\n shouldn't be greater than 10\n "
if len(func.state_variables_written) > ComplexFunction.MAX_STATE_VARIABLES:
result.append({'func': func, 'cause': ComplexFunction.CAUSE_STATE_VARS}) # depends on [control=['if'], data=[]]
return result
|
def post_process_fieldsets(context, fieldset):
"""
Removes a few fields from FeinCMS admin inlines, those being
``id``, ``DELETE`` and ``ORDER`` currently.
Additionally, it ensures that dynamically added fields (i.e.
``ApplicationContent``'s ``admin_fields`` option) are shown.
"""
# abort if fieldset is customized
if fieldset.model_admin.fieldsets:
return fieldset
fields_to_include = set(fieldset.form.fields.keys())
for f in ('id', 'DELETE', 'ORDER'):
fields_to_include.discard(f)
def _filter_recursive(fields):
ret = []
for f in fields:
if isinstance(f, (list, tuple)):
# Several fields on one line
sub = _filter_recursive(f)
# Only add if there's at least one field left
if sub:
ret.append(sub)
elif f in fields_to_include:
ret.append(f)
fields_to_include.discard(f)
return ret
new_fields = _filter_recursive(fieldset.fields)
# Add all other fields (ApplicationContent's admin_fields) to
# the end of the fieldset
for f in fields_to_include:
new_fields.append(f)
if context.get('request'):
new_fields.extend(list(
fieldset.model_admin.get_readonly_fields(
context.get('request'),
context.get('original'),
)
))
fieldset.fields = new_fields
return ''
|
def function[post_process_fieldsets, parameter[context, fieldset]]:
constant[
Removes a few fields from FeinCMS admin inlines, those being
``id``, ``DELETE`` and ``ORDER`` currently.
Additionally, it ensures that dynamically added fields (i.e.
``ApplicationContent``'s ``admin_fields`` option) are shown.
]
if name[fieldset].model_admin.fieldsets begin[:]
return[name[fieldset]]
variable[fields_to_include] assign[=] call[name[set], parameter[call[name[fieldset].form.fields.keys, parameter[]]]]
for taget[name[f]] in starred[tuple[[<ast.Constant object at 0x7da1b0dc11e0>, <ast.Constant object at 0x7da1b0dc1de0>, <ast.Constant object at 0x7da1b0dc1a50>]]] begin[:]
call[name[fields_to_include].discard, parameter[name[f]]]
def function[_filter_recursive, parameter[fields]]:
variable[ret] assign[=] list[[]]
for taget[name[f]] in starred[name[fields]] begin[:]
if call[name[isinstance], parameter[name[f], tuple[[<ast.Name object at 0x7da1b0dc13c0>, <ast.Name object at 0x7da1b0dc0850>]]]] begin[:]
variable[sub] assign[=] call[name[_filter_recursive], parameter[name[f]]]
if name[sub] begin[:]
call[name[ret].append, parameter[name[sub]]]
return[name[ret]]
variable[new_fields] assign[=] call[name[_filter_recursive], parameter[name[fieldset].fields]]
for taget[name[f]] in starred[name[fields_to_include]] begin[:]
call[name[new_fields].append, parameter[name[f]]]
if call[name[context].get, parameter[constant[request]]] begin[:]
call[name[new_fields].extend, parameter[call[name[list], parameter[call[name[fieldset].model_admin.get_readonly_fields, parameter[call[name[context].get, parameter[constant[request]]], call[name[context].get, parameter[constant[original]]]]]]]]]
name[fieldset].fields assign[=] name[new_fields]
return[constant[]]
|
keyword[def] identifier[post_process_fieldsets] ( identifier[context] , identifier[fieldset] ):
literal[string]
keyword[if] identifier[fieldset] . identifier[model_admin] . identifier[fieldsets] :
keyword[return] identifier[fieldset]
identifier[fields_to_include] = identifier[set] ( identifier[fieldset] . identifier[form] . identifier[fields] . identifier[keys] ())
keyword[for] identifier[f] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[fields_to_include] . identifier[discard] ( identifier[f] )
keyword[def] identifier[_filter_recursive] ( identifier[fields] ):
identifier[ret] =[]
keyword[for] identifier[f] keyword[in] identifier[fields] :
keyword[if] identifier[isinstance] ( identifier[f] ,( identifier[list] , identifier[tuple] )):
identifier[sub] = identifier[_filter_recursive] ( identifier[f] )
keyword[if] identifier[sub] :
identifier[ret] . identifier[append] ( identifier[sub] )
keyword[elif] identifier[f] keyword[in] identifier[fields_to_include] :
identifier[ret] . identifier[append] ( identifier[f] )
identifier[fields_to_include] . identifier[discard] ( identifier[f] )
keyword[return] identifier[ret]
identifier[new_fields] = identifier[_filter_recursive] ( identifier[fieldset] . identifier[fields] )
keyword[for] identifier[f] keyword[in] identifier[fields_to_include] :
identifier[new_fields] . identifier[append] ( identifier[f] )
keyword[if] identifier[context] . identifier[get] ( literal[string] ):
identifier[new_fields] . identifier[extend] ( identifier[list] (
identifier[fieldset] . identifier[model_admin] . identifier[get_readonly_fields] (
identifier[context] . identifier[get] ( literal[string] ),
identifier[context] . identifier[get] ( literal[string] ),
)
))
identifier[fieldset] . identifier[fields] = identifier[new_fields]
keyword[return] literal[string]
|
def post_process_fieldsets(context, fieldset):
"""
Removes a few fields from FeinCMS admin inlines, those being
``id``, ``DELETE`` and ``ORDER`` currently.
Additionally, it ensures that dynamically added fields (i.e.
``ApplicationContent``'s ``admin_fields`` option) are shown.
"""
# abort if fieldset is customized
if fieldset.model_admin.fieldsets:
return fieldset # depends on [control=['if'], data=[]]
fields_to_include = set(fieldset.form.fields.keys())
for f in ('id', 'DELETE', 'ORDER'):
fields_to_include.discard(f) # depends on [control=['for'], data=['f']]
def _filter_recursive(fields):
ret = []
for f in fields:
if isinstance(f, (list, tuple)):
# Several fields on one line
sub = _filter_recursive(f)
# Only add if there's at least one field left
if sub:
ret.append(sub) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif f in fields_to_include:
ret.append(f)
fields_to_include.discard(f) # depends on [control=['if'], data=['f', 'fields_to_include']] # depends on [control=['for'], data=['f']]
return ret
new_fields = _filter_recursive(fieldset.fields)
# Add all other fields (ApplicationContent's admin_fields) to
# the end of the fieldset
for f in fields_to_include:
new_fields.append(f) # depends on [control=['for'], data=['f']]
if context.get('request'):
new_fields.extend(list(fieldset.model_admin.get_readonly_fields(context.get('request'), context.get('original')))) # depends on [control=['if'], data=[]]
fieldset.fields = new_fields
return ''
|
def fost_hmac_url_signature(
key, secret, host, path, query_string, expires):
"""
Return a signature that corresponds to the signed URL.
"""
if query_string:
document = '%s%s?%s\n%s' % (host, path, query_string, expires)
else:
document = '%s%s\n%s' % (host, path, expires)
signature = sha1_hmac(secret, document)
return signature
|
def function[fost_hmac_url_signature, parameter[key, secret, host, path, query_string, expires]]:
constant[
Return a signature that corresponds to the signed URL.
]
if name[query_string] begin[:]
variable[document] assign[=] binary_operation[constant[%s%s?%s
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2047e8eb0>, <ast.Name object at 0x7da2047eb7f0>, <ast.Name object at 0x7da2047e8640>, <ast.Name object at 0x7da2047e9ba0>]]]
variable[signature] assign[=] call[name[sha1_hmac], parameter[name[secret], name[document]]]
return[name[signature]]
|
keyword[def] identifier[fost_hmac_url_signature] (
identifier[key] , identifier[secret] , identifier[host] , identifier[path] , identifier[query_string] , identifier[expires] ):
literal[string]
keyword[if] identifier[query_string] :
identifier[document] = literal[string] %( identifier[host] , identifier[path] , identifier[query_string] , identifier[expires] )
keyword[else] :
identifier[document] = literal[string] %( identifier[host] , identifier[path] , identifier[expires] )
identifier[signature] = identifier[sha1_hmac] ( identifier[secret] , identifier[document] )
keyword[return] identifier[signature]
|
def fost_hmac_url_signature(key, secret, host, path, query_string, expires):
"""
Return a signature that corresponds to the signed URL.
"""
if query_string:
document = '%s%s?%s\n%s' % (host, path, query_string, expires) # depends on [control=['if'], data=[]]
else:
document = '%s%s\n%s' % (host, path, expires)
signature = sha1_hmac(secret, document)
return signature
|
def deserialize_base64(attr):
"""Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
if isinstance(attr, ET.Element):
attr = attr.text
padding = '=' * (3 - (len(attr) + 3) % 4)
attr = attr + padding
encoded = attr.replace('-', '+').replace('_', '/')
return b64decode(encoded)
|
def function[deserialize_base64, parameter[attr]]:
constant[Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
]
if call[name[isinstance], parameter[name[attr], name[ET].Element]] begin[:]
variable[attr] assign[=] name[attr].text
variable[padding] assign[=] binary_operation[constant[=] * binary_operation[constant[3] - binary_operation[binary_operation[call[name[len], parameter[name[attr]]] + constant[3]] <ast.Mod object at 0x7da2590d6920> constant[4]]]]
variable[attr] assign[=] binary_operation[name[attr] + name[padding]]
variable[encoded] assign[=] call[call[name[attr].replace, parameter[constant[-], constant[+]]].replace, parameter[constant[_], constant[/]]]
return[call[name[b64decode], parameter[name[encoded]]]]
|
keyword[def] identifier[deserialize_base64] ( identifier[attr] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[attr] , identifier[ET] . identifier[Element] ):
identifier[attr] = identifier[attr] . identifier[text]
identifier[padding] = literal[string] *( literal[int] -( identifier[len] ( identifier[attr] )+ literal[int] )% literal[int] )
identifier[attr] = identifier[attr] + identifier[padding]
identifier[encoded] = identifier[attr] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[b64decode] ( identifier[encoded] )
|
def deserialize_base64(attr):
"""Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
if isinstance(attr, ET.Element):
attr = attr.text # depends on [control=['if'], data=[]]
padding = '=' * (3 - (len(attr) + 3) % 4)
attr = attr + padding
encoded = attr.replace('-', '+').replace('_', '/')
return b64decode(encoded)
|
def _deserialize(self, value, attr, data):
"""Deserialize string by sanitizing HTML."""
value = super(SanitizedHTML, self)._deserialize(value, attr, data)
return bleach.clean(
value,
tags=self.tags,
attributes=self.attrs,
strip=True,
).strip()
|
def function[_deserialize, parameter[self, value, attr, data]]:
constant[Deserialize string by sanitizing HTML.]
variable[value] assign[=] call[call[name[super], parameter[name[SanitizedHTML], name[self]]]._deserialize, parameter[name[value], name[attr], name[data]]]
return[call[call[name[bleach].clean, parameter[name[value]]].strip, parameter[]]]
|
keyword[def] identifier[_deserialize] ( identifier[self] , identifier[value] , identifier[attr] , identifier[data] ):
literal[string]
identifier[value] = identifier[super] ( identifier[SanitizedHTML] , identifier[self] ). identifier[_deserialize] ( identifier[value] , identifier[attr] , identifier[data] )
keyword[return] identifier[bleach] . identifier[clean] (
identifier[value] ,
identifier[tags] = identifier[self] . identifier[tags] ,
identifier[attributes] = identifier[self] . identifier[attrs] ,
identifier[strip] = keyword[True] ,
). identifier[strip] ()
|
def _deserialize(self, value, attr, data):
"""Deserialize string by sanitizing HTML."""
value = super(SanitizedHTML, self)._deserialize(value, attr, data)
return bleach.clean(value, tags=self.tags, attributes=self.attrs, strip=True).strip()
|
def timedcall(executable_function, *args):
"""!
@brief Executes specified method or function with measuring of execution time.
@param[in] executable_function (pointer): Pointer to function or method.
@param[in] args (*): Arguments of called function or method.
@return (tuple) Execution time and result of execution of function or method (execution_time, result_execution).
"""
time_start = time.clock();
result = executable_function(*args);
time_end = time.clock();
return (time_end - time_start, result);
|
def function[timedcall, parameter[executable_function]]:
constant[!
@brief Executes specified method or function with measuring of execution time.
@param[in] executable_function (pointer): Pointer to function or method.
@param[in] args (*): Arguments of called function or method.
@return (tuple) Execution time and result of execution of function or method (execution_time, result_execution).
]
variable[time_start] assign[=] call[name[time].clock, parameter[]]
variable[result] assign[=] call[name[executable_function], parameter[<ast.Starred object at 0x7da1b01b3850>]]
variable[time_end] assign[=] call[name[time].clock, parameter[]]
return[tuple[[<ast.BinOp object at 0x7da1b01b0910>, <ast.Name object at 0x7da1b01b01c0>]]]
|
keyword[def] identifier[timedcall] ( identifier[executable_function] ,* identifier[args] ):
literal[string]
identifier[time_start] = identifier[time] . identifier[clock] ();
identifier[result] = identifier[executable_function] (* identifier[args] );
identifier[time_end] = identifier[time] . identifier[clock] ();
keyword[return] ( identifier[time_end] - identifier[time_start] , identifier[result] );
|
def timedcall(executable_function, *args):
"""!
@brief Executes specified method or function with measuring of execution time.
@param[in] executable_function (pointer): Pointer to function or method.
@param[in] args (*): Arguments of called function or method.
@return (tuple) Execution time and result of execution of function or method (execution_time, result_execution).
"""
time_start = time.clock()
result = executable_function(*args)
time_end = time.clock()
return (time_end - time_start, result)
|
def process_result_value(self, value, dialect):
"""convert value from json to a python object"""
if value is not None:
value = simplejson.loads(value)
return value
|
def function[process_result_value, parameter[self, value, dialect]]:
constant[convert value from json to a python object]
if compare[name[value] is_not constant[None]] begin[:]
variable[value] assign[=] call[name[simplejson].loads, parameter[name[value]]]
return[name[value]]
|
keyword[def] identifier[process_result_value] ( identifier[self] , identifier[value] , identifier[dialect] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[value] = identifier[simplejson] . identifier[loads] ( identifier[value] )
keyword[return] identifier[value]
|
def process_result_value(self, value, dialect):
"""convert value from json to a python object"""
if value is not None:
value = simplejson.loads(value) # depends on [control=['if'], data=['value']]
return value
|
def create_patches(destination, root, settings=None, traverse_bases=True,
                   filter=default_filter, recursive=True, use_decorators=True):
    """Create a patch for each member of a module or a class.
    Parameters
    ----------
    destination : object
        Patch destination.
    root : object
        Root object, either a module or a class.
    settings : gorilla.Settings
        Settings.
    traverse_bases : bool
        If the object is a class, the base classes are also traversed.
    filter : function
        Attributes for which the function returns ``False`` are skipped. The
        function needs to define two parameters: ``name``, the attribute name,
        and ``obj``, the attribute value. If ``None``, no attribute is skipped.
    recursive : bool
        If ``True``, and a hit occurs due to an attribute at the destination
        already existing with the given name, and both the member and the
        target attributes are classes, then instead of creating a patch
        directly with the member attribute value as is, a patch for each of its
        own members is created with the target as new destination.
    use_decorators : bool
        ``True`` to take any modifier decorator into consideration to allow for
        more granular customizations.
    Returns
    -------
    list of gorilla.Patch
        The patches.
    Note
    ----
    A 'target' differs from a 'destination' in that a target represents an
    existing attribute at the destination about to be hit by a patch.
    See Also
    --------
    :func:`patches`.
    """
    # NOTE: ``filter`` deliberately shadows the builtin to match the public API.
    if filter is None:
        filter = _true
    out = []
    # Seed a breadth-first walk with a pseudo-patch that wraps the root object;
    # its empty name is never used, only its obj/destination/settings.
    root_patch = Patch(destination, '', root, settings=settings)
    stack = collections.deque((root_patch,))
    while stack:
        parent_patch = stack.popleft()
        # Direct members only — descending into nested classes is handled
        # explicitly via the work queue below, not by _get_members.
        members = _get_members(parent_patch.obj, traverse_bases=traverse_bases,
                               filter=None, recursive=False)
        for name, value in members:
            # Deep-copy the settings so decorator overrides applied to this
            # patch cannot leak into sibling patches sharing the same parent.
            patch = Patch(parent_patch.destination, name, value,
                          settings=copy.deepcopy(parent_patch.settings))
            if use_decorators:
                base = _get_base(value)
                decorator_data = get_decorator_data(base)
                # Tri-state override: None -> defer to ``filter``,
                # False -> force-skip, truthy -> force-include.
                filter_override = (None if decorator_data is None
                                   else decorator_data.filter)
                if ((filter_override is None and not filter(name, value))
                        or filter_override is False):
                    continue
                if decorator_data is not None:
                    patch._update(**decorator_data.override)
            elif not filter(name, value):
                continue
            if recursive and isinstance(value, _CLASS_TYPES):
                try:
                    target = get_attribute(patch.destination, patch.name)
                except AttributeError:
                    # No attribute hit at the destination: fall through and
                    # emit the patch as-is.
                    pass
                else:
                    if isinstance(target, _CLASS_TYPES):
                        # Class-on-class hit: descend into the member's own
                        # members, retargeted at the existing class, instead
                        # of patching the whole class object at once.
                        patch.destination = target
                        stack.append(patch)
                        continue
            out.append(patch)
    return out
|
def function[create_patches, parameter[destination, root, settings, traverse_bases, filter, recursive, use_decorators]]:
constant[Create a patch for each member of a module or a class.
Parameters
----------
destination : object
Patch destination.
root : object
Root object, either a module or a class.
settings : gorilla.Settings
Settings.
traverse_bases : bool
If the object is a class, the base classes are also traversed.
filter : function
Attributes for which the function returns ``False`` are skipped. The
function needs to define two parameters: ``name``, the attribute name,
and ``obj``, the attribute value. If ``None``, no attribute is skipped.
recursive : bool
If ``True``, and a hit occurs due to an attribute at the destination
already existing with the given name, and both the member and the
target attributes are classes, then instead of creating a patch
directly with the member attribute value as is, a patch for each of its
own members is created with the target as new destination.
use_decorators : bool
``True`` to take any modifier decorator into consideration to allow for
more granular customizations.
Returns
-------
list of gorilla.Patch
The patches.
Note
----
A 'target' differs from a 'destination' in that a target represents an
existing attribute at the destination about to be hit by a patch.
See Also
--------
:func:`patches`.
]
if compare[name[filter] is constant[None]] begin[:]
variable[filter] assign[=] name[_true]
variable[out] assign[=] list[[]]
variable[root_patch] assign[=] call[name[Patch], parameter[name[destination], constant[], name[root]]]
variable[stack] assign[=] call[name[collections].deque, parameter[tuple[[<ast.Name object at 0x7da1b0f121d0>]]]]
while name[stack] begin[:]
variable[parent_patch] assign[=] call[name[stack].popleft, parameter[]]
variable[members] assign[=] call[name[_get_members], parameter[name[parent_patch].obj]]
for taget[tuple[[<ast.Name object at 0x7da1b0f133d0>, <ast.Name object at 0x7da1b0f124a0>]]] in starred[name[members]] begin[:]
variable[patch] assign[=] call[name[Patch], parameter[name[parent_patch].destination, name[name], name[value]]]
if name[use_decorators] begin[:]
variable[base] assign[=] call[name[_get_base], parameter[name[value]]]
variable[decorator_data] assign[=] call[name[get_decorator_data], parameter[name[base]]]
variable[filter_override] assign[=] <ast.IfExp object at 0x7da1b0f12e60>
if <ast.BoolOp object at 0x7da1b0f13310> begin[:]
continue
if compare[name[decorator_data] is_not constant[None]] begin[:]
call[name[patch]._update, parameter[]]
if <ast.BoolOp object at 0x7da1b0f10a00> begin[:]
<ast.Try object at 0x7da1b0f126b0>
call[name[out].append, parameter[name[patch]]]
return[name[out]]
|
keyword[def] identifier[create_patches] ( identifier[destination] , identifier[root] , identifier[settings] = keyword[None] , identifier[traverse_bases] = keyword[True] ,
identifier[filter] = identifier[default_filter] , identifier[recursive] = keyword[True] , identifier[use_decorators] = keyword[True] ):
literal[string]
keyword[if] identifier[filter] keyword[is] keyword[None] :
identifier[filter] = identifier[_true]
identifier[out] =[]
identifier[root_patch] = identifier[Patch] ( identifier[destination] , literal[string] , identifier[root] , identifier[settings] = identifier[settings] )
identifier[stack] = identifier[collections] . identifier[deque] (( identifier[root_patch] ,))
keyword[while] identifier[stack] :
identifier[parent_patch] = identifier[stack] . identifier[popleft] ()
identifier[members] = identifier[_get_members] ( identifier[parent_patch] . identifier[obj] , identifier[traverse_bases] = identifier[traverse_bases] ,
identifier[filter] = keyword[None] , identifier[recursive] = keyword[False] )
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[members] :
identifier[patch] = identifier[Patch] ( identifier[parent_patch] . identifier[destination] , identifier[name] , identifier[value] ,
identifier[settings] = identifier[copy] . identifier[deepcopy] ( identifier[parent_patch] . identifier[settings] ))
keyword[if] identifier[use_decorators] :
identifier[base] = identifier[_get_base] ( identifier[value] )
identifier[decorator_data] = identifier[get_decorator_data] ( identifier[base] )
identifier[filter_override] =( keyword[None] keyword[if] identifier[decorator_data] keyword[is] keyword[None]
keyword[else] identifier[decorator_data] . identifier[filter] )
keyword[if] (( identifier[filter_override] keyword[is] keyword[None] keyword[and] keyword[not] identifier[filter] ( identifier[name] , identifier[value] ))
keyword[or] identifier[filter_override] keyword[is] keyword[False] ):
keyword[continue]
keyword[if] identifier[decorator_data] keyword[is] keyword[not] keyword[None] :
identifier[patch] . identifier[_update] (** identifier[decorator_data] . identifier[override] )
keyword[elif] keyword[not] identifier[filter] ( identifier[name] , identifier[value] ):
keyword[continue]
keyword[if] identifier[recursive] keyword[and] identifier[isinstance] ( identifier[value] , identifier[_CLASS_TYPES] ):
keyword[try] :
identifier[target] = identifier[get_attribute] ( identifier[patch] . identifier[destination] , identifier[patch] . identifier[name] )
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[target] , identifier[_CLASS_TYPES] ):
identifier[patch] . identifier[destination] = identifier[target]
identifier[stack] . identifier[append] ( identifier[patch] )
keyword[continue]
identifier[out] . identifier[append] ( identifier[patch] )
keyword[return] identifier[out]
|
def create_patches(destination, root, settings=None, traverse_bases=True, filter=default_filter, recursive=True, use_decorators=True):
"""Create a patch for each member of a module or a class.
Parameters
----------
destination : object
Patch destination.
root : object
Root object, either a module or a class.
settings : gorilla.Settings
Settings.
traverse_bases : bool
If the object is a class, the base classes are also traversed.
filter : function
Attributes for which the function returns ``False`` are skipped. The
function needs to define two parameters: ``name``, the attribute name,
and ``obj``, the attribute value. If ``None``, no attribute is skipped.
recursive : bool
If ``True``, and a hit occurs due to an attribute at the destination
already existing with the given name, and both the member and the
target attributes are classes, then instead of creating a patch
directly with the member attribute value as is, a patch for each of its
own members is created with the target as new destination.
use_decorators : bool
``True`` to take any modifier decorator into consideration to allow for
more granular customizations.
Returns
-------
list of gorilla.Patch
The patches.
Note
----
A 'target' differs from a 'destination' in that a target represents an
existing attribute at the destination about to be hit by a patch.
See Also
--------
:func:`patches`.
"""
if filter is None:
filter = _true # depends on [control=['if'], data=['filter']]
out = []
root_patch = Patch(destination, '', root, settings=settings)
stack = collections.deque((root_patch,))
while stack:
parent_patch = stack.popleft()
members = _get_members(parent_patch.obj, traverse_bases=traverse_bases, filter=None, recursive=False)
for (name, value) in members:
patch = Patch(parent_patch.destination, name, value, settings=copy.deepcopy(parent_patch.settings))
if use_decorators:
base = _get_base(value)
decorator_data = get_decorator_data(base)
filter_override = None if decorator_data is None else decorator_data.filter
if filter_override is None and (not filter(name, value)) or filter_override is False:
continue # depends on [control=['if'], data=[]]
if decorator_data is not None:
patch._update(**decorator_data.override) # depends on [control=['if'], data=['decorator_data']] # depends on [control=['if'], data=[]]
elif not filter(name, value):
continue # depends on [control=['if'], data=[]]
if recursive and isinstance(value, _CLASS_TYPES):
try:
target = get_attribute(patch.destination, patch.name) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
if isinstance(target, _CLASS_TYPES):
patch.destination = target
stack.append(patch)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
out.append(patch) # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]]
return out
|
def run(self):
        """Run the configured command inside its directory and wait for it.

        Raises:
            CommandError: if the command exits non-zero and this command was
                not created with errors ignored.
        """
        _check_directory(self.directory)
        with DirectoryContextManager(self.directory):
            child = subprocess.Popen(self.arguments, shell=self._shell, env=self.env)
            child.communicate()
            returncode = child.returncode
        if returncode != 0 and not self._ignore_errors:
            raise CommandError(
                '"{0}" failed (err code {1})'.format(self.__repr__(), returncode)
            )
|
def function[run, parameter[self]]:
constant[Run command and wait until it finishes.]
call[name[_check_directory], parameter[name[self].directory]]
with call[name[DirectoryContextManager], parameter[name[self].directory]] begin[:]
variable[process] assign[=] call[name[subprocess].Popen, parameter[name[self].arguments]]
<ast.Tuple object at 0x7da20e955f30> assign[=] call[name[process].communicate, parameter[]]
variable[returncode] assign[=] name[process].returncode
if <ast.BoolOp object at 0x7da20e954ee0> begin[:]
<ast.Raise object at 0x7da20e9548e0>
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[_check_directory] ( identifier[self] . identifier[directory] )
keyword[with] identifier[DirectoryContextManager] ( identifier[self] . identifier[directory] ):
identifier[process] = identifier[subprocess] . identifier[Popen] ( identifier[self] . identifier[arguments] , identifier[shell] = identifier[self] . identifier[_shell] , identifier[env] = identifier[self] . identifier[env] )
identifier[_] , identifier[_] = identifier[process] . identifier[communicate] ()
identifier[returncode] = identifier[process] . identifier[returncode]
keyword[if] identifier[returncode] != literal[int] keyword[and] keyword[not] identifier[self] . identifier[_ignore_errors] :
keyword[raise] identifier[CommandError] (
literal[string] . identifier[format] ( identifier[self] . identifier[__repr__] (), identifier[returncode] )
)
|
def run(self):
"""Run command and wait until it finishes."""
_check_directory(self.directory)
with DirectoryContextManager(self.directory):
process = subprocess.Popen(self.arguments, shell=self._shell, env=self.env)
(_, _) = process.communicate() # depends on [control=['with'], data=[]]
returncode = process.returncode
if returncode != 0 and (not self._ignore_errors):
raise CommandError('"{0}" failed (err code {1})'.format(self.__repr__(), returncode)) # depends on [control=['if'], data=[]]
|
def set_cwd(new_path):
    """Context-manager generator that temporarily changes the working directory.

    Usage:
        with set_cwd('/some/dir'):
            walk_around_the_filesystem()
    """
    try:
        previous = os.getcwd()
    except OSError:
        # The current directory may no longer exist (e.g. it was deleted);
        # fall back to the target so the restoring chdir is still valid.
        previous = new_path
    try:
        os.chdir(new_path)
        yield
    finally:
        os.chdir(previous)
|
def function[set_cwd, parameter[new_path]]:
constant[
Usage:
with set_cwd('/some/dir'):
walk_around_the_filesystem()
]
<ast.Try object at 0x7da1b0089c60>
<ast.Try object at 0x7da1b00da230>
|
keyword[def] identifier[set_cwd] ( identifier[new_path] ):
literal[string]
keyword[try] :
identifier[curdir] = identifier[os] . identifier[getcwd] ()
keyword[except] identifier[OSError] :
identifier[curdir] = identifier[new_path]
keyword[try] :
identifier[os] . identifier[chdir] ( identifier[new_path] )
keyword[yield]
keyword[finally] :
identifier[os] . identifier[chdir] ( identifier[curdir] )
|
def set_cwd(new_path):
"""
Usage:
with set_cwd('/some/dir'):
walk_around_the_filesystem()
"""
try:
curdir = os.getcwd() # depends on [control=['try'], data=[]]
except OSError:
curdir = new_path # depends on [control=['except'], data=[]]
try:
os.chdir(new_path)
yield # depends on [control=['try'], data=[]]
finally:
os.chdir(curdir)
|
def system_generate_batch_inputs(input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /system/generateBatchInputs API method.

    :param input_params: request payload for the API call (defaults to an
        empty dict when omitted)
    :param always_retry: whether the HTTP layer should retry on failure
    :return: the parsed response of DXHTTPRequest

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
    """
    # Avoid a mutable default argument: a shared module-level dict could be
    # mutated by a callee and silently leak state across calls.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs)
|
def function[system_generate_batch_inputs, parameter[input_params, always_retry]]:
constant[
Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
]
return[call[name[DXHTTPRequest], parameter[constant[/system/generateBatchInputs], name[input_params]]]]
|
keyword[def] identifier[system_generate_batch_inputs] ( identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] )
|
def system_generate_batch_inputs(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
"""
return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs)
|
def _GetImportTimestamps(self, pefile_object):
  """Retrieves timestamps from the import directory, if available.

  Args:
    pefile_object (pefile.PE): pefile object.

  Returns:
    list[list[str, int]]: pairs of [dll_name, timestamp] for every import
        entry whose TimeDateStamp is set (non-zero).
  """
  results = []
  for import_entry in getattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT', []):
    raw_name = getattr(import_entry, 'dll', '')
    try:
      dll_name = raw_name.decode('ascii')
    except UnicodeDecodeError:
      # DLL names are raw bytes in the PE file and may contain non-ASCII
      # data; keep a readable placeholder character for those bytes.
      dll_name = raw_name.decode('ascii', errors='replace')
    if not dll_name:
      dll_name = '<NO DLL NAME>'
    timestamp = getattr(import_entry.struct, 'TimeDateStamp', 0)
    if timestamp:
      results.append([dll_name, timestamp])
  return results
|
def function[_GetImportTimestamps, parameter[self, pefile_object]]:
constant[Retrieves timestamps from the import directory, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[int]: import timestamps.
]
variable[import_timestamps] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da2041d9480> begin[:]
return[name[import_timestamps]]
for taget[name[importdata]] in starred[name[pefile_object].DIRECTORY_ENTRY_IMPORT] begin[:]
variable[dll_name] assign[=] call[name[getattr], parameter[name[importdata], constant[dll], constant[]]]
<ast.Try object at 0x7da2054a6620>
if <ast.UnaryOp object at 0x7da2054a6fe0> begin[:]
variable[dll_name] assign[=] constant[<NO DLL NAME>]
variable[timestamp] assign[=] call[name[getattr], parameter[name[importdata].struct, constant[TimeDateStamp], constant[0]]]
if name[timestamp] begin[:]
call[name[import_timestamps].append, parameter[list[[<ast.Name object at 0x7da2054a6350>, <ast.Name object at 0x7da2054a4910>]]]]
return[name[import_timestamps]]
|
keyword[def] identifier[_GetImportTimestamps] ( identifier[self] , identifier[pefile_object] ):
literal[string]
identifier[import_timestamps] =[]
keyword[if] keyword[not] identifier[hasattr] ( identifier[pefile_object] , literal[string] ):
keyword[return] identifier[import_timestamps]
keyword[for] identifier[importdata] keyword[in] identifier[pefile_object] . identifier[DIRECTORY_ENTRY_IMPORT] :
identifier[dll_name] = identifier[getattr] ( identifier[importdata] , literal[string] , literal[string] )
keyword[try] :
identifier[dll_name] = identifier[dll_name] . identifier[decode] ( literal[string] )
keyword[except] identifier[UnicodeDecodeError] :
identifier[dll_name] = identifier[dll_name] . identifier[decode] ( literal[string] , identifier[errors] = literal[string] )
keyword[if] keyword[not] identifier[dll_name] :
identifier[dll_name] = literal[string]
identifier[timestamp] = identifier[getattr] ( identifier[importdata] . identifier[struct] , literal[string] , literal[int] )
keyword[if] identifier[timestamp] :
identifier[import_timestamps] . identifier[append] ([ identifier[dll_name] , identifier[timestamp] ])
keyword[return] identifier[import_timestamps]
|
def _GetImportTimestamps(self, pefile_object):
"""Retrieves timestamps from the import directory, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[int]: import timestamps.
"""
import_timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT'):
return import_timestamps # depends on [control=['if'], data=[]]
for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT:
dll_name = getattr(importdata, 'dll', '')
try:
dll_name = dll_name.decode('ascii') # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace') # depends on [control=['except'], data=[]]
if not dll_name:
dll_name = '<NO DLL NAME>' # depends on [control=['if'], data=[]]
timestamp = getattr(importdata.struct, 'TimeDateStamp', 0)
if timestamp:
import_timestamps.append([dll_name, timestamp]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['importdata']]
return import_timestamps
|
def next_history(self, current): # (C-n)
        u'''Move forward through the history list, fetching the next command. '''
        # Guard clause: nothing to fetch when already at the newest entry.
        if self.history_cursor >= len(self.history) - 1:
            return
        self.history_cursor += 1
        current.set_line(self.history[self.history_cursor].get_line_text())
|
def function[next_history, parameter[self, current]]:
constant[Move forward through the history list, fetching the next command. ]
if compare[name[self].history_cursor less[<] binary_operation[call[name[len], parameter[name[self].history]] - constant[1]]] begin[:]
<ast.AugAssign object at 0x7da1b28af880>
call[name[current].set_line, parameter[call[call[name[self].history][name[self].history_cursor].get_line_text, parameter[]]]]
|
keyword[def] identifier[next_history] ( identifier[self] , identifier[current] ):
literal[string]
keyword[if] identifier[self] . identifier[history_cursor] < identifier[len] ( identifier[self] . identifier[history] )- literal[int] :
identifier[self] . identifier[history_cursor] += literal[int]
identifier[current] . identifier[set_line] ( identifier[self] . identifier[history] [ identifier[self] . identifier[history_cursor] ]. identifier[get_line_text] ())
|
def next_history(self, current): # (C-n)
u'Move forward through the history list, fetching the next command. '
if self.history_cursor < len(self.history) - 1:
self.history_cursor += 1
current.set_line(self.history[self.history_cursor].get_line_text()) # depends on [control=['if'], data=[]]
|
def is_alive(self):
        """Probe the WHAT IF web service and record whether it answered.

        Stores the number of ``TestEmptyResponse`` elements in the reply on
        ``self.alive`` (non-zero means the service is up) and returns it.
        """
        reply = urllib.urlopen("http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/")
        document = xml.dom.minidom.parse(reply)
        self.alive = len(document.getElementsByTagName("TestEmptyResponse"))
        return self.alive
|
def function[is_alive, parameter[self]]:
constant[
Test Function to check WHAT IF servers are up and running.
]
variable[u] assign[=] call[name[urllib].urlopen, parameter[constant[http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/]]]
variable[x] assign[=] call[name[xml].dom.minidom.parse, parameter[name[u]]]
name[self].alive assign[=] call[name[len], parameter[call[name[x].getElementsByTagName, parameter[constant[TestEmptyResponse]]]]]
return[name[self].alive]
|
keyword[def] identifier[is_alive] ( identifier[self] ):
literal[string]
identifier[u] = identifier[urllib] . identifier[urlopen] ( literal[string] )
identifier[x] = identifier[xml] . identifier[dom] . identifier[minidom] . identifier[parse] ( identifier[u] )
identifier[self] . identifier[alive] = identifier[len] ( identifier[x] . identifier[getElementsByTagName] ( literal[string] ))
keyword[return] identifier[self] . identifier[alive]
|
def is_alive(self):
"""
Test Function to check WHAT IF servers are up and running.
"""
u = urllib.urlopen('http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/')
x = xml.dom.minidom.parse(u)
self.alive = len(x.getElementsByTagName('TestEmptyResponse'))
return self.alive
|
def _normalize_json_search_response(self, json):
        """
        Normalizes a JSON search response so that PB and HTTP have the
        same return value
        """
        normalized = {}
        # Optional top-level sections are copied through verbatim.
        for section in (u'facet_counts', u'grouped', u'stats'):
            if section in json:
                normalized[section] = json[section]
        if u'response' in json:
            response = json[u'response']
            normalized['num_found'] = response[u'numFound']
            normalized['max_score'] = float(response[u'maxScore'])
            docs = []
            for doc in response[u'docs']:
                if u'_yz_rk' in doc:
                    # Riak 2.0 results already use the final document shape.
                    docs.append(doc)
                    continue
                # Riak Search 1.0 legacy format: keep the id and flatten the
                # nested 'fields' mapping into the document itself.
                legacy_doc = {u'id': doc[u'id']}
                if u'fields' in doc:
                    for field_name, field_value in six.iteritems(doc[u'fields']):
                        legacy_doc[field_name] = field_value
                docs.append(legacy_doc)
            normalized['docs'] = docs
        return normalized
|
def function[_normalize_json_search_response, parameter[self, json]]:
constant[
Normalizes a JSON search response so that PB and HTTP have the
same return value
]
variable[result] assign[=] dictionary[[], []]
if compare[constant[facet_counts] in name[json]] begin[:]
call[name[result]][constant[facet_counts]] assign[=] call[name[json]][constant[facet_counts]]
if compare[constant[grouped] in name[json]] begin[:]
call[name[result]][constant[grouped]] assign[=] call[name[json]][constant[grouped]]
if compare[constant[stats] in name[json]] begin[:]
call[name[result]][constant[stats]] assign[=] call[name[json]][constant[stats]]
if compare[constant[response] in name[json]] begin[:]
call[name[result]][constant[num_found]] assign[=] call[call[name[json]][constant[response]]][constant[numFound]]
call[name[result]][constant[max_score]] assign[=] call[name[float], parameter[call[call[name[json]][constant[response]]][constant[maxScore]]]]
variable[docs] assign[=] list[[]]
for taget[name[doc]] in starred[call[call[name[json]][constant[response]]][constant[docs]]] begin[:]
variable[resdoc] assign[=] dictionary[[], []]
if compare[constant[_yz_rk] in name[doc]] begin[:]
variable[resdoc] assign[=] name[doc]
call[name[docs].append, parameter[name[resdoc]]]
call[name[result]][constant[docs]] assign[=] name[docs]
return[name[result]]
|
keyword[def] identifier[_normalize_json_search_response] ( identifier[self] , identifier[json] ):
literal[string]
identifier[result] ={}
keyword[if] literal[string] keyword[in] identifier[json] :
identifier[result] [ literal[string] ]= identifier[json] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[json] :
identifier[result] [ literal[string] ]= identifier[json] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[json] :
identifier[result] [ literal[string] ]= identifier[json] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[json] :
identifier[result] [ literal[string] ]= identifier[json] [ literal[string] ][ literal[string] ]
identifier[result] [ literal[string] ]= identifier[float] ( identifier[json] [ literal[string] ][ literal[string] ])
identifier[docs] =[]
keyword[for] identifier[doc] keyword[in] identifier[json] [ literal[string] ][ literal[string] ]:
identifier[resdoc] ={}
keyword[if] literal[string] keyword[in] identifier[doc] :
identifier[resdoc] = identifier[doc]
keyword[else] :
identifier[resdoc] [ literal[string] ]= identifier[doc] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[doc] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[doc] [ literal[string] ]):
identifier[resdoc] [ identifier[k] ]= identifier[v]
identifier[docs] . identifier[append] ( identifier[resdoc] )
identifier[result] [ literal[string] ]= identifier[docs]
keyword[return] identifier[result]
|
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts'] # depends on [control=['if'], data=['json']]
if 'grouped' in json:
result['grouped'] = json[u'grouped'] # depends on [control=['if'], data=['json']]
if 'stats' in json:
result['stats'] = json[u'stats'] # depends on [control=['if'], data=['json']]
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc # depends on [control=['if'], data=['doc']]
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for (k, v) in six.iteritems(doc[u'fields']):
resdoc[k] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['doc']]
docs.append(resdoc) # depends on [control=['for'], data=['doc']]
result['docs'] = docs # depends on [control=['if'], data=['json']]
return result
|
def remote(view, block=None, **flags):
    """Turn a function into a remote function.

    This method can be used for map:

        In [1]: @remote(view, block=True)
           ...: def func(a):
           ...:     pass
    """
    def decorator(func):
        # Bind the view and call flags into a RemoteFunction wrapper.
        return RemoteFunction(view, func, block=block, **flags)
    return decorator
|
def function[remote, parameter[view, block]]:
constant[Turn a function into a remote function.
This method can be used for map:
In [1]: @remote(view,block=True)
...: def func(a):
...: pass
]
def function[remote_function, parameter[f]]:
return[call[name[RemoteFunction], parameter[name[view], name[f]]]]
return[name[remote_function]]
|
keyword[def] identifier[remote] ( identifier[view] , identifier[block] = keyword[None] ,** identifier[flags] ):
literal[string]
keyword[def] identifier[remote_function] ( identifier[f] ):
keyword[return] identifier[RemoteFunction] ( identifier[view] , identifier[f] , identifier[block] = identifier[block] ,** identifier[flags] )
keyword[return] identifier[remote_function]
|
def remote(view, block=None, **flags):
"""Turn a function into a remote function.
This method can be used for map:
In [1]: @remote(view,block=True)
...: def func(a):
...: pass
"""
def remote_function(f):
return RemoteFunction(view, f, block=block, **flags)
return remote_function
|
def round_to_int(number, precision):
    """Round a number to the nearest multiple of a precision.

    Args:
        number: Value to round; truncated to int before rounding.
        precision: Rounding step; coerced to int.

    Returns:
        int: ``number`` rounded half-up to a multiple of ``precision``.
    """
    precision = int(precision)
    # Use integer division throughout: ``precision / 2`` is a float in
    # Python 3, which made this function return a float despite its name
    # and could lose precision for very large inputs.
    rounded = (int(number) + precision // 2) // precision * precision
    return rounded
|
def function[round_to_int, parameter[number, precision]]:
constant[Round a number to a precision]
variable[precision] assign[=] call[name[int], parameter[name[precision]]]
variable[rounded] assign[=] binary_operation[binary_operation[binary_operation[call[name[int], parameter[name[number]]] + binary_operation[name[precision] / constant[2]]] <ast.FloorDiv object at 0x7da2590d6bc0> name[precision]] * name[precision]]
return[name[rounded]]
|
keyword[def] identifier[round_to_int] ( identifier[number] , identifier[precision] ):
literal[string]
identifier[precision] = identifier[int] ( identifier[precision] )
identifier[rounded] =( identifier[int] ( identifier[number] )+ identifier[precision] / literal[int] )// identifier[precision] * identifier[precision]
keyword[return] identifier[rounded]
|
def round_to_int(number, precision):
"""Round a number to a precision"""
precision = int(precision)
rounded = (int(number) + precision / 2) // precision * precision
return rounded
|
def _async_sub_acc_push(self, acc_id_list):
        """Subscribe the async connection to push updates for the given accounts.

        :param acc_id_list: account ids whose pushed data should be received
        :return: (RET_OK, None)
        """
        ret_code, msg, push_req_str = SubAccPush.pack_req(
            acc_id_list=acc_id_list,
            conn_id=self.get_async_conn_id(),
        )
        if ret_code == RET_OK:
            self._send_async_req(push_req_str)
        return RET_OK, None
|
def function[_async_sub_acc_push, parameter[self, acc_id_list]]:
constant[
异步连接指定要接收送的acc id
:param acc_id:
:return:
]
variable[kargs] assign[=] dictionary[[<ast.Constant object at 0x7da2045647f0>, <ast.Constant object at 0x7da2045671f0>], [<ast.Name object at 0x7da2045664a0>, <ast.Call object at 0x7da204566410>]]
<ast.Tuple object at 0x7da204567b80> assign[=] call[name[SubAccPush].pack_req, parameter[]]
if compare[name[ret_code] equal[==] name[RET_OK]] begin[:]
call[name[self]._send_async_req, parameter[name[push_req_str]]]
return[tuple[[<ast.Name object at 0x7da204566ec0>, <ast.Constant object at 0x7da204566770>]]]
|
keyword[def] identifier[_async_sub_acc_push] ( identifier[self] , identifier[acc_id_list] ):
literal[string]
identifier[kargs] ={
literal[string] : identifier[acc_id_list] ,
literal[string] : identifier[self] . identifier[get_async_conn_id] (),
}
identifier[ret_code] , identifier[msg] , identifier[push_req_str] = identifier[SubAccPush] . identifier[pack_req] (** identifier[kargs] )
keyword[if] identifier[ret_code] == identifier[RET_OK] :
identifier[self] . identifier[_send_async_req] ( identifier[push_req_str] )
keyword[return] identifier[RET_OK] , keyword[None]
|
def _async_sub_acc_push(self, acc_id_list):
"""
异步连接指定要接收送的acc id
:param acc_id:
:return:
"""
kargs = {'acc_id_list': acc_id_list, 'conn_id': self.get_async_conn_id()}
(ret_code, msg, push_req_str) = SubAccPush.pack_req(**kargs)
if ret_code == RET_OK:
self._send_async_req(push_req_str) # depends on [control=['if'], data=[]]
return (RET_OK, None)
|
def sum_of_squares(obs, pred):
    """
    Sum of squares between observed and predicted data

    Parameters
    ----------
    obs : iterable
        Observed data
    pred : iterable
        Predicted data

    Returns
    -------
    float
        Sum of squares

    Notes
    -----
    The length of observed and predicted data must match.
    """
    residuals = np.array(obs) - np.array(pred)
    return np.sum(residuals ** 2)
|
def function[sum_of_squares, parameter[obs, pred]]:
constant[
Sum of squares between observed and predicted data
Parameters
----------
obs : iterable
Observed data
pred : iterable
Predicted data
Returns
-------
float
Sum of squares
Notes
-----
The length of observed and predicted data must match.
]
return[call[name[np].sum, parameter[binary_operation[binary_operation[call[name[np].array, parameter[name[obs]]] - call[name[np].array, parameter[name[pred]]]] ** constant[2]]]]]
|
keyword[def] identifier[sum_of_squares] ( identifier[obs] , identifier[pred] ):
literal[string]
keyword[return] identifier[np] . identifier[sum] (( identifier[np] . identifier[array] ( identifier[obs] )- identifier[np] . identifier[array] ( identifier[pred] ))** literal[int] )
|
def sum_of_squares(obs, pred):
"""
Sum of squares between observed and predicted data
Parameters
----------
obs : iterable
Observed data
pred : iterable
Predicted data
Returns
-------
float
Sum of squares
Notes
-----
The length of observed and predicted data must match.
"""
return np.sum((np.array(obs) - np.array(pred)) ** 2)
|
def execute_get_text(command, raise_errors=False):  # type: (str, bool) -> str
    """Run *command* through the shell and return its decoded output.

    Parameters
    ----------
    command : str
        Shell command line to execute (stderr is folded into stdout).
    raise_errors : bool
        When True, re-raise ``CalledProcessError`` on a non-zero exit
        status; otherwise swallow the failure and return "".

    Returns
    -------
    str
        The command output decoded as UTF-8, or "" when the command
        failed or produced no output.
    """
    try:
        raw = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError:
        if not raise_errors:
            return ""
        raise
    return raw.decode("utf-8") if raw else ""
|
def function[execute_get_text, parameter[command, raise_errors]]:
constant[
Execute a shell commmand
:param command:
:return:
]
<ast.Try object at 0x7da18fe933d0>
if name[result] begin[:]
return[call[name[result].decode, parameter[constant[utf-8]]]]
return[constant[]]
|
keyword[def] identifier[execute_get_text] ( identifier[command] , identifier[raise_errors] = keyword[False] ):
literal[string]
keyword[try] :
identifier[result] = identifier[subprocess] . identifier[check_output] ( identifier[command] , identifier[stderr] = identifier[subprocess] . identifier[STDOUT] , identifier[shell] = keyword[True] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
keyword[if] identifier[raise_errors] :
keyword[raise]
keyword[return] literal[string]
keyword[if] identifier[result] :
keyword[return] identifier[result] . identifier[decode] ( literal[string] )
keyword[return] literal[string]
|
def execute_get_text(command, raise_errors=False): # type: (str, bool) -> str
'\n Execute a shell commmand\n :param command:\n :return:\n '
try:
result = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True) # depends on [control=['try'], data=[]]
# print(result.decode())
except subprocess.CalledProcessError:
if raise_errors:
raise # depends on [control=['if'], data=[]]
return '' # depends on [control=['except'], data=[]]
if result:
return result.decode('utf-8') # depends on [control=['if'], data=[]]
return ''
|
def get_frame(frame_id, rows=10, rows_offset=0, cols=-1, full_cols=-1, cols_offset=0, light=False):
    """
    Look up an existing H2OFrame on the H2O cluster by its id.

    :param str frame_id: id of the frame to retrieve
    :param int rows: number of preview rows to fetch (default 10)
    :param int rows_offset: row offset to start fetching from (default 0)
    :param int cols: number of columns to fetch (-1 means all)
    :param full_cols: number of columns to fetch together with backed data
    :param int cols_offset: column offset to start fetching from (default 0)
    :param bool light: whether to use the light frame endpoint
    :returns: the existing H2OFrame with the given id, or None if no such
        frame exists on the cluster.
    """
    frame = H2OFrame()
    frame._ex._cache._id = frame_id
    try:
        frame._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols,
                              full_cols=full_cols, cols_offset=cols_offset,
                              light=light)
        return frame
    except EnvironmentError:
        # The cluster reported that no frame with this id exists.
        return None
|
def function[get_frame, parameter[frame_id, rows, rows_offset, cols, full_cols, cols_offset, light]]:
constant[
Retrieve an existing H2OFrame from the H2O cluster using the frame's id.
:param str frame_id: id of the frame to retrieve
:param int rows: number of rows to fetch for preview (10 by default)
:param int rows_offset: offset to fetch rows from (0 by default)
:param int cols: number of columns to fetch (all by default)
:param full_cols: number of columns to fetch together with backed data
:param int cols_offset: offset to fetch rows from (0 by default)
:param bool light: wether to use light frame endpoint or not
:returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.
]
variable[fr] assign[=] call[name[H2OFrame], parameter[]]
name[fr]._ex._cache._id assign[=] name[frame_id]
<ast.Try object at 0x7da18dc9b760>
return[name[fr]]
|
keyword[def] identifier[get_frame] ( identifier[frame_id] , identifier[rows] = literal[int] , identifier[rows_offset] = literal[int] , identifier[cols] =- literal[int] , identifier[full_cols] =- literal[int] , identifier[cols_offset] = literal[int] , identifier[light] = keyword[False] ):
literal[string]
identifier[fr] = identifier[H2OFrame] ()
identifier[fr] . identifier[_ex] . identifier[_cache] . identifier[_id] = identifier[frame_id]
keyword[try] :
identifier[fr] . identifier[_ex] . identifier[_cache] . identifier[fill] ( identifier[rows] = identifier[rows] , identifier[rows_offset] = identifier[rows_offset] , identifier[cols] = identifier[cols] , identifier[full_cols] = identifier[full_cols] , identifier[cols_offset] = identifier[cols_offset] , identifier[light] = identifier[light] )
keyword[except] identifier[EnvironmentError] :
keyword[return] keyword[None]
keyword[return] identifier[fr]
|
def get_frame(frame_id, rows=10, rows_offset=0, cols=-1, full_cols=-1, cols_offset=0, light=False):
"""
Retrieve an existing H2OFrame from the H2O cluster using the frame's id.
:param str frame_id: id of the frame to retrieve
:param int rows: number of rows to fetch for preview (10 by default)
:param int rows_offset: offset to fetch rows from (0 by default)
:param int cols: number of columns to fetch (all by default)
:param full_cols: number of columns to fetch together with backed data
:param int cols_offset: offset to fetch rows from (0 by default)
:param bool light: wether to use light frame endpoint or not
:returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.
"""
fr = H2OFrame()
fr._ex._cache._id = frame_id
try:
fr._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, full_cols=full_cols, cols_offset=cols_offset, light=light) # depends on [control=['try'], data=[]]
except EnvironmentError:
return None # depends on [control=['except'], data=[]]
return fr
|
def read(cls, fname):
    """ read(fname, fmt)
    This classmethod is the entry point for reading OBJ files.
    Parameters
    ----------
    fname : str
        The name of the file to read.
    fmt : str
        Can be "obj" or "gz" to specify the file format.
    """
    # Derive the format from the extension and pick a matching opener.
    ext = op.splitext(fname)[1].lower()
    assert ext in ('.obj', '.gz')
    open_func = gzip_open if ext == '.gz' else open
    with open_func(fname, 'rb') as file_obj:
        try:
            reader = WavefrontReader(file_obj)
            # Consume lines until the reader signals end-of-file.
            while True:
                reader.readLine()
        except EOFError:
            pass
    # Assemble the mesh and log how long the assembly took.
    start = time.time()
    mesh = reader.finish()
    logger.debug('reading mesh took ' +
                 str(time.time() - start) +
                 ' seconds')
    return mesh
|
def function[read, parameter[cls, fname]]:
constant[ read(fname, fmt)
This classmethod is the entry point for reading OBJ files.
Parameters
----------
fname : str
The name of the file to read.
fmt : str
Can be "obj" or "gz" to specify the file format.
]
variable[fmt] assign[=] call[call[call[name[op].splitext, parameter[name[fname]]]][constant[1]].lower, parameter[]]
assert[compare[name[fmt] in tuple[[<ast.Constant object at 0x7da1b10ee110>, <ast.Constant object at 0x7da1b10ed2a0>]]]]
variable[opener] assign[=] <ast.IfExp object at 0x7da1b10ed870>
with call[name[opener], parameter[name[fname], constant[rb]]] begin[:]
<ast.Try object at 0x7da18dc984c0>
variable[t0] assign[=] call[name[time].time, parameter[]]
variable[mesh] assign[=] call[name[reader].finish, parameter[]]
call[name[logger].debug, parameter[binary_operation[binary_operation[constant[reading mesh took ] + call[name[str], parameter[binary_operation[call[name[time].time, parameter[]] - name[t0]]]]] + constant[ seconds]]]]
return[name[mesh]]
|
keyword[def] identifier[read] ( identifier[cls] , identifier[fname] ):
literal[string]
identifier[fmt] = identifier[op] . identifier[splitext] ( identifier[fname] )[ literal[int] ]. identifier[lower] ()
keyword[assert] identifier[fmt] keyword[in] ( literal[string] , literal[string] )
identifier[opener] = identifier[open] keyword[if] identifier[fmt] == literal[string] keyword[else] identifier[gzip_open]
keyword[with] identifier[opener] ( identifier[fname] , literal[string] ) keyword[as] identifier[f] :
keyword[try] :
identifier[reader] = identifier[WavefrontReader] ( identifier[f] )
keyword[while] keyword[True] :
identifier[reader] . identifier[readLine] ()
keyword[except] identifier[EOFError] :
keyword[pass]
identifier[t0] = identifier[time] . identifier[time] ()
identifier[mesh] = identifier[reader] . identifier[finish] ()
identifier[logger] . identifier[debug] ( literal[string] +
identifier[str] ( identifier[time] . identifier[time] ()- identifier[t0] )+
literal[string] )
keyword[return] identifier[mesh]
|
def read(cls, fname):
""" read(fname, fmt)
This classmethod is the entry point for reading OBJ files.
Parameters
----------
fname : str
The name of the file to read.
fmt : str
Can be "obj" or "gz" to specify the file format.
"""
# Open file
fmt = op.splitext(fname)[1].lower()
assert fmt in ('.obj', '.gz')
opener = open if fmt == '.obj' else gzip_open
with opener(fname, 'rb') as f:
try:
reader = WavefrontReader(f)
while True:
reader.readLine() # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except EOFError:
pass # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['f']]
# Done
t0 = time.time()
mesh = reader.finish()
logger.debug('reading mesh took ' + str(time.time() - t0) + ' seconds')
return mesh
|
def sbar(Ss):
    """
    Calculate the average "s" vector and its sigma from a list of "s"s.

    Parameters
    ----------
    Ss : list or numpy.ndarray
        Sequence of rows, each a 6-element "s" tensor.

    Returns
    -------
    nf : int
        Number of degrees of freedom, ``(npts - 1) * 6``.
    sigma : float
        Root-mean-square deviation of the design matrix about its
        column means, using ``nf`` degrees of freedom.
    avs : list of float
        Column-wise averages of ``Ss``.
    """
    if isinstance(Ss, list):
        Ss = np.array(Ss)
    npts = Ss.shape[0]
    Ss = Ss.transpose()
    # Design matrix: each off-diagonal term folds in half of the two
    # related diagonal terms (mirrors the C implementation's transform).
    D = np.array([Ss[0], Ss[1], Ss[2],
                  Ss[3] + 0.5 * (Ss[0] + Ss[1]),
                  Ss[4] + 0.5 * (Ss[1] + Ss[2]),
                  Ss[5] + 0.5 * (Ss[0] + Ss[2])])
    avd = [np.average(D[j]) for j in range(6)]
    avs = [np.average(Ss[j]) for j in range(6)]
    D = D.transpose()
    nf = (npts - 1) * 6  # number of degrees of freedom
    # Sum of squared deviations of every design-matrix entry from its
    # column mean, then normalize by the degrees of freedom.
    s0 = np.sum((D - avd) ** 2)
    sigma = np.sqrt(s0 / float(nf))
    return nf, sigma, avs
|
def function[sbar, parameter[Ss]]:
constant[
calculate average s,sigma from list of "s"s.
]
if compare[call[name[type], parameter[name[Ss]]] equal[==] name[list]] begin[:]
variable[Ss] assign[=] call[name[np].array, parameter[name[Ss]]]
variable[npts] assign[=] call[name[Ss].shape][constant[0]]
variable[Ss] assign[=] call[name[Ss].transpose, parameter[]]
<ast.Tuple object at 0x7da18ede5090> assign[=] tuple[[<ast.List object at 0x7da18ede4b50>, <ast.List object at 0x7da18ede4bb0>]]
variable[D] assign[=] call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da18ede6830>, <ast.Subscript object at 0x7da18ede46d0>, <ast.Subscript object at 0x7da18ede64d0>, <ast.BinOp object at 0x7da18ede5480>, <ast.BinOp object at 0x7da18ede60e0>, <ast.BinOp object at 0x7da18ede76d0>]]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[6]]]] begin[:]
call[name[avd].append, parameter[call[name[np].average, parameter[call[name[D]][name[j]]]]]]
call[name[avs].append, parameter[call[name[np].average, parameter[call[name[Ss]][name[j]]]]]]
variable[D] assign[=] call[name[D].transpose, parameter[]]
variable[nf] assign[=] binary_operation[binary_operation[name[npts] - constant[1]] * constant[6]]
variable[s0] assign[=] constant[0]
variable[Dels] assign[=] binary_operation[binary_operation[name[D] - name[avd]] ** constant[2]]
variable[s0] assign[=] call[name[np].sum, parameter[name[Dels]]]
variable[sigma] assign[=] call[name[np].sqrt, parameter[binary_operation[name[s0] / call[name[float], parameter[name[nf]]]]]]
return[tuple[[<ast.Name object at 0x7da18f00d270>, <ast.Name object at 0x7da18f00e200>, <ast.Name object at 0x7da18f00c6a0>]]]
|
keyword[def] identifier[sbar] ( identifier[Ss] ):
literal[string]
keyword[if] identifier[type] ( identifier[Ss] )== identifier[list] :
identifier[Ss] = identifier[np] . identifier[array] ( identifier[Ss] )
identifier[npts] = identifier[Ss] . identifier[shape] [ literal[int] ]
identifier[Ss] = identifier[Ss] . identifier[transpose] ()
identifier[avd] , identifier[avs] =[],[]
identifier[D] = identifier[np] . identifier[array] ([ identifier[Ss] [ literal[int] ], identifier[Ss] [ literal[int] ], identifier[Ss] [ literal[int] ], identifier[Ss] [ literal[int] ]+ literal[int] *( identifier[Ss] [ literal[int] ]+ identifier[Ss] [ literal[int] ]),
identifier[Ss] [ literal[int] ]+ literal[int] *( identifier[Ss] [ literal[int] ]+ identifier[Ss] [ literal[int] ]), identifier[Ss] [ literal[int] ]+ literal[int] *( identifier[Ss] [ literal[int] ]+ identifier[Ss] [ literal[int] ])])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] ):
identifier[avd] . identifier[append] ( identifier[np] . identifier[average] ( identifier[D] [ identifier[j] ]))
identifier[avs] . identifier[append] ( identifier[np] . identifier[average] ( identifier[Ss] [ identifier[j] ]))
identifier[D] = identifier[D] . identifier[transpose] ()
identifier[nf] =( identifier[npts] - literal[int] )* literal[int]
identifier[s0] = literal[int]
identifier[Dels] =( identifier[D] - identifier[avd] )** literal[int]
identifier[s0] = identifier[np] . identifier[sum] ( identifier[Dels] )
identifier[sigma] = identifier[np] . identifier[sqrt] ( identifier[s0] / identifier[float] ( identifier[nf] ))
keyword[return] identifier[nf] , identifier[sigma] , identifier[avs]
|
def sbar(Ss):
"""
calculate average s,sigma from list of "s"s.
"""
if type(Ss) == list:
Ss = np.array(Ss) # depends on [control=['if'], data=[]]
npts = Ss.shape[0]
Ss = Ss.transpose()
(avd, avs) = ([], [])
# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()
D = np.array([Ss[0], Ss[1], Ss[2], Ss[3] + 0.5 * (Ss[0] + Ss[1]), Ss[4] + 0.5 * (Ss[1] + Ss[2]), Ss[5] + 0.5 * (Ss[0] + Ss[2])])
for j in range(6):
avd.append(np.average(D[j]))
avs.append(np.average(Ss[j])) # depends on [control=['for'], data=['j']]
D = D.transpose()
# for s in Ss:
# print 'from sbar: ',s
# D.append(s[:]) # append a copy of s
# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])
# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])
# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])
# for j in range(6):
# avd[j]+=(D[-1][j])/float(npts)
# avs[j]+=(s[j])/float(npts)
# calculate sigma
nf = (npts - 1) * 6 # number of degrees of freedom
s0 = 0
Dels = (D - avd) ** 2
s0 = np.sum(Dels)
sigma = np.sqrt(s0 / float(nf))
return (nf, sigma, avs)
|
def md5sum(self, filename, use_sudo=False):
    """
    Compute the MD5 sum of a file.

    Tries known binary locations in order -- LSB Linux ``/usr/bin/md5sum``,
    BSD/macOS ``/sbin/md5``, two SmartOS/Joyent GNU paths -- and finally
    falls back to whatever ``md5sum``/``md5`` is found on ``$PATH``,
    aborting if none exists.

    :param filename: remote path of the file to hash
    :param use_sudo: run the hashing command as root instead of the
        current remote user
    :return: the hex digest string, or ``None`` when the remote command
        failed (the failed result is passed to ``warn``)

    NOTE(review): ``filename`` is interpolated unquoted into a shell
    command via ``%(filename)s % locals()``; paths containing spaces or
    shell metacharacters will break (or worse) -- confirm callers only
    pass safe paths.
    """
    # Pick the runner: root shell when requested, plain run otherwise.
    func = use_sudo and run_as_root or self.run
    with self.settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        # Linux (LSB)
        if exists(u'/usr/bin/md5sum'):
            res = func(u'/usr/bin/md5sum %(filename)s' % locals())
        # BSD / OS X
        elif exists(u'/sbin/md5'):
            res = func(u'/sbin/md5 -r %(filename)s' % locals())
        # SmartOS Joyent build
        elif exists(u'/opt/local/gnu/bin/md5sum'):
            res = func(u'/opt/local/gnu/bin/md5sum %(filename)s' % locals())
        # SmartOS Joyent build
        # (the former doesn't exist, at least on joyent_20130222T000747Z)
        elif exists(u'/opt/local/bin/md5sum'):
            res = func(u'/opt/local/bin/md5sum %(filename)s' % locals())
        # Try to find ``md5sum`` or ``md5`` on ``$PATH`` or abort
        else:
            md5sum = func(u'which md5sum')
            md5 = func(u'which md5')
            if exists(md5sum):
                res = func('%(md5sum)s %(filename)s' % locals())
            elif exists(md5):
                res = func('%(md5)s %(filename)s' % locals())
            else:
                abort('No MD5 utility was found on this system.')
    if res.succeeded:
        _md5sum = res
    else:
        warn(res)
        _md5sum = None
    # Normalize tool output: keep only the digest token of the last line
    # (``md5sum`` prints "<digest>  <path>").
    if isinstance(_md5sum, six.string_types):
        _md5sum = _md5sum.strip().split('\n')[-1].split()[0]
    return _md5sum
|
def function[md5sum, parameter[self, filename, use_sudo]]:
constant[
Compute the MD5 sum of a file.
]
variable[func] assign[=] <ast.BoolOp object at 0x7da1b00ddc00>
with call[name[self].settings, parameter[call[name[hide], parameter[constant[running], constant[stdout], constant[stderr], constant[warnings]]]]] begin[:]
if call[name[exists], parameter[constant[/usr/bin/md5sum]]] begin[:]
variable[res] assign[=] call[name[func], parameter[binary_operation[constant[/usr/bin/md5sum %(filename)s] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]]]
if name[res].succeeded begin[:]
variable[_md5sum] assign[=] name[res]
if call[name[isinstance], parameter[name[_md5sum], name[six].string_types]] begin[:]
variable[_md5sum] assign[=] call[call[call[call[call[name[_md5sum].strip, parameter[]].split, parameter[constant[
]]]][<ast.UnaryOp object at 0x7da1b00b4d00>].split, parameter[]]][constant[0]]
return[name[_md5sum]]
|
keyword[def] identifier[md5sum] ( identifier[self] , identifier[filename] , identifier[use_sudo] = keyword[False] ):
literal[string]
identifier[func] = identifier[use_sudo] keyword[and] identifier[run_as_root] keyword[or] identifier[self] . identifier[run]
keyword[with] identifier[self] . identifier[settings] ( identifier[hide] ( literal[string] , literal[string] , literal[string] , literal[string] ), identifier[warn_only] = keyword[True] ):
keyword[if] identifier[exists] ( literal[string] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[elif] identifier[exists] ( literal[string] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[elif] identifier[exists] ( literal[string] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[elif] identifier[exists] ( literal[string] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[else] :
identifier[md5sum] = identifier[func] ( literal[string] )
identifier[md5] = identifier[func] ( literal[string] )
keyword[if] identifier[exists] ( identifier[md5sum] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[elif] identifier[exists] ( identifier[md5] ):
identifier[res] = identifier[func] ( literal[string] % identifier[locals] ())
keyword[else] :
identifier[abort] ( literal[string] )
keyword[if] identifier[res] . identifier[succeeded] :
identifier[_md5sum] = identifier[res]
keyword[else] :
identifier[warn] ( identifier[res] )
identifier[_md5sum] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[_md5sum] , identifier[six] . identifier[string_types] ):
identifier[_md5sum] = identifier[_md5sum] . identifier[strip] (). identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ()[ literal[int] ]
keyword[return] identifier[_md5sum]
|
def md5sum(self, filename, use_sudo=False):
"""
Compute the MD5 sum of a file.
"""
func = use_sudo and run_as_root or self.run
with self.settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
# Linux (LSB)
if exists(u'/usr/bin/md5sum'):
res = func(u'/usr/bin/md5sum %(filename)s' % locals()) # depends on [control=['if'], data=[]]
# BSD / OS X
elif exists(u'/sbin/md5'):
res = func(u'/sbin/md5 -r %(filename)s' % locals()) # depends on [control=['if'], data=[]]
# SmartOS Joyent build
elif exists(u'/opt/local/gnu/bin/md5sum'):
res = func(u'/opt/local/gnu/bin/md5sum %(filename)s' % locals()) # depends on [control=['if'], data=[]]
# SmartOS Joyent build
# (the former doesn't exist, at least on joyent_20130222T000747Z)
elif exists(u'/opt/local/bin/md5sum'):
res = func(u'/opt/local/bin/md5sum %(filename)s' % locals()) # depends on [control=['if'], data=[]]
else:
# Try to find ``md5sum`` or ``md5`` on ``$PATH`` or abort
md5sum = func(u'which md5sum')
md5 = func(u'which md5')
if exists(md5sum):
res = func('%(md5sum)s %(filename)s' % locals()) # depends on [control=['if'], data=[]]
elif exists(md5):
res = func('%(md5)s %(filename)s' % locals()) # depends on [control=['if'], data=[]]
else:
abort('No MD5 utility was found on this system.') # depends on [control=['with'], data=[]]
if res.succeeded:
_md5sum = res # depends on [control=['if'], data=[]]
else:
warn(res)
_md5sum = None
if isinstance(_md5sum, six.string_types):
_md5sum = _md5sum.strip().split('\n')[-1].split()[0] # depends on [control=['if'], data=[]]
return _md5sum
|
def do_graphviz(self, args, arguments):
    """
    ::
        Usage:
            graphviz FILENAME
        Open the given file with the Graphviz application (macOS only).
        Arguments:
            FILENAME The filename
    """
    filename = arguments['FILENAME']
    # The Graphviz.app bundle and the `open` launcher only exist on macOS;
    # on other platforms this command is a silent no-op (as before).
    if platform.system() == 'Darwin':
        if os.path.isfile(filename):
            # Build an argument vector instead of a shell string: the old
            # os.system() concatenation left the filename unquoted, which
            # broke on paths with spaces and allowed shell injection.
            import subprocess
            subprocess.call(['open', '-a', '/Applications/Graphviz.app',
                             filename])
|
def function[do_graphviz, parameter[self, args, arguments]]:
constant[
::
Usage:
graphviz FILENAME
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
]
variable[filename] assign[=] call[name[arguments]][constant[FILENAME]]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Darwin]] begin[:]
if call[name[os].path.isfile, parameter[name[filename]]] begin[:]
call[name[os].system, parameter[binary_operation[constant[open -a '''/Applications/Graphviz.app''' ] + name[filename]]]]
|
keyword[def] identifier[do_graphviz] ( identifier[self] , identifier[args] , identifier[arguments] ):
literal[string]
identifier[filename] = identifier[arguments] [ literal[string] ]
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ):
identifier[os] . identifier[system] ( literal[string] + identifier[filename] )
|
def do_graphviz(self, args, arguments):
"""
::
Usage:
graphviz FILENAME
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
"""
filename = arguments['FILENAME']
if platform.system() == 'Darwin':
if os.path.isfile(filename):
os.system("open -a '''/Applications/Graphviz.app''' " + filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def switch_to_frame_with_id(self, frame):
    """Swap Selenium's context to the given frame or iframe.

    :param frame: value of the ``id`` attribute of the (i)frame element
        to switch into.

    NOTE(review): ``world`` is presumably the lettuce/aloe global test
    context exposing the shared Selenium ``browser`` -- confirm against
    the harness setup; it is not defined in this file.
    """
    # Locate the frame element by DOM id, then hand driver focus to it.
    elem = world.browser.find_element_by_id(frame)
    world.browser.switch_to.frame(elem)
|
def function[switch_to_frame_with_id, parameter[self, frame]]:
constant[Swap Selenium's context to the given frame or iframe.]
variable[elem] assign[=] call[name[world].browser.find_element_by_id, parameter[name[frame]]]
call[name[world].browser.switch_to.frame, parameter[name[elem]]]
|
keyword[def] identifier[switch_to_frame_with_id] ( identifier[self] , identifier[frame] ):
literal[string]
identifier[elem] = identifier[world] . identifier[browser] . identifier[find_element_by_id] ( identifier[frame] )
identifier[world] . identifier[browser] . identifier[switch_to] . identifier[frame] ( identifier[elem] )
|
def switch_to_frame_with_id(self, frame):
"""Swap Selenium's context to the given frame or iframe."""
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to.frame(elem)
|
def add_segments(self, segments):
    """Append several segments to the composition at once.

    Every added segment's track is also registered with the
    composition's track set.

    :param segments: Segments to add to composition
    :type segments: list of :py:class:`radiotool.composer.Segment`
    """
    self.tracks.update(segment.track for segment in segments)
    self.segments.extend(segments)
|
def function[add_segments, parameter[self, segments]]:
constant[Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment`
]
call[name[self].tracks.update, parameter[<ast.ListComp object at 0x7da18bc70ac0>]]
call[name[self].segments.extend, parameter[name[segments]]]
|
keyword[def] identifier[add_segments] ( identifier[self] , identifier[segments] ):
literal[string]
identifier[self] . identifier[tracks] . identifier[update] ([ identifier[seg] . identifier[track] keyword[for] identifier[seg] keyword[in] identifier[segments] ])
identifier[self] . identifier[segments] . identifier[extend] ( identifier[segments] )
|
def add_segments(self, segments):
"""Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment`
"""
self.tracks.update([seg.track for seg in segments])
self.segments.extend(segments)
|
def fetch_package_version(dist_name):
    """
    >>> fetch_package_version('sentry')
    """
    try:
        # pkg_resources is slow to import, so defer it until needed.
        import pkg_resources
    except ImportError:
        # e.g. Google App Engine ships without pkg_resources.
        raise NotImplementedError('pkg_resources is not available '
                                  'on this Python install')
    return pkg_resources.get_distribution(dist_name).version
|
def function[fetch_package_version, parameter[dist_name]]:
constant[
>>> fetch_package_version('sentry')
]
<ast.Try object at 0x7da1b17cf100>
variable[dist] assign[=] call[name[pkg_resources].get_distribution, parameter[name[dist_name]]]
return[name[dist].version]
|
keyword[def] identifier[fetch_package_version] ( identifier[dist_name] ):
literal[string]
keyword[try] :
keyword[import] identifier[pkg_resources]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] )
identifier[dist] = identifier[pkg_resources] . identifier[get_distribution] ( identifier[dist_name] )
keyword[return] identifier[dist] . identifier[version]
|
def fetch_package_version(dist_name):
"""
>>> fetch_package_version('sentry')
"""
try:
# Importing pkg_resources can be slow, so only import it
# if we need it.
import pkg_resources # depends on [control=['try'], data=[]]
except ImportError:
# pkg_resource is not available on Google App Engine
raise NotImplementedError('pkg_resources is not available on this Python install') # depends on [control=['except'], data=[]]
dist = pkg_resources.get_distribution(dist_name)
return dist.version
|
def username_validator(self, form, field):
    """Ensure that Usernames contains at least 3 alphanumeric characters.
    Override this method to customize the username validator.
    """
    username = field.data
    if len(username) < 3:
        raise ValidationError(
            _('Username must be at least 3 characters long'))
    valid_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._'
    # Reject the name as soon as any character falls outside the whitelist.
    if any(ch not in valid_chars for ch in username):
        raise ValidationError(
            _("Username may only contain letters, numbers, '-', '.' and '_'"))
|
def function[username_validator, parameter[self, form, field]]:
constant[Ensure that Usernames contains at least 3 alphanumeric characters.
Override this method to customize the username validator.
]
variable[username] assign[=] name[field].data
if compare[call[name[len], parameter[name[username]]] less[<] constant[3]] begin[:]
<ast.Raise object at 0x7da1b1d47970>
variable[valid_chars] assign[=] constant[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._]
variable[chars] assign[=] call[name[list], parameter[name[username]]]
for taget[name[char]] in starred[name[chars]] begin[:]
if compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[valid_chars]] begin[:]
<ast.Raise object at 0x7da1b1e01990>
|
keyword[def] identifier[username_validator] ( identifier[self] , identifier[form] , identifier[field] ):
literal[string]
identifier[username] = identifier[field] . identifier[data]
keyword[if] identifier[len] ( identifier[username] )< literal[int] :
keyword[raise] identifier[ValidationError] (
identifier[_] ( literal[string] ))
identifier[valid_chars] = literal[string]
identifier[chars] = identifier[list] ( identifier[username] )
keyword[for] identifier[char] keyword[in] identifier[chars] :
keyword[if] identifier[char] keyword[not] keyword[in] identifier[valid_chars] :
keyword[raise] identifier[ValidationError] (
identifier[_] ( literal[string] ))
|
def username_validator(self, form, field):
"""Ensure that Usernames contains at least 3 alphanumeric characters.
Override this method to customize the username validator.
"""
username = field.data
if len(username) < 3:
raise ValidationError(_('Username must be at least 3 characters long')) # depends on [control=['if'], data=[]]
valid_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._'
chars = list(username)
for char in chars:
if char not in valid_chars:
raise ValidationError(_("Username may only contain letters, numbers, '-', '.' and '_'")) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['char']]
|
def str_value(self):
    """
    See the class documentation.

    Computes (and memoizes in ``_cached_str_val``) the symbol's value as
    a string, per Kconfig semantics: bool/tristate values map through
    ``TRI_TO_STR``; int/hex values honor active ranges, user values, and
    defaults (with clamping); string values take the user value or the
    first satisfied default. Also updates ``self._write_to_conf`` as a
    side effect -- presumably consumed when the configuration is written
    out (see the env_var/defconfig_list override at the end).
    """
    # Fast path: return the memoized value; invalidation clears it.
    if self._cached_str_val is not None:
        return self._cached_str_val
    if self.orig_type in _BOOL_TRISTATE:
        # Also calculates the visibility, so invalidation safe
        self._cached_str_val = TRI_TO_STR[self.tri_value]
        return self._cached_str_val
    # As a quirk of Kconfig, undefined symbols get their name as their
    # string value. This is why things like "FOO = bar" work for seeing if
    # FOO has the value "bar".
    if not self.orig_type:  # UNKNOWN
        self._cached_str_val = self.name
        return self.name
    val = ""
    # Warning: See Symbol._rec_invalidate(), and note that this is a hidden
    # function call (property magic)
    vis = self.visibility
    # Invisible symbols (vis == 0) are by default not written out.
    self._write_to_conf = (vis != 0)
    if self.orig_type in _INT_HEX:
        # The C implementation checks the user value against the range in a
        # separate code path (post-processing after loading a .config).
        # Checking all values here instead makes more sense for us. It
        # requires that we check for a range first.
        base = _TYPE_TO_BASE[self.orig_type]
        # Check if a range is in effect
        for low_expr, high_expr, cond in self.ranges:
            if expr_value(cond):
                has_active_range = True
                # The zeros are from the C implementation running strtoll()
                # on empty strings
                low = int(low_expr.str_value, base) if \
                  _is_base_n(low_expr.str_value, base) else 0
                high = int(high_expr.str_value, base) if \
                  _is_base_n(high_expr.str_value, base) else 0
                break
        else:
            has_active_range = False
        # Defaults are used if the symbol is invisible, lacks a user value,
        # or has an out-of-range user value
        use_defaults = True
        if vis and self.user_value:
            user_val = int(self.user_value, base)
            if has_active_range and not low <= user_val <= high:
                num2str = str if base == 10 else hex
                self.kconfig._warn(
                    "user value {} on the {} symbol {} ignored due to "
                    "being outside the active range ([{}, {}]) -- falling "
                    "back on defaults"
                    .format(num2str(user_val), TYPE_TO_STR[self.orig_type],
                            _name_and_loc(self),
                            num2str(low), num2str(high)))
            else:
                # If the user value is well-formed and satisfies range
                # contraints, it is stored in exactly the same form as
                # specified in the assignment (with or without "0x", etc.)
                val = self.user_value
                use_defaults = False
        if use_defaults:
            # No user value or invalid user value. Look at defaults.
            # Used to implement the warning below
            has_default = False
            for sym, cond in self.defaults:
                if expr_value(cond):
                    # A satisfied default forces the symbol into the output.
                    has_default = self._write_to_conf = True
                    val = sym.str_value
                    if _is_base_n(val, base):
                        val_num = int(val, base)
                    else:
                        val_num = 0  # strtoll() on empty string
                    break
            else:
                val_num = 0  # strtoll() on empty string
            # This clamping procedure runs even if there's no default
            if has_active_range:
                clamp = None
                if val_num < low:
                    clamp = low
                elif val_num > high:
                    clamp = high
                if clamp is not None:
                    # The value is rewritten to a standard form if it is
                    # clamped
                    val = str(clamp) \
                          if self.orig_type is INT else \
                          hex(clamp)
                    if has_default:
                        num2str = str if base == 10 else hex
                        self.kconfig._warn(
                            "default value {} on {} clamped to {} due to "
                            "being outside the active range ([{}, {}])"
                            .format(val_num, _name_and_loc(self),
                                    num2str(clamp), num2str(low),
                                    num2str(high)))
    elif self.orig_type is STRING:
        if vis and self.user_value is not None:
            # If the symbol is visible and has a user value, use that
            val = self.user_value
        else:
            # Otherwise, look at defaults
            for sym, cond in self.defaults:
                if expr_value(cond):
                    val = sym.str_value
                    self._write_to_conf = True
                    break
    # env_var corresponds to SYMBOL_AUTO in the C implementation, and is
    # also set on the defconfig_list symbol there. Test for the
    # defconfig_list symbol explicitly instead here, to avoid a nonsensical
    # env_var setting and the defconfig_list symbol being printed
    # incorrectly. This code is pretty cold anyway.
    if self.env_var is not None or self is self.kconfig.defconfig_list:
        self._write_to_conf = False
    self._cached_str_val = val
    return val
|
def function[str_value, parameter[self]]:
constant[
See the class documentation.
]
if compare[name[self]._cached_str_val is_not constant[None]] begin[:]
return[name[self]._cached_str_val]
if compare[name[self].orig_type in name[_BOOL_TRISTATE]] begin[:]
name[self]._cached_str_val assign[=] call[name[TRI_TO_STR]][name[self].tri_value]
return[name[self]._cached_str_val]
if <ast.UnaryOp object at 0x7da20c795c60> begin[:]
name[self]._cached_str_val assign[=] name[self].name
return[name[self].name]
variable[val] assign[=] constant[]
variable[vis] assign[=] name[self].visibility
name[self]._write_to_conf assign[=] compare[name[vis] not_equal[!=] constant[0]]
if compare[name[self].orig_type in name[_INT_HEX]] begin[:]
variable[base] assign[=] call[name[_TYPE_TO_BASE]][name[self].orig_type]
for taget[tuple[[<ast.Name object at 0x7da18f812500>, <ast.Name object at 0x7da18f8114e0>, <ast.Name object at 0x7da18f813190>]]] in starred[name[self].ranges] begin[:]
if call[name[expr_value], parameter[name[cond]]] begin[:]
variable[has_active_range] assign[=] constant[True]
variable[low] assign[=] <ast.IfExp object at 0x7da18f813490>
variable[high] assign[=] <ast.IfExp object at 0x7da18f811f90>
break
variable[use_defaults] assign[=] constant[True]
if <ast.BoolOp object at 0x7da18f810e80> begin[:]
variable[user_val] assign[=] call[name[int], parameter[name[self].user_value, name[base]]]
if <ast.BoolOp object at 0x7da18f810100> begin[:]
variable[num2str] assign[=] <ast.IfExp object at 0x7da18f811600>
call[name[self].kconfig._warn, parameter[call[constant[user value {} on the {} symbol {} ignored due to being outside the active range ([{}, {}]) -- falling back on defaults].format, parameter[call[name[num2str], parameter[name[user_val]]], call[name[TYPE_TO_STR]][name[self].orig_type], call[name[_name_and_loc], parameter[name[self]]], call[name[num2str], parameter[name[low]]], call[name[num2str], parameter[name[high]]]]]]]
if name[use_defaults] begin[:]
variable[has_default] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da18f813c10>, <ast.Name object at 0x7da18f812a10>]]] in starred[name[self].defaults] begin[:]
if call[name[expr_value], parameter[name[cond]]] begin[:]
variable[has_default] assign[=] constant[True]
variable[val] assign[=] name[sym].str_value
if call[name[_is_base_n], parameter[name[val], name[base]]] begin[:]
variable[val_num] assign[=] call[name[int], parameter[name[val], name[base]]]
break
if name[has_active_range] begin[:]
variable[clamp] assign[=] constant[None]
if compare[name[val_num] less[<] name[low]] begin[:]
variable[clamp] assign[=] name[low]
if compare[name[clamp] is_not constant[None]] begin[:]
variable[val] assign[=] <ast.IfExp object at 0x7da20c795900>
if name[has_default] begin[:]
variable[num2str] assign[=] <ast.IfExp object at 0x7da20c794e80>
call[name[self].kconfig._warn, parameter[call[constant[default value {} on {} clamped to {} due to being outside the active range ([{}, {}])].format, parameter[name[val_num], call[name[_name_and_loc], parameter[name[self]]], call[name[num2str], parameter[name[clamp]]], call[name[num2str], parameter[name[low]]], call[name[num2str], parameter[name[high]]]]]]]
if <ast.BoolOp object at 0x7da1b20a9780> begin[:]
name[self]._write_to_conf assign[=] constant[False]
name[self]._cached_str_val assign[=] name[val]
return[name[val]]
|
keyword[def] identifier[str_value] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_cached_str_val] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_cached_str_val]
keyword[if] identifier[self] . identifier[orig_type] keyword[in] identifier[_BOOL_TRISTATE] :
identifier[self] . identifier[_cached_str_val] = identifier[TRI_TO_STR] [ identifier[self] . identifier[tri_value] ]
keyword[return] identifier[self] . identifier[_cached_str_val]
keyword[if] keyword[not] identifier[self] . identifier[orig_type] :
identifier[self] . identifier[_cached_str_val] = identifier[self] . identifier[name]
keyword[return] identifier[self] . identifier[name]
identifier[val] = literal[string]
identifier[vis] = identifier[self] . identifier[visibility]
identifier[self] . identifier[_write_to_conf] =( identifier[vis] != literal[int] )
keyword[if] identifier[self] . identifier[orig_type] keyword[in] identifier[_INT_HEX] :
identifier[base] = identifier[_TYPE_TO_BASE] [ identifier[self] . identifier[orig_type] ]
keyword[for] identifier[low_expr] , identifier[high_expr] , identifier[cond] keyword[in] identifier[self] . identifier[ranges] :
keyword[if] identifier[expr_value] ( identifier[cond] ):
identifier[has_active_range] = keyword[True]
identifier[low] = identifier[int] ( identifier[low_expr] . identifier[str_value] , identifier[base] ) keyword[if] identifier[_is_base_n] ( identifier[low_expr] . identifier[str_value] , identifier[base] ) keyword[else] literal[int]
identifier[high] = identifier[int] ( identifier[high_expr] . identifier[str_value] , identifier[base] ) keyword[if] identifier[_is_base_n] ( identifier[high_expr] . identifier[str_value] , identifier[base] ) keyword[else] literal[int]
keyword[break]
keyword[else] :
identifier[has_active_range] = keyword[False]
identifier[use_defaults] = keyword[True]
keyword[if] identifier[vis] keyword[and] identifier[self] . identifier[user_value] :
identifier[user_val] = identifier[int] ( identifier[self] . identifier[user_value] , identifier[base] )
keyword[if] identifier[has_active_range] keyword[and] keyword[not] identifier[low] <= identifier[user_val] <= identifier[high] :
identifier[num2str] = identifier[str] keyword[if] identifier[base] == literal[int] keyword[else] identifier[hex]
identifier[self] . identifier[kconfig] . identifier[_warn] (
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[num2str] ( identifier[user_val] ), identifier[TYPE_TO_STR] [ identifier[self] . identifier[orig_type] ],
identifier[_name_and_loc] ( identifier[self] ),
identifier[num2str] ( identifier[low] ), identifier[num2str] ( identifier[high] )))
keyword[else] :
identifier[val] = identifier[self] . identifier[user_value]
identifier[use_defaults] = keyword[False]
keyword[if] identifier[use_defaults] :
identifier[has_default] = keyword[False]
keyword[for] identifier[sym] , identifier[cond] keyword[in] identifier[self] . identifier[defaults] :
keyword[if] identifier[expr_value] ( identifier[cond] ):
identifier[has_default] = identifier[self] . identifier[_write_to_conf] = keyword[True]
identifier[val] = identifier[sym] . identifier[str_value]
keyword[if] identifier[_is_base_n] ( identifier[val] , identifier[base] ):
identifier[val_num] = identifier[int] ( identifier[val] , identifier[base] )
keyword[else] :
identifier[val_num] = literal[int]
keyword[break]
keyword[else] :
identifier[val_num] = literal[int]
keyword[if] identifier[has_active_range] :
identifier[clamp] = keyword[None]
keyword[if] identifier[val_num] < identifier[low] :
identifier[clamp] = identifier[low]
keyword[elif] identifier[val_num] > identifier[high] :
identifier[clamp] = identifier[high]
keyword[if] identifier[clamp] keyword[is] keyword[not] keyword[None] :
identifier[val] = identifier[str] ( identifier[clamp] ) keyword[if] identifier[self] . identifier[orig_type] keyword[is] identifier[INT] keyword[else] identifier[hex] ( identifier[clamp] )
keyword[if] identifier[has_default] :
identifier[num2str] = identifier[str] keyword[if] identifier[base] == literal[int] keyword[else] identifier[hex]
identifier[self] . identifier[kconfig] . identifier[_warn] (
literal[string]
literal[string]
. identifier[format] ( identifier[val_num] , identifier[_name_and_loc] ( identifier[self] ),
identifier[num2str] ( identifier[clamp] ), identifier[num2str] ( identifier[low] ),
identifier[num2str] ( identifier[high] )))
keyword[elif] identifier[self] . identifier[orig_type] keyword[is] identifier[STRING] :
keyword[if] identifier[vis] keyword[and] identifier[self] . identifier[user_value] keyword[is] keyword[not] keyword[None] :
identifier[val] = identifier[self] . identifier[user_value]
keyword[else] :
keyword[for] identifier[sym] , identifier[cond] keyword[in] identifier[self] . identifier[defaults] :
keyword[if] identifier[expr_value] ( identifier[cond] ):
identifier[val] = identifier[sym] . identifier[str_value]
identifier[self] . identifier[_write_to_conf] = keyword[True]
keyword[break]
keyword[if] identifier[self] . identifier[env_var] keyword[is] keyword[not] keyword[None] keyword[or] identifier[self] keyword[is] identifier[self] . identifier[kconfig] . identifier[defconfig_list] :
identifier[self] . identifier[_write_to_conf] = keyword[False]
identifier[self] . identifier[_cached_str_val] = identifier[val]
keyword[return] identifier[val]
|
def str_value(self):
"""
See the class documentation.
"""
if self._cached_str_val is not None:
return self._cached_str_val # depends on [control=['if'], data=[]]
if self.orig_type in _BOOL_TRISTATE:
# Also calculates the visibility, so invalidation safe
self._cached_str_val = TRI_TO_STR[self.tri_value]
return self._cached_str_val # depends on [control=['if'], data=[]]
# As a quirk of Kconfig, undefined symbols get their name as their
# string value. This is why things like "FOO = bar" work for seeing if
# FOO has the value "bar".
if not self.orig_type: # UNKNOWN
self._cached_str_val = self.name
return self.name # depends on [control=['if'], data=[]]
val = ''
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
vis = self.visibility
self._write_to_conf = vis != 0
if self.orig_type in _INT_HEX:
# The C implementation checks the user value against the range in a
# separate code path (post-processing after loading a .config).
# Checking all values here instead makes more sense for us. It
# requires that we check for a range first.
base = _TYPE_TO_BASE[self.orig_type]
# Check if a range is in effect
for (low_expr, high_expr, cond) in self.ranges:
if expr_value(cond):
has_active_range = True
# The zeros are from the C implementation running strtoll()
# on empty strings
low = int(low_expr.str_value, base) if _is_base_n(low_expr.str_value, base) else 0
high = int(high_expr.str_value, base) if _is_base_n(high_expr.str_value, base) else 0
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
has_active_range = False
# Defaults are used if the symbol is invisible, lacks a user value,
# or has an out-of-range user value
use_defaults = True
if vis and self.user_value:
user_val = int(self.user_value, base)
if has_active_range and (not low <= user_val <= high):
num2str = str if base == 10 else hex
self.kconfig._warn('user value {} on the {} symbol {} ignored due to being outside the active range ([{}, {}]) -- falling back on defaults'.format(num2str(user_val), TYPE_TO_STR[self.orig_type], _name_and_loc(self), num2str(low), num2str(high))) # depends on [control=['if'], data=[]]
else:
# If the user value is well-formed and satisfies range
# contraints, it is stored in exactly the same form as
# specified in the assignment (with or without "0x", etc.)
val = self.user_value
use_defaults = False # depends on [control=['if'], data=[]]
if use_defaults:
# No user value or invalid user value. Look at defaults.
# Used to implement the warning below
has_default = False
for (sym, cond) in self.defaults:
if expr_value(cond):
has_default = self._write_to_conf = True
val = sym.str_value
if _is_base_n(val, base):
val_num = int(val, base) # depends on [control=['if'], data=[]]
else:
val_num = 0 # strtoll() on empty string
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
val_num = 0 # strtoll() on empty string
# This clamping procedure runs even if there's no default
if has_active_range:
clamp = None
if val_num < low:
clamp = low # depends on [control=['if'], data=['low']]
elif val_num > high:
clamp = high # depends on [control=['if'], data=['high']]
if clamp is not None:
# The value is rewritten to a standard form if it is
# clamped
val = str(clamp) if self.orig_type is INT else hex(clamp)
if has_default:
num2str = str if base == 10 else hex
self.kconfig._warn('default value {} on {} clamped to {} due to being outside the active range ([{}, {}])'.format(val_num, _name_and_loc(self), num2str(clamp), num2str(low), num2str(high))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['clamp']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.orig_type is STRING:
if vis and self.user_value is not None:
# If the symbol is visible and has a user value, use that
val = self.user_value # depends on [control=['if'], data=[]]
else:
# Otherwise, look at defaults
for (sym, cond) in self.defaults:
if expr_value(cond):
val = sym.str_value
self._write_to_conf = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# env_var corresponds to SYMBOL_AUTO in the C implementation, and is
# also set on the defconfig_list symbol there. Test for the
# defconfig_list symbol explicitly instead here, to avoid a nonsensical
# env_var setting and the defconfig_list symbol being printed
# incorrectly. This code is pretty cold anyway.
if self.env_var is not None or self is self.kconfig.defconfig_list:
self._write_to_conf = False # depends on [control=['if'], data=[]]
self._cached_str_val = val
return val
|
def transform(self, maps):
    """Convert chirp mass and symmetric mass ratio into component masses.

    Reads ``mchirp`` and ``eta`` from *maps* and derives ``mass1`` and
    ``mass2`` from them.

    Parameters
    ----------
    maps : a mapping object
        Must provide values for ``parameters.mchirp`` and ``parameters.eta``.

    Examples
    --------
    Convert a dict of numpy.array:

    >>> import numpy
    >>> from pycbc import transforms
    >>> t = transforms.MchirpEtaToMass1Mass2()
    >>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
    {'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
     'mchirp': array([ 10.]), 'eta': array([ 0.25])}

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    mchirp = maps[parameters.mchirp]
    eta = maps[parameters.eta]
    out = {
        parameters.mass1: conversions.mass1_from_mchirp_eta(mchirp, eta),
        parameters.mass2: conversions.mass2_from_mchirp_eta(mchirp, eta),
    }
    return self.format_output(maps, out)
|
def function[transform, parameter[self, maps]]:
constant[This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
]
variable[out] assign[=] dictionary[[], []]
call[name[out]][name[parameters].mass1] assign[=] call[name[conversions].mass1_from_mchirp_eta, parameter[call[name[maps]][name[parameters].mchirp], call[name[maps]][name[parameters].eta]]]
call[name[out]][name[parameters].mass2] assign[=] call[name[conversions].mass2_from_mchirp_eta, parameter[call[name[maps]][name[parameters].mchirp], call[name[maps]][name[parameters].eta]]]
return[call[name[self].format_output, parameter[name[maps], name[out]]]]
|
keyword[def] identifier[transform] ( identifier[self] , identifier[maps] ):
literal[string]
identifier[out] ={}
identifier[out] [ identifier[parameters] . identifier[mass1] ]= identifier[conversions] . identifier[mass1_from_mchirp_eta] (
identifier[maps] [ identifier[parameters] . identifier[mchirp] ],
identifier[maps] [ identifier[parameters] . identifier[eta] ])
identifier[out] [ identifier[parameters] . identifier[mass2] ]= identifier[conversions] . identifier[mass2_from_mchirp_eta] (
identifier[maps] [ identifier[parameters] . identifier[mchirp] ],
identifier[maps] [ identifier[parameters] . identifier[eta] ])
keyword[return] identifier[self] . identifier[format_output] ( identifier[maps] , identifier[out] )
|
def transform(self, maps):
"""This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(maps[parameters.mchirp], maps[parameters.eta])
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(maps[parameters.mchirp], maps[parameters.eta])
return self.format_output(maps, out)
|
def map_noreturn(targ, argslist):
    """
    parallel_call_noreturn(targ, argslist)

    :Parameters:
      - targ : function
      - argslist : list of tuples

    Does [targ(*args) for args in argslist] using the threadpool,
    discarding the results. If any task raised, the first recorded
    exception is re-raised in the calling thread.
    """
    # Exception-collection scheme borrowed from Anne Archibald's
    # handythread.py.
    exceptions = []
    guard = threading.Lock()
    done_latch = CountDownLatch(len(argslist))

    def on_error(request, lock=guard, collected=exceptions, latch=done_latch):
        # Record the worker's exception info under the lock, then mark
        # this task as finished so the latch can release the caller.
        with lock:
            collected.append(sys.exc_info())
        latch.countdown()

    def on_success(request, result, latch=done_latch):
        latch.countdown()

    for args in argslist:
        __PyMCThreadPool__.putRequest(
            WorkRequest(targ,
                        callback=on_success,
                        exc_callback=on_error,
                        args=args,
                        requestID=id(args)))

    # Block until every task has invoked one of the callbacks.
    done_latch.await_lock()
    if exceptions:
        six.reraise(*exceptions[0])
|
def function[map_noreturn, parameter[targ, argslist]]:
constant[
parallel_call_noreturn(targ, argslist)
:Parameters:
- targ : function
- argslist : list of tuples
Does [targ(*args) for args in argslist] using the threadpool.
]
variable[exceptions] assign[=] list[[]]
variable[n_threads] assign[=] call[name[len], parameter[name[argslist]]]
variable[exc_lock] assign[=] call[name[threading].Lock, parameter[]]
variable[done_lock] assign[=] call[name[CountDownLatch], parameter[name[n_threads]]]
def function[eb, parameter[wr, el, ex, dl]]:
call[name[el].acquire, parameter[]]
call[name[ex].append, parameter[call[name[sys].exc_info, parameter[]]]]
call[name[el].release, parameter[]]
call[name[dl].countdown, parameter[]]
def function[cb, parameter[wr, value, dl]]:
call[name[dl].countdown, parameter[]]
for taget[name[args]] in starred[name[argslist]] begin[:]
call[name[__PyMCThreadPool__].putRequest, parameter[call[name[WorkRequest], parameter[name[targ]]]]]
call[name[done_lock].await_lock, parameter[]]
if name[exceptions] begin[:]
call[name[six].reraise, parameter[<ast.Starred object at 0x7da1b184a290>]]
|
keyword[def] identifier[map_noreturn] ( identifier[targ] , identifier[argslist] ):
literal[string]
identifier[exceptions] =[]
identifier[n_threads] = identifier[len] ( identifier[argslist] )
identifier[exc_lock] = identifier[threading] . identifier[Lock] ()
identifier[done_lock] = identifier[CountDownLatch] ( identifier[n_threads] )
keyword[def] identifier[eb] ( identifier[wr] , identifier[el] = identifier[exc_lock] , identifier[ex] = identifier[exceptions] , identifier[dl] = identifier[done_lock] ):
identifier[el] . identifier[acquire] ()
identifier[ex] . identifier[append] ( identifier[sys] . identifier[exc_info] ())
identifier[el] . identifier[release] ()
identifier[dl] . identifier[countdown] ()
keyword[def] identifier[cb] ( identifier[wr] , identifier[value] , identifier[dl] = identifier[done_lock] ):
identifier[dl] . identifier[countdown] ()
keyword[for] identifier[args] keyword[in] identifier[argslist] :
identifier[__PyMCThreadPool__] . identifier[putRequest] (
identifier[WorkRequest] ( identifier[targ] ,
identifier[callback] = identifier[cb] ,
identifier[exc_callback] = identifier[eb] ,
identifier[args] = identifier[args] ,
identifier[requestID] = identifier[id] ( identifier[args] )))
identifier[done_lock] . identifier[await_lock] ()
keyword[if] identifier[exceptions] :
identifier[six] . identifier[reraise] (* identifier[exceptions] [ literal[int] ])
|
def map_noreturn(targ, argslist):
"""
parallel_call_noreturn(targ, argslist)
:Parameters:
- targ : function
- argslist : list of tuples
Does [targ(*args) for args in argslist] using the threadpool.
"""
# Thanks to Anne Archibald's handythread.py for the exception handling
# mechanism.
exceptions = []
n_threads = len(argslist)
exc_lock = threading.Lock()
done_lock = CountDownLatch(n_threads)
def eb(wr, el=exc_lock, ex=exceptions, dl=done_lock):
el.acquire()
ex.append(sys.exc_info())
el.release()
dl.countdown()
def cb(wr, value, dl=done_lock):
dl.countdown()
for args in argslist:
__PyMCThreadPool__.putRequest(WorkRequest(targ, callback=cb, exc_callback=eb, args=args, requestID=id(args))) # depends on [control=['for'], data=['args']]
done_lock.await_lock()
if exceptions:
six.reraise(*exceptions[0]) # depends on [control=['if'], data=[]]
|
def overview():
    """Print a per-range summary of how many live hosts fall in each range.

    Looks up all configured ranges, aggregates the indexed hosts with
    status 'up' into IP-range buckets, and prints one line per range with
    its host count and tags. Prints an error when no ranges are defined.
    """
    ranges = RangeSearch().get_ranges()
    if not ranges:
        print_error("No ranges defined.")
        return

    # Buckets for the aggregation plus a range -> tags lookup for display.
    formatted_ranges = [{'mask': r.range} for r in ranges]
    tags_lookup = {r.range: r.tags for r in ranges}

    search = Host.search().filter('term', status='up')
    search.aggs.bucket('hosts', 'ip_range', field='address',
                       ranges=formatted_ranges)
    response = search.execute()

    print_line("{0:<18} {1:<6} {2}".format("Range", "Count", "Tags"))
    print_line("-" * 60)
    for bucket in response.aggregations.hosts.buckets:
        print_line("{0:<18} {1:<6} {2}".format(
            bucket.key, bucket.doc_count, tags_lookup[bucket.key]))
|
def function[overview, parameter[]]:
constant[
Creates a overview of the hosts per range.
]
variable[range_search] assign[=] call[name[RangeSearch], parameter[]]
variable[ranges] assign[=] call[name[range_search].get_ranges, parameter[]]
if name[ranges] begin[:]
variable[formatted_ranges] assign[=] list[[]]
variable[tags_lookup] assign[=] dictionary[[], []]
for taget[name[r]] in starred[name[ranges]] begin[:]
call[name[formatted_ranges].append, parameter[dictionary[[<ast.Constant object at 0x7da1b009e4a0>], [<ast.Attribute object at 0x7da1b009e290>]]]]
call[name[tags_lookup]][name[r].range] assign[=] name[r].tags
variable[search] assign[=] call[name[Host].search, parameter[]]
variable[search] assign[=] call[name[search].filter, parameter[constant[term]]]
call[name[search].aggs.bucket, parameter[constant[hosts], constant[ip_range]]]
variable[response] assign[=] call[name[search].execute, parameter[]]
call[name[print_line], parameter[call[constant[{0:<18} {1:<6} {2}].format, parameter[constant[Range], constant[Count], constant[Tags]]]]]
call[name[print_line], parameter[binary_operation[constant[-] * constant[60]]]]
for taget[name[entry]] in starred[name[response].aggregations.hosts.buckets] begin[:]
call[name[print_line], parameter[call[constant[{0:<18} {1:<6} {2}].format, parameter[name[entry].key, name[entry].doc_count, call[name[tags_lookup]][name[entry].key]]]]]
|
keyword[def] identifier[overview] ():
literal[string]
identifier[range_search] = identifier[RangeSearch] ()
identifier[ranges] = identifier[range_search] . identifier[get_ranges] ()
keyword[if] identifier[ranges] :
identifier[formatted_ranges] =[]
identifier[tags_lookup] ={}
keyword[for] identifier[r] keyword[in] identifier[ranges] :
identifier[formatted_ranges] . identifier[append] ({ literal[string] : identifier[r] . identifier[range] })
identifier[tags_lookup] [ identifier[r] . identifier[range] ]= identifier[r] . identifier[tags]
identifier[search] = identifier[Host] . identifier[search] ()
identifier[search] = identifier[search] . identifier[filter] ( literal[string] , identifier[status] = literal[string] )
identifier[search] . identifier[aggs] . identifier[bucket] ( literal[string] , literal[string] , identifier[field] = literal[string] , identifier[ranges] = identifier[formatted_ranges] )
identifier[response] = identifier[search] . identifier[execute] ()
identifier[print_line] ( literal[string] . identifier[format] ( literal[string] , literal[string] , literal[string] ))
identifier[print_line] ( literal[string] * literal[int] )
keyword[for] identifier[entry] keyword[in] identifier[response] . identifier[aggregations] . identifier[hosts] . identifier[buckets] :
identifier[print_line] ( literal[string] . identifier[format] ( identifier[entry] . identifier[key] , identifier[entry] . identifier[doc_count] , identifier[tags_lookup] [ identifier[entry] . identifier[key] ]))
keyword[else] :
identifier[print_error] ( literal[string] )
|
def overview():
"""
Creates a overview of the hosts per range.
"""
range_search = RangeSearch()
ranges = range_search.get_ranges()
if ranges:
formatted_ranges = []
tags_lookup = {}
for r in ranges:
formatted_ranges.append({'mask': r.range})
tags_lookup[r.range] = r.tags # depends on [control=['for'], data=['r']]
search = Host.search()
search = search.filter('term', status='up')
search.aggs.bucket('hosts', 'ip_range', field='address', ranges=formatted_ranges)
response = search.execute()
print_line('{0:<18} {1:<6} {2}'.format('Range', 'Count', 'Tags'))
print_line('-' * 60)
for entry in response.aggregations.hosts.buckets:
print_line('{0:<18} {1:<6} {2}'.format(entry.key, entry.doc_count, tags_lookup[entry.key])) # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]]
else:
print_error('No ranges defined.')
|
def evaluate_model_single_recording_multisymbol(model_file, recording):
    """
    Evaluate a model for a single recording where possibly multiple symbols
    are.

    Parameters
    ----------
    model_file : string
        Model file (.tar)
    recording :
        The handwritten recording.

    Returns
    -------
    The evaluation results produced by
    ``evaluate_model_single_recording_preloaded``.
    """
    # Load everything the evaluation needs from the model archive once.
    preprocessing_queue, feature_list, model, output_semantics = \
        load_model(model_file)
    logging.info("multiple symbol mode")
    logging.info(recording)
    return evaluate_model_single_recording_preloaded(preprocessing_queue,
                                                     feature_list,
                                                     model,
                                                     output_semantics,
                                                     recording)
|
def function[evaluate_model_single_recording_multisymbol, parameter[model_file, recording]]:
constant[
Evaluate a model for a single recording where possibly multiple symbols
are.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
]
<ast.Tuple object at 0x7da2041d9870> assign[=] call[name[load_model], parameter[name[model_file]]]
call[name[logging].info, parameter[constant[multiple symbol mode]]]
call[name[logging].info, parameter[name[recording]]]
variable[results] assign[=] call[name[evaluate_model_single_recording_preloaded], parameter[name[preprocessing_queue], name[feature_list], name[model], name[output_semantics], name[recording]]]
return[name[results]]
|
keyword[def] identifier[evaluate_model_single_recording_multisymbol] ( identifier[model_file] , identifier[recording] ):
literal[string]
( identifier[preprocessing_queue] , identifier[feature_list] , identifier[model] ,
identifier[output_semantics] )= identifier[load_model] ( identifier[model_file] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[logging] . identifier[info] ( identifier[recording] )
identifier[results] = identifier[evaluate_model_single_recording_preloaded] ( identifier[preprocessing_queue] ,
identifier[feature_list] ,
identifier[model] ,
identifier[output_semantics] ,
identifier[recording] )
keyword[return] identifier[results]
|
def evaluate_model_single_recording_multisymbol(model_file, recording):
"""
Evaluate a model for a single recording where possibly multiple symbols
are.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
"""
(preprocessing_queue, feature_list, model, output_semantics) = load_model(model_file)
logging.info('multiple symbol mode')
logging.info(recording)
results = evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording)
return results
|
def service_unavailable(cls, errors=None):
    """Shortcut API for HTTP 503 `Service Unavailable` response.

    Args:
        errors (list): Response key/value data.

    Returns:
        WSResponse Instance.
    """
    # When status exposure is enabled, mirror the 503 onto the underlying
    # HTTP response object as well.
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '503 Service Unavailable'
    instance = cls(503, None, errors)
    return instance.to_json
|
def function[service_unavailable, parameter[cls, errors]]:
constant[Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
]
if name[cls].expose_status begin[:]
name[cls].response.content_type assign[=] constant[application/json]
name[cls].response._status_line assign[=] constant[503 Service Unavailable]
return[call[name[cls], parameter[constant[503], constant[None], name[errors]]].to_json]
|
keyword[def] identifier[service_unavailable] ( identifier[cls] , identifier[errors] = keyword[None] ):
literal[string]
keyword[if] identifier[cls] . identifier[expose_status] :
identifier[cls] . identifier[response] . identifier[content_type] = literal[string]
identifier[cls] . identifier[response] . identifier[_status_line] = literal[string]
keyword[return] identifier[cls] ( literal[int] , keyword[None] , identifier[errors] ). identifier[to_json]
|
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable' # depends on [control=['if'], data=[]]
return cls(503, None, errors).to_json
|
def var_deleted(self, v):
    """
    var was deleted in the bot; remove its widget and resize the window.

    :param v: the variable whose widget should be removed
    :return:
    """
    # Each widget lives inside its own single-widget container, so the
    # container (the widget's parent) is what gets removed from the UI.
    doomed = self.widgets.pop(v.name)
    self.container.remove(doomed.get_parent())
    # Shrink the window to fit the remaining variable rows.
    self.window.set_size_request(400, 35 * len(self.widgets.keys()))
    self.window.show_all()
|
def function[var_deleted, parameter[self, v]]:
constant[
var was added in the bot
:param v:
:return:
]
variable[widget] assign[=] call[name[self].widgets][name[v].name]
variable[parent] assign[=] call[name[widget].get_parent, parameter[]]
call[name[self].container.remove, parameter[name[parent]]]
<ast.Delete object at 0x7da18dc046d0>
call[name[self].window.set_size_request, parameter[constant[400], binary_operation[constant[35] * call[name[len], parameter[call[name[self].widgets.keys, parameter[]]]]]]]
call[name[self].window.show_all, parameter[]]
|
keyword[def] identifier[var_deleted] ( identifier[self] , identifier[v] ):
literal[string]
identifier[widget] = identifier[self] . identifier[widgets] [ identifier[v] . identifier[name] ]
identifier[parent] = identifier[widget] . identifier[get_parent] ()
identifier[self] . identifier[container] . identifier[remove] ( identifier[parent] )
keyword[del] identifier[self] . identifier[widgets] [ identifier[v] . identifier[name] ]
identifier[self] . identifier[window] . identifier[set_size_request] ( literal[int] , literal[int] * identifier[len] ( identifier[self] . identifier[widgets] . identifier[keys] ()))
identifier[self] . identifier[window] . identifier[show_all] ()
|
def var_deleted(self, v):
"""
var was added in the bot
:param v:
:return:
"""
widget = self.widgets[v.name]
# widgets are all in a single container ..
parent = widget.get_parent()
self.container.remove(parent)
del self.widgets[v.name]
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all()
|
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):
    """Move point between clusters, numerical attributes.

    Updates the per-cluster attribute sums and membership counts in place,
    then returns both structures for convenience.
    """
    to_sums = cl_attr_sum[to_clust]
    from_sums = cl_attr_sum[from_clust]
    # Shift every attribute value from the source cluster's running sum
    # into the destination cluster's running sum.
    for attr_idx, attr_val in enumerate(point):
        to_sums[attr_idx] += attr_val
        from_sums[attr_idx] -= attr_val
    # One member leaves from_clust and joins to_clust.
    cl_memb_sum[to_clust] += 1
    cl_memb_sum[from_clust] -= 1
    return cl_attr_sum, cl_memb_sum
|
def function[move_point_num, parameter[point, to_clust, from_clust, cl_attr_sum, cl_memb_sum]]:
constant[Move point between clusters, numerical attributes.]
for taget[tuple[[<ast.Name object at 0x7da1b1834550>, <ast.Name object at 0x7da1b1834520>]]] in starred[call[name[enumerate], parameter[name[point]]]] begin[:]
<ast.AugAssign object at 0x7da1b1835870>
<ast.AugAssign object at 0x7da1b1835450>
<ast.AugAssign object at 0x7da1b18373d0>
<ast.AugAssign object at 0x7da1b1837340>
return[tuple[[<ast.Name object at 0x7da1b18370d0>, <ast.Name object at 0x7da1b1835270>]]]
|
keyword[def] identifier[move_point_num] ( identifier[point] , identifier[to_clust] , identifier[from_clust] , identifier[cl_attr_sum] , identifier[cl_memb_sum] ):
literal[string]
keyword[for] identifier[iattr] , identifier[curattr] keyword[in] identifier[enumerate] ( identifier[point] ):
identifier[cl_attr_sum] [ identifier[to_clust] ][ identifier[iattr] ]+= identifier[curattr]
identifier[cl_attr_sum] [ identifier[from_clust] ][ identifier[iattr] ]-= identifier[curattr]
identifier[cl_memb_sum] [ identifier[to_clust] ]+= literal[int]
identifier[cl_memb_sum] [ identifier[from_clust] ]-= literal[int]
keyword[return] identifier[cl_attr_sum] , identifier[cl_memb_sum]
|
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):
"""Move point between clusters, numerical attributes."""
# Update sum of attributes in cluster.
for (iattr, curattr) in enumerate(point):
cl_attr_sum[to_clust][iattr] += curattr
cl_attr_sum[from_clust][iattr] -= curattr # depends on [control=['for'], data=[]]
# Update sums of memberships in cluster
cl_memb_sum[to_clust] += 1
cl_memb_sum[from_clust] -= 1
return (cl_attr_sum, cl_memb_sum)
|
def VerifyStructure(self, parser_mediator, line):
  """Verifies if a line from a text file is in the expected format.

  Args:
    parser_mediator (ParserMediator): parser mediator.
    line (str): line from a text file.

  Returns:
    bool: True if the line is in the expected format, False if not.
  """
  try:
    parsed_line = self._DPKG_LOG_LINE.parseString(line)
  except pyparsing.ParseException as exc:
    logger.debug(
        'Unable to parse Debian dpkg.log file with error: {0!s}'.format(exc))
    return False

  # A valid dpkg.log line must yield both a timestamp and a message body.
  return all(key in parsed_line for key in ('date_time', 'body'))
|
def function[VerifyStructure, parameter[self, parser_mediator, line]]:
constant[Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
]
<ast.Try object at 0x7da2047e9c00>
return[<ast.BoolOp object at 0x7da2047e93f0>]
|
keyword[def] identifier[VerifyStructure] ( identifier[self] , identifier[parser_mediator] , identifier[line] ):
literal[string]
keyword[try] :
identifier[structure] = identifier[self] . identifier[_DPKG_LOG_LINE] . identifier[parseString] ( identifier[line] )
keyword[except] identifier[pyparsing] . identifier[ParseException] keyword[as] identifier[exception] :
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] (
identifier[exception] ))
keyword[return] keyword[False]
keyword[return] literal[string] keyword[in] identifier[structure] keyword[and] literal[string] keyword[in] identifier[structure]
|
def VerifyStructure(self, parser_mediator, line):
"""Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._DPKG_LOG_LINE.parseString(line) # depends on [control=['try'], data=[]]
except pyparsing.ParseException as exception:
logger.debug('Unable to parse Debian dpkg.log file with error: {0!s}'.format(exception))
return False # depends on [control=['except'], data=['exception']]
return 'date_time' in structure and 'body' in structure
|
def sync(self, localpath, href, exclude=None, block=True):
    """
    Recursively sync a local folder up to a remote folder.

    Only entries missing on the remote side are created/uploaded; existing
    remote folders and files are left untouched.

    :param localpath: local folder path
    :param href: remote folder href
    :param exclude: optional callable ``(local_path, remote_path) -> bool``;
        a falsy return value skips syncing that sub-folder
    :param block: when True, wait on the shared work queue until all
        asynchronously scheduled jobs have finished
    :return: None
    """
    logger.info(u("sync: %s %s") % (localpath, href))
    try:
        localpath = _(localpath)
        href = remote(href)
        # Only the top level of localpath is walked here; sub-folders are
        # handled by the recursive "sync" jobs scheduled further below.
        localRoot, localFolders, localFiles = next(os.walk(localpath))
        remoteFolders, remoteFiles = self.list(href)
        # Remote folder missing entirely: create it and treat it as empty.
        if remoteFiles is None or remoteFolders is None:
            remoteFiles = {}
            remoteFolders = {}
            self.mkdir(href)
        def norm(folder):
            # Remote path for a local folder name, with a trailing slash so
            # it compares equal to the keys returned in remoteFolders.
            path = os.path.join(href, _(folder))
            if path[len(path) - 1] != os.path.sep:
                path += u("/")
            return path
        # Schedule creation of remote folders that do not exist yet.
        foldersToCreate = filter(
            lambda folderPath: folderPath not in remoteFolders,
            map(norm, localFolders)
        )
        apply_async("mkdir", lambda path: self.mkdir(path), foldersToCreate, self.limit)
        # Schedule upload of local files missing on the remote side.
        filesToSync = filter(
            lambda lFile: os.path.join(href, _(lFile)) not in remoteFiles,
            localFiles
        )
        fileArgs = [(os.path.join(localpath, f), os.path.join(href, f))
                    for f in filesToSync]
        apply_async("upload", lambda s, t: self.upload(s, t), fileArgs, self.limit)
        # Recurse into each sub-folder unless the exclude callback vetoes it.
        for folder in localFolders:
            localFolderPath = os.path.join(localpath, folder)
            remoteFolderPath = os.path.join(href, folder)
            if exclude:
                bSync = exclude(localFolderPath, remoteFolderPath)
            else:
                bSync = True
            if bSync:
                # block=False so only the outermost call joins the queue.
                apply_async(
                    "sync",
                    lambda localpath, href: self.sync(localpath, href, exclude, False),
                    [(localFolderPath, remoteFolderPath), ]
                )
    except ConnectionException:
        raise
    except Exception:
        # Best-effort sync: log and carry on.  Connection errors above are
        # still propagated to the caller.
        e = sys.exc_info()[1]
        logger.exception(e)
    if block:
        # NOTE(review): qWork appears to be the module-level work queue fed
        # by apply_async — confirm.
        qWork.join()
|
def function[sync, parameter[self, localpath, href, exclude, block]]:
constant[
Sync local and remote folders
:param localpath: local folder
:param href: remote folder
:param exclude: filter folder which need to exlude
:return: respose
]
call[name[logger].info, parameter[binary_operation[call[name[u], parameter[constant[sync: %s %s]]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2456ef0>, <ast.Name object at 0x7da1b2455a20>]]]]]
<ast.Try object at 0x7da1b2457910>
if name[block] begin[:]
call[name[qWork].join, parameter[]]
|
keyword[def] identifier[sync] ( identifier[self] , identifier[localpath] , identifier[href] , identifier[exclude] = keyword[None] , identifier[block] = keyword[True] ):
literal[string]
identifier[logger] . identifier[info] ( identifier[u] ( literal[string] )%( identifier[localpath] , identifier[href] ))
keyword[try] :
identifier[localpath] = identifier[_] ( identifier[localpath] )
identifier[href] = identifier[remote] ( identifier[href] )
identifier[localRoot] , identifier[localFolders] , identifier[localFiles] = identifier[next] ( identifier[os] . identifier[walk] ( identifier[localpath] ))
identifier[remoteFolders] , identifier[remoteFiles] = identifier[self] . identifier[list] ( identifier[href] )
keyword[if] identifier[remoteFiles] keyword[is] keyword[None] keyword[or] identifier[remoteFolders] keyword[is] keyword[None] :
identifier[remoteFiles] ={}
identifier[remoteFolders] ={}
identifier[self] . identifier[mkdir] ( identifier[href] )
keyword[def] identifier[norm] ( identifier[folder] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[href] , identifier[_] ( identifier[folder] ))
keyword[if] identifier[path] [ identifier[len] ( identifier[path] )- literal[int] ]!= identifier[os] . identifier[path] . identifier[sep] :
identifier[path] += identifier[u] ( literal[string] )
keyword[return] identifier[path]
identifier[foldersToCreate] = identifier[filter] (
keyword[lambda] identifier[folderPath] : identifier[folderPath] keyword[not] keyword[in] identifier[remoteFolders] ,
identifier[map] ( identifier[norm] , identifier[localFolders] )
)
identifier[apply_async] ( literal[string] , keyword[lambda] identifier[path] : identifier[self] . identifier[mkdir] ( identifier[path] ), identifier[foldersToCreate] , identifier[self] . identifier[limit] )
identifier[filesToSync] = identifier[filter] (
keyword[lambda] identifier[lFile] : identifier[os] . identifier[path] . identifier[join] ( identifier[href] , identifier[_] ( identifier[lFile] )) keyword[not] keyword[in] identifier[remoteFiles] ,
identifier[localFiles]
)
identifier[fileArgs] =[( identifier[os] . identifier[path] . identifier[join] ( identifier[localpath] , identifier[f] ), identifier[os] . identifier[path] . identifier[join] ( identifier[href] , identifier[f] ))
keyword[for] identifier[f] keyword[in] identifier[filesToSync] ]
identifier[apply_async] ( literal[string] , keyword[lambda] identifier[s] , identifier[t] : identifier[self] . identifier[upload] ( identifier[s] , identifier[t] ), identifier[fileArgs] , identifier[self] . identifier[limit] )
keyword[for] identifier[folder] keyword[in] identifier[localFolders] :
identifier[localFolderPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[localpath] , identifier[folder] )
identifier[remoteFolderPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[href] , identifier[folder] )
keyword[if] identifier[exclude] :
identifier[bSync] = identifier[exclude] ( identifier[localFolderPath] , identifier[remoteFolderPath] )
keyword[else] :
identifier[bSync] = keyword[True]
keyword[if] identifier[bSync] :
identifier[apply_async] (
literal[string] ,
keyword[lambda] identifier[localpath] , identifier[href] : identifier[self] . identifier[sync] ( identifier[localpath] , identifier[href] , identifier[exclude] , keyword[False] ),
[( identifier[localFolderPath] , identifier[remoteFolderPath] ),]
)
keyword[except] identifier[ConnectionException] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
identifier[logger] . identifier[exception] ( identifier[e] )
keyword[if] identifier[block] :
identifier[qWork] . identifier[join] ()
|
def sync(self, localpath, href, exclude=None, block=True):
"""
Sync local and remote folders
:param localpath: local folder
:param href: remote folder
:param exclude: filter folder which need to exlude
:return: respose
"""
logger.info(u('sync: %s %s') % (localpath, href))
try:
localpath = _(localpath)
href = remote(href)
(localRoot, localFolders, localFiles) = next(os.walk(localpath))
(remoteFolders, remoteFiles) = self.list(href)
if remoteFiles is None or remoteFolders is None:
remoteFiles = {}
remoteFolders = {}
self.mkdir(href) # depends on [control=['if'], data=[]]
def norm(folder):
path = os.path.join(href, _(folder))
if path[len(path) - 1] != os.path.sep:
path += u('/') # depends on [control=['if'], data=[]]
return path
foldersToCreate = filter(lambda folderPath: folderPath not in remoteFolders, map(norm, localFolders))
apply_async('mkdir', lambda path: self.mkdir(path), foldersToCreate, self.limit)
filesToSync = filter(lambda lFile: os.path.join(href, _(lFile)) not in remoteFiles, localFiles)
fileArgs = [(os.path.join(localpath, f), os.path.join(href, f)) for f in filesToSync]
apply_async('upload', lambda s, t: self.upload(s, t), fileArgs, self.limit)
for folder in localFolders:
localFolderPath = os.path.join(localpath, folder)
remoteFolderPath = os.path.join(href, folder)
if exclude:
bSync = exclude(localFolderPath, remoteFolderPath) # depends on [control=['if'], data=[]]
else:
bSync = True
if bSync:
apply_async('sync', lambda localpath, href: self.sync(localpath, href, exclude, False), [(localFolderPath, remoteFolderPath)]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['folder']] # depends on [control=['try'], data=[]]
except ConnectionException:
raise # depends on [control=['except'], data=[]]
except Exception:
e = sys.exc_info()[1]
logger.exception(e) # depends on [control=['except'], data=[]]
if block:
qWork.join() # depends on [control=['if'], data=[]]
|
def _shortcut_open(
        uri,
        mode,
        ignore_ext=False,
        buffering=-1,
        encoding=None,
        errors=None,
):
    """Try to open the URI directly with the standard library open function.

    This can be much faster than the alternative of opening in binary mode
    and then decoding, but is only possible when:

    1. the URI refers to a local file, and
    2. the file is not compressed, or extension handling is ignored.

    If the shortcut cannot be used for the specified URI, returns None so
    the caller can fall back to the generic code path.

    :param str uri: A string indicating what to open.
    :param str mode: The mode to pass to the open function.
    :returns: The opened file, or None if the shortcut does not apply.
    :rtype: file
    """
    if not isinstance(uri, six.string_types):
        return None

    parsed = _parse_uri(uri)
    if parsed.scheme != 'file':
        return None

    extension = P.splitext(parsed.uri_path)[1]
    if not ignore_ext and extension in _COMPRESSOR_REGISTRY:
        return None

    open_kwargs = {}
    if encoding is not None:
        open_kwargs['encoding'] = encoding
        # Text decoding was requested, so drop any binary flag from mode.
        mode = mode.replace('b', '')

    # Binary mode of the builtin/stdlib open function doesn't take an
    # errors argument.
    if errors and 'b' not in mode:
        open_kwargs['errors'] = errors

    # Under Py3 the built-in open accepts kwargs.  Under Py2 it doesn't,
    # but it is still used whenever possible (see issue #207); io.open is
    # the fallback only when kwargs must be passed.
    if six.PY3:
        return _builtin_open(parsed.uri_path, mode, buffering=buffering, **open_kwargs)
    if not open_kwargs:
        return _builtin_open(parsed.uri_path, mode, buffering=buffering)
    return io.open(parsed.uri_path, mode, buffering=buffering, **open_kwargs)
|
def function[_shortcut_open, parameter[uri, mode, ignore_ext, buffering, encoding, errors]]:
constant[Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
]
if <ast.UnaryOp object at 0x7da18eb57bb0> begin[:]
return[constant[None]]
variable[parsed_uri] assign[=] call[name[_parse_uri], parameter[name[uri]]]
if compare[name[parsed_uri].scheme not_equal[!=] constant[file]] begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da18eb55f30> assign[=] call[name[P].splitext, parameter[name[parsed_uri].uri_path]]
if <ast.BoolOp object at 0x7da18eb54460> begin[:]
return[constant[None]]
variable[open_kwargs] assign[=] dictionary[[], []]
if compare[name[encoding] is_not constant[None]] begin[:]
call[name[open_kwargs]][constant[encoding]] assign[=] name[encoding]
variable[mode] assign[=] call[name[mode].replace, parameter[constant[b], constant[]]]
if <ast.BoolOp object at 0x7da18eb55780> begin[:]
call[name[open_kwargs]][constant[errors]] assign[=] name[errors]
if name[six].PY3 begin[:]
return[call[name[_builtin_open], parameter[name[parsed_uri].uri_path, name[mode]]]]
return[call[name[io].open, parameter[name[parsed_uri].uri_path, name[mode]]]]
|
keyword[def] identifier[_shortcut_open] (
identifier[uri] ,
identifier[mode] ,
identifier[ignore_ext] = keyword[False] ,
identifier[buffering] =- literal[int] ,
identifier[encoding] = keyword[None] ,
identifier[errors] = keyword[None] ,
):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[uri] , identifier[six] . identifier[string_types] ):
keyword[return] keyword[None]
identifier[parsed_uri] = identifier[_parse_uri] ( identifier[uri] )
keyword[if] identifier[parsed_uri] . identifier[scheme] != literal[string] :
keyword[return] keyword[None]
identifier[_] , identifier[extension] = identifier[P] . identifier[splitext] ( identifier[parsed_uri] . identifier[uri_path] )
keyword[if] identifier[extension] keyword[in] identifier[_COMPRESSOR_REGISTRY] keyword[and] keyword[not] identifier[ignore_ext] :
keyword[return] keyword[None]
identifier[open_kwargs] ={}
keyword[if] identifier[encoding] keyword[is] keyword[not] keyword[None] :
identifier[open_kwargs] [ literal[string] ]= identifier[encoding]
identifier[mode] = identifier[mode] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[errors] keyword[and] literal[string] keyword[not] keyword[in] identifier[mode] :
identifier[open_kwargs] [ literal[string] ]= identifier[errors]
keyword[if] identifier[six] . identifier[PY3] :
keyword[return] identifier[_builtin_open] ( identifier[parsed_uri] . identifier[uri_path] , identifier[mode] , identifier[buffering] = identifier[buffering] ,** identifier[open_kwargs] )
keyword[elif] keyword[not] identifier[open_kwargs] :
keyword[return] identifier[_builtin_open] ( identifier[parsed_uri] . identifier[uri_path] , identifier[mode] , identifier[buffering] = identifier[buffering] )
keyword[return] identifier[io] . identifier[open] ( identifier[parsed_uri] . identifier[uri_path] , identifier[mode] , identifier[buffering] = identifier[buffering] ,** identifier[open_kwargs] )
|
def _shortcut_open(uri, mode, ignore_ext=False, buffering=-1, encoding=None, errors=None):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, six.string_types):
return None # depends on [control=['if'], data=[]]
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != 'file':
return None # depends on [control=['if'], data=[]]
(_, extension) = P.splitext(parsed_uri.uri_path)
if extension in _COMPRESSOR_REGISTRY and (not ignore_ext):
return None # depends on [control=['if'], data=[]]
open_kwargs = {}
if encoding is not None:
open_kwargs['encoding'] = encoding
mode = mode.replace('b', '') # depends on [control=['if'], data=['encoding']]
#
# binary mode of the builtin/stdlib open function doesn't take an errors argument
#
if errors and 'b' not in mode:
open_kwargs['errors'] = errors # depends on [control=['if'], data=[]]
#
# Under Py3, the built-in open accepts kwargs, and it's OK to use that.
# Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it
# whenever possible (see issue #207). If we're under Py2 and have to use
# kwargs, then we have no option other to use io.open.
#
if six.PY3:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs) # depends on [control=['if'], data=[]]
elif not open_kwargs:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering) # depends on [control=['if'], data=[]]
return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
|
def connect(self, address):
    """
    Equivalent to socket.connect(), but additionally performs the client
    handshake once the connection is established.

    `address` is a (host, port) tuple of the server to connect to.
    """
    self.sock.connect(address)
    handshake = ClientHandshake(self)
    handshake.perform()
    # Record that the handshake request has gone out on this connection.
    self.handshake_sent = True
|
def function[connect, parameter[self, address]]:
constant[
Equivalent to socket.connect(), but sends an client handshake request
after connecting.
`address` is a (host, port) tuple of the server to connect to.
]
call[name[self].sock.connect, parameter[name[address]]]
call[call[name[ClientHandshake], parameter[name[self]]].perform, parameter[]]
name[self].handshake_sent assign[=] constant[True]
|
keyword[def] identifier[connect] ( identifier[self] , identifier[address] ):
literal[string]
identifier[self] . identifier[sock] . identifier[connect] ( identifier[address] )
identifier[ClientHandshake] ( identifier[self] ). identifier[perform] ()
identifier[self] . identifier[handshake_sent] = keyword[True]
|
def connect(self, address):
"""
Equivalent to socket.connect(), but sends an client handshake request
after connecting.
`address` is a (host, port) tuple of the server to connect to.
"""
self.sock.connect(address)
ClientHandshake(self).perform()
self.handshake_sent = True
|
def set_default_locators_and_formatters(self, axis):
    """Install the logicle scale's tick locators and formatter on *axis*.

    Major and minor tick positions come from ``_LogicleLocator`` (minor
    ticks at the sub-decade positions 2 through 9), and major tick labels
    use matplotlib's scientific-notation log formatter, labelling only
    whole powers of the base.

    Parameters
    ----------
    axis: matplotlib.axis
        Axis for which to set locators and formatters.
    """
    major_locator = _LogicleLocator(self._transform)
    minor_locator = _LogicleLocator(self._transform, subs=np.arange(2.0, 10.))
    major_formatter = matplotlib.ticker.LogFormatterSciNotation(
        labelOnlyBase=True)
    axis.set_major_locator(major_locator)
    axis.set_minor_locator(minor_locator)
    axis.set_major_formatter(major_formatter)
|
def function[set_default_locators_and_formatters, parameter[self, axis]]:
constant[
Set up the locators and formatters for the scale.
Parameters
----------
axis: matplotlib.axis
Axis for which to set locators and formatters.
]
call[name[axis].set_major_locator, parameter[call[name[_LogicleLocator], parameter[name[self]._transform]]]]
call[name[axis].set_minor_locator, parameter[call[name[_LogicleLocator], parameter[name[self]._transform]]]]
call[name[axis].set_major_formatter, parameter[call[name[matplotlib].ticker.LogFormatterSciNotation, parameter[]]]]
|
keyword[def] identifier[set_default_locators_and_formatters] ( identifier[self] , identifier[axis] ):
literal[string]
identifier[axis] . identifier[set_major_locator] ( identifier[_LogicleLocator] ( identifier[self] . identifier[_transform] ))
identifier[axis] . identifier[set_minor_locator] ( identifier[_LogicleLocator] ( identifier[self] . identifier[_transform] ,
identifier[subs] = identifier[np] . identifier[arange] ( literal[int] , literal[int] )))
identifier[axis] . identifier[set_major_formatter] ( identifier[matplotlib] . identifier[ticker] . identifier[LogFormatterSciNotation] (
identifier[labelOnlyBase] = keyword[True] ))
|
def set_default_locators_and_formatters(self, axis):
"""
Set up the locators and formatters for the scale.
Parameters
----------
axis: matplotlib.axis
Axis for which to set locators and formatters.
"""
axis.set_major_locator(_LogicleLocator(self._transform))
axis.set_minor_locator(_LogicleLocator(self._transform, subs=np.arange(2.0, 10.0)))
axis.set_major_formatter(matplotlib.ticker.LogFormatterSciNotation(labelOnlyBase=True))
|
def calculate(self, T, P, zs, ws, method):
    r'''Method to calculate thermal conductivity of a gas mixture at
    temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
    `ws` with a given method.

    This method has no exception handling; see `mixture_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate the property, [K]
    P : float
        Pressure at which to calculate the property, [Pa]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Name of the method to use

    Returns
    -------
    kg : float
        Thermal conductivity of gas mixture, [W/m/K]

    Raises
    ------
    ValueError
        If `method` is not one of the supported methods.
    '''
    if method == SIMPLE:
        # Mole-fraction-weighted average of the pure-component values.
        ks = [i(T, P) for i in self.ThermalConductivityGases]
        return mixing_simple(zs, ks)
    elif method == LINDSAY_BROMLEY:
        ks = [i(T, P) for i in self.ThermalConductivityGases]
        mus = [i(T, P) for i in self.ViscosityGases]
        return Lindsay_Bromley(T=T, ys=zs, ks=ks, mus=mus, Tbs=self.Tbs, MWs=self.MWs)
    # ValueError is a subclass of Exception, so callers catching the old
    # generic Exception keep working; the message now names the method.
    raise ValueError('Method not valid: %s' % method)
|
def function[calculate, parameter[self, T, P, zs, ws, method]]:
constant[Method to calculate thermal conductivity of a gas mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
kg : float
Thermal conductivity of gas mixture, [W/m/K]
]
if compare[name[method] equal[==] name[SIMPLE]] begin[:]
variable[ks] assign[=] <ast.ListComp object at 0x7da2046200d0>
return[call[name[mixing_simple], parameter[name[zs], name[ks]]]]
|
keyword[def] identifier[calculate] ( identifier[self] , identifier[T] , identifier[P] , identifier[zs] , identifier[ws] , identifier[method] ):
literal[string]
keyword[if] identifier[method] == identifier[SIMPLE] :
identifier[ks] =[ identifier[i] ( identifier[T] , identifier[P] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[ThermalConductivityGases] ]
keyword[return] identifier[mixing_simple] ( identifier[zs] , identifier[ks] )
keyword[elif] identifier[method] == identifier[LINDSAY_BROMLEY] :
identifier[ks] =[ identifier[i] ( identifier[T] , identifier[P] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[ThermalConductivityGases] ]
identifier[mus] =[ identifier[i] ( identifier[T] , identifier[P] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[ViscosityGases] ]
keyword[return] identifier[Lindsay_Bromley] ( identifier[T] = identifier[T] , identifier[ys] = identifier[zs] , identifier[ks] = identifier[ks] , identifier[mus] = identifier[mus] , identifier[Tbs] = identifier[self] . identifier[Tbs] , identifier[MWs] = identifier[self] . identifier[MWs] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
|
def calculate(self, T, P, zs, ws, method):
"""Method to calculate thermal conductivity of a gas mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
kg : float
Thermal conductivity of gas mixture, [W/m/K]
"""
if method == SIMPLE:
ks = [i(T, P) for i in self.ThermalConductivityGases]
return mixing_simple(zs, ks) # depends on [control=['if'], data=[]]
elif method == LINDSAY_BROMLEY:
ks = [i(T, P) for i in self.ThermalConductivityGases]
mus = [i(T, P) for i in self.ViscosityGases]
return Lindsay_Bromley(T=T, ys=zs, ks=ks, mus=mus, Tbs=self.Tbs, MWs=self.MWs) # depends on [control=['if'], data=[]]
else:
raise Exception('Method not valid')
|
def resolve(self, notes=None):
    '''Save all changes and resolve this issue.

    Sets the issue's status to the Redmine "resolved" status id,
    optionally attaching *notes* to the status update.
    '''
    self.set_status(self._redmine.ISSUE_STATUS_ID_RESOLVED, notes=notes)
|
def function[resolve, parameter[self, notes]]:
constant[Save all changes and resolve this issue]
call[name[self].set_status, parameter[name[self]._redmine.ISSUE_STATUS_ID_RESOLVED]]
|
keyword[def] identifier[resolve] ( identifier[self] , identifier[notes] = keyword[None] ):
literal[string]
identifier[self] . identifier[set_status] ( identifier[self] . identifier[_redmine] . identifier[ISSUE_STATUS_ID_RESOLVED] , identifier[notes] = identifier[notes] )
|
def resolve(self, notes=None):
"""Save all changes and resolve this issue"""
self.set_status(self._redmine.ISSUE_STATUS_ID_RESOLVED, notes=notes)
|
def device(self, name):
    """ Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.

        Parameters:
            name (str): Name to match against (case-insensitive).

        Raises:
            NotFound: When no registered device matches `name`.
    """
    # Hoist the loop-invariant lowercasing of the wanted name.
    wanted = name.lower()
    for device in self.devices():
        if device.name.lower() == wanted:
            return device
    raise NotFound('Unable to find device %s' % name)
|
def function[device, parameter[self, name]]:
constant[ Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.
Parameters:
name (str): Name to match against.
]
for taget[name[device]] in starred[call[name[self].devices, parameter[]]] begin[:]
if compare[call[name[device].name.lower, parameter[]] equal[==] call[name[name].lower, parameter[]]] begin[:]
return[name[device]]
<ast.Raise object at 0x7da2044c1480>
|
keyword[def] identifier[device] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[device] keyword[in] identifier[self] . identifier[devices] ():
keyword[if] identifier[device] . identifier[name] . identifier[lower] ()== identifier[name] . identifier[lower] ():
keyword[return] identifier[device]
keyword[raise] identifier[NotFound] ( literal[string] % identifier[name] )
|
def device(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.
Parameters:
name (str): Name to match against.
"""
for device in self.devices():
if device.name.lower() == name.lower():
return device # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['device']]
raise NotFound('Unable to find device %s' % name)
|
def add(self, song):
    """Append *song* to the end of the playlist; no-op if already present."""
    if song not in self._songs:
        self._songs.append(song)
        logger.debug('Add %s to player playlist', song)
|
def function[add, parameter[self, song]]:
constant[往播放列表末尾添加一首歌曲]
if compare[name[song] in name[self]._songs] begin[:]
return[None]
call[name[self]._songs.append, parameter[name[song]]]
call[name[logger].debug, parameter[constant[Add %s to player playlist], name[song]]]
|
keyword[def] identifier[add] ( identifier[self] , identifier[song] ):
literal[string]
keyword[if] identifier[song] keyword[in] identifier[self] . identifier[_songs] :
keyword[return]
identifier[self] . identifier[_songs] . identifier[append] ( identifier[song] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[song] )
|
def add(self, song):
"""往播放列表末尾添加一首歌曲"""
if song in self._songs:
return # depends on [control=['if'], data=[]]
self._songs.append(song)
logger.debug('Add %s to player playlist', song)
|
def tag_begin(self, tag_name, attributes=None):
    """Marks the beginning of the ``tag_name`` structure.

    Call :meth:`tag_end` with the same ``tag_name`` to mark the end of
    the structure.

    The attributes string is of the form "key1=value2 key2=value2 ...".
    Values may be boolean (true/false or 1/0), integer, float, string,
    or an array.  String values are enclosed in single quotes (');
    single quotes and backslashes inside the string should be escaped
    with a backslash.  Boolean values may be set to true by only
    specifying the key, e.g. "key" is equivalent to "key=true".  Arrays
    are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]".

    If no attributes are required, ``attributes`` can be omitted, an
    empty string or None.

    See cairo's Tags and Links Description for the list of tags and
    attributes.  Invalid nesting of tags or invalid attributes will
    cause the context to shutdown with a status of
    ``CAIRO_STATUS_TAG_ERROR``.

    See :meth:`tag_end`.

    :param tag_name: tag name
    :param attributes: tag attributes

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*
    """
    # The C API expects a string; treat a missing value as "no attributes".
    attrs = '' if attributes is None else attributes
    cairo.cairo_tag_begin(
        self._pointer, _encode_string(tag_name), _encode_string(attrs))
    self._check_status()
|
def function[tag_begin, parameter[self, tag_name, attributes]]:
constant[Marks the beginning of the ``tag_name`` structure.
Call :meth:`tag_end` with the same ``tag_name`` to mark the end of the
structure.
The attributes string is of the form "key1=value2 key2=value2 ...".
Values may be boolean (true/false or 1/0), integer, float, string, or
an array.
String values are enclosed in single quotes ('). Single quotes and
backslashes inside the string should be escaped with a backslash.
Boolean values may be set to true by only specifying the key. eg the
attribute string "key" is the equivalent to "key=true".
Arrays are enclosed in '[]'. eg "rect=[1.2 4.3 2.0 3.0]".
If no attributes are required, ``attributes`` can be omitted, an empty
string or None.
See cairo's Tags and Links Description for the list of tags and
attributes.
Invalid nesting of tags or invalid attributes will cause the context to
shutdown with a status of ``CAIRO_STATUS_TAG_ERROR``.
See :meth:`tag_end`.
:param tag_name: tag name
:param attributes: tag attributes
*New in cairo 1.16.*
*New in cairocffi 0.9.*
]
if compare[name[attributes] is constant[None]] begin[:]
variable[attributes] assign[=] constant[]
call[name[cairo].cairo_tag_begin, parameter[name[self]._pointer, call[name[_encode_string], parameter[name[tag_name]]], call[name[_encode_string], parameter[name[attributes]]]]]
call[name[self]._check_status, parameter[]]
|
keyword[def] identifier[tag_begin] ( identifier[self] , identifier[tag_name] , identifier[attributes] = keyword[None] ):
literal[string]
keyword[if] identifier[attributes] keyword[is] keyword[None] :
identifier[attributes] = literal[string]
identifier[cairo] . identifier[cairo_tag_begin] (
identifier[self] . identifier[_pointer] , identifier[_encode_string] ( identifier[tag_name] ),
identifier[_encode_string] ( identifier[attributes] ))
identifier[self] . identifier[_check_status] ()
|
def tag_begin(self, tag_name, attributes=None):
    """Marks the beginning of the ``tag_name`` structure.
    Call :meth:`tag_end` with the same ``tag_name`` to mark the end of the
    structure.
    The attributes string is of the form "key1=value2 key2=value2 ...".
    Values may be boolean (true/false or 1/0), integer, float, string, or
    an array.
    String values are enclosed in single quotes ('). Single quotes and
    backslashes inside the string should be escaped with a backslash.
    Boolean values may be set to true by only specifying the key. eg the
    attribute string "key" is the equivalent to "key=true".
    Arrays are enclosed in '[]'. eg "rect=[1.2 4.3 2.0 3.0]".
    If no attributes are required, ``attributes`` can be omitted, an empty
    string or None.
    See cairo's Tags and Links Description for the list of tags and
    attributes.
    Invalid nesting of tags or invalid attributes will cause the context to
    shutdown with a status of ``CAIRO_STATUS_TAG_ERROR``.
    See :meth:`tag_end`.
    :param tag_name: tag name
    :param attributes: tag attributes
    *New in cairo 1.16.*
    *New in cairocffi 0.9.*
    """
    # The C API expects a string; normalize the None shorthand to "".
    if attributes is None:
        attributes = '' # depends on [control=['if'], data=['attributes']]
    cairo.cairo_tag_begin(
        self._pointer, _encode_string(tag_name),
        _encode_string(attributes))
    # Surface any cairo error status raised by the call (per the docstring,
    # invalid nesting/attributes produce CAIRO_STATUS_TAG_ERROR).
    self._check_status()
|
def proj_path(*path_parts):
# type: (str) -> str
""" Return absolute path to the repo dir (root project directory).
Args:
path (str):
The path relative to the project root (pelconf.yaml).
Returns:
str: The given path converted to an absolute path.
"""
path_parts = path_parts or ['.']
# If path represented by path_parts is absolute, do not modify it.
if not os.path.isabs(path_parts[0]):
proj_path = _find_proj_root()
if proj_path is not None:
path_parts = [proj_path] + list(path_parts)
return os.path.normpath(os.path.join(*path_parts))
|
def function[proj_path, parameter[]]:
constant[ Return absolute path to the repo dir (root project directory).
Args:
path (str):
The path relative to the project root (pelconf.yaml).
Returns:
str: The given path converted to an absolute path.
]
variable[path_parts] assign[=] <ast.BoolOp object at 0x7da20c76f9d0>
if <ast.UnaryOp object at 0x7da20c76f2e0> begin[:]
variable[proj_path] assign[=] call[name[_find_proj_root], parameter[]]
if compare[name[proj_path] is_not constant[None]] begin[:]
variable[path_parts] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b10e44c0>]] + call[name[list], parameter[name[path_parts]]]]
return[call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[<ast.Starred object at 0x7da1b10e5840>]]]]]
|
keyword[def] identifier[proj_path] (* identifier[path_parts] ):
literal[string]
identifier[path_parts] = identifier[path_parts] keyword[or] [ literal[string] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[path_parts] [ literal[int] ]):
identifier[proj_path] = identifier[_find_proj_root] ()
keyword[if] identifier[proj_path] keyword[is] keyword[not] keyword[None] :
identifier[path_parts] =[ identifier[proj_path] ]+ identifier[list] ( identifier[path_parts] )
keyword[return] identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] (* identifier[path_parts] ))
|
def proj_path(*path_parts):
    # type: (str) -> str
    """Return the absolute path to a file inside the project root.

    Args:
        *path_parts:
            Path components relative to the project root (pelconf.yaml).
            If the first component is already absolute, no project-root
            lookup is performed.

    Returns:
        str: The joined, normalized path.
    """
    parts = list(path_parts) if path_parts else ['.']
    # Absolute paths bypass project-root resolution entirely.
    if not os.path.isabs(parts[0]):
        root = _find_proj_root()
        if root is not None:
            parts.insert(0, root)
    return os.path.normpath(os.path.join(*parts))
|
def getIndxOps(self, valu, cmpr='='):
'''
Return a list of index operation tuples to lift values in a table.
Valid index operations include:
('eq', <indx>)
('pref', <indx>)
('range', (<minindx>, <maxindx>))
'''
func = self.indxcmpr.get(cmpr)
if func is None:
raise s_exc.NoSuchCmpr(name=self.name, cmpr=cmpr)
return func(valu)
|
def function[getIndxOps, parameter[self, valu, cmpr]]:
constant[
Return a list of index operation tuples to lift values in a table.
Valid index operations include:
('eq', <indx>)
('pref', <indx>)
('range', (<minindx>, <maxindx>))
]
variable[func] assign[=] call[name[self].indxcmpr.get, parameter[name[cmpr]]]
if compare[name[func] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76d540>
return[call[name[func], parameter[name[valu]]]]
|
keyword[def] identifier[getIndxOps] ( identifier[self] , identifier[valu] , identifier[cmpr] = literal[string] ):
literal[string]
identifier[func] = identifier[self] . identifier[indxcmpr] . identifier[get] ( identifier[cmpr] )
keyword[if] identifier[func] keyword[is] keyword[None] :
keyword[raise] identifier[s_exc] . identifier[NoSuchCmpr] ( identifier[name] = identifier[self] . identifier[name] , identifier[cmpr] = identifier[cmpr] )
keyword[return] identifier[func] ( identifier[valu] )
|
def getIndxOps(self, valu, cmpr='='):
    """
    Return a list of index operation tuples to lift values in a table.

    Valid index operations include:
        ('eq', <indx>)
        ('pref', <indx>)
        ('range', (<minindx>, <maxindx>))
    """
    handler = self.indxcmpr.get(cmpr)
    if handler is not None:
        return handler(valu)
    # Unknown comparison operator for this type.
    raise s_exc.NoSuchCmpr(name=self.name, cmpr=cmpr)
|
def preview(df,preview_rows = 20):#,preview_max_cols = 0):
""" Returns a preview of a dataframe, which contains both header
rows and tail rows.
"""
if preview_rows < 4:
preview_rows = 4
preview_rows = min(preview_rows,df.shape[0])
outer = math.floor(preview_rows / 4)
return pd.concat([df.head(outer),
df[outer:-outer].sample(preview_rows-2*outer),
df.tail(outer)])
|
def function[preview, parameter[df, preview_rows]]:
constant[ Returns a preview of a dataframe, which contains both header
rows and tail rows.
]
if compare[name[preview_rows] less[<] constant[4]] begin[:]
variable[preview_rows] assign[=] constant[4]
variable[preview_rows] assign[=] call[name[min], parameter[name[preview_rows], call[name[df].shape][constant[0]]]]
variable[outer] assign[=] call[name[math].floor, parameter[binary_operation[name[preview_rows] / constant[4]]]]
return[call[name[pd].concat, parameter[list[[<ast.Call object at 0x7da20c9930a0>, <ast.Call object at 0x7da20c990430>, <ast.Call object at 0x7da18f09e5c0>]]]]]
|
keyword[def] identifier[preview] ( identifier[df] , identifier[preview_rows] = literal[int] ):
literal[string]
keyword[if] identifier[preview_rows] < literal[int] :
identifier[preview_rows] = literal[int]
identifier[preview_rows] = identifier[min] ( identifier[preview_rows] , identifier[df] . identifier[shape] [ literal[int] ])
identifier[outer] = identifier[math] . identifier[floor] ( identifier[preview_rows] / literal[int] )
keyword[return] identifier[pd] . identifier[concat] ([ identifier[df] . identifier[head] ( identifier[outer] ),
identifier[df] [ identifier[outer] :- identifier[outer] ]. identifier[sample] ( identifier[preview_rows] - literal[int] * identifier[outer] ),
identifier[df] . identifier[tail] ( identifier[outer] )])
|
def preview(df, preview_rows=20):
    """Return a preview of a dataframe, containing the first and last rows
    plus a random sample of the rows in between.
    """
    # Clamp: at least 4 rows requested, at most the whole frame.
    n_rows = min(max(preview_rows, 4), df.shape[0])
    edge = n_rows // 4
    middle = df[edge:-edge].sample(n_rows - 2 * edge)
    return pd.concat([df.head(edge), middle, df.tail(edge)])
|
def set_marksize(self):
"""Set size/radius of marking."""
try:
sz = float(self.w.mark_size.get_text())
except ValueError:
self.logger.error('Cannot set mark size')
self.w.mark_size.set_text(str(self.marksize))
else:
self.marksize = sz
|
def function[set_marksize, parameter[self]]:
constant[Set size/radius of marking.]
<ast.Try object at 0x7da18dc05150>
|
keyword[def] identifier[set_marksize] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[sz] = identifier[float] ( identifier[self] . identifier[w] . identifier[mark_size] . identifier[get_text] ())
keyword[except] identifier[ValueError] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] )
identifier[self] . identifier[w] . identifier[mark_size] . identifier[set_text] ( identifier[str] ( identifier[self] . identifier[marksize] ))
keyword[else] :
identifier[self] . identifier[marksize] = identifier[sz]
|
def set_marksize(self):
    """Set size/radius of marking from the mark-size text widget."""
    text = self.w.mark_size.get_text()
    try:
        self.marksize = float(text)
    except ValueError:
        # Unparseable input: log and restore the current value in the widget.
        self.logger.error('Cannot set mark size')
        self.w.mark_size.set_text(str(self.marksize))
|
def qualified_note_rate(pianoroll, threshold=2):
"""Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll."""
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets)
|
def function[qualified_note_rate, parameter[pianoroll, threshold]]:
constant[Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll.]
call[name[_validate_pianoroll], parameter[name[pianoroll]]]
if call[name[np].issubdtype, parameter[name[pianoroll].dtype, name[np].bool_]] begin[:]
variable[pianoroll] assign[=] call[name[pianoroll].astype, parameter[name[np].uint8]]
variable[padded] assign[=] call[name[np].pad, parameter[name[pianoroll], tuple[[<ast.Tuple object at 0x7da20c6e4580>, <ast.Tuple object at 0x7da20c6e7a60>]], constant[constant]]]
variable[diff] assign[=] call[call[name[np].diff, parameter[name[padded]]].reshape, parameter[<ast.UnaryOp object at 0x7da20c6e7e80>]]
variable[onsets] assign[=] call[call[compare[name[diff] greater[>] constant[0]].nonzero, parameter[]]][constant[0]]
variable[offsets] assign[=] call[call[compare[name[diff] less[<] constant[0]].nonzero, parameter[]]][constant[0]]
variable[n_qualified_notes] assign[=] call[name[np].count_nonzero, parameter[compare[binary_operation[name[offsets] - name[onsets]] greater_or_equal[>=] name[threshold]]]]
return[binary_operation[name[n_qualified_notes] / call[name[len], parameter[name[onsets]]]]]
|
keyword[def] identifier[qualified_note_rate] ( identifier[pianoroll] , identifier[threshold] = literal[int] ):
literal[string]
identifier[_validate_pianoroll] ( identifier[pianoroll] )
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[pianoroll] . identifier[dtype] , identifier[np] . identifier[bool_] ):
identifier[pianoroll] = identifier[pianoroll] . identifier[astype] ( identifier[np] . identifier[uint8] )
identifier[padded] = identifier[np] . identifier[pad] ( identifier[pianoroll] ,(( literal[int] , literal[int] ),( literal[int] , literal[int] )), literal[string] )
identifier[diff] = identifier[np] . identifier[diff] ( identifier[padded] , identifier[axis] = literal[int] ). identifier[reshape] (- literal[int] )
identifier[onsets] =( identifier[diff] > literal[int] ). identifier[nonzero] ()[ literal[int] ]
identifier[offsets] =( identifier[diff] < literal[int] ). identifier[nonzero] ()[ literal[int] ]
identifier[n_qualified_notes] = identifier[np] . identifier[count_nonzero] ( identifier[offsets] - identifier[onsets] >= identifier[threshold] )
keyword[return] identifier[n_qualified_notes] / identifier[len] ( identifier[onsets] )
|
def qualified_note_rate(pianoroll, threshold=2):
    """Return the ratio of the number of the qualified notes (notes longer than
    `threshold` (in time step)) to the total number of notes in a pianoroll."""
    _validate_pianoroll(pianoroll)
    # Boolean rolls cannot be diffed meaningfully; widen them to uint8.
    roll = pianoroll.astype(np.uint8) if np.issubdtype(pianoroll.dtype, np.bool_) else pianoroll
    # Pad one silent step on each end so every note gets a matching onset/offset.
    padded = np.pad(roll, ((1, 1), (0, 0)), 'constant')
    transitions = np.diff(padded, axis=0).reshape(-1)
    onsets = np.nonzero(transitions > 0)[0]
    offsets = np.nonzero(transitions < 0)[0]
    durations = offsets - onsets
    return np.count_nonzero(durations >= threshold) / len(onsets)
|
def get_next_property(self):
"""Gets the next ``Property`` in this list.
:return: the next ``Property`` in this list. The ``has_next()`` method should be used to test that a next ``Property`` is available before calling this method.
:rtype: ``osid.Property``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
try:
next_object = self.next()
except StopIteration:
raise IllegalState('no more elements available in this list')
except Exception: # Need to specify exceptions here!
raise OperationFailed()
else:
return next_object
|
def function[get_next_property, parameter[self]]:
constant[Gets the next ``Property`` in this list.
:return: the next ``Property`` in this list. The ``has_next()`` method should be used to test that a next ``Property`` is available before calling this method.
:rtype: ``osid.Property``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
]
<ast.Try object at 0x7da20c797730>
|
keyword[def] identifier[get_next_property] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[next_object] = identifier[self] . identifier[next] ()
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[IllegalState] ( literal[string] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[OperationFailed] ()
keyword[else] :
keyword[return] identifier[next_object]
|
def get_next_property(self):
    """Gets the next ``Property`` in this list.
    :return: the next ``Property`` in this list. The ``has_next()`` method should be used to test that a next ``Property`` is available before calling this method.
    :rtype: ``osid.Property``
    :raise: ``IllegalState`` -- no more elements available in this list
    :raise: ``OperationFailed`` -- unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    try:
        upcoming = self.next()
    except StopIteration:
        # Iterator exhausted: translate to the OSID contract's exception.
        raise IllegalState('no more elements available in this list')
    except Exception:  # Need to specify exceptions here!
        raise OperationFailed()
    return upcoming
|
def _send_accum_trace(self, device_uuid):
"""Send whatever accumulated tracing data we have for the device."""
if device_uuid not in self._connections:
self._logger.debug("Dropping trace data for device without an active connection, uuid=0x%X", device_uuid)
return
conn_data = self._connections[device_uuid]
trace = conn_data['trace_accum']
if len(trace) > 0:
slug = self._build_device_slug(device_uuid)
tracing_topic = self.topics.prefix + 'devices/{}/data/tracing'.format(slug)
data = {'type': 'notification', 'operation': 'trace'}
data['trace'] = binascii.hexlify(trace)
data['trace_origin'] = device_uuid
self._logger.debug('Publishing trace: (topic=%s)', tracing_topic)
self.client.publish(tracing_topic, data)
conn_data['trace_scheduled'] = False
conn_data['last_trace'] = monotonic()
conn_data['trace_accum'] = bytes()
|
def function[_send_accum_trace, parameter[self, device_uuid]]:
constant[Send whatever accumulated tracing data we have for the device.]
if compare[name[device_uuid] <ast.NotIn object at 0x7da2590d7190> name[self]._connections] begin[:]
call[name[self]._logger.debug, parameter[constant[Dropping trace data for device without an active connection, uuid=0x%X], name[device_uuid]]]
return[None]
variable[conn_data] assign[=] call[name[self]._connections][name[device_uuid]]
variable[trace] assign[=] call[name[conn_data]][constant[trace_accum]]
if compare[call[name[len], parameter[name[trace]]] greater[>] constant[0]] begin[:]
variable[slug] assign[=] call[name[self]._build_device_slug, parameter[name[device_uuid]]]
variable[tracing_topic] assign[=] binary_operation[name[self].topics.prefix + call[constant[devices/{}/data/tracing].format, parameter[name[slug]]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b1720>, <ast.Constant object at 0x7da20e9b3070>], [<ast.Constant object at 0x7da20e9b3220>, <ast.Constant object at 0x7da20e9b20b0>]]
call[name[data]][constant[trace]] assign[=] call[name[binascii].hexlify, parameter[name[trace]]]
call[name[data]][constant[trace_origin]] assign[=] name[device_uuid]
call[name[self]._logger.debug, parameter[constant[Publishing trace: (topic=%s)], name[tracing_topic]]]
call[name[self].client.publish, parameter[name[tracing_topic], name[data]]]
call[name[conn_data]][constant[trace_scheduled]] assign[=] constant[False]
call[name[conn_data]][constant[last_trace]] assign[=] call[name[monotonic], parameter[]]
call[name[conn_data]][constant[trace_accum]] assign[=] call[name[bytes], parameter[]]
|
keyword[def] identifier[_send_accum_trace] ( identifier[self] , identifier[device_uuid] ):
literal[string]
keyword[if] identifier[device_uuid] keyword[not] keyword[in] identifier[self] . identifier[_connections] :
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[device_uuid] )
keyword[return]
identifier[conn_data] = identifier[self] . identifier[_connections] [ identifier[device_uuid] ]
identifier[trace] = identifier[conn_data] [ literal[string] ]
keyword[if] identifier[len] ( identifier[trace] )> literal[int] :
identifier[slug] = identifier[self] . identifier[_build_device_slug] ( identifier[device_uuid] )
identifier[tracing_topic] = identifier[self] . identifier[topics] . identifier[prefix] + literal[string] . identifier[format] ( identifier[slug] )
identifier[data] ={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[data] [ literal[string] ]= identifier[binascii] . identifier[hexlify] ( identifier[trace] )
identifier[data] [ literal[string] ]= identifier[device_uuid]
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[tracing_topic] )
identifier[self] . identifier[client] . identifier[publish] ( identifier[tracing_topic] , identifier[data] )
identifier[conn_data] [ literal[string] ]= keyword[False]
identifier[conn_data] [ literal[string] ]= identifier[monotonic] ()
identifier[conn_data] [ literal[string] ]= identifier[bytes] ()
|
def _send_accum_trace(self, device_uuid):
    """Send whatever accumulated tracing data we have for the device.

    Publishes the hex-encoded accumulated trace bytes to the device's
    MQTT tracing topic, then resets the per-connection trace state
    (scheduled flag, last-trace timestamp, accumulator buffer).
    """
    # No active connection: nothing to publish, drop the data.
    if device_uuid not in self._connections:
        self._logger.debug('Dropping trace data for device without an active connection, uuid=0x%X', device_uuid)
        return # depends on [control=['if'], data=['device_uuid']]
    conn_data = self._connections[device_uuid]
    trace = conn_data['trace_accum']
    # Only publish when there is actually accumulated data.
    if len(trace) > 0:
        slug = self._build_device_slug(device_uuid)
        tracing_topic = self.topics.prefix + 'devices/{}/data/tracing'.format(slug)
        data = {'type': 'notification', 'operation': 'trace'}
        # Hex-encode so the payload is safe for the transport.
        data['trace'] = binascii.hexlify(trace)
        data['trace_origin'] = device_uuid
        self._logger.debug('Publishing trace: (topic=%s)', tracing_topic)
        self.client.publish(tracing_topic, data) # depends on [control=['if'], data=[]]
    # Reset accumulation state even when nothing was sent.
    conn_data['trace_scheduled'] = False
    conn_data['last_trace'] = monotonic()
    conn_data['trace_accum'] = bytes()
|
def find_module_defining_flag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: str, name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
registered_flag = self._flags().get(flagname)
if registered_flag is None:
return default
for module, flags in six.iteritems(self.flags_by_module_dict()):
for flag in flags:
# It must compare the flag with the one in _flags. This is because a
# flag might be overridden only for its long name (or short name),
# and only its short name (or long name) is considered registered.
if (flag.name == registered_flag.name and
flag.short_name == registered_flag.short_name):
return module
return default
|
def function[find_module_defining_flag, parameter[self, flagname, default]]:
constant[Return the name of the module defining this flag, or default.
Args:
flagname: str, name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
]
variable[registered_flag] assign[=] call[call[name[self]._flags, parameter[]].get, parameter[name[flagname]]]
if compare[name[registered_flag] is constant[None]] begin[:]
return[name[default]]
for taget[tuple[[<ast.Name object at 0x7da1b18a3fa0>, <ast.Name object at 0x7da1b18a2380>]]] in starred[call[name[six].iteritems, parameter[call[name[self].flags_by_module_dict, parameter[]]]]] begin[:]
for taget[name[flag]] in starred[name[flags]] begin[:]
if <ast.BoolOp object at 0x7da1b18a21d0> begin[:]
return[name[module]]
return[name[default]]
|
keyword[def] identifier[find_module_defining_flag] ( identifier[self] , identifier[flagname] , identifier[default] = keyword[None] ):
literal[string]
identifier[registered_flag] = identifier[self] . identifier[_flags] (). identifier[get] ( identifier[flagname] )
keyword[if] identifier[registered_flag] keyword[is] keyword[None] :
keyword[return] identifier[default]
keyword[for] identifier[module] , identifier[flags] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[flags_by_module_dict] ()):
keyword[for] identifier[flag] keyword[in] identifier[flags] :
keyword[if] ( identifier[flag] . identifier[name] == identifier[registered_flag] . identifier[name] keyword[and]
identifier[flag] . identifier[short_name] == identifier[registered_flag] . identifier[short_name] ):
keyword[return] identifier[module]
keyword[return] identifier[default]
|
def find_module_defining_flag(self, flagname, default=None):
    """Return the name of the module defining this flag, or default.

    Args:
      flagname: str, name of the flag to lookup.
      default: Value to return if flagname is not defined. Defaults
          to None.

    Returns:
      The name of the module which registered the flag with this name.
      If no such module exists (i.e. no flag with this name exists),
      we return default.
    """
    registered_flag = self._flags().get(flagname)
    if registered_flag is None:
        return default
    # NOTE: six.iteritems replaced with the built-in dict.items() --
    # identical behavior on Python 3, drops the third-party dependency.
    for module, flags in self.flags_by_module_dict().items():
        for flag in flags:
            # It must compare the flag with the one in _flags. This is because a
            # flag might be overridden only for its long name (or short name),
            # and only its short name (or long name) is considered registered.
            if (flag.name == registered_flag.name and
                    flag.short_name == registered_flag.short_name):
                return module
    return default
|
def float_to_decimal(f):
""" Convert a float to a 38-precision Decimal """
n, d = f.as_integer_ratio()
numerator, denominator = Decimal(n), Decimal(d)
return DECIMAL_CONTEXT.divide(numerator, denominator)
|
def function[float_to_decimal, parameter[f]]:
constant[ Convert a float to a 38-precision Decimal ]
<ast.Tuple object at 0x7da1b2407820> assign[=] call[name[f].as_integer_ratio, parameter[]]
<ast.Tuple object at 0x7da1b2406650> assign[=] tuple[[<ast.Call object at 0x7da1b2405000>, <ast.Call object at 0x7da1b24054b0>]]
return[call[name[DECIMAL_CONTEXT].divide, parameter[name[numerator], name[denominator]]]]
|
keyword[def] identifier[float_to_decimal] ( identifier[f] ):
literal[string]
identifier[n] , identifier[d] = identifier[f] . identifier[as_integer_ratio] ()
identifier[numerator] , identifier[denominator] = identifier[Decimal] ( identifier[n] ), identifier[Decimal] ( identifier[d] )
keyword[return] identifier[DECIMAL_CONTEXT] . identifier[divide] ( identifier[numerator] , identifier[denominator] )
|
def float_to_decimal(f):
    """Convert a float to a 38-precision Decimal."""
    # Exact rational representation of the float, then divide under the
    # module's fixed-precision context.
    numerator, denominator = map(Decimal, f.as_integer_ratio())
    return DECIMAL_CONTEXT.divide(numerator, denominator)
|
def write_xmlbif(self, filename):
"""
Write the xml data into the file.
Parameters
----------
filename: Name of the file.
Examples
-------
>>> writer = XMLBIFWriter(model)
>>> writer.write_xmlbif(test_file)
"""
with open(filename, 'w') as fout:
fout.write(self.__str__())
|
def function[write_xmlbif, parameter[self, filename]]:
constant[
Write the xml data into the file.
Parameters
----------
filename: Name of the file.
Examples
-------
>>> writer = XMLBIFWriter(model)
>>> writer.write_xmlbif(test_file)
]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[fout].write, parameter[call[name[self].__str__, parameter[]]]]
|
keyword[def] identifier[write_xmlbif] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fout] :
identifier[fout] . identifier[write] ( identifier[self] . identifier[__str__] ())
|
def write_xmlbif(self, filename):
    """
    Write the xml data into the file.

    Parameters
    ----------
    filename: Name of the file.

    Examples
    -------
    >>> writer = XMLBIFWriter(model)
    >>> writer.write_xmlbif(test_file)
    """
    with open(filename, 'w') as outfile:
        # The writer's string form is the complete XMLBIF document.
        outfile.write(self.__str__())
|
def define_bucket_batch_sizes(buckets: List[Tuple[int, int]],
                              batch_size: int,
                              batch_by_words: bool,
                              batch_num_devices: int,
                              data_target_average_len: List[Optional[float]]) -> List[BucketBatchSize]:
    """
    Computes bucket-specific batch sizes (sentences, average_words).

    If sentence-based batching: number of sentences is the same for each batch, determines the
    number of words. Hence all batch sizes for each bucket are equal.

    If word-based batching: number of sentences for each batch is set to the multiple of number
    of devices that produces the number of words closest to the target batch size. Average
    target sentence length (non-padding symbols) is used for word number calculations.

    :param buckets: Bucket list.
    :param batch_size: Batch size.
    :param batch_by_words: Batch by words.
    :param batch_num_devices: Number of devices.
    :param data_target_average_len: Optional average target length for each bucket.
    :return: List of per-bucket batch sizes (bucket, sentences per batch, average words per batch).
    """
    check_condition(len(data_target_average_len) == len(buckets),
                    "Must provide None or average target length for each bucket")
    data_target_average_len = list(data_target_average_len)
    bucket_batch_sizes = []  # type: List[BucketBatchSize]
    largest_total_num_words = 0
    for buck_idx, bucket in enumerate(buckets):
        # Target/label length with padding
        padded_seq_len = bucket[1]
        # Average target/label length excluding padding.
        # Missing averages fall back to the padded length (upper bound).
        if data_target_average_len[buck_idx] is None:
            data_target_average_len[buck_idx] = padded_seq_len
        average_seq_len = data_target_average_len[buck_idx]
        # Word-based: num words determines num sentences
        # Sentence-based: num sentences determines num words
        if batch_by_words:
            check_condition(padded_seq_len <= batch_size, "Word batch size must cover sequence lengths for all"
                                                          " buckets: (%d > %d)" % (padded_seq_len, batch_size))
            # Multiple of number of devices (int) closest to target number of words, assuming each sentence is of
            # average length
            batch_size_seq = batch_num_devices * max(1, round((batch_size / average_seq_len) / batch_num_devices))
            batch_size_word = batch_size_seq * average_seq_len
        else:
            batch_size_seq = batch_size
            batch_size_word = batch_size_seq * average_seq_len
        bucket_batch_sizes.append(BucketBatchSize(bucket, batch_size_seq, batch_size_word))
        # Track largest number of source or target word samples in a batch
        largest_total_num_words = max(largest_total_num_words, batch_size_seq * max(*bucket))
    # Final step: guarantee that largest bucket by sequence length also has a batch size so that it covers any
    # (batch_size, len_source) and (batch_size, len_target) matrix from the data iterator to allow for memory sharing.
    # When batching by sentences, this will already be the case.
    if batch_by_words:
        padded_seq_len = max(*buckets[-1])
        average_seq_len = data_target_average_len[-1]
        # Grow the last bucket's batch size in device-sized increments until
        # its word footprint matches the largest batch seen above.
        while bucket_batch_sizes[-1].batch_size * padded_seq_len < largest_total_num_words:
            bucket_batch_sizes[-1] = BucketBatchSize(
                bucket_batch_sizes[-1].bucket,
                bucket_batch_sizes[-1].batch_size + batch_num_devices,
                bucket_batch_sizes[-1].average_words_per_batch + batch_num_devices * average_seq_len)
    return bucket_batch_sizes
|
def function[define_bucket_batch_sizes, parameter[buckets, batch_size, batch_by_words, batch_num_devices, data_target_average_len]]:
constant[
Computes bucket-specific batch sizes (sentences, average_words).
If sentence-based batching: number of sentences is the same for each batch, determines the
number of words. Hence all batch sizes for each bucket are equal.
If word-based batching: number of sentences for each batch is set to the multiple of number
of devices that produces the number of words closest to the target batch size. Average
target sentence length (non-padding symbols) is used for word number calculations.
:param buckets: Bucket list.
:param batch_size: Batch size.
:param batch_by_words: Batch by words.
:param batch_num_devices: Number of devices.
:param data_target_average_len: Optional average target length for each bucket.
]
call[name[check_condition], parameter[compare[call[name[len], parameter[name[data_target_average_len]]] equal[==] call[name[len], parameter[name[buckets]]]], constant[Must provide None or average target length for each bucket]]]
variable[data_target_average_len] assign[=] call[name[list], parameter[name[data_target_average_len]]]
variable[bucket_batch_sizes] assign[=] list[[]]
variable[largest_total_num_words] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18fe92140>, <ast.Name object at 0x7da18fe90430>]]] in starred[call[name[enumerate], parameter[name[buckets]]]] begin[:]
variable[padded_seq_len] assign[=] call[name[bucket]][constant[1]]
if compare[call[name[data_target_average_len]][name[buck_idx]] is constant[None]] begin[:]
call[name[data_target_average_len]][name[buck_idx]] assign[=] name[padded_seq_len]
variable[average_seq_len] assign[=] call[name[data_target_average_len]][name[buck_idx]]
if name[batch_by_words] begin[:]
call[name[check_condition], parameter[compare[name[padded_seq_len] less_or_equal[<=] name[batch_size]], binary_operation[constant[Word batch size must cover sequence lengths for all buckets: (%d > %d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18fe92200>, <ast.Name object at 0x7da18fe90a90>]]]]]
variable[batch_size_seq] assign[=] binary_operation[name[batch_num_devices] * call[name[max], parameter[constant[1], call[name[round], parameter[binary_operation[binary_operation[name[batch_size] / name[average_seq_len]] / name[batch_num_devices]]]]]]]
variable[batch_size_word] assign[=] binary_operation[name[batch_size_seq] * name[average_seq_len]]
call[name[bucket_batch_sizes].append, parameter[call[name[BucketBatchSize], parameter[name[bucket], name[batch_size_seq], name[batch_size_word]]]]]
variable[largest_total_num_words] assign[=] call[name[max], parameter[name[largest_total_num_words], binary_operation[name[batch_size_seq] * call[name[max], parameter[<ast.Starred object at 0x7da18fe91e40>]]]]]
if name[batch_by_words] begin[:]
variable[padded_seq_len] assign[=] call[name[max], parameter[<ast.Starred object at 0x7da18fe92920>]]
variable[average_seq_len] assign[=] call[name[data_target_average_len]][<ast.UnaryOp object at 0x7da1b1d35a50>]
while compare[binary_operation[call[name[bucket_batch_sizes]][<ast.UnaryOp object at 0x7da1b1d36740>].batch_size * name[padded_seq_len]] less[<] name[largest_total_num_words]] begin[:]
call[name[bucket_batch_sizes]][<ast.UnaryOp object at 0x7da1b1d36230>] assign[=] call[name[BucketBatchSize], parameter[call[name[bucket_batch_sizes]][<ast.UnaryOp object at 0x7da1b1d37490>].bucket, binary_operation[call[name[bucket_batch_sizes]][<ast.UnaryOp object at 0x7da18ede7d60>].batch_size + name[batch_num_devices]], binary_operation[call[name[bucket_batch_sizes]][<ast.UnaryOp object at 0x7da18ede6380>].average_words_per_batch + binary_operation[name[batch_num_devices] * name[average_seq_len]]]]]
return[name[bucket_batch_sizes]]
|
keyword[def] identifier[define_bucket_batch_sizes] ( identifier[buckets] : identifier[List] [ identifier[Tuple] [ identifier[int] , identifier[int] ]],
identifier[batch_size] : identifier[int] ,
identifier[batch_by_words] : identifier[bool] ,
identifier[batch_num_devices] : identifier[int] ,
identifier[data_target_average_len] : identifier[List] [ identifier[Optional] [ identifier[float] ]])-> identifier[List] [ identifier[BucketBatchSize] ]:
literal[string]
identifier[check_condition] ( identifier[len] ( identifier[data_target_average_len] )== identifier[len] ( identifier[buckets] ),
literal[string] )
identifier[data_target_average_len] = identifier[list] ( identifier[data_target_average_len] )
identifier[bucket_batch_sizes] =[]
identifier[largest_total_num_words] = literal[int]
keyword[for] identifier[buck_idx] , identifier[bucket] keyword[in] identifier[enumerate] ( identifier[buckets] ):
identifier[padded_seq_len] = identifier[bucket] [ literal[int] ]
keyword[if] identifier[data_target_average_len] [ identifier[buck_idx] ] keyword[is] keyword[None] :
identifier[data_target_average_len] [ identifier[buck_idx] ]= identifier[padded_seq_len]
identifier[average_seq_len] = identifier[data_target_average_len] [ identifier[buck_idx] ]
keyword[if] identifier[batch_by_words] :
identifier[check_condition] ( identifier[padded_seq_len] <= identifier[batch_size] , literal[string]
literal[string] %( identifier[padded_seq_len] , identifier[batch_size] ))
identifier[batch_size_seq] = identifier[batch_num_devices] * identifier[max] ( literal[int] , identifier[round] (( identifier[batch_size] / identifier[average_seq_len] )/ identifier[batch_num_devices] ))
identifier[batch_size_word] = identifier[batch_size_seq] * identifier[average_seq_len]
keyword[else] :
identifier[batch_size_seq] = identifier[batch_size]
identifier[batch_size_word] = identifier[batch_size_seq] * identifier[average_seq_len]
identifier[bucket_batch_sizes] . identifier[append] ( identifier[BucketBatchSize] ( identifier[bucket] , identifier[batch_size_seq] , identifier[batch_size_word] ))
identifier[largest_total_num_words] = identifier[max] ( identifier[largest_total_num_words] , identifier[batch_size_seq] * identifier[max] (* identifier[bucket] ))
keyword[if] identifier[batch_by_words] :
identifier[padded_seq_len] = identifier[max] (* identifier[buckets] [- literal[int] ])
identifier[average_seq_len] = identifier[data_target_average_len] [- literal[int] ]
keyword[while] identifier[bucket_batch_sizes] [- literal[int] ]. identifier[batch_size] * identifier[padded_seq_len] < identifier[largest_total_num_words] :
identifier[bucket_batch_sizes] [- literal[int] ]= identifier[BucketBatchSize] (
identifier[bucket_batch_sizes] [- literal[int] ]. identifier[bucket] ,
identifier[bucket_batch_sizes] [- literal[int] ]. identifier[batch_size] + identifier[batch_num_devices] ,
identifier[bucket_batch_sizes] [- literal[int] ]. identifier[average_words_per_batch] + identifier[batch_num_devices] * identifier[average_seq_len] )
keyword[return] identifier[bucket_batch_sizes]
|
def define_bucket_batch_sizes(buckets: List[Tuple[int, int]], batch_size: int, batch_by_words: bool, batch_num_devices: int, data_target_average_len: List[Optional[float]]) -> List[BucketBatchSize]:
    """Compute per-bucket batch sizes in sentences and average words.

    Sentence-based batching uses the same sentence count for every bucket,
    so only the word count varies. Word-based batching picks, per bucket,
    the multiple of the device count whose word total (sentences times the
    average target length) is closest to the requested word batch size.

    :param buckets: Bucket list of (source_len, target_len) tuples.
    :param batch_size: Batch size (words or sentences per ``batch_by_words``).
    :param batch_by_words: Whether the batch size counts words.
    :param batch_num_devices: Number of devices; sentence counts are multiples of it.
    :param data_target_average_len: Optional average target length per bucket.
    """
    check_condition(len(data_target_average_len) == len(buckets), 'Must provide None or average target length for each bucket')
    avg_lens = list(data_target_average_len)
    batch_sizes = []  # type: List[BucketBatchSize]
    max_batch_words = 0
    for idx, bucket in enumerate(buckets):
        # Padded target/label length for this bucket.
        target_len = bucket[1]
        # Fall back to the padded length when no average is provided.
        if avg_lens[idx] is None:
            avg_lens[idx] = target_len
        avg_len = avg_lens[idx]
        if batch_by_words:
            check_condition(target_len <= batch_size, 'Word batch size must cover sequence lengths for all buckets: (%d > %d)' % (target_len, batch_size))
            # Multiple of the device count closest to the target word count,
            # assuming sentences of average length.
            num_seqs = batch_num_devices * max(1, round(batch_size / avg_len / batch_num_devices))
        else:
            num_seqs = batch_size
        batch_sizes.append(BucketBatchSize(bucket, num_seqs, num_seqs * avg_len))
        # Track the largest number of source or target word samples per batch.
        max_batch_words = max(max_batch_words, num_seqs * max(*bucket))
    # Grow the last (largest) bucket's batch size until it covers any
    # (batch_size, len_source)/(batch_size, len_target) matrix from the
    # data iterator, enabling memory sharing. Sentence-based batching
    # already satisfies this.
    if batch_by_words:
        target_len = max(*buckets[-1])
        avg_len = avg_lens[-1]
        while batch_sizes[-1].batch_size * target_len < max_batch_words:
            last = batch_sizes[-1]
            batch_sizes[-1] = BucketBatchSize(last.bucket,
                                              last.batch_size + batch_num_devices,
                                              last.average_words_per_batch + batch_num_devices * avg_len)
    return batch_sizes
|
def cli(ctx, id_number, new_key, metadata=""):
"""Update a canned key
Output:
an empty dictionary
"""
return ctx.gi.cannedkeys.update_key(id_number, new_key, metadata=metadata)
|
def function[cli, parameter[ctx, id_number, new_key, metadata]]:
constant[Update a canned key
Output:
an empty dictionary
]
return[call[name[ctx].gi.cannedkeys.update_key, parameter[name[id_number], name[new_key]]]]
|
keyword[def] identifier[cli] ( identifier[ctx] , identifier[id_number] , identifier[new_key] , identifier[metadata] = literal[string] ):
literal[string]
keyword[return] identifier[ctx] . identifier[gi] . identifier[cannedkeys] . identifier[update_key] ( identifier[id_number] , identifier[new_key] , identifier[metadata] = identifier[metadata] )
|
def cli(ctx, id_number, new_key, metadata=''):
"""Update a canned key
Output:
an empty dictionary
"""
return ctx.gi.cannedkeys.update_key(id_number, new_key, metadata=metadata)
|
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
    """Returns a Seaborn clustermap."""
    # Collect all clustermap options from the parameter object.
    options = dict(
        cmap=params.cmap,
        vmin=params.vmin,
        vmax=params.vmax,
        col_colors=params.colorbar,
        row_colors=params.colorbar,
        figsize=(params.figsize, params.figsize),
        linewidths=params.linewidths,
        xticklabels=params.labels,
        yticklabels=params.labels,
        annot=annot,
    )
    fig = sns.clustermap(dfr, **options)
    fig.cax.yaxis.set_label_position("left")
    if title:
        fig.cax.set_ylabel(title)
    # Rotate tick labels: vertical on x, horizontal on y.
    heatmap = fig.ax_heatmap
    heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=90)
    heatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=0)
    return fig
|
def function[get_seaborn_clustermap, parameter[dfr, params, title, annot]]:
constant[Returns a Seaborn clustermap.]
variable[fig] assign[=] call[name[sns].clustermap, parameter[name[dfr]]]
call[name[fig].cax.yaxis.set_label_position, parameter[constant[left]]]
if name[title] begin[:]
call[name[fig].cax.set_ylabel, parameter[name[title]]]
call[name[fig].ax_heatmap.set_xticklabels, parameter[call[name[fig].ax_heatmap.get_xticklabels, parameter[]]]]
call[name[fig].ax_heatmap.set_yticklabels, parameter[call[name[fig].ax_heatmap.get_yticklabels, parameter[]]]]
return[name[fig]]
|
keyword[def] identifier[get_seaborn_clustermap] ( identifier[dfr] , identifier[params] , identifier[title] = keyword[None] , identifier[annot] = keyword[True] ):
literal[string]
identifier[fig] = identifier[sns] . identifier[clustermap] (
identifier[dfr] ,
identifier[cmap] = identifier[params] . identifier[cmap] ,
identifier[vmin] = identifier[params] . identifier[vmin] ,
identifier[vmax] = identifier[params] . identifier[vmax] ,
identifier[col_colors] = identifier[params] . identifier[colorbar] ,
identifier[row_colors] = identifier[params] . identifier[colorbar] ,
identifier[figsize] =( identifier[params] . identifier[figsize] , identifier[params] . identifier[figsize] ),
identifier[linewidths] = identifier[params] . identifier[linewidths] ,
identifier[xticklabels] = identifier[params] . identifier[labels] ,
identifier[yticklabels] = identifier[params] . identifier[labels] ,
identifier[annot] = identifier[annot] ,
)
identifier[fig] . identifier[cax] . identifier[yaxis] . identifier[set_label_position] ( literal[string] )
keyword[if] identifier[title] :
identifier[fig] . identifier[cax] . identifier[set_ylabel] ( identifier[title] )
identifier[fig] . identifier[ax_heatmap] . identifier[set_xticklabels] ( identifier[fig] . identifier[ax_heatmap] . identifier[get_xticklabels] (), identifier[rotation] = literal[int] )
identifier[fig] . identifier[ax_heatmap] . identifier[set_yticklabels] ( identifier[fig] . identifier[ax_heatmap] . identifier[get_yticklabels] (), identifier[rotation] = literal[int] )
keyword[return] identifier[fig]
|
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
    """Returns a Seaborn clustermap."""
    # Square figure, sized from the parameter object.
    side = (params.figsize, params.figsize)
    fig = sns.clustermap(dfr,
                         cmap=params.cmap,
                         vmin=params.vmin,
                         vmax=params.vmax,
                         col_colors=params.colorbar,
                         row_colors=params.colorbar,
                         figsize=side,
                         linewidths=params.linewidths,
                         xticklabels=params.labels,
                         yticklabels=params.labels,
                         annot=annot)
    # Put the colorbar label on the left-hand side.
    fig.cax.yaxis.set_label_position('left')
    if title:
        fig.cax.set_ylabel(title)
    # Rotate tick labels for readability.
    fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
    fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
    return fig
|
def _insert(self, docs, ordered=True, check_keys=True,
            manipulate=False, write_concern=None, op_id=None,
            bypass_doc_val=False, session=None):
    """Internal insert helper.

    Inserts either a single document (when ``docs`` is a mapping, via
    ``_insert_one``) or an iterable of documents through a bulk write.
    For the iterable case, returns the list of ``_id`` values; entries
    may be ``None`` when ``manipulate`` is False and a document has no
    ``_id`` field.
    """
    # A single mapping means insert-one; delegate and return its result.
    if isinstance(docs, abc.Mapping):
        return self._insert_one(
            docs, ordered, check_keys, manipulate, write_concern, op_id,
            bypass_doc_val, session)
    # Populated lazily, as a side effect of consuming gen() below.
    ids = []
    if manipulate:
        def gen():
            """Generator that applies SON manipulators to each document
            and adds _id if necessary.
            """
            _db = self.__database
            for doc in docs:
                # Apply user-configured SON manipulators. This order of
                # operations is required for backwards compatibility,
                # see PYTHON-709.
                doc = _db._apply_incoming_manipulators(doc, self)
                if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
                    doc['_id'] = ObjectId()
                doc = _db._apply_incoming_copying_manipulators(doc, self)
                ids.append(doc['_id'])
                yield doc
    else:
        def gen():
            """Generator that only tracks existing _ids."""
            for doc in docs:
                # Don't inflate RawBSONDocument by touching fields.
                if not isinstance(doc, RawBSONDocument):
                    ids.append(doc.get('_id'))
                yield doc
    # Explicit write concern wins; otherwise derive one for the session.
    write_concern = write_concern or self._write_concern_for(session)
    blk = _Bulk(self, ordered, bypass_doc_val)
    # Building this list consumes gen(), which also fills `ids`.
    blk.ops = [(message._INSERT, doc) for doc in gen()]
    try:
        blk.execute(write_concern, session=session)
    except BulkWriteError as bwe:
        # NOTE(review): presumably re-raises the last write error in the
        # legacy single-error format — confirm against _raise_last_error.
        _raise_last_error(bwe.details)
    return ids
|
def function[_insert, parameter[self, docs, ordered, check_keys, manipulate, write_concern, op_id, bypass_doc_val, session]]:
constant[Internal insert helper.]
if call[name[isinstance], parameter[name[docs], name[abc].Mapping]] begin[:]
return[call[name[self]._insert_one, parameter[name[docs], name[ordered], name[check_keys], name[manipulate], name[write_concern], name[op_id], name[bypass_doc_val], name[session]]]]
variable[ids] assign[=] list[[]]
if name[manipulate] begin[:]
def function[gen, parameter[]]:
constant[Generator that applies SON manipulators to each document
and adds _id if necessary.
]
variable[_db] assign[=] name[self].__database
for taget[name[doc]] in starred[name[docs]] begin[:]
variable[doc] assign[=] call[name[_db]._apply_incoming_manipulators, parameter[name[doc], name[self]]]
if <ast.UnaryOp object at 0x7da2054a7310> begin[:]
call[name[doc]][constant[_id]] assign[=] call[name[ObjectId], parameter[]]
variable[doc] assign[=] call[name[_db]._apply_incoming_copying_manipulators, parameter[name[doc], name[self]]]
call[name[ids].append, parameter[call[name[doc]][constant[_id]]]]
<ast.Yield object at 0x7da2054a4550>
variable[write_concern] assign[=] <ast.BoolOp object at 0x7da2054a66e0>
variable[blk] assign[=] call[name[_Bulk], parameter[name[self], name[ordered], name[bypass_doc_val]]]
name[blk].ops assign[=] <ast.ListComp object at 0x7da2054a6080>
<ast.Try object at 0x7da2054a59f0>
return[name[ids]]
|
keyword[def] identifier[_insert] ( identifier[self] , identifier[docs] , identifier[ordered] = keyword[True] , identifier[check_keys] = keyword[True] ,
identifier[manipulate] = keyword[False] , identifier[write_concern] = keyword[None] , identifier[op_id] = keyword[None] ,
identifier[bypass_doc_val] = keyword[False] , identifier[session] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[docs] , identifier[abc] . identifier[Mapping] ):
keyword[return] identifier[self] . identifier[_insert_one] (
identifier[docs] , identifier[ordered] , identifier[check_keys] , identifier[manipulate] , identifier[write_concern] , identifier[op_id] ,
identifier[bypass_doc_val] , identifier[session] )
identifier[ids] =[]
keyword[if] identifier[manipulate] :
keyword[def] identifier[gen] ():
literal[string]
identifier[_db] = identifier[self] . identifier[__database]
keyword[for] identifier[doc] keyword[in] identifier[docs] :
identifier[doc] = identifier[_db] . identifier[_apply_incoming_manipulators] ( identifier[doc] , identifier[self] )
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[doc] , identifier[RawBSONDocument] ) keyword[or] literal[string] keyword[in] identifier[doc] ):
identifier[doc] [ literal[string] ]= identifier[ObjectId] ()
identifier[doc] = identifier[_db] . identifier[_apply_incoming_copying_manipulators] ( identifier[doc] , identifier[self] )
identifier[ids] . identifier[append] ( identifier[doc] [ literal[string] ])
keyword[yield] identifier[doc]
keyword[else] :
keyword[def] identifier[gen] ():
literal[string]
keyword[for] identifier[doc] keyword[in] identifier[docs] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[doc] , identifier[RawBSONDocument] ):
identifier[ids] . identifier[append] ( identifier[doc] . identifier[get] ( literal[string] ))
keyword[yield] identifier[doc]
identifier[write_concern] = identifier[write_concern] keyword[or] identifier[self] . identifier[_write_concern_for] ( identifier[session] )
identifier[blk] = identifier[_Bulk] ( identifier[self] , identifier[ordered] , identifier[bypass_doc_val] )
identifier[blk] . identifier[ops] =[( identifier[message] . identifier[_INSERT] , identifier[doc] ) keyword[for] identifier[doc] keyword[in] identifier[gen] ()]
keyword[try] :
identifier[blk] . identifier[execute] ( identifier[write_concern] , identifier[session] = identifier[session] )
keyword[except] identifier[BulkWriteError] keyword[as] identifier[bwe] :
identifier[_raise_last_error] ( identifier[bwe] . identifier[details] )
keyword[return] identifier[ids]
|
def _insert(self, docs, ordered=True, check_keys=True, manipulate=False, write_concern=None, op_id=None, bypass_doc_val=False, session=None):
    """Internal insert helper.

    Inserts either a single document (when ``docs`` is a mapping, via
    ``_insert_one``) or an iterable of documents through a bulk write.
    For the iterable case, returns the list of ``_id`` values; entries
    may be ``None`` when ``manipulate`` is False and a document has no
    ``_id`` field.
    """
    # A single mapping means insert-one; delegate and return its result.
    if isinstance(docs, abc.Mapping):
        return self._insert_one(docs, ordered, check_keys, manipulate, write_concern, op_id, bypass_doc_val, session) # depends on [control=['if'], data=[]]
    # Populated lazily, as a side effect of consuming gen() below.
    ids = []
    if manipulate:
        def gen():
            """Generator that applies SON manipulators to each document
            and adds _id if necessary.
            """
            _db = self.__database
            for doc in docs:
                # Apply user-configured SON manipulators. This order of
                # operations is required for backwards compatibility,
                # see PYTHON-709.
                doc = _db._apply_incoming_manipulators(doc, self)
                if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
                    doc['_id'] = ObjectId() # depends on [control=['if'], data=[]]
                doc = _db._apply_incoming_copying_manipulators(doc, self)
                ids.append(doc['_id'])
                yield doc # depends on [control=['for'], data=['doc']] # depends on [control=['if'], data=[]]
    else:
        def gen():
            """Generator that only tracks existing _ids."""
            for doc in docs:
                # Don't inflate RawBSONDocument by touching fields.
                if not isinstance(doc, RawBSONDocument):
                    ids.append(doc.get('_id')) # depends on [control=['if'], data=[]]
                yield doc # depends on [control=['for'], data=['doc']]
    # Explicit write concern wins; otherwise derive one for the session.
    write_concern = write_concern or self._write_concern_for(session)
    blk = _Bulk(self, ordered, bypass_doc_val)
    # Building this list consumes gen(), which also fills `ids`.
    blk.ops = [(message._INSERT, doc) for doc in gen()]
    try:
        blk.execute(write_concern, session=session) # depends on [control=['try'], data=[]]
    except BulkWriteError as bwe:
        # NOTE(review): presumably re-raises the last write error in the
        # legacy single-error format — confirm against _raise_last_error.
        _raise_last_error(bwe.details) # depends on [control=['except'], data=['bwe']]
    return ids
|
def get_me(self) -> "pyrogram.User":
    """A simple method for testing your authorization. Requires no parameters.
    Returns:
        Basic information about the user or bot in form of a :obj:`User` object
    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Request the full record for the current user, then parse its user part.
    request = functions.users.GetFullUser(id=types.InputPeerSelf())
    full_user = self.send(request)
    return pyrogram.User._parse(self, full_user.user)
|
def function[get_me, parameter[self]]:
constant[A simple method for testing your authorization. Requires no parameters.
Returns:
Basic information about the user or bot in form of a :obj:`User` object
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
]
return[call[name[pyrogram].User._parse, parameter[name[self], call[name[self].send, parameter[call[name[functions].users.GetFullUser, parameter[]]]].user]]]
|
keyword[def] identifier[get_me] ( identifier[self] )-> literal[string] :
literal[string]
keyword[return] identifier[pyrogram] . identifier[User] . identifier[_parse] (
identifier[self] ,
identifier[self] . identifier[send] (
identifier[functions] . identifier[users] . identifier[GetFullUser] (
identifier[id] = identifier[types] . identifier[InputPeerSelf] ()
)
). identifier[user]
)
|
def get_me(self) -> 'pyrogram.User':
    """A simple method for testing your authorization. Requires no parameters.
    Returns:
        Basic information about the user or bot in form of a :obj:`User` object
    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Fetch the full user record for "self" and convert it to a User object.
    full = self.send(functions.users.GetFullUser(id=types.InputPeerSelf()))
    return pyrogram.User._parse(self, full.user)
|
def eval_string(stri):
    """evaluate expressions passed as string"""
    # Shell-style tokenization, one token per line for `plash eval`.
    joined = '\n'.join(shlex.split(stri))
    output = run_write_read(['plash', 'eval'], joined.encode())
    return output.decode()
|
def function[eval_string, parameter[stri]]:
constant[evaluate expressions passed as string]
variable[tokens] assign[=] call[name[shlex].split, parameter[name[stri]]]
return[call[call[name[run_write_read], parameter[list[[<ast.Constant object at 0x7da18f09ebc0>, <ast.Constant object at 0x7da18f09dd50>]], call[call[constant[
].join, parameter[name[tokens]]].encode, parameter[]]]].decode, parameter[]]]
|
keyword[def] identifier[eval_string] ( identifier[stri] ):
literal[string]
identifier[tokens] = identifier[shlex] . identifier[split] ( identifier[stri] )
keyword[return] identifier[run_write_read] ([ literal[string] , literal[string] ], literal[string] . identifier[join] ( identifier[tokens] ). identifier[encode] ()). identifier[decode] ()
|
def eval_string(stri):
    """evaluate expressions passed as string"""
    # Split shell-style, then feed the tokens line-by-line to `plash eval`.
    tokens = shlex.split(stri)
    payload = '\n'.join(tokens).encode()
    return run_write_read(['plash', 'eval'], payload).decode()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.