code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _tr_above(self):
    """
    Return the ``tr`` element immediately preceding the ``tr`` this cell
    appears in.

    Raises |ValueError| if called on a cell in the top-most row.
    """
    rows = self._tbl.tr_lst
    row_idx = rows.index(self._tr)
    if row_idx == 0:
        raise ValueError('no tr above topmost tr')
    return rows[row_idx - 1]
|
def function[_tr_above, parameter[self]]:
constant[
The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row.
]
variable[tr_lst] assign[=] name[self]._tbl.tr_lst
variable[tr_idx] assign[=] call[name[tr_lst].index, parameter[name[self]._tr]]
if compare[name[tr_idx] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1cbb0d0>
return[call[name[tr_lst]][binary_operation[name[tr_idx] - constant[1]]]]
|
keyword[def] identifier[_tr_above] ( identifier[self] ):
literal[string]
identifier[tr_lst] = identifier[self] . identifier[_tbl] . identifier[tr_lst]
identifier[tr_idx] = identifier[tr_lst] . identifier[index] ( identifier[self] . identifier[_tr] )
keyword[if] identifier[tr_idx] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[tr_lst] [ identifier[tr_idx] - literal[int] ]
|
def _tr_above(self):
"""
The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row.
"""
tr_lst = self._tbl.tr_lst
tr_idx = tr_lst.index(self._tr)
if tr_idx == 0:
raise ValueError('no tr above topmost tr') # depends on [control=['if'], data=[]]
return tr_lst[tr_idx - 1]
|
def message(self, category, subject, msg_file):
    """Send message to all users in `category`.

    :param category: attribute name on ``self.sub`` holding the target users.
    :param subject: subject line passed to ``user.send_message``.
    :param msg_file: path of a file containing the message body; when falsy,
        the body is read from stdin instead.
    :returns: None. Prints progress/confirmation to stdout and aborts (with a
        printed reason) on empty user list, unreadable file, or a negative
        confirmation answer.
    """
    users = getattr(self.sub, category)
    if not users:
        print('There are no {} users on {}.'.format(category, self.sub))
        return
    if msg_file:
        try:
            # Context manager guarantees the file handle is closed; the
            # original `open(msg_file).read()` leaked it on non-refcounting
            # interpreters.
            with open(msg_file) as msg_fp:
                msg = msg_fp.read()
        except IOError as error:
            print(str(error))
            return
    else:
        print('Enter message:')
        msg = sys.stdin.read()
    print('You are about to send the following message to the users {}:'
          .format(', '.join([str(x) for x in users])))
    print('---BEGIN MESSAGE---\n{}\n---END MESSAGE---'.format(msg))
    if input('Are you sure? yes/[no]: ').lower() not in ['y', 'yes']:
        print('Message sending aborted.')
        return
    for user in users:
        user.send_message(subject, msg)
        print('Sent to: {}'.format(user))
|
def function[message, parameter[self, category, subject, msg_file]]:
constant[Send message to all users in `category`.]
variable[users] assign[=] call[name[getattr], parameter[name[self].sub, name[category]]]
if <ast.UnaryOp object at 0x7da20e9b2b60> begin[:]
call[name[print], parameter[call[constant[There are no {} users on {}.].format, parameter[name[category], name[self].sub]]]]
return[None]
if name[msg_file] begin[:]
<ast.Try object at 0x7da20e9b0d30>
call[name[print], parameter[call[constant[You are about to send the following message to the users {}:].format, parameter[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da20e9b3c40>]]]]]]
call[name[print], parameter[call[constant[---BEGIN MESSAGE---
{}
---END MESSAGE---].format, parameter[name[msg]]]]]
if compare[call[call[name[input], parameter[constant[Are you sure? yes/[no]: ]]].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20e9b2fb0>, <ast.Constant object at 0x7da20e9b0430>]]] begin[:]
call[name[print], parameter[constant[Message sending aborted.]]]
return[None]
for taget[name[user]] in starred[name[users]] begin[:]
call[name[user].send_message, parameter[name[subject], name[msg]]]
call[name[print], parameter[call[constant[Sent to: {}].format, parameter[name[user]]]]]
|
keyword[def] identifier[message] ( identifier[self] , identifier[category] , identifier[subject] , identifier[msg_file] ):
literal[string]
identifier[users] = identifier[getattr] ( identifier[self] . identifier[sub] , identifier[category] )
keyword[if] keyword[not] identifier[users] :
identifier[print] ( literal[string] . identifier[format] ( identifier[category] , identifier[self] . identifier[sub] ))
keyword[return]
keyword[if] identifier[msg_file] :
keyword[try] :
identifier[msg] = identifier[open] ( identifier[msg_file] ). identifier[read] ()
keyword[except] identifier[IOError] keyword[as] identifier[error] :
identifier[print] ( identifier[str] ( identifier[error] ))
keyword[return]
keyword[else] :
identifier[print] ( literal[string] )
identifier[msg] = identifier[sys] . identifier[stdin] . identifier[read] ()
identifier[print] ( literal[string]
. identifier[format] ( literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[users] ])))
identifier[print] ( literal[string] . identifier[format] ( identifier[msg] ))
keyword[if] identifier[input] ( literal[string] ). identifier[lower] () keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[print] ( literal[string] )
keyword[return]
keyword[for] identifier[user] keyword[in] identifier[users] :
identifier[user] . identifier[send_message] ( identifier[subject] , identifier[msg] )
identifier[print] ( literal[string] . identifier[format] ( identifier[user] ))
|
def message(self, category, subject, msg_file):
"""Send message to all users in `category`."""
users = getattr(self.sub, category)
if not users:
print('There are no {} users on {}.'.format(category, self.sub))
return # depends on [control=['if'], data=[]]
if msg_file:
try:
msg = open(msg_file).read() # depends on [control=['try'], data=[]]
except IOError as error:
print(str(error))
return # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]]
else:
print('Enter message:')
msg = sys.stdin.read()
print('You are about to send the following message to the users {}:'.format(', '.join([str(x) for x in users])))
print('---BEGIN MESSAGE---\n{}\n---END MESSAGE---'.format(msg))
if input('Are you sure? yes/[no]: ').lower() not in ['y', 'yes']:
print('Message sending aborted.')
return # depends on [control=['if'], data=[]]
for user in users:
user.send_message(subject, msg)
print('Sent to: {}'.format(user)) # depends on [control=['for'], data=['user']]
|
def register(key, initializer: callable, param=None):
    """Add a resolver for *key* to the global container."""
    scope = get_current_scope()
    scope.container.register(key, initializer, param)
|
def function[register, parameter[key, initializer, param]]:
constant[Adds resolver to global container]
call[call[name[get_current_scope], parameter[]].container.register, parameter[name[key], name[initializer], name[param]]]
|
keyword[def] identifier[register] ( identifier[key] , identifier[initializer] : identifier[callable] , identifier[param] = keyword[None] ):
literal[string]
identifier[get_current_scope] (). identifier[container] . identifier[register] ( identifier[key] , identifier[initializer] , identifier[param] )
|
def register(key, initializer: callable, param=None):
"""Adds resolver to global container"""
get_current_scope().container.register(key, initializer, param)
|
def retrieve_asset(filename):
    """Serve the non-image asset named *filename* for an entry.

    Raises NotFound when no record matches and Forbidden when the matching
    record is not an asset.
    """
    asset = model.Image.get(asset_name=filename)
    if not asset:
        raise http_error.NotFound("File not found")
    if not asset.is_asset:
        raise http_error.Forbidden()
    return flask.send_file(asset.file_path, conditional=True)
|
def function[retrieve_asset, parameter[filename]]:
constant[ Retrieves a non-image asset associated with an entry ]
variable[record] assign[=] call[name[model].Image.get, parameter[]]
if <ast.UnaryOp object at 0x7da18c4ce2c0> begin[:]
<ast.Raise object at 0x7da18c4cf5b0>
if <ast.UnaryOp object at 0x7da18c4cda20> begin[:]
<ast.Raise object at 0x7da18c4cc430>
return[call[name[flask].send_file, parameter[name[record].file_path]]]
|
keyword[def] identifier[retrieve_asset] ( identifier[filename] ):
literal[string]
identifier[record] = identifier[model] . identifier[Image] . identifier[get] ( identifier[asset_name] = identifier[filename] )
keyword[if] keyword[not] identifier[record] :
keyword[raise] identifier[http_error] . identifier[NotFound] ( literal[string] )
keyword[if] keyword[not] identifier[record] . identifier[is_asset] :
keyword[raise] identifier[http_error] . identifier[Forbidden] ()
keyword[return] identifier[flask] . identifier[send_file] ( identifier[record] . identifier[file_path] , identifier[conditional] = keyword[True] )
|
def retrieve_asset(filename):
""" Retrieves a non-image asset associated with an entry """
record = model.Image.get(asset_name=filename)
if not record:
raise http_error.NotFound('File not found') # depends on [control=['if'], data=[]]
if not record.is_asset:
raise http_error.Forbidden() # depends on [control=['if'], data=[]]
return flask.send_file(record.file_path, conditional=True)
|
def nvmlUnitGetTemperature(unit, type):
    r"""
    /**
     * Retrieves the temperature readings for the unit, in degrees C.
     *
     * For S-class products.
     *
     * Depending on the product, readings may be available for intake (type=0),
     * exhaust (type=1) and board (type=2).
     *
     * @param unit The identifier of the target unit
     * @param type The type of reading to take
     * @param temp Reference in which to return the intake temperature
     *
     * @return
     * - \ref NVML_SUCCESS if \a temp has been populated
     * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
     * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL
     * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
     * - \ref NVML_ERROR_UNKNOWN on any unexpected error
     */
    nvmlReturn_t DECLDIR nvmlUnitGetTemperature
    """
    # NOTE: the original carried a second, byte-for-byte copy of this
    # docstring as a bare string statement (dead code); it has been removed.
    # `type` shadows the builtin but is kept to preserve the public signature.
    c_temp = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlUnitGetTemperature")
    ret = fn(unit, c_uint(type), byref(c_temp))
    _nvmlCheckReturn(ret)  # raises on any non-success NVML return code
    return bytes_to_str(c_temp.value)
|
def function[nvmlUnitGetTemperature, parameter[unit, type]]:
constant[
/**
* Retrieves the temperature readings for the unit, in degrees C.
*
* For S-class products.
*
* Depending on the product, readings may be available for intake (type=0),
* exhaust (type=1) and board (type=2).
*
* @param unit The identifier of the target unit
* @param type The type of reading to take
* @param temp Reference in which to return the intake temperature
*
* @return
* - \ref NVML_SUCCESS if \a temp has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetTemperature
]
constant[
/**
* Retrieves the temperature readings for the unit, in degrees C.
*
* For S-class products.
*
* Depending on the product, readings may be available for intake (type=0),
* exhaust (type=1) and board (type=2).
*
* @param unit The identifier of the target unit
* @param type The type of reading to take
* @param temp Reference in which to return the intake temperature
*
* @return
* -
ef NVML_SUCCESS if temp has been populated
* -
ef NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* -
ef NVML_ERROR_INVALID_ARGUMENT if unit or type is invalid or temp is NULL
* -
ef NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* -
ef NVML_ERROR_UNKNOWN on any unexpected error
*/
]
variable[c_temp] assign[=] call[name[c_uint], parameter[]]
variable[fn] assign[=] call[name[_nvmlGetFunctionPointer], parameter[constant[nvmlUnitGetTemperature]]]
variable[ret] assign[=] call[name[fn], parameter[name[unit], call[name[c_uint], parameter[name[type]]], call[name[byref], parameter[name[c_temp]]]]]
call[name[_nvmlCheckReturn], parameter[name[ret]]]
return[call[name[bytes_to_str], parameter[name[c_temp].value]]]
|
keyword[def] identifier[nvmlUnitGetTemperature] ( identifier[unit] , identifier[type] ):
literal[string]
literal[string]
identifier[c_temp] = identifier[c_uint] ()
identifier[fn] = identifier[_nvmlGetFunctionPointer] ( literal[string] )
identifier[ret] = identifier[fn] ( identifier[unit] , identifier[c_uint] ( identifier[type] ), identifier[byref] ( identifier[c_temp] ))
identifier[_nvmlCheckReturn] ( identifier[ret] )
keyword[return] identifier[bytes_to_str] ( identifier[c_temp] . identifier[value] )
|
def nvmlUnitGetTemperature(unit, type):
"""
/**
* Retrieves the temperature readings for the unit, in degrees C.
*
* For S-class products.
*
* Depending on the product, readings may be available for intake (type=0),
* exhaust (type=1) and board (type=2).
*
* @param unit The identifier of the target unit
* @param type The type of reading to take
* @param temp Reference in which to return the intake temperature
*
* @return
* - \\ref NVML_SUCCESS if \\a temp has been populated
* - \\ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \\ref NVML_ERROR_INVALID_ARGUMENT if \\a unit or \\a type is invalid or \\a temp is NULL
* - \\ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* - \\ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetTemperature
"""
'\n/**\n * Retrieves the temperature readings for the unit, in degrees C.\n *\n * For S-class products.\n *\n * Depending on the product, readings may be available for intake (type=0),\n * exhaust (type=1) and board (type=2).\n *\n * @param unit The identifier of the target unit\n * @param type The type of reading to take\n * @param temp Reference in which to return the intake temperature\n *\n * @return\n * - \ref NVML_SUCCESS if \x07 temp has been populated\n * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized\n * - \ref NVML_ERROR_INVALID_ARGUMENT if \x07 unit or \x07 type is invalid or \x07 temp is NULL\n * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product\n * - \ref NVML_ERROR_UNKNOWN on any unexpected error\n */\n '
c_temp = c_uint()
fn = _nvmlGetFunctionPointer('nvmlUnitGetTemperature')
ret = fn(unit, c_uint(type), byref(c_temp))
_nvmlCheckReturn(ret)
return bytes_to_str(c_temp.value)
|
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.
    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.
    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.
    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class
    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    # Guard clauses: bail out early when no upgrade should run.
    if not openstack_upgrade_available(package):
        action_set({'outcome': 'no upgrade available.'})
        return False
    if not config('action-managed-upgrade'):
        action_set({'outcome': 'action-managed-upgrade config is '
                               'False, skipped upgrade.'})
        return False
    juju_log('Upgrading OpenStack release')
    try:
        upgrade_callback(configs=configs)
        action_set({'outcome': 'success, upgrade completed.'})
        return True
    except Exception:
        action_set({'outcome': 'upgrade failed, see traceback.'})
        action_set({'traceback': traceback.format_exc()})
        action_fail('do_openstack_upgrade resulted in an '
                    'unexpected error')
        return False
|
def function[do_action_openstack_upgrade, parameter[package, upgrade_callback, configs]]:
constant[Perform action-managed OpenStack upgrade.
Upgrades packages to the configured openstack-origin version and sets
the corresponding action status as a result.
If the charm was installed from source we cannot upgrade it.
For backwards compatibility a config flag (action-managed-upgrade) must
be set for this code to run, otherwise a full service level upgrade will
fire on config-changed.
@param package: package name for determining if upgrade available
@param upgrade_callback: function callback to charm's upgrade function
@param configs: templating object derived from OSConfigRenderer class
@return: True if upgrade successful; False if upgrade failed or skipped
]
variable[ret] assign[=] constant[False]
if call[name[openstack_upgrade_available], parameter[name[package]]] begin[:]
if call[name[config], parameter[constant[action-managed-upgrade]]] begin[:]
call[name[juju_log], parameter[constant[Upgrading OpenStack release]]]
<ast.Try object at 0x7da18fe905e0>
return[name[ret]]
|
keyword[def] identifier[do_action_openstack_upgrade] ( identifier[package] , identifier[upgrade_callback] , identifier[configs] ):
literal[string]
identifier[ret] = keyword[False]
keyword[if] identifier[openstack_upgrade_available] ( identifier[package] ):
keyword[if] identifier[config] ( literal[string] ):
identifier[juju_log] ( literal[string] )
keyword[try] :
identifier[upgrade_callback] ( identifier[configs] = identifier[configs] )
identifier[action_set] ({ literal[string] : literal[string] })
identifier[ret] = keyword[True]
keyword[except] identifier[Exception] :
identifier[action_set] ({ literal[string] : literal[string] })
identifier[action_set] ({ literal[string] : identifier[traceback] . identifier[format_exc] ()})
identifier[action_fail] ( literal[string]
literal[string] )
keyword[else] :
identifier[action_set] ({ literal[string] : literal[string]
literal[string] })
keyword[else] :
identifier[action_set] ({ literal[string] : literal[string] })
keyword[return] identifier[ret]
|
def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""Perform action-managed OpenStack upgrade.
Upgrades packages to the configured openstack-origin version and sets
the corresponding action status as a result.
If the charm was installed from source we cannot upgrade it.
For backwards compatibility a config flag (action-managed-upgrade) must
be set for this code to run, otherwise a full service level upgrade will
fire on config-changed.
@param package: package name for determining if upgrade available
@param upgrade_callback: function callback to charm's upgrade function
@param configs: templating object derived from OSConfigRenderer class
@return: True if upgrade successful; False if upgrade failed or skipped
"""
ret = False
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True # depends on [control=['try'], data=[]]
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an unexpected error') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
action_set({'outcome': 'action-managed-upgrade config is False, skipped upgrade.'}) # depends on [control=['if'], data=[]]
else:
action_set({'outcome': 'no upgrade available.'})
return ret
|
def HslToRgb(h, s, l):
    '''Convert the color from HSL coordinates to RGB.
    Parameters:
      :h:
        The Hue component value in degrees [0...360]
        (the original doc claimed [0...1], but the code divides by 60
        and the doctest passes 30.0 — hue is degrees)
      :s:
        The Saturation component value [0...1]
      :l:
        The Lightness component value [0...1]
    Returns:
      The color as an (r, g, b) tuple in the range:
      r[0...1],
      g[0...1],
      b[0...1]
    >>> Color.HslToRgb(30.0, 1.0, 0.5)
    (1.0, 0.5, 0.0)
    '''
    if s == 0:
        return (l, l, l)  # achromatic (gray)
    # n1/n2 are the interpolation endpoints used by the hue helper.
    if l < 0.5:
        n2 = l * (1.0 + s)
    else:
        n2 = l + s - (l * s)
    n1 = (2.0 * l) - n2
    h /= 60.0  # degrees -> sextant position for _HueToRgb
    hueToRgb = Color._HueToRgb
    r = hueToRgb(n1, n2, h + 2)
    g = hueToRgb(n1, n2, h)
    b = hueToRgb(n1, n2, h - 2)
    return (r, g, b)
|
def function[HslToRgb, parameter[h, s, l]]:
constant[Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
]
if compare[name[s] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Name object at 0x7da20c6c5810>, <ast.Name object at 0x7da20c6c6290>, <ast.Name object at 0x7da20c6c6ce0>]]]
if compare[name[l] less[<] constant[0.5]] begin[:]
variable[n2] assign[=] binary_operation[name[l] * binary_operation[constant[1.0] + name[s]]]
variable[n1] assign[=] binary_operation[binary_operation[constant[2.0] * name[l]] - name[n2]]
<ast.AugAssign object at 0x7da18c4cea10>
variable[hueToRgb] assign[=] name[Color]._HueToRgb
variable[r] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], binary_operation[name[h] + constant[2]]]]
variable[g] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], name[h]]]
variable[b] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], binary_operation[name[h] - constant[2]]]]
return[tuple[[<ast.Name object at 0x7da20e954d30>, <ast.Name object at 0x7da20e9555a0>, <ast.Name object at 0x7da20e955000>]]]
|
keyword[def] identifier[HslToRgb] ( identifier[h] , identifier[s] , identifier[l] ):
literal[string]
keyword[if] identifier[s] == literal[int] : keyword[return] ( identifier[l] , identifier[l] , identifier[l] )
keyword[if] identifier[l] < literal[int] : identifier[n2] = identifier[l] *( literal[int] + identifier[s] )
keyword[else] : identifier[n2] = identifier[l] + identifier[s] -( identifier[l] * identifier[s] )
identifier[n1] =( literal[int] * identifier[l] )- identifier[n2]
identifier[h] /= literal[int]
identifier[hueToRgb] = identifier[Color] . identifier[_HueToRgb]
identifier[r] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] + literal[int] )
identifier[g] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] )
identifier[b] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] - literal[int] )
keyword[return] ( identifier[r] , identifier[g] , identifier[b] )
|
def HslToRgb(h, s, l):
"""Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
"""
if s == 0:
return (l, l, l) # achromatic (gray) # depends on [control=['if'], data=[]]
if l < 0.5:
n2 = l * (1.0 + s) # depends on [control=['if'], data=['l']]
else:
n2 = l + s - l * s
n1 = 2.0 * l - n2
h /= 60.0
hueToRgb = Color._HueToRgb
r = hueToRgb(n1, n2, h + 2)
g = hueToRgb(n1, n2, h)
b = hueToRgb(n1, n2, h - 2)
return (r, g, b)
|
def m2o_to_m2m(cr, model, table, field, source_field):
    """Deprecated wrapper: recreate many2many relations from a former
    many2one column.

    Use rename_columns in your pre-migrate script to retain the column's
    old value, then call this in your post-migrate script.

    :param model: The target model registry object
    :param table: The source table
    :param field: The field name of the target model
    :param source_field: the many2one column on the source table.
    .. versionadded:: 7.0
    .. deprecated:: 8.0
       Use :func:`m2o_to_x2m` instead.
    """
    # Delegate unchanged to the generalized implementation.
    return m2o_to_x2m(cr, model, table, field, source_field)
|
def function[m2o_to_m2m, parameter[cr, model, table, field, source_field]]:
constant[
Recreate relations in many2many fields that were formerly
many2one fields. Use rename_columns in your pre-migrate
script to retain the column's old value, then call m2o_to_m2m
in your post-migrate script.
:param model: The target model registry object
:param table: The source table
:param field: The field name of the target model
:param source_field: the many2one column on the source table.
.. versionadded:: 7.0
.. deprecated:: 8.0
Use :func:`m2o_to_x2m` instead.
]
return[call[name[m2o_to_x2m], parameter[name[cr], name[model], name[table], name[field], name[source_field]]]]
|
keyword[def] identifier[m2o_to_m2m] ( identifier[cr] , identifier[model] , identifier[table] , identifier[field] , identifier[source_field] ):
literal[string]
keyword[return] identifier[m2o_to_x2m] ( identifier[cr] , identifier[model] , identifier[table] , identifier[field] , identifier[source_field] )
|
def m2o_to_m2m(cr, model, table, field, source_field):
"""
Recreate relations in many2many fields that were formerly
many2one fields. Use rename_columns in your pre-migrate
script to retain the column's old value, then call m2o_to_m2m
in your post-migrate script.
:param model: The target model registry object
:param table: The source table
:param field: The field name of the target model
:param source_field: the many2one column on the source table.
.. versionadded:: 7.0
.. deprecated:: 8.0
Use :func:`m2o_to_x2m` instead.
"""
return m2o_to_x2m(cr, model, table, field, source_field)
|
def convert_bytes(n):
    """
    Convert a size number to 'K', 'M', .etc
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Threshold per symbol: K = 2**10, M = 2**20, ... Y = 2**80.
    prefix = {sym: 1 << ((pos + 1) * 10) for pos, sym in enumerate(symbols)}
    # Walk from the largest unit down; first threshold that fits wins.
    for sym in reversed(symbols):
        threshold = prefix[sym]
        if n >= threshold:
            return '%.1f%s' % (float(n) / threshold, sym)
    return "%sB" % n
|
def function[convert_bytes, parameter[n]]:
constant[
Convert a size number to 'K', 'M', .etc
]
variable[symbols] assign[=] tuple[[<ast.Constant object at 0x7da2041db5e0>, <ast.Constant object at 0x7da2041db010>, <ast.Constant object at 0x7da2041d9510>, <ast.Constant object at 0x7da2041d97b0>, <ast.Constant object at 0x7da2041db700>, <ast.Constant object at 0x7da2041da4d0>, <ast.Constant object at 0x7da2041dab90>, <ast.Constant object at 0x7da2041dbee0>]]
variable[prefix] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2041db0d0>, <ast.Name object at 0x7da2041d96f0>]]] in starred[call[name[enumerate], parameter[name[symbols]]]] begin[:]
call[name[prefix]][name[s]] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[binary_operation[name[i] + constant[1]] * constant[10]]]
for taget[name[s]] in starred[call[name[reversed], parameter[name[symbols]]]] begin[:]
if compare[name[n] greater_or_equal[>=] call[name[prefix]][name[s]]] begin[:]
variable[value] assign[=] binary_operation[call[name[float], parameter[name[n]]] / call[name[prefix]][name[s]]]
return[binary_operation[constant[%.1f%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2041dad70>, <ast.Name object at 0x7da2041daf20>]]]]
return[binary_operation[constant[%sB] <ast.Mod object at 0x7da2590d6920> name[n]]]
|
keyword[def] identifier[convert_bytes] ( identifier[n] ):
literal[string]
identifier[symbols] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
identifier[prefix] ={}
keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[symbols] ):
identifier[prefix] [ identifier[s] ]= literal[int] <<( identifier[i] + literal[int] )* literal[int]
keyword[for] identifier[s] keyword[in] identifier[reversed] ( identifier[symbols] ):
keyword[if] identifier[n] >= identifier[prefix] [ identifier[s] ]:
identifier[value] = identifier[float] ( identifier[n] )/ identifier[prefix] [ identifier[s] ]
keyword[return] literal[string] %( identifier[value] , identifier[s] )
keyword[return] literal[string] % identifier[n]
|
def convert_bytes(n):
"""
Convert a size number to 'K', 'M', .etc
"""
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for (i, s) in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10 # depends on [control=['for'], data=[]]
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s) # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['s']]
return '%sB' % n
|
def get_child(self, streamId, childId, options=None):
    """Get the child of a stream.

    :param streamId: id of the parent stream.
    :param childId: id of the child to fetch.
    :param options: optional request-options dict (defaults to empty).
    """
    # Avoid the mutable-default-argument pitfall: a fresh dict per call
    # instead of one shared {} instance. Backward compatible for callers.
    if options is None:
        options = {}
    return self.get('stream/' + streamId + '/children/' + childId, options)
|
def function[get_child, parameter[self, streamId, childId, options]]:
constant[Get the child of a stream.]
return[call[name[self].get, parameter[binary_operation[binary_operation[binary_operation[constant[stream/] + name[streamId]] + constant[/children/]] + name[childId]], name[options]]]]
|
keyword[def] identifier[get_child] ( identifier[self] , identifier[streamId] , identifier[childId] , identifier[options] ={}):
literal[string]
keyword[return] identifier[self] . identifier[get] ( literal[string] + identifier[streamId] + literal[string] + identifier[childId] , identifier[options] )
|
def get_child(self, streamId, childId, options={}):
"""Get the child of a stream."""
return self.get('stream/' + streamId + '/children/' + childId, options)
|
def rate(self):
    """Get the sample rate in Hz.
    Returns
    ---------
    rate : float
        The sample rate, in Hz, calculated from the timestamps
    """
    sample_count = len(self.timestamps)
    # Elapsed time between the first and last recorded timestamps.
    elapsed = self.timestamps[-1] - self.timestamps[0]
    return 1.0 * sample_count / elapsed
|
def function[rate, parameter[self]]:
constant[Get the sample rate in Hz.
Returns
---------
rate : float
The sample rate, in Hz, calculated from the timestamps
]
variable[N] assign[=] call[name[len], parameter[name[self].timestamps]]
variable[t] assign[=] binary_operation[call[name[self].timestamps][<ast.UnaryOp object at 0x7da18f7203d0>] - call[name[self].timestamps][constant[0]]]
variable[rate] assign[=] binary_operation[binary_operation[constant[1.0] * name[N]] / name[t]]
return[name[rate]]
|
keyword[def] identifier[rate] ( identifier[self] ):
literal[string]
identifier[N] = identifier[len] ( identifier[self] . identifier[timestamps] )
identifier[t] = identifier[self] . identifier[timestamps] [- literal[int] ]- identifier[self] . identifier[timestamps] [ literal[int] ]
identifier[rate] = literal[int] * identifier[N] / identifier[t]
keyword[return] identifier[rate]
|
def rate(self):
"""Get the sample rate in Hz.
Returns
---------
rate : float
The sample rate, in Hz, calculated from the timestamps
"""
N = len(self.timestamps)
t = self.timestamps[-1] - self.timestamps[0]
rate = 1.0 * N / t
return rate
|
def variance(numbers, type='population'):
    """
    Calculates the population or sample variance of a list of numbers.
    A large number means the results are all over the place, while a
    small number means the results are comparatively close to the average.
    Args:
        numbers: a list of integers or floating point numbers to compare.
        type: string, 'population' or 'sample', the kind of variance to be computed.
    Returns:
        The computed population or sample variance.
        Defaults to population variance.
    Requires:
        The math module, average()
    """
    mean = average(numbers)
    # Sum of squared deviations from the mean.
    total = sum((mean - value) ** 2 for value in numbers)
    # Population divides by N; sample applies Bessel's correction (N - 1).
    divisor = len(numbers) if type == 'population' else len(numbers) - 1
    return total / divisor
|
def function[variance, parameter[numbers, type]]:
constant[
Calculates the population or sample variance of a list of numbers.
A large number means the results are all over the place, while a
small number means the results are comparatively close to the average.
Args:
numbers: a list of integers or floating point numbers to compare.
type: string, 'population' or 'sample', the kind of variance to be computed.
Returns:
The computed population or sample variance.
Defaults to population variance.
Requires:
The math module, average()
]
variable[mean] assign[=] call[name[average], parameter[name[numbers]]]
variable[variance] assign[=] constant[0]
for taget[name[number]] in starred[name[numbers]] begin[:]
<ast.AugAssign object at 0x7da20e9b1fc0>
if compare[name[type] equal[==] constant[population]] begin[:]
return[binary_operation[name[variance] / call[name[len], parameter[name[numbers]]]]]
|
keyword[def] identifier[variance] ( identifier[numbers] , identifier[type] = literal[string] ):
literal[string]
identifier[mean] = identifier[average] ( identifier[numbers] )
identifier[variance] = literal[int]
keyword[for] identifier[number] keyword[in] identifier[numbers] :
identifier[variance] +=( identifier[mean] - identifier[number] )** literal[int]
keyword[if] identifier[type] == literal[string] :
keyword[return] identifier[variance] / identifier[len] ( identifier[numbers] )
keyword[else] :
keyword[return] identifier[variance] /( identifier[len] ( identifier[numbers] )- literal[int] )
|
def variance(numbers, type='population'):
"""
Calculates the population or sample variance of a list of numbers.
A large number means the results are all over the place, while a
small number means the results are comparatively close to the average.
Args:
numbers: a list of integers or floating point numbers to compare.
type: string, 'population' or 'sample', the kind of variance to be computed.
Returns:
The computed population or sample variance.
Defaults to population variance.
Requires:
The math module, average()
"""
mean = average(numbers)
variance = 0
for number in numbers:
variance += (mean - number) ** 2 # depends on [control=['for'], data=['number']]
if type == 'population':
return variance / len(numbers) # depends on [control=['if'], data=[]]
else:
return variance / (len(numbers) - 1)
|
def set_frustum(self, l: float, r: float, b: float, t: float, n: float, f: float) -> None:
    """Set the frustum

    Builds a perspective projection matrix from the six clipping planes
    and stores it on this object, replacing any previous matrix.

    Parameters
    ----------
    l : float
        Left.
    r : float
        Right.
    b : float
        Bottom.
    t : float
        Top.
    n : float
        Near.
    f : float
        Far.
    """
    # Delegate matrix construction to the transforms helper.
    self.matrix = transforms.frustum(l, r, b, t, n, f)
|
def function[set_frustum, parameter[self, l, r, b, t, n, f]]:
constant[Set the frustum
Parameters
----------
l : float
Left.
r : float
Right.
b : float
Bottom.
t : float
Top.
n : float
Near.
f : float
Far.
]
name[self].matrix assign[=] call[name[transforms].frustum, parameter[name[l], name[r], name[b], name[t], name[n], name[f]]]
|
keyword[def] identifier[set_frustum] ( identifier[self] , identifier[l] , identifier[r] , identifier[b] , identifier[t] , identifier[n] , identifier[f] ):
literal[string]
identifier[self] . identifier[matrix] = identifier[transforms] . identifier[frustum] ( identifier[l] , identifier[r] , identifier[b] , identifier[t] , identifier[n] , identifier[f] )
|
def set_frustum(self, l, r, b, t, n, f):
"""Set the frustum
Parameters
----------
l : float
Left.
r : float
Right.
b : float
Bottom.
t : float
Top.
n : float
Near.
f : float
Far.
"""
self.matrix = transforms.frustum(l, r, b, t, n, f)
|
def serialize_with_sampled_logs(self, logs_limit=-1):
    """serialize a result with up to `logs_limit` logs.

    If `logs_limit` is -1, this function will return a result with all its
    logs.
    """
    # Build the payload field by field, preserving the key order of the
    # original dict literal (callers may serialize this to JSON).
    result = {}
    result['id'] = self.id
    result['pathName'] = self.path_name
    result['name'] = self.name
    result['isUnregistered'] = self.is_unregistered
    result['logs'] = [entry.serialize for entry in self.sampled_logs(logs_limit)]
    result['args'] = self.args.serialize if self.args is not None else []
    result['commands'] = [command.serialize for command in self.commands]
    result['snapshots'] = [snapshot.serialize for snapshot in self.snapshots]
    result['logModifiedAt'] = self.log_modified_at.isoformat()
    return result
|
def function[serialize_with_sampled_logs, parameter[self, logs_limit]]:
constant[serialize a result with up to `logs_limit` logs.
If `logs_limit` is -1, this function will return a result with all its
logs.
]
return[dictionary[[<ast.Constant object at 0x7da18bc734c0>, <ast.Constant object at 0x7da18bc731f0>, <ast.Constant object at 0x7da18bc70490>, <ast.Constant object at 0x7da18bc72440>, <ast.Constant object at 0x7da18bc719c0>, <ast.Constant object at 0x7da18bc706d0>, <ast.Constant object at 0x7da18bc70b80>, <ast.Constant object at 0x7da18bc72bc0>, <ast.Constant object at 0x7da18bc72e00>], [<ast.Attribute object at 0x7da18bc72230>, <ast.Attribute object at 0x7da18bc73730>, <ast.Attribute object at 0x7da18bc70190>, <ast.Attribute object at 0x7da18bc73700>, <ast.ListComp object at 0x7da18bc73490>, <ast.IfExp object at 0x7da18bc70790>, <ast.ListComp object at 0x7da1b0dbe4d0>, <ast.ListComp object at 0x7da1b0dbe2f0>, <ast.Call object at 0x7da1b0dbf310>]]]
|
keyword[def] identifier[serialize_with_sampled_logs] ( identifier[self] , identifier[logs_limit] =- literal[int] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[id] ,
literal[string] : identifier[self] . identifier[path_name] ,
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[self] . identifier[is_unregistered] ,
literal[string] :[ identifier[log] . identifier[serialize] keyword[for] identifier[log] keyword[in] identifier[self] . identifier[sampled_logs] ( identifier[logs_limit] )],
literal[string] : identifier[self] . identifier[args] . identifier[serialize] keyword[if] identifier[self] . identifier[args] keyword[is] keyword[not] keyword[None] keyword[else] [],
literal[string] :[ identifier[cmd] . identifier[serialize] keyword[for] identifier[cmd] keyword[in] identifier[self] . identifier[commands] ],
literal[string] :[ identifier[cmd] . identifier[serialize] keyword[for] identifier[cmd] keyword[in] identifier[self] . identifier[snapshots] ],
literal[string] : identifier[self] . identifier[log_modified_at] . identifier[isoformat] ()
}
|
def serialize_with_sampled_logs(self, logs_limit=-1):
"""serialize a result with up to `logs_limit` logs.
If `logs_limit` is -1, this function will return a result with all its
logs.
"""
return {'id': self.id, 'pathName': self.path_name, 'name': self.name, 'isUnregistered': self.is_unregistered, 'logs': [log.serialize for log in self.sampled_logs(logs_limit)], 'args': self.args.serialize if self.args is not None else [], 'commands': [cmd.serialize for cmd in self.commands], 'snapshots': [cmd.serialize for cmd in self.snapshots], 'logModifiedAt': self.log_modified_at.isoformat()}
|
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the api.
    """
    # Each raw result dict is expanded into keyword arguments for the
    # resource constructor and yielded lazily.
    for record in self.query.results():
        yield self.resource(**record)
|
def function[iterator, parameter[self]]:
constant[
An iterator over the results from applying this QuerySet to the api.
]
for taget[name[item]] in starred[call[name[self].query.results, parameter[]]] begin[:]
variable[obj] assign[=] call[name[self].resource, parameter[]]
<ast.Yield object at 0x7da18dc988e0>
|
keyword[def] identifier[iterator] ( identifier[self] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[query] . identifier[results] ():
identifier[obj] = identifier[self] . identifier[resource] (** identifier[item] )
keyword[yield] identifier[obj]
|
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj # depends on [control=['for'], data=['item']]
|
def raise_if_error(frame):
    """
    Checks a frame and raises the relevant exception if required.
    """
    # A missing status field, or the explicit success byte, means no error.
    if "status" not in frame:
        return
    status = frame["status"]
    if status == b"\x00":
        return
    error_map = {
        b"\x01": exceptions.ZigBeeUnknownError,
        b"\x02": exceptions.ZigBeeInvalidCommand,
        b"\x03": exceptions.ZigBeeInvalidParameter,
        b"\x04": exceptions.ZigBeeTxFailure,
    }
    exc_class = error_map.get(status)
    if exc_class is not None:
        raise exc_class()
    # Any other non-zero status byte is an unrecognised error code.
    raise exceptions.ZigBeeUnknownStatus()
|
def function[raise_if_error, parameter[frame]]:
constant[
Checks a frame and raises the relevant exception if required.
]
if <ast.BoolOp object at 0x7da1b23d25f0> begin[:]
return[None]
variable[codes_and_exceptions] assign[=] dictionary[[<ast.Constant object at 0x7da1b23d2b90>, <ast.Constant object at 0x7da1b23d2b60>, <ast.Constant object at 0x7da1b23d2b30>, <ast.Constant object at 0x7da1b23d2bc0>], [<ast.Attribute object at 0x7da1b23d0eb0>, <ast.Attribute object at 0x7da1b23d0e50>, <ast.Attribute object at 0x7da1b23d2500>, <ast.Attribute object at 0x7da1b23d2680>]]
if compare[call[name[frame]][constant[status]] in name[codes_and_exceptions]] begin[:]
<ast.Raise object at 0x7da1b23d2890>
<ast.Raise object at 0x7da1b23d2080>
|
keyword[def] identifier[raise_if_error] ( identifier[frame] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[frame] keyword[or] identifier[frame] [ literal[string] ]== literal[string] :
keyword[return]
identifier[codes_and_exceptions] ={
literal[string] : identifier[exceptions] . identifier[ZigBeeUnknownError] ,
literal[string] : identifier[exceptions] . identifier[ZigBeeInvalidCommand] ,
literal[string] : identifier[exceptions] . identifier[ZigBeeInvalidParameter] ,
literal[string] : identifier[exceptions] . identifier[ZigBeeTxFailure]
}
keyword[if] identifier[frame] [ literal[string] ] keyword[in] identifier[codes_and_exceptions] :
keyword[raise] identifier[codes_and_exceptions] [ identifier[frame] [ literal[string] ]]()
keyword[raise] identifier[exceptions] . identifier[ZigBeeUnknownStatus] ()
|
def raise_if_error(frame):
"""
Checks a frame and raises the relevant exception if required.
"""
if 'status' not in frame or frame['status'] == b'\x00':
return # depends on [control=['if'], data=[]]
codes_and_exceptions = {b'\x01': exceptions.ZigBeeUnknownError, b'\x02': exceptions.ZigBeeInvalidCommand, b'\x03': exceptions.ZigBeeInvalidParameter, b'\x04': exceptions.ZigBeeTxFailure}
if frame['status'] in codes_and_exceptions:
raise codes_and_exceptions[frame['status']]() # depends on [control=['if'], data=['codes_and_exceptions']]
raise exceptions.ZigBeeUnknownStatus()
|
def _make_request(self, opener, request, timeout=None):
"""Make the API call and return the response. This is separated into
it's own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
"""
timeout = timeout or self.timeout
try:
return opener.open(request, timeout=timeout)
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc
|
def function[_make_request, parameter[self, opener, request, timeout]]:
constant[Make the API call and return the response. This is separated into
it's own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
]
variable[timeout] assign[=] <ast.BoolOp object at 0x7da2047e97b0>
<ast.Try object at 0x7da2047e89a0>
|
keyword[def] identifier[_make_request] ( identifier[self] , identifier[opener] , identifier[request] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[timeout] = identifier[timeout] keyword[or] identifier[self] . identifier[timeout]
keyword[try] :
keyword[return] identifier[opener] . identifier[open] ( identifier[request] , identifier[timeout] = identifier[timeout] )
keyword[except] identifier[HTTPError] keyword[as] identifier[err] :
identifier[exc] = identifier[handle_error] ( identifier[err] )
identifier[exc] . identifier[__cause__] = keyword[None]
keyword[raise] identifier[exc]
|
def _make_request(self, opener, request, timeout=None):
"""Make the API call and return the response. This is separated into
it's own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
"""
timeout = timeout or self.timeout
try:
return opener.open(request, timeout=timeout) # depends on [control=['try'], data=[]]
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc # depends on [control=['except'], data=['err']]
|
def save(self):
    """
    Save the project configuration

    This method dumps the configuration for each project and the project
    paths (see the :attr:`all_projects` attribute) to the hard drive.

    For every project whose configuration is loaded (a dict), the config
    is written to ``<root>/.project/.project.yml`` (backing up any
    existing file to ``.project.yml~``); for projects whose config is not
    loaded, the previously known path is kept. Finally the mapping of
    project names to paths is written to :attr:`all_projects`.
    """
    project_paths = OrderedDict()
    for project, d in OrderedDict(self).items():
        if isinstance(d, dict):
            project_path = d['root']
            fname = osp.join(project_path, '.project', '.project.yml')
            if not osp.exists(osp.dirname(fname)):
                os.makedirs(osp.dirname(fname))
            # Keep a backup of the previous configuration file.
            if osp.exists(fname):
                os.rename(fname, fname + '~')
            # Work on a deep copy so relativizing paths does not mutate
            # the in-memory configuration.
            d = self.rel_paths(copy.deepcopy(d))
            safe_dump(d, fname, default_flow_style=False)
            project_paths[project] = project_path
        else:
            # BUG FIX: the original rebound the whole ``project_paths``
            # accumulator to a single path here
            # (``project_paths = self.project_paths[project]``),
            # discarding every entry collected so far and breaking the
            # item assignment on the next loaded project. Record the
            # known path for this unloaded project instead.
            project_paths[project] = self.project_paths[project]
    self.project_paths = project_paths
    safe_dump(project_paths, self.all_projects, default_flow_style=False)
|
def function[save, parameter[self]]:
constant[
Save the project configuration
This method dumps the configuration for each project and the project
paths (see the :attr:`all_projects` attribute) to the hard drive
]
variable[project_paths] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9552a0>, <ast.Name object at 0x7da20e957b80>]]] in starred[call[call[name[OrderedDict], parameter[name[self]]].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[d], name[dict]]] begin[:]
variable[project_path] assign[=] call[name[d]][constant[root]]
variable[fname] assign[=] call[name[osp].join, parameter[name[project_path], constant[.project], constant[.project.yml]]]
if <ast.UnaryOp object at 0x7da20e955360> begin[:]
call[name[os].makedirs, parameter[call[name[osp].dirname, parameter[name[fname]]]]]
if call[name[osp].exists, parameter[name[fname]]] begin[:]
call[name[os].rename, parameter[name[fname], binary_operation[name[fname] + constant[~]]]]
variable[d] assign[=] call[name[self].rel_paths, parameter[call[name[copy].deepcopy, parameter[name[d]]]]]
call[name[safe_dump], parameter[name[d], name[fname]]]
call[name[project_paths]][name[project]] assign[=] name[project_path]
name[self].project_paths assign[=] name[project_paths]
call[name[safe_dump], parameter[name[project_paths], name[self].all_projects]]
|
keyword[def] identifier[save] ( identifier[self] ):
literal[string]
identifier[project_paths] = identifier[OrderedDict] ()
keyword[for] identifier[project] , identifier[d] keyword[in] identifier[OrderedDict] ( identifier[self] ). identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[d] , identifier[dict] ):
identifier[project_path] = identifier[d] [ literal[string] ]
identifier[fname] = identifier[osp] . identifier[join] ( identifier[project_path] , literal[string] , literal[string] )
keyword[if] keyword[not] identifier[osp] . identifier[exists] ( identifier[osp] . identifier[dirname] ( identifier[fname] )):
identifier[os] . identifier[makedirs] ( identifier[osp] . identifier[dirname] ( identifier[fname] ))
keyword[if] identifier[osp] . identifier[exists] ( identifier[fname] ):
identifier[os] . identifier[rename] ( identifier[fname] , identifier[fname] + literal[string] )
identifier[d] = identifier[self] . identifier[rel_paths] ( identifier[copy] . identifier[deepcopy] ( identifier[d] ))
identifier[safe_dump] ( identifier[d] , identifier[fname] , identifier[default_flow_style] = keyword[False] )
identifier[project_paths] [ identifier[project] ]= identifier[project_path]
keyword[else] :
identifier[project_paths] = identifier[self] . identifier[project_paths] [ identifier[project] ]
identifier[self] . identifier[project_paths] = identifier[project_paths]
identifier[safe_dump] ( identifier[project_paths] , identifier[self] . identifier[all_projects] , identifier[default_flow_style] = keyword[False] )
|
def save(self):
"""
Save the project configuration
This method dumps the configuration for each project and the project
paths (see the :attr:`all_projects` attribute) to the hard drive
"""
project_paths = OrderedDict()
for (project, d) in OrderedDict(self).items():
if isinstance(d, dict):
project_path = d['root']
fname = osp.join(project_path, '.project', '.project.yml')
if not osp.exists(osp.dirname(fname)):
os.makedirs(osp.dirname(fname)) # depends on [control=['if'], data=[]]
if osp.exists(fname):
os.rename(fname, fname + '~') # depends on [control=['if'], data=[]]
d = self.rel_paths(copy.deepcopy(d))
safe_dump(d, fname, default_flow_style=False)
project_paths[project] = project_path # depends on [control=['if'], data=[]]
else:
project_paths = self.project_paths[project] # depends on [control=['for'], data=[]]
self.project_paths = project_paths
safe_dump(project_paths, self.all_projects, default_flow_style=False)
|
def add_target(self, target_text, name='', drop_behavior_type=None):
    """Append a new target entry to this form's map and return it.

    ``target_text`` must be a DisplayText; ``name`` may be a plain string
    (the default ``''``) and is coerced to DisplayText if necessary.
    """
    if not isinstance(target_text, DisplayText):
        raise InvalidArgument('target_text is not a DisplayText object')
    if not isinstance(name, DisplayText):
        # Covers the default '' and any other plain-string name.
        name = self._str_display_text(name)
    new_target = {
        'id': str(ObjectId()),
        'texts': [self._dict_display_text(target_text)],
        'names': [self._dict_display_text(name)],
        'dropBehaviorType': drop_behavior_type,
    }
    targets = self.my_osid_object_form._my_map['targets']
    targets.append(new_target)
    return new_target
|
def function[add_target, parameter[self, target_text, name, drop_behavior_type]]:
constant[stub]
if <ast.UnaryOp object at 0x7da1b0a22530> begin[:]
<ast.Raise object at 0x7da1b0a210f0>
if <ast.UnaryOp object at 0x7da1b0a220e0> begin[:]
variable[name] assign[=] call[name[self]._str_display_text, parameter[name[name]]]
variable[target] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a21c60>, <ast.Constant object at 0x7da1b0a20ee0>, <ast.Constant object at 0x7da1b0a21ea0>, <ast.Constant object at 0x7da1b0a22b30>], [<ast.Call object at 0x7da1b0a21390>, <ast.List object at 0x7da1b0a21a20>, <ast.List object at 0x7da1b0a23550>, <ast.Name object at 0x7da1b0a23f70>]]
call[call[name[self].my_osid_object_form._my_map][constant[targets]].append, parameter[name[target]]]
return[name[target]]
|
keyword[def] identifier[add_target] ( identifier[self] , identifier[target_text] , identifier[name] = literal[string] , identifier[drop_behavior_type] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[target_text] , identifier[DisplayText] ):
keyword[raise] identifier[InvalidArgument] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[DisplayText] ):
identifier[name] = identifier[self] . identifier[_str_display_text] ( identifier[name] )
identifier[target] ={
literal[string] : identifier[str] ( identifier[ObjectId] ()),
literal[string] :[ identifier[self] . identifier[_dict_display_text] ( identifier[target_text] )],
literal[string] :[ identifier[self] . identifier[_dict_display_text] ( identifier[name] )],
literal[string] : identifier[drop_behavior_type]
}
identifier[self] . identifier[my_osid_object_form] . identifier[_my_map] [ literal[string] ]. identifier[append] ( identifier[target] )
keyword[return] identifier[target]
|
def add_target(self, target_text, name='', drop_behavior_type=None):
"""stub"""
if not isinstance(target_text, DisplayText):
raise InvalidArgument('target_text is not a DisplayText object') # depends on [control=['if'], data=[]]
if not isinstance(name, DisplayText):
# if default ''
name = self._str_display_text(name) # depends on [control=['if'], data=[]]
target = {'id': str(ObjectId()), 'texts': [self._dict_display_text(target_text)], 'names': [self._dict_display_text(name)], 'dropBehaviorType': drop_behavior_type}
self.my_osid_object_form._my_map['targets'].append(target)
return target
|
def exec_(self, columns=(), by=(), where=(), **kwds):
    """exec from self

    >>> t = q('([]a:1 2 3; b:10 20 30)')
    >>> t.exec_('a', where='b > 10').show()
    2 3
    """
    # Delegate to the shared select/exec/update helper, passing the q
    # keyword 'exec'; extra keyword arguments are forwarded untouched.
    return self._seu('exec', columns, by, where, kwds)
|
def function[exec_, parameter[self, columns, by, where]]:
constant[exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
]
return[call[name[self]._seu, parameter[constant[exec], name[columns], name[by], name[where], name[kwds]]]]
|
keyword[def] identifier[exec_] ( identifier[self] , identifier[columns] =(), identifier[by] =(), identifier[where] =(),** identifier[kwds] ):
literal[string]
keyword[return] identifier[self] . identifier[_seu] ( literal[string] , identifier[columns] , identifier[by] , identifier[where] , identifier[kwds] )
|
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
|
def _context_data(data, request=None):
    """
    Add admin global context, for compatibility with Django 1.7

    Merges the admin site's global template context (Django >= 1.8's
    ``AdminSite.each_context``) with ``data``; entries from ``data`` take
    precedence. On Django < 1.8, where ``each_context`` does not exist,
    ``data`` is returned unchanged.
    """
    try:
        context = dict(site.each_context(request))
    except AttributeError:
        # Django < 1.8: AdminSite has no ``each_context``.
        return data
    # ``data`` overrides the admin defaults, matching the precedence of
    # the original ``dict(ctx.items() + data.items())`` -- which was also
    # Python-2-only, since dict views do not support ``+`` on Python 3
    # (that TypeError was not caught by the AttributeError handler).
    context.update(data)
    return context
|
def function[_context_data, parameter[data, request]]:
constant[
Add admin global context, for compatibility with Django 1.7
]
<ast.Try object at 0x7da1b021f340>
|
keyword[def] identifier[_context_data] ( identifier[data] , identifier[request] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[dict] ( identifier[site] . identifier[each_context] ( identifier[request] ). identifier[items] ()+ identifier[data] . identifier[items] ())
keyword[except] identifier[AttributeError] :
keyword[return] identifier[data]
|
def _context_data(data, request=None):
"""
Add admin global context, for compatibility with Django 1.7
"""
try:
return dict(site.each_context(request).items() + data.items()) # depends on [control=['try'], data=[]]
except AttributeError:
return data # depends on [control=['except'], data=[]]
|
def list_sections(self):
    """List all sections."""
    base = self._full_base
    if not os.path.exists(base):
        return []
    # A section is any immediate subdirectory of the base directory;
    # ordering follows os.listdir, as before.
    entries = os.listdir(base)
    return [entry for entry in entries
            if os.path.isdir(os.path.join(base, entry))]
|
def function[list_sections, parameter[self]]:
constant[List all sections.]
if <ast.UnaryOp object at 0x7da1b0948130> begin[:]
return[list[[]]]
return[<ast.ListComp object at 0x7da1b09487c0>]
|
keyword[def] identifier[list_sections] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[_full_base] ):
keyword[return] []
keyword[return] [
identifier[name] keyword[for] identifier[name] keyword[in] identifier[os] . identifier[listdir] ( identifier[self] . identifier[_full_base] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_full_base] , identifier[name] ))
]
|
def list_sections(self):
"""List all sections."""
if not os.path.exists(self._full_base):
return [] # depends on [control=['if'], data=[]]
return [name for name in os.listdir(self._full_base) if os.path.isdir(os.path.join(self._full_base, name))]
|
def main() -> None:
    """
    Command-line processor. See ``--help`` for details.
    """
    # Build the CLI. Argument order matters for --help output, so it is
    # kept identical to the original.
    parser = ArgumentParser(description="Remove duplicate files")
    parser.add_argument(
        "directory", nargs="+",
        help="Files and/or directories to check and remove duplicates from.")
    parser.add_argument(
        "--recursive", action="store_true",
        help="Recurse through any directories found")
    parser.add_argument(
        "--dummy_run", action="store_true",
        help="Dummy run only; don't actually delete anything")
    parser.add_argument(
        "--run_repeatedly", type=int,
        help=(
            "Run the tool repeatedly with a pause of <run_repeatedly> "
            "seconds between runs. (For this to work well,"
            "you should specify one or more DIRECTORIES in "
            "the 'filename' arguments, not files, and you will need the "
            "--recursive option.)"))
    parser.add_argument(
        "--verbose", action="store_true",
        help="Verbose output")
    args = parser.parse_args()

    log_level = logging.DEBUG if args.verbose else logging.INFO
    main_only_quicksetup_rootlogger(level=log_level)

    pause = args.run_repeatedly
    while True:
        deduplicate(args.directory,
                    recursive=args.recursive,
                    dummy_run=args.dummy_run)
        # Without --run_repeatedly, run exactly once.
        if pause is None:
            break
        log.info("Sleeping for {} s...", pause)
        sleep(pause)
|
def function[main, parameter[]]:
constant[
Command-line processor. See ``--help`` for details.
]
variable[parser] assign[=] call[name[ArgumentParser], parameter[]]
call[name[parser].add_argument, parameter[constant[directory]]]
call[name[parser].add_argument, parameter[constant[--recursive]]]
call[name[parser].add_argument, parameter[constant[--dummy_run]]]
call[name[parser].add_argument, parameter[constant[--run_repeatedly]]]
call[name[parser].add_argument, parameter[constant[--verbose]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
call[name[main_only_quicksetup_rootlogger], parameter[]]
while constant[True] begin[:]
call[name[deduplicate], parameter[name[args].directory]]
if compare[name[args].run_repeatedly is constant[None]] begin[:]
break
call[name[log].info, parameter[constant[Sleeping for {} s...], name[args].run_repeatedly]]
call[name[sleep], parameter[name[args].run_repeatedly]]
|
keyword[def] identifier[main] ()-> keyword[None] :
literal[string]
identifier[parser] = identifier[ArgumentParser] (
identifier[description] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[int] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[main_only_quicksetup_rootlogger] (
identifier[level] = identifier[logging] . identifier[DEBUG] keyword[if] identifier[args] . identifier[verbose] keyword[else] identifier[logging] . identifier[INFO] )
keyword[while] keyword[True] :
identifier[deduplicate] ( identifier[args] . identifier[directory] ,
identifier[recursive] = identifier[args] . identifier[recursive] ,
identifier[dummy_run] = identifier[args] . identifier[dummy_run] )
keyword[if] identifier[args] . identifier[run_repeatedly] keyword[is] keyword[None] :
keyword[break]
identifier[log] . identifier[info] ( literal[string] , identifier[args] . identifier[run_repeatedly] )
identifier[sleep] ( identifier[args] . identifier[run_repeatedly] )
|
def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
parser = ArgumentParser(description='Remove duplicate files')
parser.add_argument('directory', nargs='+', help='Files and/or directories to check and remove duplicates from.')
parser.add_argument('--recursive', action='store_true', help='Recurse through any directories found')
parser.add_argument('--dummy_run', action='store_true', help="Dummy run only; don't actually delete anything")
parser.add_argument('--run_repeatedly', type=int, help="Run the tool repeatedly with a pause of <run_repeatedly> seconds between runs. (For this to work well,you should specify one or more DIRECTORIES in the 'filename' arguments, not files, and you will need the --recursive option.)")
parser.add_argument('--verbose', action='store_true', help='Verbose output')
args = parser.parse_args()
main_only_quicksetup_rootlogger(level=logging.DEBUG if args.verbose else logging.INFO)
while True:
deduplicate(args.directory, recursive=args.recursive, dummy_run=args.dummy_run)
if args.run_repeatedly is None:
break # depends on [control=['if'], data=[]]
log.info('Sleeping for {} s...', args.run_repeatedly)
sleep(args.run_repeatedly) # depends on [control=['while'], data=[]]
|
def _remove_buffers(state):
    """Return (state_without_buffers, buffer_paths, buffers) for binary message parts

    A binary message part is a memoryview, bytearray, or python 3 bytes object.

    As an example:
    >>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
    >>> _remove_buffers(state)
    ({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
     [<memory at 0x107ffec48>, <memory at 0x107ffed08>])
    """
    # _separate_buffers fills these two lists in place while returning a
    # copy of ``state`` with the binary parts removed.
    buffer_paths = []
    buffers = []
    stripped_state = _separate_buffers(state, [], buffer_paths, buffers)
    return stripped_state, buffer_paths, buffers
|
def function[_remove_buffers, parameter[state]]:
constant[Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
]
<ast.Tuple object at 0x7da1b1a67f70> assign[=] tuple[[<ast.List object at 0x7da1b1a67df0>, <ast.List object at 0x7da1b1a66140>]]
variable[state] assign[=] call[name[_separate_buffers], parameter[name[state], list[[]], name[buffer_paths], name[buffers]]]
return[tuple[[<ast.Name object at 0x7da1b1a66050>, <ast.Name object at 0x7da1b1a65ed0>, <ast.Name object at 0x7da1b1a65d80>]]]
|
keyword[def] identifier[_remove_buffers] ( identifier[state] ):
literal[string]
identifier[buffer_paths] , identifier[buffers] =[],[]
identifier[state] = identifier[_separate_buffers] ( identifier[state] ,[], identifier[buffer_paths] , identifier[buffers] )
keyword[return] identifier[state] , identifier[buffer_paths] , identifier[buffers]
|
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
(buffer_paths, buffers) = ([], [])
state = _separate_buffers(state, [], buffer_paths, buffers)
return (state, buffer_paths, buffers)
|
def ed(s1, s2):
    """edit distance

    >>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a')
    (0, 0, 1, 1, 1)

    This implementation takes only O(min(|s1|,|s2|)) space.
    """
    # Arrange for the inner loop (and the DP row) to run over the
    # shorter string, so the row has min(|s1|,|s2|)+1 entries.
    long_s, short_s = s1, s2
    if len(long_s) < len(short_s):
        long_s, short_s = short_s, long_s
    width = len(short_s)
    row = list(range(width + 1))
    for i, c_long in enumerate(long_s):
        diag = i          # value of the previous row at column j
        row[0] = i + 1
        for j, c_short in enumerate(short_s):
            cost = 0 if c_long == c_short else 1
            # substitute / insert / delete, respectively
            diag, row[j + 1] = row[j + 1], min(
                diag + cost, row[j] + 1, row[j + 1] + 1)
    return row[width]
|
def function[ed, parameter[s1, s2]]:
constant[edit distance
>>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a')
(0, 0, 1, 1, 1)
This implementation takes only O(min(|s1|,|s2|)) space.
]
<ast.Tuple object at 0x7da20e956d40> assign[=] tuple[[<ast.Call object at 0x7da20e955e10>, <ast.Call object at 0x7da20e955c00>]]
if compare[name[m] less[<] name[n]] begin[:]
<ast.Tuple object at 0x7da20e957c10> assign[=] tuple[[<ast.Name object at 0x7da20e955420>, <ast.Name object at 0x7da20e957ac0>]]
<ast.Tuple object at 0x7da20e954220> assign[=] tuple[[<ast.Name object at 0x7da20e954550>, <ast.Name object at 0x7da20e9572b0>]]
variable[d] assign[=] call[name[list], parameter[call[name[range], parameter[binary_operation[name[n] + constant[1]]]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[m]]]] begin[:]
variable[p] assign[=] name[i]
call[name[d]][constant[0]] assign[=] binary_operation[name[i] + constant[1]]
for taget[name[j]] in starred[call[name[range], parameter[name[n]]]] begin[:]
variable[t] assign[=] <ast.IfExp object at 0x7da20e956620>
<ast.Tuple object at 0x7da20e957400> assign[=] tuple[[<ast.Subscript object at 0x7da20e954c70>, <ast.Call object at 0x7da20e957d90>]]
return[call[name[d]][name[n]]]
|
keyword[def] identifier[ed] ( identifier[s1] , identifier[s2] ):
literal[string]
identifier[m] , identifier[n] = identifier[len] ( identifier[s1] ), identifier[len] ( identifier[s2] )
keyword[if] identifier[m] < identifier[n] :
identifier[m] , identifier[n] = identifier[n] , identifier[m]
identifier[s1] , identifier[s2] = identifier[s2] , identifier[s1]
identifier[d] = identifier[list] ( identifier[range] ( identifier[n] + literal[int] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[m] ):
identifier[p] = identifier[i]
identifier[d] [ literal[int] ]= identifier[i] + literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n] ):
identifier[t] = literal[int] keyword[if] identifier[s1] [ identifier[i] ]== identifier[s2] [ identifier[j] ] keyword[else] literal[int]
identifier[p] , identifier[d] [ identifier[j] + literal[int] ]= identifier[d] [ identifier[j] + literal[int] ], identifier[min] ( identifier[p] + identifier[t] , identifier[d] [ identifier[j] ]+ literal[int] , identifier[d] [ identifier[j] + literal[int] ]+ literal[int] )
keyword[return] identifier[d] [ identifier[n] ]
|
def ed(s1, s2):
"""edit distance
>>> ed('', ''), ed('a', 'a'), ed('','a'), ed('a', ''), ed('a!a', 'a.a')
(0, 0, 1, 1, 1)
This implementation takes only O(min(|s1|,|s2|)) space.
"""
(m, n) = (len(s1), len(s2))
if m < n:
(m, n) = (n, m) # ensure n <= m, to use O(min(n,m)) space
(s1, s2) = (s2, s1) # depends on [control=['if'], data=['m', 'n']]
d = list(range(n + 1))
for i in range(m):
p = i
d[0] = i + 1
for j in range(n):
t = 0 if s1[i] == s2[j] else 1
(p, d[j + 1]) = (d[j + 1], min(p + t, d[j] + 1, d[j + 1] + 1)) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return d[n]
|
def is_within(self, cutoff_dist, point, ligands=True):
    """Returns all atoms in AMPAL object within `cut-off` distance from the `point`."""
    # Gather the atom pool first (optionally including ligands), then filter
    # it down to the atoms inside the cut-off sphere around `point`.
    atoms = self.get_atoms(ligands=ligands)
    return find_atoms_within_distance(atoms, cutoff_dist, point)
|
def function[is_within, parameter[self, cutoff_dist, point, ligands]]:
constant[Returns all atoms in AMPAL object within `cut-off` distance from the `point`.]
return[call[name[find_atoms_within_distance], parameter[call[name[self].get_atoms, parameter[]], name[cutoff_dist], name[point]]]]
|
keyword[def] identifier[is_within] ( identifier[self] , identifier[cutoff_dist] , identifier[point] , identifier[ligands] = keyword[True] ):
literal[string]
keyword[return] identifier[find_atoms_within_distance] ( identifier[self] . identifier[get_atoms] ( identifier[ligands] = identifier[ligands] ), identifier[cutoff_dist] , identifier[point] )
|
def is_within(self, cutoff_dist, point, ligands=True):
"""Returns all atoms in AMPAL object within `cut-off` distance from the `point`."""
return find_atoms_within_distance(self.get_atoms(ligands=ligands), cutoff_dist, point)
|
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.
    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.
    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name.
    """
    # Serve from the cache unless the caller explicitly asks for a refresh.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    url = 'http://' + self.ip_address + ':1400/xml/device_description.xml'
    response = requests.get(url, timeout=timeout)
    dom = XML.fromstring(response.content)
    ns = '{urn:schemas-upnp-org:device-1-0}'
    device = dom.find(ns + 'device')
    if device is None:
        return None
    info = self.speaker_info
    info['zone_name'] = device.findtext(ns + 'roomName')
    # no zone icon in device_description.xml -> player icon
    info['player_icon'] = device.findtext(
        ns + 'iconList/' + ns + 'icon/' + ns + 'url')
    info['uid'] = self.uid
    # Remaining fields map 1:1 from an XML tag to a speaker_info key.
    for key, tag in (('serial_number', 'serialNum'),
                     ('software_version', 'softwareVersion'),
                     ('hardware_version', 'hardwareVersion'),
                     ('model_number', 'modelNumber'),
                     ('model_name', 'modelName'),
                     ('display_version', 'displayVersion')):
        info[key] = device.findtext(ns + tag)
    # no mac address in the XML - extract it from the serial number
    info['mac_address'] = info['serial_number'].split(':')[0]
    return info
|
def function[get_speaker_info, parameter[self, refresh, timeout]]:
constant[Get information about the Sonos speaker.
Arguments:
refresh(bool): Refresh the speaker info cache.
timeout: How long to wait for the server to send
data before giving up, as a float, or a
`(connect timeout, read timeout)` tuple
e.g. (3, 5). Default is no timeout.
Returns:
dict: Information about the Sonos speaker, such as the UID,
MAC Address, and Zone Name.
]
if <ast.BoolOp object at 0x7da1b180dc60> begin[:]
return[name[self].speaker_info]
variable[device] assign[=] call[name[dom].find, parameter[constant[{urn:schemas-upnp-org:device-1-0}device]]]
if compare[name[device] is_not constant[None]] begin[:]
call[name[self].speaker_info][constant[zone_name]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}roomName]]]
call[name[self].speaker_info][constant[player_icon]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}iconList/{urn:schemas-upnp-org:device-1-0}icon/{urn:schemas-upnp-org:device-1-0}url]]]
call[name[self].speaker_info][constant[uid]] assign[=] name[self].uid
call[name[self].speaker_info][constant[serial_number]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}serialNum]]]
call[name[self].speaker_info][constant[software_version]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}softwareVersion]]]
call[name[self].speaker_info][constant[hardware_version]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}hardwareVersion]]]
call[name[self].speaker_info][constant[model_number]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}modelNumber]]]
call[name[self].speaker_info][constant[model_name]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}modelName]]]
call[name[self].speaker_info][constant[display_version]] assign[=] call[name[device].findtext, parameter[constant[{urn:schemas-upnp-org:device-1-0}displayVersion]]]
variable[mac] assign[=] call[call[call[name[self].speaker_info][constant[serial_number]].split, parameter[constant[:]]]][constant[0]]
call[name[self].speaker_info][constant[mac_address]] assign[=] name[mac]
return[name[self].speaker_info]
return[constant[None]]
|
keyword[def] identifier[get_speaker_info] ( identifier[self] , identifier[refresh] = keyword[False] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[speaker_info] keyword[and] identifier[refresh] keyword[is] keyword[False] :
keyword[return] identifier[self] . identifier[speaker_info]
keyword[else] :
identifier[response] = identifier[requests] . identifier[get] ( literal[string] + identifier[self] . identifier[ip_address] +
literal[string] ,
identifier[timeout] = identifier[timeout] )
identifier[dom] = identifier[XML] . identifier[fromstring] ( identifier[response] . identifier[content] )
identifier[device] = identifier[dom] . identifier[find] ( literal[string] )
keyword[if] identifier[device] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string]
literal[string]
literal[string]
)
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[self] . identifier[uid]
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[device] . identifier[findtext] (
literal[string] )
identifier[mac] = identifier[self] . identifier[speaker_info] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[self] . identifier[speaker_info] [ literal[string] ]= identifier[mac]
keyword[return] identifier[self] . identifier[speaker_info]
keyword[return] keyword[None]
|
def get_speaker_info(self, refresh=False, timeout=None):
"""Get information about the Sonos speaker.
Arguments:
refresh(bool): Refresh the speaker info cache.
timeout: How long to wait for the server to send
data before giving up, as a float, or a
`(connect timeout, read timeout)` tuple
e.g. (3, 5). Default is no timeout.
Returns:
dict: Information about the Sonos speaker, such as the UID,
MAC Address, and Zone Name.
"""
if self.speaker_info and refresh is False:
return self.speaker_info # depends on [control=['if'], data=[]]
else:
response = requests.get('http://' + self.ip_address + ':1400/xml/device_description.xml', timeout=timeout)
dom = XML.fromstring(response.content)
device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
if device is not None:
self.speaker_info['zone_name'] = device.findtext('{urn:schemas-upnp-org:device-1-0}roomName')
# no zone icon in device_description.xml -> player icon
self.speaker_info['player_icon'] = device.findtext('{urn:schemas-upnp-org:device-1-0}iconList/{urn:schemas-upnp-org:device-1-0}icon/{urn:schemas-upnp-org:device-1-0}url')
self.speaker_info['uid'] = self.uid
self.speaker_info['serial_number'] = device.findtext('{urn:schemas-upnp-org:device-1-0}serialNum')
self.speaker_info['software_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}softwareVersion')
self.speaker_info['hardware_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}hardwareVersion')
self.speaker_info['model_number'] = device.findtext('{urn:schemas-upnp-org:device-1-0}modelNumber')
self.speaker_info['model_name'] = device.findtext('{urn:schemas-upnp-org:device-1-0}modelName')
self.speaker_info['display_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}displayVersion')
# no mac address - extract from serial number
mac = self.speaker_info['serial_number'].split(':')[0]
self.speaker_info['mac_address'] = mac
return self.speaker_info # depends on [control=['if'], data=['device']]
return None
|
def SLH_to_qutip(slh, full_space=None, time_symbol=None,
                 convert_as='pyfunc'):
    """Generate and return QuTiP representation matrices for the Hamiltonian
    and the collapse operators. Any inhomogeneities in the Lindblad operators
    (resulting from coherent drives) will be moved into the Hamiltonian, cf.
    :func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
    Args:
        slh (SLH): The SLH object from which to generate the qutip data
        full_space (HilbertSpace or None): The Hilbert space in which to
            represent the operators. If None, the space of `shl` will be used
        time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
            expressing time dependence (usually 't')
        convert_as (str): How to express time dependencies to qutip. Must be
            'pyfunc' or 'str'
    Returns:
        tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
        where ``H`` and each ``L`` may be a nested list to express time
        dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
        ``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
        (``convert_as='str'``) or a function (``convert_as='pyfunc'``)
    Raises:
        AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
            invalid for numerical conversion
    """
    # Validate / default the representation space.
    if not full_space:
        full_space = slh.space
    elif not (full_space >= slh.space):
        raise AlgebraError("full_space="+str(full_space)+" needs to "
                           "at least include slh.space = "+str(slh.space))
    if full_space == TrivialSpace:
        raise AlgebraError(
            "Cannot convert SLH object in TrivialSpace. "
            "You may pass a non-trivial `full_space`")
    # Fold coherent drives out of the Lindblad operators into H.
    slh = move_drive_to_H(slh)
    time_dependent = time_symbol is not None
    if time_dependent:
        def _convert(op):
            # Nested-list (time-dependent) qutip representation.
            return _time_dependent_to_qutip(op, full_space, time_symbol,
                                            convert_as)
    else:
        def _convert(op):
            # Plain static Qobj representation.
            return convert_to_qutip(op, full_space=full_space)
    H = _convert(slh.H)
    Ls = []
    for L in slh.Ls:
        # Promote scalar Lindblad operators to proper operators first.
        if is_scalar(L):
            L = L * IdentityOperator
        L_converted = _convert(L)
        if time_dependent:
            Ls.append(L_converted)
        elif L_converted.norm('max') > 0:
            # In the static case, identically-zero operators are dropped.
            Ls.append(L_converted)
    return H, Ls
|
def function[SLH_to_qutip, parameter[slh, full_space, time_symbol, convert_as]]:
constant[Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion
]
if name[full_space] begin[:]
if <ast.UnaryOp object at 0x7da204564040> begin[:]
<ast.Raise object at 0x7da204565030>
if compare[name[full_space] equal[==] name[TrivialSpace]] begin[:]
<ast.Raise object at 0x7da204567220>
variable[slh] assign[=] call[name[move_drive_to_H], parameter[name[slh]]]
if compare[name[time_symbol] is constant[None]] begin[:]
variable[H] assign[=] call[name[convert_to_qutip], parameter[name[slh].H]]
variable[Ls] assign[=] list[[]]
for taget[name[L]] in starred[name[slh].Ls] begin[:]
if call[name[is_scalar], parameter[name[L]]] begin[:]
variable[L] assign[=] binary_operation[name[L] * name[IdentityOperator]]
variable[L_qutip] assign[=] call[name[convert_to_qutip], parameter[name[L]]]
if compare[call[name[L_qutip].norm, parameter[constant[max]]] greater[>] constant[0]] begin[:]
call[name[Ls].append, parameter[name[L_qutip]]]
return[tuple[[<ast.Name object at 0x7da20c795f90>, <ast.Name object at 0x7da20c7943a0>]]]
|
keyword[def] identifier[SLH_to_qutip] ( identifier[slh] , identifier[full_space] = keyword[None] , identifier[time_symbol] = keyword[None] ,
identifier[convert_as] = literal[string] ):
literal[string]
keyword[if] identifier[full_space] :
keyword[if] keyword[not] identifier[full_space] >= identifier[slh] . identifier[space] :
keyword[raise] identifier[AlgebraError] ( literal[string] + identifier[str] ( identifier[full_space] )+ literal[string]
literal[string] + identifier[str] ( identifier[slh] . identifier[space] ))
keyword[else] :
identifier[full_space] = identifier[slh] . identifier[space]
keyword[if] identifier[full_space] == identifier[TrivialSpace] :
keyword[raise] identifier[AlgebraError] (
literal[string]
literal[string] )
identifier[slh] = identifier[move_drive_to_H] ( identifier[slh] )
keyword[if] identifier[time_symbol] keyword[is] keyword[None] :
identifier[H] = identifier[convert_to_qutip] ( identifier[slh] . identifier[H] , identifier[full_space] = identifier[full_space] )
identifier[Ls] =[]
keyword[for] identifier[L] keyword[in] identifier[slh] . identifier[Ls] :
keyword[if] identifier[is_scalar] ( identifier[L] ):
identifier[L] = identifier[L] * identifier[IdentityOperator]
identifier[L_qutip] = identifier[convert_to_qutip] ( identifier[L] , identifier[full_space] = identifier[full_space] )
keyword[if] identifier[L_qutip] . identifier[norm] ( literal[string] )> literal[int] :
identifier[Ls] . identifier[append] ( identifier[L_qutip] )
keyword[else] :
identifier[H] = identifier[_time_dependent_to_qutip] ( identifier[slh] . identifier[H] , identifier[full_space] , identifier[time_symbol] ,
identifier[convert_as] )
identifier[Ls] =[]
keyword[for] identifier[L] keyword[in] identifier[slh] . identifier[Ls] :
keyword[if] identifier[is_scalar] ( identifier[L] ):
identifier[L] = identifier[L] * identifier[IdentityOperator]
identifier[L_qutip] = identifier[_time_dependent_to_qutip] ( identifier[L] , identifier[full_space] , identifier[time_symbol] ,
identifier[convert_as] )
identifier[Ls] . identifier[append] ( identifier[L_qutip] )
keyword[return] identifier[H] , identifier[Ls]
|
def SLH_to_qutip(slh, full_space=None, time_symbol=None, convert_as='pyfunc'):
"""Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion
"""
if full_space:
if not full_space >= slh.space:
raise AlgebraError('full_space=' + str(full_space) + ' needs to at least include slh.space = ' + str(slh.space)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
full_space = slh.space
if full_space == TrivialSpace:
raise AlgebraError('Cannot convert SLH object in TrivialSpace. You may pass a non-trivial `full_space`') # depends on [control=['if'], data=[]]
slh = move_drive_to_H(slh)
if time_symbol is None:
H = convert_to_qutip(slh.H, full_space=full_space)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator # depends on [control=['if'], data=[]]
L_qutip = convert_to_qutip(L, full_space=full_space)
if L_qutip.norm('max') > 0:
Ls.append(L_qutip) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['L']] # depends on [control=['if'], data=[]]
else:
H = _time_dependent_to_qutip(slh.H, full_space, time_symbol, convert_as)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator # depends on [control=['if'], data=[]]
L_qutip = _time_dependent_to_qutip(L, full_space, time_symbol, convert_as)
Ls.append(L_qutip) # depends on [control=['for'], data=['L']]
return (H, Ls)
|
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files):
    """ For theFile (a Node) update any file_tests and search for graphics files
    then find all included files and call ScanFiles recursively for each of them"""
    # NOTE(review): this function mutates `file_tests` and `aux_files` in place
    # and appends to what appears to be a module-level `newglossary_suffix`
    # list; it also relies on module-level `Verbose`, `include_re`,
    # `includeOnly_re` and the `FindFile` helper defined elsewhere in the file.
    content = theFile.get_text_contents()
    if Verbose:
        print(" scanning ",str(theFile))
    # Run each regex test that has not matched yet; once file_tests[i][0] is
    # set it stays set, so matches found in included files persist.
    for i in range(len(file_tests_search)):
        if file_tests[i][0] is None:
            if Verbose:
                print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1])
            file_tests[i][0] = file_tests_search[i].search(content)
            if Verbose and file_tests[i][0]:
                print("   found match for ",file_tests[i][1][-1])
            # for newglossary insert the suffixes in file_tests[i]
            if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
                findresult = file_tests_search[i].findall(content)
                # Each \newglossary occurrence contributes three suffixes
                # (groups 0, 2 and 3 of the match), prepended to the test's
                # suffix list and recorded as a triple in newglossary_suffix.
                for l in range(len(findresult)) :
                    (file_tests[i][1]).insert(0,'.'+findresult[l][3])
                    (file_tests[i][1]).insert(0,'.'+findresult[l][2])
                    (file_tests[i][1]).insert(0,'.'+findresult[l][0])
                    suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ]
                    newglossary_suffix.append(suffix_list)
                if Verbose:
                    print("  new suffixes for newglossary ",newglossary_suffix)

    # \includeonly names become expected auxiliary files in the target dir.
    incResult = includeOnly_re.search(content)
    if incResult:
        aux_files.append(os.path.join(targetdir, incResult.group(1)))
    if Verbose:
        print("\include file names : ", aux_files)
    # recursively call this on each of the included files
    inc_files = [ ]
    inc_files.extend( include_re.findall(content) )
    if Verbose:
        print("files included by '%s': "%str(theFile),inc_files)
    # inc_files is list of file names as given. need to find them
    # using TEXINPUTS paths.
    for src in inc_files:
        srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
        if srcNode is not None:
            file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
    if Verbose:
        print(" done scanning ",str(theFile))
    return file_tests
|
def function[ScanFiles, parameter[theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files]]:
constant[ For theFile (a Node) update any file_tests and search for graphics files
then find all included files and call ScanFiles recursively for each of them]
variable[content] assign[=] call[name[theFile].get_text_contents, parameter[]]
if name[Verbose] begin[:]
call[name[print], parameter[constant[ scanning ], call[name[str], parameter[name[theFile]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[file_tests_search]]]]]] begin[:]
if compare[call[call[name[file_tests]][name[i]]][constant[0]] is constant[None]] begin[:]
if name[Verbose] begin[:]
call[name[print], parameter[constant[scan i ], name[i], constant[ files_tests[i] ], call[name[file_tests]][name[i]], call[call[name[file_tests]][name[i]]][constant[1]]]]
call[call[name[file_tests]][name[i]]][constant[0]] assign[=] call[call[name[file_tests_search]][name[i]].search, parameter[name[content]]]
if <ast.BoolOp object at 0x7da20c7967d0> begin[:]
call[name[print], parameter[constant[ found match for ], call[call[call[name[file_tests]][name[i]]][constant[1]]][<ast.UnaryOp object at 0x7da20c794700>]]]
if <ast.BoolOp object at 0x7da20c7962c0> begin[:]
variable[findresult] assign[=] call[call[name[file_tests_search]][name[i]].findall, parameter[name[content]]]
for taget[name[l]] in starred[call[name[range], parameter[call[name[len], parameter[name[findresult]]]]]] begin[:]
call[call[call[name[file_tests]][name[i]]][constant[1]].insert, parameter[constant[0], binary_operation[constant[.] + call[call[name[findresult]][name[l]]][constant[3]]]]]
call[call[call[name[file_tests]][name[i]]][constant[1]].insert, parameter[constant[0], binary_operation[constant[.] + call[call[name[findresult]][name[l]]][constant[2]]]]]
call[call[call[name[file_tests]][name[i]]][constant[1]].insert, parameter[constant[0], binary_operation[constant[.] + call[call[name[findresult]][name[l]]][constant[0]]]]]
variable[suffix_list] assign[=] list[[<ast.BinOp object at 0x7da1b2346d70>, <ast.BinOp object at 0x7da1b2345ed0>, <ast.BinOp object at 0x7da1b2347910>]]
call[name[newglossary_suffix].append, parameter[name[suffix_list]]]
if name[Verbose] begin[:]
call[name[print], parameter[constant[ new suffixes for newglossary ], name[newglossary_suffix]]]
variable[incResult] assign[=] call[name[includeOnly_re].search, parameter[name[content]]]
if name[incResult] begin[:]
call[name[aux_files].append, parameter[call[name[os].path.join, parameter[name[targetdir], call[name[incResult].group, parameter[constant[1]]]]]]]
if name[Verbose] begin[:]
call[name[print], parameter[constant[\include file names : ], name[aux_files]]]
variable[inc_files] assign[=] list[[]]
call[name[inc_files].extend, parameter[call[name[include_re].findall, parameter[name[content]]]]]
if name[Verbose] begin[:]
call[name[print], parameter[binary_operation[constant[files included by '%s': ] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[theFile]]]], name[inc_files]]]
for taget[name[src]] in starred[name[inc_files]] begin[:]
variable[srcNode] assign[=] call[name[FindFile], parameter[name[src], list[[<ast.Constant object at 0x7da1b2347f40>, <ast.Constant object at 0x7da1b23470a0>, <ast.Constant object at 0x7da1b2345f00>]], name[paths], name[env]]]
if compare[name[srcNode] is_not constant[None]] begin[:]
variable[file_tests] assign[=] call[name[ScanFiles], parameter[name[srcNode], name[target], name[paths], name[file_tests], name[file_tests_search], name[env], name[graphics_extensions], name[targetdir], name[aux_files]]]
if name[Verbose] begin[:]
call[name[print], parameter[constant[ done scanning ], call[name[str], parameter[name[theFile]]]]]
return[name[file_tests]]
|
keyword[def] identifier[ScanFiles] ( identifier[theFile] , identifier[target] , identifier[paths] , identifier[file_tests] , identifier[file_tests_search] , identifier[env] , identifier[graphics_extensions] , identifier[targetdir] , identifier[aux_files] ):
literal[string]
identifier[content] = identifier[theFile] . identifier[get_text_contents] ()
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] , identifier[str] ( identifier[theFile] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[file_tests_search] )):
keyword[if] identifier[file_tests] [ identifier[i] ][ literal[int] ] keyword[is] keyword[None] :
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] , identifier[i] , literal[string] , identifier[file_tests] [ identifier[i] ], identifier[file_tests] [ identifier[i] ][ literal[int] ])
identifier[file_tests] [ identifier[i] ][ literal[int] ]= identifier[file_tests_search] [ identifier[i] ]. identifier[search] ( identifier[content] )
keyword[if] identifier[Verbose] keyword[and] identifier[file_tests] [ identifier[i] ][ literal[int] ]:
identifier[print] ( literal[string] , identifier[file_tests] [ identifier[i] ][ literal[int] ][- literal[int] ])
keyword[if] identifier[file_tests] [ identifier[i] ][ literal[int] ] keyword[and] identifier[file_tests] [ identifier[i] ][ literal[int] ][- literal[int] ]== literal[string] :
identifier[findresult] = identifier[file_tests_search] [ identifier[i] ]. identifier[findall] ( identifier[content] )
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[len] ( identifier[findresult] )):
( identifier[file_tests] [ identifier[i] ][ literal[int] ]). identifier[insert] ( literal[int] , literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ])
( identifier[file_tests] [ identifier[i] ][ literal[int] ]). identifier[insert] ( literal[int] , literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ])
( identifier[file_tests] [ identifier[i] ][ literal[int] ]). identifier[insert] ( literal[int] , literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ])
identifier[suffix_list] =[ literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ], literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ], literal[string] + identifier[findresult] [ identifier[l] ][ literal[int] ]]
identifier[newglossary_suffix] . identifier[append] ( identifier[suffix_list] )
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] , identifier[newglossary_suffix] )
identifier[incResult] = identifier[includeOnly_re] . identifier[search] ( identifier[content] )
keyword[if] identifier[incResult] :
identifier[aux_files] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[targetdir] , identifier[incResult] . identifier[group] ( literal[int] )))
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] , identifier[aux_files] )
identifier[inc_files] =[]
identifier[inc_files] . identifier[extend] ( identifier[include_re] . identifier[findall] ( identifier[content] ))
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] % identifier[str] ( identifier[theFile] ), identifier[inc_files] )
keyword[for] identifier[src] keyword[in] identifier[inc_files] :
identifier[srcNode] = identifier[FindFile] ( identifier[src] ,[ literal[string] , literal[string] , literal[string] ], identifier[paths] , identifier[env] , identifier[requireExt] = keyword[False] )
keyword[if] identifier[srcNode] keyword[is] keyword[not] keyword[None] :
identifier[file_tests] = identifier[ScanFiles] ( identifier[srcNode] , identifier[target] , identifier[paths] , identifier[file_tests] , identifier[file_tests_search] , identifier[env] , identifier[graphics_extensions] , identifier[targetdir] , identifier[aux_files] )
keyword[if] identifier[Verbose] :
identifier[print] ( literal[string] , identifier[str] ( identifier[theFile] ))
keyword[return] identifier[file_tests]
|
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files):
    """ For theFile (a Node) update any file_tests and search for graphics files
    then find all included files and call ScanFiles recursively for each of them

    ``theFile`` must provide ``get_text_contents()`` (presumably an SCons
    Node -- confirm at the call site).  ``file_tests`` and
    ``file_tests_search`` are parallel lists: slot i of ``file_tests`` is
    ``[match_or_None, keyword_list]`` and its first element is filled in here
    with the first hit of the compiled pattern ``file_tests_search[i]``.
    ``aux_files`` is extended in place with any ``\\includeonly`` target.
    Relies on the module-level names ``Verbose``, ``newglossary_suffix``,
    ``includeOnly_re``, ``include_re`` and ``FindFile``.
    Returns the (mutated) ``file_tests`` list so callers can re-assign it.
    """
    content = theFile.get_text_contents()
    if Verbose:
        print(' scanning ', str(theFile)) # depends on [control=['if'], data=[]]
    # Only probe patterns that have not matched in an earlier file; a match
    # found once is kept for the whole scan.
    for i in range(len(file_tests_search)):
        if file_tests[i][0] is None:
            if Verbose:
                print('scan i ', i, ' files_tests[i] ', file_tests[i], file_tests[i][1]) # depends on [control=['if'], data=[]]
            file_tests[i][0] = file_tests_search[i].search(content)
            if Verbose and file_tests[i][0]:
                print(' found match for ', file_tests[i][1][-1]) # depends on [control=['if'], data=[]]
            # for newglossary insert the suffixes in file_tests[i]
            if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
                findresult = file_tests_search[i].findall(content)
                for l in range(len(findresult)):
                    file_tests[i][1].insert(0, '.' + findresult[l][3])
                    file_tests[i][1].insert(0, '.' + findresult[l][2])
                    file_tests[i][1].insert(0, '.' + findresult[l][0])
                    suffix_list = ['.' + findresult[l][0], '.' + findresult[l][2], '.' + findresult[l][3]]
                    newglossary_suffix.append(suffix_list) # depends on [control=['for'], data=['l']]
                if Verbose:
                    print(' new suffixes for newglossary ', newglossary_suffix) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
    incResult = includeOnly_re.search(content)
    if incResult:
        aux_files.append(os.path.join(targetdir, incResult.group(1))) # depends on [control=['if'], data=[]]
    if Verbose:
        print('\\include file names : ', aux_files) # depends on [control=['if'], data=[]]
    # recursively call this on each of the included files
    inc_files = []
    inc_files.extend(include_re.findall(content))
    if Verbose:
        print("files included by '%s': " % str(theFile), inc_files) # depends on [control=['if'], data=[]]
    # inc_files is list of file names as given. need to find them
    # using TEXINPUTS paths.
    for src in inc_files:
        srcNode = FindFile(src, ['.tex', '.ltx', '.latex'], paths, env, requireExt=False)
        if srcNode is not None:
            file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files) # depends on [control=['if'], data=['srcNode']] # depends on [control=['for'], data=['src']]
    if Verbose:
        print(' done scanning ', str(theFile)) # depends on [control=['if'], data=[]]
    return file_tests
|
def rfxcom(device):
    """Start the event loop to collect data from the serial device."""
    # The explicit CLI value wins; otherwise fall back to the app config.
    selected = device if device is not None else app.config.get('DEVICE')
    if selected is None:
        # Nowhere left to look -- tell the user how to provide the device.
        print("The serial device needs to be passed in as --device or "
              "set in the config as DEVICE.")
        return
    rfxcom_collect(selected)
|
def function[rfxcom, parameter[device]]:
constant[Start the event loop to collect data from the serial device.]
if compare[name[device] is constant[None]] begin[:]
variable[device] assign[=] call[name[app].config.get, parameter[constant[DEVICE]]]
if compare[name[device] is constant[None]] begin[:]
call[name[print], parameter[constant[The serial device needs to be passed in as --device or set in the config as DEVICE.]]]
return[None]
call[name[rfxcom_collect], parameter[name[device]]]
|
keyword[def] identifier[rfxcom] ( identifier[device] ):
literal[string]
keyword[if] identifier[device] keyword[is] keyword[None] :
identifier[device] = identifier[app] . identifier[config] . identifier[get] ( literal[string] )
keyword[if] identifier[device] keyword[is] keyword[None] :
identifier[print] ( literal[string]
literal[string] )
keyword[return]
identifier[rfxcom_collect] ( identifier[device] )
|
def rfxcom(device):
"""Start the event loop to collect data from the serial device."""
# If the device isn't passed in, look for it in the config.
if device is None:
device = app.config.get('DEVICE') # depends on [control=['if'], data=['device']]
# If the device is *still* none, error.
if device is None:
print('The serial device needs to be passed in as --device or set in the config as DEVICE.')
return # depends on [control=['if'], data=[]]
rfxcom_collect(device)
|
def may_contain_matches(self, path):
    """Tests whether it's possible for paths under the given one to match.

    Returns:
        bool: ``False`` when no path under the given one will match the
        pattern, ``True`` otherwise.

    Note: the previous docstring claimed ``None`` was returned on a
    non-match; the code has always returned a boolean.
    """
    path = self._prepare_path(path)
    # int_regex is presumably the "intermediate" (prefix) pattern -- a hit
    # here means a longer path below could still match.  TODO confirm.
    return self.int_regex.search(path) is not None
|
def function[may_contain_matches, parameter[self, path]]:
constant[Tests whether it's possible for paths under the given one to match.
If this method returns None, no path under the given one will match the
pattern.
]
variable[path] assign[=] call[name[self]._prepare_path, parameter[name[path]]]
return[compare[call[name[self].int_regex.search, parameter[name[path]]] is_not constant[None]]]
|
keyword[def] identifier[may_contain_matches] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[self] . identifier[_prepare_path] ( identifier[path] )
keyword[return] identifier[self] . identifier[int_regex] . identifier[search] ( identifier[path] ) keyword[is] keyword[not] keyword[None]
|
def may_contain_matches(self, path):
"""Tests whether it's possible for paths under the given one to match.
If this method returns None, no path under the given one will match the
pattern.
"""
path = self._prepare_path(path)
return self.int_regex.search(path) is not None
|
def prof_altitude(pressure, p_coef=(-0.028389, -0.0493698, 0.485718, 0.278656,
                                    -17.5703, 48.0926)):
    """
    Return altitude for given pressure.
    Evaluates the polynomial ``p_coef`` at log10 of the pressure values.

    Parameters
    ----------
    pressure : array-like
        pressure values [hPa].
    p_coef : array-like
        polynomial coefficients, highest degree first (defaults fit the US
        Standard Atmosphere).

    Returns
    -------
    altitude : array-like
        altitude values [km], with the same shape as the pressure input.

    See Also
    --------
    prof_pressure : Returns pressure for given altitude.
    prof_temperature : Returns air temperature for given altitude.

    Notes
    -----
    The default coefficients are a 5th-degree polynomial fitted to USSA data
    over 0-100 km (roughly 1% accuracy there, 0.5% below 30 km), so results
    degrade for pressures below about 3e-4 hPa.

    Examples
    --------
    >>> prof_altitude([1000, 800, 600])
    array([ 0.1065092 ,  1.95627858,  4.2060627 ])
    """
    pressure = np.asarray(pressure)
    # Evaluate on the flattened data, then restore the caller's shape.
    log_pressure = np.log10(pressure.flatten())
    altitude = np.polyval(p_coef, log_pressure)
    return altitude.reshape(pressure.shape)
|
def function[prof_altitude, parameter[pressure, p_coef]]:
constant[
Return altitude for given pressure.
This function evaluates a polynomial at log10(pressure) values.
Parameters
----------
pressure : array-like
pressure values [hPa].
p_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
altitude : array-like
altitude values [km] (same shape than the pressure input array).
See Also
--------
prof_pressure : Returns pressure for
given altitude.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with pressure less than about 3e-4 hPa.
Examples
--------
>>> prof_altitude([1000, 800, 600])
array([ 0.1065092 , 1.95627858, 4.2060627 ])
]
variable[pressure] assign[=] call[name[np].asarray, parameter[name[pressure]]]
variable[altitude] assign[=] call[name[np].polyval, parameter[name[p_coef], call[name[np].log10, parameter[call[name[pressure].flatten, parameter[]]]]]]
return[call[name[altitude].reshape, parameter[name[pressure].shape]]]
|
keyword[def] identifier[prof_altitude] ( identifier[pressure] , identifier[p_coef] =(- literal[int] ,- literal[int] , literal[int] , literal[int] ,
- literal[int] , literal[int] )):
literal[string]
identifier[pressure] = identifier[np] . identifier[asarray] ( identifier[pressure] )
identifier[altitude] = identifier[np] . identifier[polyval] ( identifier[p_coef] , identifier[np] . identifier[log10] ( identifier[pressure] . identifier[flatten] ()))
keyword[return] identifier[altitude] . identifier[reshape] ( identifier[pressure] . identifier[shape] )
|
def prof_altitude(pressure, p_coef=(-0.028389, -0.0493698, 0.485718, 0.278656, -17.5703, 48.0926)):
"""
Return altitude for given pressure.
This function evaluates a polynomial at log10(pressure) values.
Parameters
----------
pressure : array-like
pressure values [hPa].
p_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
altitude : array-like
altitude values [km] (same shape than the pressure input array).
See Also
--------
prof_pressure : Returns pressure for
given altitude.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with pressure less than about 3e-4 hPa.
Examples
--------
>>> prof_altitude([1000, 800, 600])
array([ 0.1065092 , 1.95627858, 4.2060627 ])
"""
pressure = np.asarray(pressure)
altitude = np.polyval(p_coef, np.log10(pressure.flatten()))
return altitude.reshape(pressure.shape)
|
def time_vs_parameter(self, parameter, bp, merge=False, merge_method='mean', masked=False):
    """To get the parameter of either a specific base-pair/step or a DNA segment as a function of time.

    Parameters
    ----------
    parameter : str
        Name of a base-pair or base-step or helical parameter.
        For details about accepted keywords, see ``parameter`` in the
        method :meth:`DNA.get_parameters`.
    bp : 1D list or array
        base-pairs to analyze
        Example: ::
            bp = [6]      # merge = False
            bp = [4,15]   # merge = True
    merge : bool
        ``Default=False``. As shown above, if ``True``, bp should be a list
        giving a range, otherwise a list with a single value. If ``True``,
        the parameter for the respective DNA segment is combined using
        ``merge_method``.
    merge_method : str
        Method to calculate the parameter of a DNA segment from local
        parameters of all base-pairs/steps that are between the range
        given through ``bp``.
        Currently accepted keywords are as follows:
            * ``merge_method = mean``: Average of local parameters
            * ``merge_method = sum``: Sum of local parameters
    masked : bool
        ``Default=False``. To skip specific frames/snapshots.
        ``DNA.mask`` array should be set to use this functionality.
        This array contains a boolean for each frame; frames whose mask is
        ``True`` are skipped. Presently, the mask array is automatically
        generated during :meth:`DNA.generate_smooth_axis` to skip frames
        where the 3D fitting curve was not successful within the given
        criteria.

    Returns
    -------
    time : 1D array
        Time of each (unmasked) frame from the trajectory
    value : 1D array
        Parameter value for each (unmasked) frame from the trajectory
    """
    if not (isinstance(bp, list) or isinstance(bp, np.ndarray)):
        raise AssertionError(
            "type %s is not list or np.ndarray" % type(bp))

    if (len(bp) > 1) and (merge == False):
        # NOTE: an unreachable ``exit(1)`` used to follow this raise; removed.
        raise AssertionError(
            "bp %s contains more than two values, whereas merge=False. Use either one value in bp or merge=True" % bp)

    if len(bp) == 1:
        merge = False

    if (merge == True) and not ((merge_method == 'mean') or (merge_method == 'sum')):
        raise AssertionError(
            "merge method %s is not available." % merge_method)

    # Build the list of frame indices, honoring the mask when requested.
    midx = []
    if masked and self.mask is None:
        print(" WARNING: mask is not set. Mask is set during helical axis smoothing. \n")
        masked = False
    for i in range(len(self.time)):
        if masked:
            if self.mask[i] == False:
                midx.append(i)
        else:
            midx.append(i)

    if len(bp) == 1:
        param_value, bp_idx = self.get_parameters(parameter, bp, bp_range=False, masked=masked)
    else:
        param_value, bp_idx = self.get_parameters(parameter, bp, bp_range=True, masked=masked)

    if (merge == True) and (merge_method == 'mean'):
        # BUGFIX: previously this branch returned the full, unmasked
        # ``self.time``, inconsistent with the 'sum' branch below and wrong
        # when ``masked=True``.
        return self.time[midx], np.mean(param_value, axis=0)
    elif (merge == True) and (merge_method == 'sum'):
        return self.time[midx], np.sum(param_value, axis=0)
    else:
        return self.time[midx], param_value[0]
|
def function[time_vs_parameter, parameter[self, parameter, bp, merge, merge_method, masked]]:
constant[To get the parameter of either a specfic base-pair/step or a DNA segment as a function of time.
parameters
----------
parameter : str
Name of a base-pair or base-step or helical parameter.
For details about accepted keywords, see ``parameter`` in the
method :meth:`DNA.get_parameters`.
bp : 1D list or array
base-pairs to analyze
Example: ::
bp = [6] # merge = False
bp = [4,15] # merge = True
merge : bool
``Default=False``. As shown above, if ``True``, bp should a list of
range otherwise a list of single value. If ``bp = True``, the
parameter for the respective DNA segment could be merged or
calculated by ``merge_method``.
merge_method : str
Method to calculate the parameter of a DNA segment from local
parameters of all base-pairs/steps that are between the range
given through ``bp``.
Currently accepted keywords are as follows:
* ``merge_method = mean``: Average of local parameters
* ``merge_method = sum``: Sum of local parameters
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
Returns
-------
time : 1D array
Array containing time of each frame from trajectory
value : 1D array
Array containing parameter values for each frame from trajectory
]
if <ast.UnaryOp object at 0x7da1b25a8c70> begin[:]
<ast.Raise object at 0x7da1b25a8e80>
if <ast.BoolOp object at 0x7da1b25a9030> begin[:]
<ast.Raise object at 0x7da1b25a91e0>
call[name[exit], parameter[constant[1]]]
if compare[call[name[len], parameter[name[bp]]] equal[==] constant[1]] begin[:]
variable[merge] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b25a8250> begin[:]
<ast.Raise object at 0x7da1b25a8490>
variable[midx] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b25a97b0> begin[:]
call[name[print], parameter[constant[ WARNING: mask is not set. Mask is set during helical axis smoothing.
]]]
variable[masked] assign[=] constant[False]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].time]]]]] begin[:]
if name[masked] begin[:]
if compare[call[name[self].mask][name[i]] equal[==] constant[False]] begin[:]
call[name[midx].append, parameter[name[i]]]
if compare[call[name[len], parameter[name[bp]]] equal[==] constant[1]] begin[:]
<ast.Tuple object at 0x7da1b25707f0> assign[=] call[name[self].get_parameters, parameter[name[parameter], name[bp]]]
if <ast.BoolOp object at 0x7da1b2570e80> begin[:]
return[tuple[[<ast.Attribute object at 0x7da1b2570a00>, <ast.Call object at 0x7da1b2571390>]]]
|
keyword[def] identifier[time_vs_parameter] ( identifier[self] , identifier[parameter] , identifier[bp] , identifier[merge] = keyword[False] , identifier[merge_method] = literal[string] , identifier[masked] = keyword[False] ):
literal[string]
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[bp] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[bp] , identifier[np] . identifier[ndarray] )):
keyword[raise] identifier[AssertionError] (
literal[string] % identifier[type] ( identifier[bp] ))
keyword[if] ( identifier[len] ( identifier[bp] )> literal[int] ) keyword[and] ( identifier[merge] == keyword[False] ):
keyword[raise] identifier[AssertionError] (
literal[string] % identifier[bp] )
identifier[exit] ( literal[int] )
keyword[if] identifier[len] ( identifier[bp] )== literal[int] :
identifier[merge] = keyword[False]
keyword[if] ( identifier[merge] == keyword[True] ) keyword[and] keyword[not] (( identifier[merge_method] == literal[string] ) keyword[or] ( identifier[merge_method] == literal[string] )):
keyword[raise] identifier[AssertionError] (
literal[string] % identifier[merge_method] )
identifier[midx] =[]
keyword[if] identifier[masked] keyword[and] identifier[self] . identifier[mask] keyword[is] keyword[None] :
identifier[print] ( literal[string] )
identifier[masked] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[time] )):
keyword[if] identifier[masked] :
keyword[if] identifier[self] . identifier[mask] [ identifier[i] ]== keyword[False] :
identifier[midx] . identifier[append] ( identifier[i] )
keyword[else] :
identifier[midx] . identifier[append] ( identifier[i] )
keyword[if] identifier[len] ( identifier[bp] )== literal[int] :
identifier[param_value] , identifier[bp_idx] = identifier[self] . identifier[get_parameters] ( identifier[parameter] , identifier[bp] , identifier[bp_range] = keyword[False] , identifier[masked] = identifier[masked] )
keyword[else] :
identifier[param_value] , identifier[bp_idx] = identifier[self] . identifier[get_parameters] ( identifier[parameter] , identifier[bp] , identifier[bp_range] = keyword[True] , identifier[masked] = identifier[masked] )
keyword[if] ( identifier[merge] == keyword[True] ) keyword[and] ( identifier[merge_method] == literal[string] ):
keyword[return] identifier[self] . identifier[time] , identifier[np] . identifier[mean] ( identifier[param_value] , identifier[axis] = literal[int] )
keyword[elif] ( identifier[merge] == keyword[True] ) keyword[and] ( identifier[merge_method] == literal[string] ):
keyword[return] identifier[self] . identifier[time] [ identifier[midx] ], identifier[np] . identifier[sum] ( identifier[param_value] , identifier[axis] = literal[int] )
keyword[else] :
keyword[return] identifier[self] . identifier[time] [ identifier[midx] ], identifier[param_value] [ literal[int] ]
|
def time_vs_parameter(self, parameter, bp, merge=False, merge_method='mean', masked=False):
"""To get the parameter of either a specfic base-pair/step or a DNA segment as a function of time.
parameters
----------
parameter : str
Name of a base-pair or base-step or helical parameter.
For details about accepted keywords, see ``parameter`` in the
method :meth:`DNA.get_parameters`.
bp : 1D list or array
base-pairs to analyze
Example: ::
bp = [6] # merge = False
bp = [4,15] # merge = True
merge : bool
``Default=False``. As shown above, if ``True``, bp should a list of
range otherwise a list of single value. If ``bp = True``, the
parameter for the respective DNA segment could be merged or
calculated by ``merge_method``.
merge_method : str
Method to calculate the parameter of a DNA segment from local
parameters of all base-pairs/steps that are between the range
given through ``bp``.
Currently accepted keywords are as follows:
* ``merge_method = mean``: Average of local parameters
* ``merge_method = sum``: Sum of local parameters
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
Returns
-------
time : 1D array
Array containing time of each frame from trajectory
value : 1D array
Array containing parameter values for each frame from trajectory
"""
if not (isinstance(bp, list) or isinstance(bp, np.ndarray)):
raise AssertionError('type %s is not list or np.ndarray' % type(bp)) # depends on [control=['if'], data=[]]
if len(bp) > 1 and merge == False:
raise AssertionError('bp %s contains more than two values, whereas merge=False. Use either one value in bp or merge=True' % bp)
exit(1) # depends on [control=['if'], data=[]]
if len(bp) == 1:
merge = False # depends on [control=['if'], data=[]]
if merge == True and (not (merge_method == 'mean' or merge_method == 'sum')):
raise AssertionError('merge method %s is not available.' % merge_method) # depends on [control=['if'], data=[]]
# Masking values according to mask array
midx = []
if masked and self.mask is None:
print(' WARNING: mask is not set. Mask is set during helical axis smoothing. \n')
masked = False # depends on [control=['if'], data=[]]
for i in range(len(self.time)):
if masked:
if self.mask[i] == False:
midx.append(i) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
midx.append(i) # depends on [control=['for'], data=['i']]
if len(bp) == 1:
(param_value, bp_idx) = self.get_parameters(parameter, bp, bp_range=False, masked=masked) # depends on [control=['if'], data=[]]
else:
(param_value, bp_idx) = self.get_parameters(parameter, bp, bp_range=True, masked=masked)
if merge == True and merge_method == 'mean':
return (self.time, np.mean(param_value, axis=0)) # depends on [control=['if'], data=[]]
elif merge == True and merge_method == 'sum':
return (self.time[midx], np.sum(param_value, axis=0)) # depends on [control=['if'], data=[]]
else:
return (self.time[midx], param_value[0])
|
def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
"""
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
"""
noverlaps = [] # will contain tuples of the form (number_overlapping, offset_front_id)
for offset_front_id in candidate_offset_front_ids:
offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)
offset_front_idxs = [(f, i) for f, i in zip(offset_front_f_idxs, offset_front_s_idxs)]
noverlap_this_id = len(set(offset_front_idxs).symmetric_difference(set(offsets_corresponding_to_onsets)))
noverlaps.append((noverlap_this_id, offset_front_id))
_overlapped, chosen_offset_front_id = max(noverlaps, key=lambda t: t[0])
return int(chosen_offset_front_id)
|
def function[_choose_front_id_from_candidates, parameter[candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets]]:
constant[
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
]
variable[noverlaps] assign[=] list[[]]
for taget[name[offset_front_id]] in starred[name[candidate_offset_front_ids]] begin[:]
<ast.Tuple object at 0x7da1b0394130> assign[=] call[name[np].where, parameter[compare[name[offset_fronts] equal[==] name[offset_front_id]]]]
variable[offset_front_idxs] assign[=] <ast.ListComp object at 0x7da1b0396110>
variable[noverlap_this_id] assign[=] call[name[len], parameter[call[call[name[set], parameter[name[offset_front_idxs]]].symmetric_difference, parameter[call[name[set], parameter[name[offsets_corresponding_to_onsets]]]]]]]
call[name[noverlaps].append, parameter[tuple[[<ast.Name object at 0x7da1b032ee90>, <ast.Name object at 0x7da1b032dab0>]]]]
<ast.Tuple object at 0x7da1b032d300> assign[=] call[name[max], parameter[name[noverlaps]]]
return[call[name[int], parameter[name[chosen_offset_front_id]]]]
|
keyword[def] identifier[_choose_front_id_from_candidates] ( identifier[candidate_offset_front_ids] , identifier[offset_fronts] , identifier[offsets_corresponding_to_onsets] ):
literal[string]
identifier[noverlaps] =[]
keyword[for] identifier[offset_front_id] keyword[in] identifier[candidate_offset_front_ids] :
identifier[offset_front_f_idxs] , identifier[offset_front_s_idxs] = identifier[np] . identifier[where] ( identifier[offset_fronts] == identifier[offset_front_id] )
identifier[offset_front_idxs] =[( identifier[f] , identifier[i] ) keyword[for] identifier[f] , identifier[i] keyword[in] identifier[zip] ( identifier[offset_front_f_idxs] , identifier[offset_front_s_idxs] )]
identifier[noverlap_this_id] = identifier[len] ( identifier[set] ( identifier[offset_front_idxs] ). identifier[symmetric_difference] ( identifier[set] ( identifier[offsets_corresponding_to_onsets] )))
identifier[noverlaps] . identifier[append] (( identifier[noverlap_this_id] , identifier[offset_front_id] ))
identifier[_overlapped] , identifier[chosen_offset_front_id] = identifier[max] ( identifier[noverlaps] , identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ])
keyword[return] identifier[int] ( identifier[chosen_offset_front_id] )
|
def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
"""
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
"""
noverlaps = [] # will contain tuples of the form (number_overlapping, offset_front_id)
for offset_front_id in candidate_offset_front_ids:
(offset_front_f_idxs, offset_front_s_idxs) = np.where(offset_fronts == offset_front_id)
offset_front_idxs = [(f, i) for (f, i) in zip(offset_front_f_idxs, offset_front_s_idxs)]
noverlap_this_id = len(set(offset_front_idxs).symmetric_difference(set(offsets_corresponding_to_onsets)))
noverlaps.append((noverlap_this_id, offset_front_id)) # depends on [control=['for'], data=['offset_front_id']]
(_overlapped, chosen_offset_front_id) = max(noverlaps, key=lambda t: t[0])
return int(chosen_offset_front_id)
|
def bool(self):
    """Return the bool of a single element PandasObject.

    The object must hold exactly one boolean scalar; a ValueError is raised
    when it does not contain exactly one element.
    """
    shape = self.shape
    # Only a true scalar container -- shape (1,) or (1, 1) -- has an
    # unambiguous truth value; everything else is rejected.
    if shape == (1,) or shape == (1, 1):
        return self._to_pandas().bool()
    raise ValueError(
        """The PandasObject does not have exactly
                1 element. Return the bool of a single
                element PandasObject. The truth value is
                ambiguous. Use a.empty, a.item(), a.any()
                or a.all()."""
    )
|
def function[bool, parameter[self]]:
constant[Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
]
variable[shape] assign[=] name[self].shape
if <ast.BoolOp object at 0x7da20c76ecb0> begin[:]
<ast.Raise object at 0x7da20c76dea0>
|
keyword[def] identifier[bool] ( identifier[self] ):
literal[string]
identifier[shape] = identifier[self] . identifier[shape]
keyword[if] identifier[shape] !=( literal[int] ,) keyword[and] identifier[shape] !=( literal[int] , literal[int] ):
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[else] :
keyword[return] identifier[self] . identifier[_to_pandas] (). identifier[bool] ()
|
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError('The PandasObject does not have exactly\n 1 element. Return the bool of a single\n element PandasObject. The truth value is\n ambiguous. Use a.empty, a.item(), a.any()\n or a.all().') # depends on [control=['if'], data=[]]
else:
return self._to_pandas().bool()
|
def SignalAbort(self):
    """Signals the process to abort."""
    # Flag our own loop first, then propagate the abort downstream.
    self._abort = True
    for helper in (self._extraction_worker, self._parser_mediator):
        if helper:
            helper.SignalAbort()
|
def function[SignalAbort, parameter[self]]:
constant[Signals the process to abort.]
name[self]._abort assign[=] constant[True]
if name[self]._extraction_worker begin[:]
call[name[self]._extraction_worker.SignalAbort, parameter[]]
if name[self]._parser_mediator begin[:]
call[name[self]._parser_mediator.SignalAbort, parameter[]]
|
keyword[def] identifier[SignalAbort] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_abort] = keyword[True]
keyword[if] identifier[self] . identifier[_extraction_worker] :
identifier[self] . identifier[_extraction_worker] . identifier[SignalAbort] ()
keyword[if] identifier[self] . identifier[_parser_mediator] :
identifier[self] . identifier[_parser_mediator] . identifier[SignalAbort] ()
|
def SignalAbort(self):
"""Signals the process to abort."""
self._abort = True
if self._extraction_worker:
self._extraction_worker.SignalAbort() # depends on [control=['if'], data=[]]
if self._parser_mediator:
self._parser_mediator.SignalAbort() # depends on [control=['if'], data=[]]
|
def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):
    """Create a SageMaker Model Package from the results of training with an Algorithm Package

    Args:
        name (str): ModelPackage name
        description (str): Model Package description
        algorithm_arn (str): arn or name of the algorithm used for training.
        model_data (str): s3 URI to the model artifacts produced by training
    """
    source_algorithm = {
        'AlgorithmName': algorithm_arn,
        'ModelDataUrl': model_data,
    }
    request = {
        'ModelPackageName': name,
        'ModelPackageDescription': description,
        'SourceAlgorithmSpecification': {'SourceAlgorithms': [source_algorithm]},
    }
    try:
        LOGGER.info('Creating model package with name: {}'.format(name))
        self.sagemaker_client.create_model_package(**request)
    except ClientError as e:
        error = e.response['Error']
        # A duplicate-name ValidationException means the package is already
        # registered; reuse it rather than failing.
        if error['Code'] == 'ValidationException' and 'ModelPackage already exists' in error['Message']:
            LOGGER.warning('Using already existing model package: {}'.format(name))
        else:
            raise
|
def function[create_model_package_from_algorithm, parameter[self, name, description, algorithm_arn, model_data]]:
constant[Create a SageMaker Model Package from the results of training with an Algorithm Package
Args:
name (str): ModelPackage name
description (str): Model Package description
algorithm_arn (str): arn or name of the algorithm used for training.
model_data (str): s3 URI to the model artifacts produced by training
]
variable[request] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c1bb80>, <ast.Constant object at 0x7da1b1c19480>, <ast.Constant object at 0x7da1b1c19ed0>], [<ast.Name object at 0x7da1b1c19960>, <ast.Name object at 0x7da1b1c18640>, <ast.Dict object at 0x7da1b1c19390>]]
<ast.Try object at 0x7da1b1c19a80>
|
keyword[def] identifier[create_model_package_from_algorithm] ( identifier[self] , identifier[name] , identifier[description] , identifier[algorithm_arn] , identifier[model_data] ):
literal[string]
identifier[request] ={
literal[string] : identifier[name] ,
literal[string] : identifier[description] ,
literal[string] :{
literal[string] :[
{
literal[string] : identifier[algorithm_arn] ,
literal[string] : identifier[model_data]
}
]
}
}
keyword[try] :
identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[self] . identifier[sagemaker_client] . identifier[create_model_package] (** identifier[request] )
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
identifier[error_code] = identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]
identifier[message] = identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]
keyword[if] (
identifier[error_code] == literal[string]
keyword[and] literal[string] keyword[in] identifier[message]
):
identifier[LOGGER] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[else] :
keyword[raise]
|
def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):
"""Create a SageMaker Model Package from the results of training with an Algorithm Package
Args:
name (str): ModelPackage name
description (str): Model Package description
algorithm_arn (str): arn or name of the algorithm used for training.
model_data (str): s3 URI to the model artifacts produced by training
"""
request = {'ModelPackageName': name, 'ModelPackageDescription': description, 'SourceAlgorithmSpecification': {'SourceAlgorithms': [{'AlgorithmName': algorithm_arn, 'ModelDataUrl': model_data}]}}
try:
LOGGER.info('Creating model package with name: {}'.format(name))
self.sagemaker_client.create_model_package(**request) # depends on [control=['try'], data=[]]
except ClientError as e:
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'ValidationException' and 'ModelPackage already exists' in message:
LOGGER.warning('Using already existing model package: {}'.format(name)) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
|
def current_human_transaction(self):
    """Return the outermost (human-initiated) transaction still in progress.

    Returns ``None`` when no transaction has been started yet, or when the
    outermost transaction has already completed (its ``result`` is set).
    """
    if not self._callstack:
        # Nothing has been pushed yet -- no transaction in flight.
        return None
    tx, _, _, _, _ = self._callstack[0]
    if tx.result is not None:
        # The outermost tx already finished; there is no current one.
        return None
    assert tx.depth == 0
    return tx
|
def function[current_human_transaction, parameter[self]]:
constant[Current ongoing human transaction]
<ast.Try object at 0x7da204963430>
|
keyword[def] identifier[current_human_transaction] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[tx] , identifier[_] , identifier[_] , identifier[_] , identifier[_] = identifier[self] . identifier[_callstack] [ literal[int] ]
keyword[if] identifier[tx] . identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[None]
keyword[assert] identifier[tx] . identifier[depth] == literal[int]
keyword[return] identifier[tx]
keyword[except] identifier[IndexError] :
keyword[return] keyword[None]
|
def current_human_transaction(self):
"""Current ongoing human transaction"""
try:
(tx, _, _, _, _) = self._callstack[0]
if tx.result is not None:
#That tx finished. No current tx.
return None # depends on [control=['if'], data=[]]
assert tx.depth == 0
return tx # depends on [control=['try'], data=[]]
except IndexError:
return None # depends on [control=['except'], data=[]]
|
def _mock_request(self, **kwargs):
    """Stand-in for ``make_request`` that never touches the network.

    Extracts the service and operation names from the request model and
    returns whatever mocked response has been registered for that pair.
    """
    request_model = kwargs.get('model')
    service_name = request_model.service_model.endpoint_prefix
    operation_name = request_model.name
    LOG.debug('_make_request: %s.%s', service_name, operation_name)
    return self.load_response(service_name, operation_name)
|
def function[_mock_request, parameter[self]]:
constant[
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
]
variable[model] assign[=] call[name[kwargs].get, parameter[constant[model]]]
variable[service] assign[=] name[model].service_model.endpoint_prefix
variable[operation] assign[=] name[model].name
call[name[LOG].debug, parameter[constant[_make_request: %s.%s], name[service], name[operation]]]
return[call[name[self].load_response, parameter[name[service], name[operation]]]]
|
keyword[def] identifier[_mock_request] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[model] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[service] = identifier[model] . identifier[service_model] . identifier[endpoint_prefix]
identifier[operation] = identifier[model] . identifier[name]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[service] , identifier[operation] )
keyword[return] identifier[self] . identifier[load_response] ( identifier[service] , identifier[operation] )
|
def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.debug('_make_request: %s.%s', service, operation)
return self.load_response(service, operation)
|
def broadcast(self, bot, channel_type, text):
    """Push *text* to every user of *bot* over the given channel.

    :param bot: bot whose audience will receive the message
    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :param text: message body to deliver
    :type text: str
    """
    # Resolve the per-bot broadcast endpoint dynamically by bot name.
    endpoint = self.client.broadcast.__getattr__(bot.name)
    endpoint.__call__(
        _method="POST",
        _params={"channel": channel_type},
        _json={"message": text},
    )
|
def function[broadcast, parameter[self, bot, channel_type, text]]:
constant[
Use this method to broadcast text message to all users of bot.
:param bot: bot that will push user
:type bot: Bot
:param channel_type: one of [telegram, facebook, slack]
:type channel_type: str
:param text: text message
:type text: str
]
call[call[name[self].client.broadcast.__getattr__, parameter[name[bot].name]].__call__, parameter[]]
|
keyword[def] identifier[broadcast] ( identifier[self] , identifier[bot] , identifier[channel_type] , identifier[text] ):
literal[string]
identifier[self] . identifier[client] . identifier[broadcast] . identifier[__getattr__] ( identifier[bot] . identifier[name] ). identifier[__call__] ( identifier[_method] = literal[string] ,
identifier[_params] = identifier[dict] ( identifier[channel] = identifier[channel_type] ),
identifier[_json] = identifier[dict] ( identifier[message] = identifier[text] ))
|
def broadcast(self, bot, channel_type, text):
"""
Use this method to broadcast text message to all users of bot.
:param bot: bot that will push user
:type bot: Bot
:param channel_type: one of [telegram, facebook, slack]
:type channel_type: str
:param text: text message
:type text: str
"""
self.client.broadcast.__getattr__(bot.name).__call__(_method='POST', _params=dict(channel=channel_type), _json=dict(message=text))
|
def all(self, typ, **kwargs):
    """Fetch every record of *typ*.

    Supported keyword arguments:
        skip  -- number of records to skip
        limit -- cap on the number of records returned
    """
    response = self._request(typ, params=kwargs)
    return self._load(response)
|
def function[all, parameter[self, typ]]:
constant[
List all of type
Valid arguments:
skip : number of records to skip
limit : number of records to limit request to
]
return[call[name[self]._load, parameter[call[name[self]._request, parameter[name[typ]]]]]]
|
keyword[def] identifier[all] ( identifier[self] , identifier[typ] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_load] ( identifier[self] . identifier[_request] ( identifier[typ] , identifier[params] = identifier[kwargs] ))
|
def all(self, typ, **kwargs):
"""
List all of type
Valid arguments:
skip : number of records to skip
limit : number of records to limit request to
"""
return self._load(self._request(typ, params=kwargs))
|
def find_one(cls, *args, **kwargs):
    """Proxy ``find_one`` against this model's collection.

    Arguments are identical to ``pymongo.Collection.find_one``.
    """
    # _collection_key is "<database>.<collection>".
    db_name, coll_name = cls._collection_key.split('.')
    coll = current()[db_name][coll_name]
    return coll.find_one(*args, **kwargs)
|
def function[find_one, parameter[cls]]:
constant[Run a find_one on this model's collection. The arguments to
``Model.find_one`` are the same as to ``pymongo.Collection.find_one``.]
<ast.Tuple object at 0x7da18f00e500> assign[=] call[name[cls]._collection_key.split, parameter[constant[.]]]
return[call[call[call[call[name[current], parameter[]]][name[database]]][name[collection]].find_one, parameter[<ast.Starred object at 0x7da18f00ca00>]]]
|
keyword[def] identifier[find_one] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[database] , identifier[collection] = identifier[cls] . identifier[_collection_key] . identifier[split] ( literal[string] )
keyword[return] identifier[current] ()[ identifier[database] ][ identifier[collection] ]. identifier[find_one] (* identifier[args] ,** identifier[kwargs] )
|
def find_one(cls, *args, **kwargs):
"""Run a find_one on this model's collection. The arguments to
``Model.find_one`` are the same as to ``pymongo.Collection.find_one``."""
(database, collection) = cls._collection_key.split('.')
return current()[database][collection].find_one(*args, **kwargs)
|
def list(self, container_or_share_name, container=None, account=None):
    """List the blobs/files inside a container/share_name.

    Args:
        container_or_share_name(str): Name of the container/share_name
            whose blobs/files should be listed.
        container(bool): truthy to list blobs of a blob container,
            falsy to list files of a file share.
        account(str): The name of the storage account.

    Returns:
        list[str]: names of the blobs or files found.
    """
    # NOTE(review): the original ended with an unreachable
    # ``else: raise ValueError(...)`` -- ``if container`` /
    # ``elif not container`` already covers every possible value, so the
    # guard could never fire; it has been removed along with the
    # duplicated accumulation loop.
    key = self.storage_client.storage_accounts.list_keys(
        self.resource_group_name, account).keys[0].value
    if container:
        service = BlockBlobService(account_name=account, account_key=key)
        entries = service.list_blobs(container_or_share_name).items
    else:
        service = FileService(account_name=account, account_key=key)
        entries = service.list_directories_and_files(
            container_or_share_name).items
    return [entry.name for entry in entries]
|
def function[list, parameter[self, container_or_share_name, container, account]]:
constant[List the blobs/files inside a container/share_name.
Args:
container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files.
container(bool): flag to know it you are listing files or blobs.
account(str): The name of the storage account.
]
variable[key] assign[=] call[call[name[self].storage_client.storage_accounts.list_keys, parameter[name[self].resource_group_name, name[account]]].keys][constant[0]].value
if name[container] begin[:]
variable[bs] assign[=] call[name[BlockBlobService], parameter[]]
variable[container_list] assign[=] list[[]]
for taget[name[i]] in starred[call[name[bs].list_blobs, parameter[name[container_or_share_name]]].items] begin[:]
call[name[container_list].append, parameter[name[i].name]]
return[name[container_list]]
|
keyword[def] identifier[list] ( identifier[self] , identifier[container_or_share_name] , identifier[container] = keyword[None] , identifier[account] = keyword[None] ):
literal[string]
identifier[key] = identifier[self] . identifier[storage_client] . identifier[storage_accounts] . identifier[list_keys] ( identifier[self] . identifier[resource_group_name] , identifier[account] ). identifier[keys] [ literal[int] ]. identifier[value]
keyword[if] identifier[container] :
identifier[bs] = identifier[BlockBlobService] ( identifier[account_name] = identifier[account] , identifier[account_key] = identifier[key] )
identifier[container_list] =[]
keyword[for] identifier[i] keyword[in] identifier[bs] . identifier[list_blobs] ( identifier[container_or_share_name] ). identifier[items] :
identifier[container_list] . identifier[append] ( identifier[i] . identifier[name] )
keyword[return] identifier[container_list]
keyword[elif] keyword[not] identifier[container] :
identifier[fs] = identifier[FileService] ( identifier[account_name] = identifier[account] , identifier[account_key] = identifier[key] )
identifier[container_list] =[]
keyword[for] identifier[i] keyword[in] identifier[fs] . identifier[list_directories_and_files] ( identifier[container_or_share_name] ). identifier[items] :
identifier[container_list] . identifier[append] ( identifier[i] . identifier[name] )
keyword[return] identifier[container_list]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def list(self, container_or_share_name, container=None, account=None):
"""List the blobs/files inside a container/share_name.
Args:
container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files.
container(bool): flag to know it you are listing files or blobs.
account(str): The name of the storage account.
"""
key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, account).keys[0].value
if container:
bs = BlockBlobService(account_name=account, account_key=key)
container_list = []
for i in bs.list_blobs(container_or_share_name).items:
container_list.append(i.name) # depends on [control=['for'], data=['i']]
return container_list # depends on [control=['if'], data=[]]
elif not container:
fs = FileService(account_name=account, account_key=key)
container_list = []
for i in fs.list_directories_and_files(container_or_share_name).items:
container_list.append(i.name) # depends on [control=['for'], data=['i']]
return container_list # depends on [control=['if'], data=[]]
else:
raise ValueError('You have to pass a value for container param')
|
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle):
    '''Set new video subtitle file.
    @param p_mi: the media player.
    @param psz_subtitle: new video subtitle file.
    @return: the success status (boolean).
    '''
    # Reuse the cached ctypes binding when available; otherwise build it.
    f = _Cfunctions.get('libvlc_video_set_subtitle_file', None)
    if not f:
        f = _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None,
                       ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_subtitle)
|
def function[libvlc_video_set_subtitle_file, parameter[p_mi, psz_subtitle]]:
constant[Set new video subtitle file.
@param p_mi: the media player.
@param psz_subtitle: new video subtitle file.
@return: the success status (boolean).
]
variable[f] assign[=] <ast.BoolOp object at 0x7da1b2344d30>
return[call[name[f], parameter[name[p_mi], name[psz_subtitle]]]]
|
keyword[def] identifier[libvlc_video_set_subtitle_file] ( identifier[p_mi] , identifier[psz_subtitle] ):
literal[string]
identifier[f] = identifier[_Cfunctions] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[_Cfunction] ( literal[string] ,(( literal[int] ,),( literal[int] ,),), keyword[None] ,
identifier[ctypes] . identifier[c_int] , identifier[MediaPlayer] , identifier[ctypes] . identifier[c_char_p] )
keyword[return] identifier[f] ( identifier[p_mi] , identifier[psz_subtitle] )
|
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle):
"""Set new video subtitle file.
@param p_mi: the media player.
@param psz_subtitle: new video subtitle file.
@return: the success status (boolean).
"""
f = _Cfunctions.get('libvlc_video_set_subtitle_file', None) or _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,)), None, ctypes.c_int, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_subtitle)
|
def _resolve_rule_refs(self, grammar_parser, model_parser):
    """Resolves parser ParsingExpression crossrefs.

    Replaces every RuleCrossRef placeholder in the model parser's PEG
    tree (and in each meta-class' ``_tx_peg_rule``) with the actual
    ParsingExpression it refers to, mutating the tree in place.
    Raises TextXSemanticError when a referenced rule does not exist.
    """
    def _resolve_rule(rule):
        """
        Recursively resolve peg rule references.
        Args:
            rule(ParsingExpression or RuleCrossRef)
        """
        # ``resolved_rules`` is a closure variable rebound per pass in the
        # outer loop below; it guards against infinite recursion on
        # cyclic grammars. Cross-refs are always (re)processed.
        if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
            return rule
        resolved_rules.add(rule)
        if grammar_parser.debug:
            grammar_parser.dprint("Resolving rule: {}".format(rule))
        if type(rule) is RuleCrossRef:
            rule_name = rule.rule_name
            suppress = rule.suppress
            if rule_name in model_parser.metamodel:
                # Swap the cross-ref for the target rule; if the target
                # is itself still a cross-ref, resolve it first and cache
                # the result back on the meta-class.
                rule = model_parser.metamodel[rule_name]._tx_peg_rule
                if type(rule) is RuleCrossRef:
                    rule = _resolve_rule(rule)
                    model_parser.metamodel[rule_name]._tx_peg_rule = rule
                if suppress:
                    # Special case. Suppression on rule reference.
                    # Wrap in a suppressing Sequence while preserving the
                    # target's meta-class.
                    _tx_class = rule._tx_class
                    rule = Sequence(nodes=[rule],
                                    rule_name=rule_name,
                                    suppress=suppress)
                    rule._tx_class = _tx_class
            else:
                line, col = grammar_parser.pos_to_linecol(rule.position)
                raise TextXSemanticError(
                    'Unexisting rule "{}" at position {}.'
                    .format(rule.rule_name,
                            (line, col)), line, col)
        assert isinstance(rule, ParsingExpression),\
            "{}:{}".format(type(rule), text(rule))
        # Recurse into subrules, and resolve rules.
        for idx, child in enumerate(rule.nodes):
            if child not in resolved_rules:
                child = _resolve_rule(child)
                rule.nodes[idx] = child
        return rule
    # Two pass resolving
    # NOTE(review): presumably the second pass catches references whose
    # target was itself unresolved during pass one -- confirm against
    # textX upstream before changing.
    for i in range(2):
        if grammar_parser.debug:
            grammar_parser.dprint("RESOLVING RULE CROSS-REFS - PASS {}"
                                  .format(i + 1))
        resolved_rules = set()
        _resolve_rule(model_parser.parser_model)
        # Resolve rules of all meta-classes to handle unreferenced
        # rules also.
        for cls in model_parser.metamodel:
            cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule)
|
def function[_resolve_rule_refs, parameter[self, grammar_parser, model_parser]]:
constant[Resolves parser ParsingExpression crossrefs.]
def function[_resolve_rule, parameter[rule]]:
constant[
Recursively resolve peg rule references.
Args:
rule(ParsingExpression or RuleCrossRef)
]
if <ast.BoolOp object at 0x7da1b08e7760> begin[:]
return[name[rule]]
call[name[resolved_rules].add, parameter[name[rule]]]
if name[grammar_parser].debug begin[:]
call[name[grammar_parser].dprint, parameter[call[constant[Resolving rule: {}].format, parameter[name[rule]]]]]
if compare[call[name[type], parameter[name[rule]]] is name[RuleCrossRef]] begin[:]
variable[rule_name] assign[=] name[rule].rule_name
variable[suppress] assign[=] name[rule].suppress
if compare[name[rule_name] in name[model_parser].metamodel] begin[:]
variable[rule] assign[=] call[name[model_parser].metamodel][name[rule_name]]._tx_peg_rule
if compare[call[name[type], parameter[name[rule]]] is name[RuleCrossRef]] begin[:]
variable[rule] assign[=] call[name[_resolve_rule], parameter[name[rule]]]
call[name[model_parser].metamodel][name[rule_name]]._tx_peg_rule assign[=] name[rule]
if name[suppress] begin[:]
variable[_tx_class] assign[=] name[rule]._tx_class
variable[rule] assign[=] call[name[Sequence], parameter[]]
name[rule]._tx_class assign[=] name[_tx_class]
assert[call[name[isinstance], parameter[name[rule], name[ParsingExpression]]]]
for taget[tuple[[<ast.Name object at 0x7da1b08e50c0>, <ast.Name object at 0x7da1b08e7280>]]] in starred[call[name[enumerate], parameter[name[rule].nodes]]] begin[:]
if compare[name[child] <ast.NotIn object at 0x7da2590d7190> name[resolved_rules]] begin[:]
variable[child] assign[=] call[name[_resolve_rule], parameter[name[child]]]
call[name[rule].nodes][name[idx]] assign[=] name[child]
return[name[rule]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2]]]] begin[:]
if name[grammar_parser].debug begin[:]
call[name[grammar_parser].dprint, parameter[call[constant[RESOLVING RULE CROSS-REFS - PASS {}].format, parameter[binary_operation[name[i] + constant[1]]]]]]
variable[resolved_rules] assign[=] call[name[set], parameter[]]
call[name[_resolve_rule], parameter[name[model_parser].parser_model]]
for taget[name[cls]] in starred[name[model_parser].metamodel] begin[:]
name[cls]._tx_peg_rule assign[=] call[name[_resolve_rule], parameter[name[cls]._tx_peg_rule]]
|
keyword[def] identifier[_resolve_rule_refs] ( identifier[self] , identifier[grammar_parser] , identifier[model_parser] ):
literal[string]
keyword[def] identifier[_resolve_rule] ( identifier[rule] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[rule] , identifier[RuleCrossRef] ) keyword[and] identifier[rule] keyword[in] identifier[resolved_rules] :
keyword[return] identifier[rule]
identifier[resolved_rules] . identifier[add] ( identifier[rule] )
keyword[if] identifier[grammar_parser] . identifier[debug] :
identifier[grammar_parser] . identifier[dprint] ( literal[string] . identifier[format] ( identifier[rule] ))
keyword[if] identifier[type] ( identifier[rule] ) keyword[is] identifier[RuleCrossRef] :
identifier[rule_name] = identifier[rule] . identifier[rule_name]
identifier[suppress] = identifier[rule] . identifier[suppress]
keyword[if] identifier[rule_name] keyword[in] identifier[model_parser] . identifier[metamodel] :
identifier[rule] = identifier[model_parser] . identifier[metamodel] [ identifier[rule_name] ]. identifier[_tx_peg_rule]
keyword[if] identifier[type] ( identifier[rule] ) keyword[is] identifier[RuleCrossRef] :
identifier[rule] = identifier[_resolve_rule] ( identifier[rule] )
identifier[model_parser] . identifier[metamodel] [ identifier[rule_name] ]. identifier[_tx_peg_rule] = identifier[rule]
keyword[if] identifier[suppress] :
identifier[_tx_class] = identifier[rule] . identifier[_tx_class]
identifier[rule] = identifier[Sequence] ( identifier[nodes] =[ identifier[rule] ],
identifier[rule_name] = identifier[rule_name] ,
identifier[suppress] = identifier[suppress] )
identifier[rule] . identifier[_tx_class] = identifier[_tx_class]
keyword[else] :
identifier[line] , identifier[col] = identifier[grammar_parser] . identifier[pos_to_linecol] ( identifier[rule] . identifier[position] )
keyword[raise] identifier[TextXSemanticError] (
literal[string]
. identifier[format] ( identifier[rule] . identifier[rule_name] ,
( identifier[line] , identifier[col] )), identifier[line] , identifier[col] )
keyword[assert] identifier[isinstance] ( identifier[rule] , identifier[ParsingExpression] ), literal[string] . identifier[format] ( identifier[type] ( identifier[rule] ), identifier[text] ( identifier[rule] ))
keyword[for] identifier[idx] , identifier[child] keyword[in] identifier[enumerate] ( identifier[rule] . identifier[nodes] ):
keyword[if] identifier[child] keyword[not] keyword[in] identifier[resolved_rules] :
identifier[child] = identifier[_resolve_rule] ( identifier[child] )
identifier[rule] . identifier[nodes] [ identifier[idx] ]= identifier[child]
keyword[return] identifier[rule]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
keyword[if] identifier[grammar_parser] . identifier[debug] :
identifier[grammar_parser] . identifier[dprint] ( literal[string]
. identifier[format] ( identifier[i] + literal[int] ))
identifier[resolved_rules] = identifier[set] ()
identifier[_resolve_rule] ( identifier[model_parser] . identifier[parser_model] )
keyword[for] identifier[cls] keyword[in] identifier[model_parser] . identifier[metamodel] :
identifier[cls] . identifier[_tx_peg_rule] = identifier[_resolve_rule] ( identifier[cls] . identifier[_tx_peg_rule] )
|
def _resolve_rule_refs(self, grammar_parser, model_parser):
"""Resolves parser ParsingExpression crossrefs."""
def _resolve_rule(rule):
"""
Recursively resolve peg rule references.
Args:
rule(ParsingExpression or RuleCrossRef)
"""
if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
return rule # depends on [control=['if'], data=[]]
resolved_rules.add(rule)
if grammar_parser.debug:
grammar_parser.dprint('Resolving rule: {}'.format(rule)) # depends on [control=['if'], data=[]]
if type(rule) is RuleCrossRef:
rule_name = rule.rule_name
suppress = rule.suppress
if rule_name in model_parser.metamodel:
rule = model_parser.metamodel[rule_name]._tx_peg_rule
if type(rule) is RuleCrossRef:
rule = _resolve_rule(rule)
model_parser.metamodel[rule_name]._tx_peg_rule = rule # depends on [control=['if'], data=[]]
if suppress:
# Special case. Suppression on rule reference.
_tx_class = rule._tx_class
rule = Sequence(nodes=[rule], rule_name=rule_name, suppress=suppress)
rule._tx_class = _tx_class # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['rule_name']]
else:
(line, col) = grammar_parser.pos_to_linecol(rule.position)
raise TextXSemanticError('Unexisting rule "{}" at position {}.'.format(rule.rule_name, (line, col)), line, col) # depends on [control=['if'], data=['RuleCrossRef']]
assert isinstance(rule, ParsingExpression), '{}:{}'.format(type(rule), text(rule))
# Recurse into subrules, and resolve rules.
for (idx, child) in enumerate(rule.nodes):
if child not in resolved_rules:
child = _resolve_rule(child)
rule.nodes[idx] = child # depends on [control=['if'], data=['child']] # depends on [control=['for'], data=[]]
return rule
# Two pass resolving
for i in range(2):
if grammar_parser.debug:
grammar_parser.dprint('RESOLVING RULE CROSS-REFS - PASS {}'.format(i + 1)) # depends on [control=['if'], data=[]]
resolved_rules = set()
_resolve_rule(model_parser.parser_model)
# Resolve rules of all meta-classes to handle unreferenced
# rules also.
for cls in model_parser.metamodel:
cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule) # depends on [control=['for'], data=['cls']] # depends on [control=['for'], data=['i']]
|
def get_tile(self, x_tile, y_tile, zoom,
             bands=None, masked=None, resampling=Resampling.cubic):
    """Convert mercator tile to raster window.
    :param x_tile: x coordinate of tile
    :param y_tile: y coordinate of tile
    :param zoom: zoom level
    :param bands: list of indices of requested bands, default None which returns all bands
    :param masked: passed through to `crop`; presumably selects a masked
        array result -- confirm against `crop`'s signature
    :param resampling: reprojection resampling method, default `cubic`
    :return: GeoRaster2 of tile in WEB_MERCATOR_CRS
    You can use TELLURIC_GET_TILE_BUFFER env variable to control the number of pixels surrounding
    the vector you should fetch when using this method on a raster that is not in WEB_MERCATOR_CRS
    default to 10
    """
    # Fast path: raster already in web mercator, no reprojection needed.
    if self.crs == WEB_MERCATOR_CRS:
        return self._get_tile_when_web_mercator_crs(x_tile, y_tile, zoom, bands, masked, resampling)
    # Tile footprint in web mercator coordinates.
    roi = GeoVector.from_xyz(x_tile, y_tile, zoom)
    left, bottom, right, top = roi.get_bounds(WEB_MERCATOR_CRS)
    # Transform for a 256x256 tile of this footprint expressed in the
    # raster's own CRS; only used to derive a matching crop resolution.
    new_affine = rasterio.warp.calculate_default_transform(WEB_MERCATOR_CRS, self.crs,
                                                           256, 256, left, bottom, right, top)[0]
    new_resolution = resolution_from_affine(new_affine)
    # Crop a buffered area around the tile so reprojection has enough
    # source pixels at the edges (buffer size tunable via env var).
    buffer_ratio = int(os.environ.get("TELLURIC_GET_TILE_BUFFER", 10))
    roi_buffer = roi.buffer(math.sqrt(roi.area * buffer_ratio / 100))
    raster = self.crop(roi_buffer, resolution=new_resolution, masked=masked,
                       bands=bands, resampling=resampling)
    # Reproject the buffered crop into web mercator at the zoom level's
    # canonical resolution, then trim to the exact tile and force 256x256.
    raster = raster.reproject(dst_crs=WEB_MERCATOR_CRS, resolution=MERCATOR_RESOLUTION_MAPPING[zoom],
                              dst_bounds=roi_buffer.get_bounds(WEB_MERCATOR_CRS),
                              resampling=Resampling.cubic_spline)
    # raster = raster.get_tile(x_tile, y_tile, zoom, bands, masked, resampling)
    raster = raster.crop(roi).resize(dest_width=256, dest_height=256)
    return raster
|
def function[get_tile, parameter[self, x_tile, y_tile, zoom, bands, masked, resampling]]:
constant[Convert mercator tile to raster window.
:param x_tile: x coordinate of tile
:param y_tile: y coordinate of tile
:param zoom: zoom level
:param bands: list of indices of requested bands, default None which returns all bands
:param resampling: reprojection resampling method, default `cubic`
:return: GeoRaster2 of tile in WEB_MERCATOR_CRS
You can use TELLURIC_GET_TILE_BUFFER env variable to control the number of pixels surrounding
the vector you should fetch when using this method on a raster that is not in WEB_MERCATOR_CRS
default to 10
]
if compare[name[self].crs equal[==] name[WEB_MERCATOR_CRS]] begin[:]
return[call[name[self]._get_tile_when_web_mercator_crs, parameter[name[x_tile], name[y_tile], name[zoom], name[bands], name[masked], name[resampling]]]]
variable[roi] assign[=] call[name[GeoVector].from_xyz, parameter[name[x_tile], name[y_tile], name[zoom]]]
<ast.Tuple object at 0x7da18bc70cd0> assign[=] call[name[roi].get_bounds, parameter[name[WEB_MERCATOR_CRS]]]
variable[new_affine] assign[=] call[call[name[rasterio].warp.calculate_default_transform, parameter[name[WEB_MERCATOR_CRS], name[self].crs, constant[256], constant[256], name[left], name[bottom], name[right], name[top]]]][constant[0]]
variable[new_resolution] assign[=] call[name[resolution_from_affine], parameter[name[new_affine]]]
variable[buffer_ratio] assign[=] call[name[int], parameter[call[name[os].environ.get, parameter[constant[TELLURIC_GET_TILE_BUFFER], constant[10]]]]]
variable[roi_buffer] assign[=] call[name[roi].buffer, parameter[call[name[math].sqrt, parameter[binary_operation[binary_operation[name[roi].area * name[buffer_ratio]] / constant[100]]]]]]
variable[raster] assign[=] call[name[self].crop, parameter[name[roi_buffer]]]
variable[raster] assign[=] call[name[raster].reproject, parameter[]]
variable[raster] assign[=] call[call[name[raster].crop, parameter[name[roi]]].resize, parameter[]]
return[name[raster]]
|
keyword[def] identifier[get_tile] ( identifier[self] , identifier[x_tile] , identifier[y_tile] , identifier[zoom] ,
identifier[bands] = keyword[None] , identifier[masked] = keyword[None] , identifier[resampling] = identifier[Resampling] . identifier[cubic] ):
literal[string]
keyword[if] identifier[self] . identifier[crs] == identifier[WEB_MERCATOR_CRS] :
keyword[return] identifier[self] . identifier[_get_tile_when_web_mercator_crs] ( identifier[x_tile] , identifier[y_tile] , identifier[zoom] , identifier[bands] , identifier[masked] , identifier[resampling] )
identifier[roi] = identifier[GeoVector] . identifier[from_xyz] ( identifier[x_tile] , identifier[y_tile] , identifier[zoom] )
identifier[left] , identifier[bottom] , identifier[right] , identifier[top] = identifier[roi] . identifier[get_bounds] ( identifier[WEB_MERCATOR_CRS] )
identifier[new_affine] = identifier[rasterio] . identifier[warp] . identifier[calculate_default_transform] ( identifier[WEB_MERCATOR_CRS] , identifier[self] . identifier[crs] ,
literal[int] , literal[int] , identifier[left] , identifier[bottom] , identifier[right] , identifier[top] )[ literal[int] ]
identifier[new_resolution] = identifier[resolution_from_affine] ( identifier[new_affine] )
identifier[buffer_ratio] = identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[int] ))
identifier[roi_buffer] = identifier[roi] . identifier[buffer] ( identifier[math] . identifier[sqrt] ( identifier[roi] . identifier[area] * identifier[buffer_ratio] / literal[int] ))
identifier[raster] = identifier[self] . identifier[crop] ( identifier[roi_buffer] , identifier[resolution] = identifier[new_resolution] , identifier[masked] = identifier[masked] ,
identifier[bands] = identifier[bands] , identifier[resampling] = identifier[resampling] )
identifier[raster] = identifier[raster] . identifier[reproject] ( identifier[dst_crs] = identifier[WEB_MERCATOR_CRS] , identifier[resolution] = identifier[MERCATOR_RESOLUTION_MAPPING] [ identifier[zoom] ],
identifier[dst_bounds] = identifier[roi_buffer] . identifier[get_bounds] ( identifier[WEB_MERCATOR_CRS] ),
identifier[resampling] = identifier[Resampling] . identifier[cubic_spline] )
identifier[raster] = identifier[raster] . identifier[crop] ( identifier[roi] ). identifier[resize] ( identifier[dest_width] = literal[int] , identifier[dest_height] = literal[int] )
keyword[return] identifier[raster]
|
def get_tile(self, x_tile, y_tile, zoom, bands=None, masked=None, resampling=Resampling.cubic):
"""Convert mercator tile to raster window.
:param x_tile: x coordinate of tile
:param y_tile: y coordinate of tile
:param zoom: zoom level
:param bands: list of indices of requested bands, default None which returns all bands
:param resampling: reprojection resampling method, default `cubic`
:return: GeoRaster2 of tile in WEB_MERCATOR_CRS
You can use TELLURIC_GET_TILE_BUFFER env variable to control the number of pixels surrounding
the vector you should fetch when using this method on a raster that is not in WEB_MERCATOR_CRS
default to 10
"""
if self.crs == WEB_MERCATOR_CRS:
return self._get_tile_when_web_mercator_crs(x_tile, y_tile, zoom, bands, masked, resampling) # depends on [control=['if'], data=[]]
roi = GeoVector.from_xyz(x_tile, y_tile, zoom)
(left, bottom, right, top) = roi.get_bounds(WEB_MERCATOR_CRS)
new_affine = rasterio.warp.calculate_default_transform(WEB_MERCATOR_CRS, self.crs, 256, 256, left, bottom, right, top)[0]
new_resolution = resolution_from_affine(new_affine)
buffer_ratio = int(os.environ.get('TELLURIC_GET_TILE_BUFFER', 10))
roi_buffer = roi.buffer(math.sqrt(roi.area * buffer_ratio / 100))
raster = self.crop(roi_buffer, resolution=new_resolution, masked=masked, bands=bands, resampling=resampling)
raster = raster.reproject(dst_crs=WEB_MERCATOR_CRS, resolution=MERCATOR_RESOLUTION_MAPPING[zoom], dst_bounds=roi_buffer.get_bounds(WEB_MERCATOR_CRS), resampling=Resampling.cubic_spline)
# raster = raster.get_tile(x_tile, y_tile, zoom, bands, masked, resampling)
raster = raster.crop(roi).resize(dest_width=256, dest_height=256)
return raster
|
def get_geo_top_artists(self, country, limit=None, cacheable=True):
    """Fetch the most popular artists on Last.fm for a given country.

    :param country: country name as defined by the ISO 3166-1 standard (required).
    :param limit: optional number of results to fetch per page (service default: 50).
    :param cacheable: whether the request may be served from / stored in the cache.
    :return: the top artists extracted from the service response.
    """
    request_params = {"country": country}
    if limit:
        request_params["limit"] = limit
    response_doc = _Request(self, "geo.getTopArtists", request_params).execute(cacheable)
    return _extract_top_artists(response_doc, self)
|
def function[get_geo_top_artists, parameter[self, country, limit, cacheable]]:
constant[Get the most popular artists on Last.fm by country.
Parameters:
country (Required) : A country name, as defined by the ISO 3166-1
country names standard.
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b49de0>], [<ast.Name object at 0x7da1b0b48b50>]]
if name[limit] begin[:]
call[name[params]][constant[limit]] assign[=] name[limit]
variable[doc] assign[=] call[call[name[_Request], parameter[name[self], constant[geo.getTopArtists], name[params]]].execute, parameter[name[cacheable]]]
return[call[name[_extract_top_artists], parameter[name[doc], name[self]]]]
|
keyword[def] identifier[get_geo_top_artists] ( identifier[self] , identifier[country] , identifier[limit] = keyword[None] , identifier[cacheable] = keyword[True] ):
literal[string]
identifier[params] ={ literal[string] : identifier[country] }
keyword[if] identifier[limit] :
identifier[params] [ literal[string] ]= identifier[limit]
identifier[doc] = identifier[_Request] ( identifier[self] , literal[string] , identifier[params] ). identifier[execute] ( identifier[cacheable] )
keyword[return] identifier[_extract_top_artists] ( identifier[doc] , identifier[self] )
|
def get_geo_top_artists(self, country, limit=None, cacheable=True):
"""Get the most popular artists on Last.fm by country.
Parameters:
country (Required) : A country name, as defined by the ISO 3166-1
country names standard.
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
params = {'country': country}
if limit:
params['limit'] = limit # depends on [control=['if'], data=[]]
doc = _Request(self, 'geo.getTopArtists', params).execute(cacheable)
return _extract_top_artists(doc, self)
|
def tail(self, limit=25, **fetch_kwargs):
    """
    Shortcut returning the last few entries of the query, oldest first.

    Equivalent to ``fetch(order_by="KEY DESC", limit=25)[::-1]``.

    :param limit: number of entries to return
    :param fetch_kwargs: extra keyword arguments forwarded to :meth:`fetch`
    :return: the fetched entries in ascending key order
    """
    newest_first = self.fetch(order_by="KEY DESC", limit=limit, **fetch_kwargs)
    # Reverse so callers see the entries in natural (ascending) order.
    return newest_first[::-1]
|
def function[tail, parameter[self, limit]]:
constant[
shortcut to fetch the last few entries from query expression.
Equivalent to fetch(order_by="KEY DESC", limit=25)[::-1]
:param limit: number of entries
:param fetch_kwargs: kwargs for fetch
:return: query result
]
return[call[call[name[self].fetch, parameter[]]][<ast.Slice object at 0x7da1b12d93c0>]]
|
keyword[def] identifier[tail] ( identifier[self] , identifier[limit] = literal[int] ,** identifier[fetch_kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[fetch] ( identifier[order_by] = literal[string] , identifier[limit] = identifier[limit] ,** identifier[fetch_kwargs] )[::- literal[int] ]
|
def tail(self, limit=25, **fetch_kwargs):
"""
shortcut to fetch the last few entries from query expression.
Equivalent to fetch(order_by="KEY DESC", limit=25)[::-1]
:param limit: number of entries
:param fetch_kwargs: kwargs for fetch
:return: query result
"""
return self.fetch(order_by='KEY DESC', limit=limit, **fetch_kwargs)[::-1]
|
def restart(self, restart_only_stale_services=None,
            redeploy_client_configuration=None,
            restart_service_names=None):
    """
    Restart all services in the cluster, honoring their dependency order.

    @param restart_only_stale_services: Only restart services that have stale
                                        configuration and their dependent
                                        services. Default is False.
    @param redeploy_client_configuration: Re-deploy client configuration for
                                          all services in the cluster. Default
                                          is False.
    @param restart_service_names: Only restart services that are specified and
                                  their dependent services.
                                  Available since API v11.
    @since API v6
    @return: Reference to the submitted command.
    """
    # Pre-v6 APIs accept no arguments for this command.
    if self._get_resource_root().version < 6:
        return self._cmd('restart')
    payload = {
        'restartOnlyStaleServices': restart_only_stale_services,
        'redeployClientConfiguration': redeploy_client_configuration,
    }
    if self._get_resource_root().version >= 11:
        payload['restartServiceNames'] = restart_service_names
    return self._cmd('restart', data=payload, api_version=6)
|
def function[restart, parameter[self, restart_only_stale_services, redeploy_client_configuration, restart_service_names]]:
constant[
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
]
if compare[call[name[self]._get_resource_root, parameter[]].version less[<] constant[6]] begin[:]
return[call[name[self]._cmd, parameter[constant[restart]]]]
|
keyword[def] identifier[restart] ( identifier[self] , identifier[restart_only_stale_services] = keyword[None] ,
identifier[redeploy_client_configuration] = keyword[None] ,
identifier[restart_service_names] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_get_resource_root] (). identifier[version] < literal[int] :
keyword[return] identifier[self] . identifier[_cmd] ( literal[string] )
keyword[else] :
identifier[args] = identifier[dict] ()
identifier[args] [ literal[string] ]= identifier[restart_only_stale_services]
identifier[args] [ literal[string] ]= identifier[redeploy_client_configuration]
keyword[if] identifier[self] . identifier[_get_resource_root] (). identifier[version] >= literal[int] :
identifier[args] [ literal[string] ]= identifier[restart_service_names]
keyword[return] identifier[self] . identifier[_cmd] ( literal[string] , identifier[data] = identifier[args] , identifier[api_version] = literal[int] )
|
def restart(self, restart_only_stale_services=None, redeploy_client_configuration=None, restart_service_names=None):
"""
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
"""
if self._get_resource_root().version < 6:
return self._cmd('restart') # depends on [control=['if'], data=[]]
else:
args = dict()
args['restartOnlyStaleServices'] = restart_only_stale_services
args['redeployClientConfiguration'] = redeploy_client_configuration
if self._get_resource_root().version >= 11:
args['restartServiceNames'] = restart_service_names # depends on [control=['if'], data=[]]
return self._cmd('restart', data=args, api_version=6)
|
def receive_trial_result(self, parameter_id, parameters, value):
    """Record an observation of the objective function.

    Parameters
    ----------
    parameter_id : int
        Identifier of the trial whose result is being reported.
    parameters : dict
        The parameters of the trial (unused here; kept for interface parity).
    value : dict/float
        The reported result; if a dict, it must contain a "default" key.
    """
    observed_reward = extract_scalar_reward(value)
    if parameter_id not in self.total_data:
        raise RuntimeError("Received parameter_id not in total_data.")
    _unused, father_id, model_id = self.total_data[parameter_id]
    # Recover the evaluated architecture and fold the observed reward back
    # into the search history.
    graph = self.bo.searcher.load_model_by_id(model_id)
    self.add_model(observed_reward, model_id)
    self.update(father_id, graph, observed_reward, model_id)
|
def function[receive_trial_result, parameter[self, parameter_id, parameters, value]]:
constant[ Record an observation of the objective function.
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key.
]
variable[reward] assign[=] call[name[extract_scalar_reward], parameter[name[value]]]
if compare[name[parameter_id] <ast.NotIn object at 0x7da2590d7190> name[self].total_data] begin[:]
<ast.Raise object at 0x7da18ede56f0>
<ast.Tuple object at 0x7da18ede7430> assign[=] call[name[self].total_data][name[parameter_id]]
variable[graph] assign[=] call[name[self].bo.searcher.load_model_by_id, parameter[name[model_id]]]
call[name[self].add_model, parameter[name[reward], name[model_id]]]
call[name[self].update, parameter[name[father_id], name[graph], name[reward], name[model_id]]]
|
keyword[def] identifier[receive_trial_result] ( identifier[self] , identifier[parameter_id] , identifier[parameters] , identifier[value] ):
literal[string]
identifier[reward] = identifier[extract_scalar_reward] ( identifier[value] )
keyword[if] identifier[parameter_id] keyword[not] keyword[in] identifier[self] . identifier[total_data] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
( identifier[_] , identifier[father_id] , identifier[model_id] )= identifier[self] . identifier[total_data] [ identifier[parameter_id] ]
identifier[graph] = identifier[self] . identifier[bo] . identifier[searcher] . identifier[load_model_by_id] ( identifier[model_id] )
identifier[self] . identifier[add_model] ( identifier[reward] , identifier[model_id] )
identifier[self] . identifier[update] ( identifier[father_id] , identifier[graph] , identifier[reward] , identifier[model_id] )
|
def receive_trial_result(self, parameter_id, parameters, value):
""" Record an observation of the objective function.
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key.
"""
reward = extract_scalar_reward(value)
if parameter_id not in self.total_data:
raise RuntimeError('Received parameter_id not in total_data.') # depends on [control=['if'], data=[]]
(_, father_id, model_id) = self.total_data[parameter_id]
graph = self.bo.searcher.load_model_by_id(model_id)
# to use the value and graph
self.add_model(reward, model_id)
self.update(father_id, graph, reward, model_id)
|
def stage():
    """Point Fabric's env at the staging server."""
    common_conf()
    env.machine = 'stage'
    env.user = settings.LOGIN_USER_STAGE
    env.host_string = settings.HOST_STAGE
    env.hosts = [env.host_string]
|
def function[stage, parameter[]]:
constant[Option to do something on the staging server.]
call[name[common_conf], parameter[]]
name[env].user assign[=] name[settings].LOGIN_USER_STAGE
name[env].machine assign[=] constant[stage]
name[env].host_string assign[=] name[settings].HOST_STAGE
name[env].hosts assign[=] list[[<ast.Attribute object at 0x7da1b085d300>]]
|
keyword[def] identifier[stage] ():
literal[string]
identifier[common_conf] ()
identifier[env] . identifier[user] = identifier[settings] . identifier[LOGIN_USER_STAGE]
identifier[env] . identifier[machine] = literal[string]
identifier[env] . identifier[host_string] = identifier[settings] . identifier[HOST_STAGE]
identifier[env] . identifier[hosts] =[ identifier[env] . identifier[host_string] ,]
|
def stage():
"""Option to do something on the staging server."""
common_conf()
env.user = settings.LOGIN_USER_STAGE
env.machine = 'stage'
env.host_string = settings.HOST_STAGE
env.hosts = [env.host_string]
|
def has(cmd):
    """Return True if the given shell command is available.

    **Examples**:
    ::
        auxly.shell.has("ls") # True
    """
    probe_flags = ["--help", "-h", "--version"]
    if os.name == "nt":
        probe_flags.insert(0, "/?")
    placeholder = "fakecmd"
    # Baseline stderr a definitely-missing command produces, with the
    # placeholder swapped for the command under test.
    missing_err = strerr(placeholder).replace(placeholder, cmd)
    for flag in probe_flags:
        probe = "{} {}".format(cmd, flag)
        # Command exists if any probe exits cleanly...
        if silent(probe) == 0:
            return True
        # ...produces any stdout...
        if len(listout(probe)) > 0:
            return True
        # ...or errors differently from the "not found" baseline.
        if strerr(probe) != missing_err:
            return True
    return False
|
def function[has, parameter[cmd]]:
constant[Returns true if the give shell command is available.
**Examples**:
::
auxly.shell.has("ls") # True
]
variable[helps] assign[=] list[[<ast.Constant object at 0x7da18bc72f80>, <ast.Constant object at 0x7da18bc73250>, <ast.Constant object at 0x7da18bc70850>]]
if compare[constant[nt] equal[==] name[os].name] begin[:]
call[name[helps].insert, parameter[constant[0], constant[/?]]]
variable[fakecmd] assign[=] constant[fakecmd]
variable[cmderr] assign[=] call[call[name[strerr], parameter[name[fakecmd]]].replace, parameter[name[fakecmd], name[cmd]]]
for taget[name[h]] in starred[name[helps]] begin[:]
variable[hcmd] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7c97e0>, <ast.Name object at 0x7da20c7cb400>]]]
if compare[constant[0] equal[==] call[name[silent], parameter[name[hcmd]]]] begin[:]
return[constant[True]]
if compare[call[name[len], parameter[call[name[listout], parameter[name[hcmd]]]]] greater[>] constant[0]] begin[:]
return[constant[True]]
if compare[call[name[strerr], parameter[name[hcmd]]] not_equal[!=] name[cmderr]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[has] ( identifier[cmd] ):
literal[string]
identifier[helps] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] literal[string] == identifier[os] . identifier[name] :
identifier[helps] . identifier[insert] ( literal[int] , literal[string] )
identifier[fakecmd] = literal[string]
identifier[cmderr] = identifier[strerr] ( identifier[fakecmd] ). identifier[replace] ( identifier[fakecmd] , identifier[cmd] )
keyword[for] identifier[h] keyword[in] identifier[helps] :
identifier[hcmd] = literal[string] %( identifier[cmd] , identifier[h] )
keyword[if] literal[int] == identifier[silent] ( identifier[hcmd] ):
keyword[return] keyword[True]
keyword[if] identifier[len] ( identifier[listout] ( identifier[hcmd] ))> literal[int] :
keyword[return] keyword[True]
keyword[if] identifier[strerr] ( identifier[hcmd] )!= identifier[cmderr] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def has(cmd):
"""Returns true if the give shell command is available.
**Examples**:
::
auxly.shell.has("ls") # True
"""
helps = ['--help', '-h', '--version']
if 'nt' == os.name:
helps.insert(0, '/?') # depends on [control=['if'], data=[]]
fakecmd = 'fakecmd'
cmderr = strerr(fakecmd).replace(fakecmd, cmd)
for h in helps:
hcmd = '%s %s' % (cmd, h)
if 0 == silent(hcmd):
return True # depends on [control=['if'], data=[]]
if len(listout(hcmd)) > 0:
return True # depends on [control=['if'], data=[]]
if strerr(hcmd) != cmderr:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']]
return False
|
def p_valueInitializer(p):
    """valueInitializer : identifier defaultValue ';'
                        | qualifierList identifier defaultValue ';'
    """
    # NOTE: the docstring above IS the PLY grammar rule -- do not edit casually.
    if len(p) == 4:
        # First alternative: no qualifier list present.
        quals, id_, val = [], p[1], p[2]
    else:
        quals, id_, val = p[1], p[2], p[3]
    p[0] = (quals, id_, val)
|
def function[p_valueInitializer, parameter[p]]:
constant[valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';'
]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[4]] begin[:]
variable[id_] assign[=] call[name[p]][constant[1]]
variable[val] assign[=] call[name[p]][constant[2]]
variable[quals] assign[=] list[[]]
call[name[p]][constant[0]] assign[=] tuple[[<ast.Name object at 0x7da18f00d9c0>, <ast.Name object at 0x7da18f00f6d0>, <ast.Name object at 0x7da18f00cee0>]]
|
keyword[def] identifier[p_valueInitializer] ( identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[id_] = identifier[p] [ literal[int] ]
identifier[val] = identifier[p] [ literal[int] ]
identifier[quals] =[]
keyword[else] :
identifier[quals] = identifier[p] [ literal[int] ]
identifier[id_] = identifier[p] [ literal[int] ]
identifier[val] = identifier[p] [ literal[int] ]
identifier[p] [ literal[int] ]=( identifier[quals] , identifier[id_] , identifier[val] )
|
def p_valueInitializer(p):
"""valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';'
"""
if len(p) == 4:
id_ = p[1]
val = p[2]
quals = [] # depends on [control=['if'], data=[]]
else:
quals = p[1]
id_ = p[2]
val = p[3]
p[0] = (quals, id_, val)
|
def window_poisson(N, alpha=2):
    r"""Poisson tapering window.

    :param int N: window length
    :param alpha: decay rate of the exponential taper

    .. math:: w(n) = \exp^{-\alpha \frac{|n|}{N/2} }

    with :math:`-N/2 \leq n \leq N/2`.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'poisson')
        window_visu(64, 'poisson', alpha=3)
        window_visu(64, 'poisson', alpha=4)

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    half_width = N / 2.
    sample_points = linspace(-half_width, half_width, N)
    # Exponential decay away from the window center.
    return exp(-alpha * abs(sample_points) / half_width)
|
def function[window_poisson, parameter[N, alpha]]:
constant[Poisson tapering window
:param int N: window length
.. math:: w(n) = \exp^{-\alpha \frac{|n|}{N/2} }
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'poisson')
window_visu(64, 'poisson', alpha=3)
window_visu(64, 'poisson', alpha=4)
.. seealso:: :func:`create_window`, :class:`Window`
]
variable[n] assign[=] call[name[linspace], parameter[binary_operation[<ast.UnaryOp object at 0x7da1b007d7e0> / constant[2.0]], binary_operation[name[N] / constant[2.0]], name[N]]]
variable[w] assign[=] call[name[exp], parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b007c340> * call[name[abs], parameter[name[n]]]] / binary_operation[name[N] / constant[2.0]]]]]
return[name[w]]
|
keyword[def] identifier[window_poisson] ( identifier[N] , identifier[alpha] = literal[int] ):
literal[string]
identifier[n] = identifier[linspace] (- identifier[N] / literal[int] ,( identifier[N] )/ literal[int] , identifier[N] )
identifier[w] = identifier[exp] (- identifier[alpha] * identifier[abs] ( identifier[n] )/( identifier[N] / literal[int] ))
keyword[return] identifier[w]
|
def window_poisson(N, alpha=2):
"""Poisson tapering window
:param int N: window length
.. math:: w(n) = \\exp^{-\\alpha \\frac{|n|}{N/2} }
with :math:`-N/2 \\leq n \\leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'poisson')
window_visu(64, 'poisson', alpha=3)
window_visu(64, 'poisson', alpha=4)
.. seealso:: :func:`create_window`, :class:`Window`
"""
n = linspace(-N / 2.0, N / 2.0, N)
w = exp(-alpha * abs(n) / (N / 2.0))
return w
|
def return_msg(self, reply_code, reply_text, exchange, routing_key):
    '''
    Return a failed message to the sender. Not named "return" because the
    python interpreter can't deal with that.
    '''
    payload = Writer()
    # Field order matters: it mirrors the basic.return wire format.
    payload.write_short(reply_code)
    payload.write_shortstr(reply_text)
    payload.write_shortstr(exchange)
    payload.write_shortstr(routing_key)
    # class 60 (basic), method 50 (return).
    self.send_frame(MethodFrame(self.channel_id, 60, 50, payload))
|
def function[return_msg, parameter[self, reply_code, reply_text, exchange, routing_key]]:
constant[
Return a failed message. Not named "return" because python interpreter
can't deal with that.
]
variable[args] assign[=] call[name[Writer], parameter[]]
call[call[call[call[name[args].write_short, parameter[name[reply_code]]].write_shortstr, parameter[name[reply_text]]].write_shortstr, parameter[name[exchange]]].write_shortstr, parameter[name[routing_key]]]
call[name[self].send_frame, parameter[call[name[MethodFrame], parameter[name[self].channel_id, constant[60], constant[50], name[args]]]]]
|
keyword[def] identifier[return_msg] ( identifier[self] , identifier[reply_code] , identifier[reply_text] , identifier[exchange] , identifier[routing_key] ):
literal[string]
identifier[args] = identifier[Writer] ()
identifier[args] . identifier[write_short] ( identifier[reply_code] ). identifier[write_shortstr] ( identifier[reply_text] ). identifier[write_shortstr] ( identifier[exchange] ). identifier[write_shortstr] ( identifier[routing_key] )
identifier[self] . identifier[send_frame] ( identifier[MethodFrame] ( identifier[self] . identifier[channel_id] , literal[int] , literal[int] , identifier[args] ))
|
def return_msg(self, reply_code, reply_text, exchange, routing_key):
"""
Return a failed message. Not named "return" because python interpreter
can't deal with that.
"""
args = Writer()
args.write_short(reply_code).write_shortstr(reply_text).write_shortstr(exchange).write_shortstr(routing_key)
self.send_frame(MethodFrame(self.channel_id, 60, 50, args))
|
def suggestions(self, index=None):
    """Get suggestions for correction.

    Yields:
        :class:`Suggestion` elements that encapsulate the suggested
        annotations (if ``index`` is ``None``, the default).
    Returns:
        the single :class:`Suggestion` at position ``index`` (if set).
    Raises:
        :class:`IndexError`: if ``index`` is out of range.
    """
    matches = self.select(Suggestion, None, False, False)
    if index is None:
        return matches
    for position, suggestion in enumerate(matches):
        if position == index:
            return suggestion
    raise IndexError
|
def function[suggestions, parameter[self, index]]:
constant[Get suggestions for correction.
Yields:
:class:`Suggestion` element that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulate the suggested annotations (if index is set)
Raises:
:class:`IndexError`
]
if compare[name[index] is constant[None]] begin[:]
return[call[name[self].select, parameter[name[Suggestion], constant[None], constant[False], constant[False]]]]
|
keyword[def] identifier[suggestions] ( identifier[self] , identifier[index] = keyword[None] ):
literal[string]
keyword[if] identifier[index] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[select] ( identifier[Suggestion] , keyword[None] , keyword[False] , keyword[False] )
keyword[else] :
keyword[for] identifier[i] , identifier[e] keyword[in] identifier[enumerate] ( identifier[self] . identifier[select] ( identifier[Suggestion] , keyword[None] , keyword[False] , keyword[False] )):
keyword[if] identifier[index] == identifier[i] :
keyword[return] identifier[e]
keyword[raise] identifier[IndexError]
|
def suggestions(self, index=None):
"""Get suggestions for correction.
Yields:
:class:`Suggestion` element that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulate the suggested annotations (if index is set)
Raises:
:class:`IndexError`
"""
if index is None:
return self.select(Suggestion, None, False, False) # depends on [control=['if'], data=[]]
else:
for (i, e) in enumerate(self.select(Suggestion, None, False, False)):
if index == i:
return e # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise IndexError
|
def human(value):
    """If val>=1000 return val/1024+KiB, etc."""
    # (threshold, divisor, template) from largest to smallest unit; the first
    # threshold the value reaches decides the unit.
    scale_table = (
        (1073741824000, 1099511627776.0, '{:.1f} T'),
        (1048576000, 1073741824.0, '{:.1f} G'),
        (1024000, 1048576.0, '{:.1f} M'),
        (1000, 1024.0, '{:.1f} K'),
    )
    for threshold, divisor, template in scale_table:
        if value >= threshold:
            return template.format(value / divisor)
    return '{} B'.format(value)
|
def function[human, parameter[value]]:
constant[If val>=1000 return val/1024+KiB, etc.]
if compare[name[value] greater_or_equal[>=] constant[1073741824000]] begin[:]
return[call[constant[{:.1f} T].format, parameter[binary_operation[name[value] / constant[1099511627776.0]]]]]
if compare[name[value] greater_or_equal[>=] constant[1048576000]] begin[:]
return[call[constant[{:.1f} G].format, parameter[binary_operation[name[value] / constant[1073741824.0]]]]]
if compare[name[value] greater_or_equal[>=] constant[1024000]] begin[:]
return[call[constant[{:.1f} M].format, parameter[binary_operation[name[value] / constant[1048576.0]]]]]
if compare[name[value] greater_or_equal[>=] constant[1000]] begin[:]
return[call[constant[{:.1f} K].format, parameter[binary_operation[name[value] / constant[1024.0]]]]]
return[call[constant[{} B].format, parameter[name[value]]]]
|
keyword[def] identifier[human] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] >= literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[value] / literal[int] )
keyword[if] identifier[value] >= literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[value] / literal[int] )
keyword[if] identifier[value] >= literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[value] / literal[int] )
keyword[if] identifier[value] >= literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[value] / literal[int] )
keyword[return] literal[string] . identifier[format] ( identifier[value] )
|
def human(value):
"""If val>=1000 return val/1024+KiB, etc."""
if value >= 1073741824000:
return '{:.1f} T'.format(value / 1099511627776.0) # depends on [control=['if'], data=['value']]
if value >= 1048576000:
return '{:.1f} G'.format(value / 1073741824.0) # depends on [control=['if'], data=['value']]
if value >= 1024000:
return '{:.1f} M'.format(value / 1048576.0) # depends on [control=['if'], data=['value']]
if value >= 1000:
return '{:.1f} K'.format(value / 1024.0) # depends on [control=['if'], data=['value']]
return '{} B'.format(value)
|
def remove_from_context(self, name, *args):
    """Remove attributes from the named context.

    :param name: name of the context to modify.
    :param args: attribute keys to delete; a missing key raises ``KeyError``.
    """
    attributes = self.get_context(name=name)['context']
    for attribute_name in args:
        del attributes[attribute_name]
|
def function[remove_from_context, parameter[self, name]]:
constant[Remove attributes from a context.
]
variable[context] assign[=] call[name[self].get_context, parameter[]]
variable[attrs_] assign[=] call[name[context]][constant[context]]
for taget[name[a]] in starred[name[args]] begin[:]
<ast.Delete object at 0x7da20c9936a0>
|
keyword[def] identifier[remove_from_context] ( identifier[self] , identifier[name] ,* identifier[args] ):
literal[string]
identifier[context] = identifier[self] . identifier[get_context] ( identifier[name] = identifier[name] )
identifier[attrs_] = identifier[context] [ literal[string] ]
keyword[for] identifier[a] keyword[in] identifier[args] :
keyword[del] identifier[attrs_] [ identifier[a] ]
|
def remove_from_context(self, name, *args):
"""Remove attributes from a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
for a in args:
del attrs_[a] # depends on [control=['for'], data=['a']]
|
def _check_nested_blocks(self, node):
    """Update the running stack of nested blocks and, once a group of
    blocks has been fully left, emit a message if it nested too deeply.

    :param node: the astroid block node being left (message emission is
        tied to leaving, per the comment below).
    """
    # only check block levels inside functions or methods
    if not isinstance(node.scope(), astroid.FunctionDef):
        return
    # messages are triggered on leaving the nested block. Here we save the
    # stack in case the current node isn't nested in the previous one
    nested_blocks = self._nested_blocks[:]
    if node.parent == node.scope():
        # node sits directly in the function body: start a fresh stack
        self._nested_blocks = [node]
    else:
        # go through ancestors from the most nested to the less, popping
        # stack entries until we hit this node's direct parent
        for ancestor_node in reversed(self._nested_blocks):
            if ancestor_node == node.parent:
                break
            self._nested_blocks.pop()
        # if the node is an elif, this should not be another nesting level
        if isinstance(node, astroid.If) and self._is_actual_elif(node):
            if self._nested_blocks:
                self._nested_blocks.pop()
        self._nested_blocks.append(node)
    # send message only once per group of nested blocks: only when the
    # stack shrank, i.e. we just left the deepest block of a group
    if len(nested_blocks) > len(self._nested_blocks):
        self._emit_nested_blocks_message_if_needed(nested_blocks)
|
def function[_check_nested_blocks, parameter[self, node]]:
constant[Update and check the number of nested blocks
]
if <ast.UnaryOp object at 0x7da1b028d600> begin[:]
return[None]
variable[nested_blocks] assign[=] call[name[self]._nested_blocks][<ast.Slice object at 0x7da1b028d900>]
if compare[name[node].parent equal[==] call[name[node].scope, parameter[]]] begin[:]
name[self]._nested_blocks assign[=] list[[<ast.Name object at 0x7da1b028df30>]]
if compare[call[name[len], parameter[name[nested_blocks]]] greater[>] call[name[len], parameter[name[self]._nested_blocks]]] begin[:]
call[name[self]._emit_nested_blocks_message_if_needed, parameter[name[nested_blocks]]]
|
keyword[def] identifier[_check_nested_blocks] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[node] . identifier[scope] (), identifier[astroid] . identifier[FunctionDef] ):
keyword[return]
identifier[nested_blocks] = identifier[self] . identifier[_nested_blocks] [:]
keyword[if] identifier[node] . identifier[parent] == identifier[node] . identifier[scope] ():
identifier[self] . identifier[_nested_blocks] =[ identifier[node] ]
keyword[else] :
keyword[for] identifier[ancestor_node] keyword[in] identifier[reversed] ( identifier[self] . identifier[_nested_blocks] ):
keyword[if] identifier[ancestor_node] == identifier[node] . identifier[parent] :
keyword[break]
identifier[self] . identifier[_nested_blocks] . identifier[pop] ()
keyword[if] identifier[isinstance] ( identifier[node] , identifier[astroid] . identifier[If] ) keyword[and] identifier[self] . identifier[_is_actual_elif] ( identifier[node] ):
keyword[if] identifier[self] . identifier[_nested_blocks] :
identifier[self] . identifier[_nested_blocks] . identifier[pop] ()
identifier[self] . identifier[_nested_blocks] . identifier[append] ( identifier[node] )
keyword[if] identifier[len] ( identifier[nested_blocks] )> identifier[len] ( identifier[self] . identifier[_nested_blocks] ):
identifier[self] . identifier[_emit_nested_blocks_message_if_needed] ( identifier[nested_blocks] )
|
def _check_nested_blocks(self, node):
"""Update and check the number of nested blocks
"""
# only check block levels inside functions or methods
if not isinstance(node.scope(), astroid.FunctionDef):
return # depends on [control=['if'], data=[]]
# messages are triggered on leaving the nested block. Here we save the
# stack in case the current node isn't nested in the previous one
nested_blocks = self._nested_blocks[:]
if node.parent == node.scope():
self._nested_blocks = [node] # depends on [control=['if'], data=[]]
else:
# go through ancestors from the most nested to the less
for ancestor_node in reversed(self._nested_blocks):
if ancestor_node == node.parent:
break # depends on [control=['if'], data=[]]
self._nested_blocks.pop() # depends on [control=['for'], data=['ancestor_node']]
# if the node is an elif, this should not be another nesting level
if isinstance(node, astroid.If) and self._is_actual_elif(node):
if self._nested_blocks:
self._nested_blocks.pop() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._nested_blocks.append(node)
# send message only once per group of nested blocks
if len(nested_blocks) > len(self._nested_blocks):
self._emit_nested_blocks_message_if_needed(nested_blocks) # depends on [control=['if'], data=[]]
|
def path(self, *args: typing.List[str]) -> typing.Union[None, str]:
    """
    Build an absolute path inside the project source directory.

    :param args:
        Relative path components appended to the project source
        directory.
    :return:
        The cleaned absolute path to the specified file or directory,
        or ``None`` when no project is loaded.
    """
    if not self._project:
        return None
    joined = os.path.join(self._project.source_directory, *args)
    return environ.paths.clean(joined)
|
def function[path, parameter[self]]:
constant[
Creates an absolute path in the project source directory from the
relative path components.
:param args:
Relative components for creating a path within the project source
directory
:return:
An absolute path to the specified file or directory within the
project source directory.
]
if <ast.UnaryOp object at 0x7da20e955fc0> begin[:]
return[constant[None]]
return[call[name[environ].paths.clean, parameter[call[name[os].path.join, parameter[name[self]._project.source_directory, <ast.Starred object at 0x7da20e954670>]]]]]
|
keyword[def] identifier[path] ( identifier[self] ,* identifier[args] : identifier[typing] . identifier[List] [ identifier[str] ])-> identifier[typing] . identifier[Union] [ keyword[None] , identifier[str] ]:
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_project] :
keyword[return] keyword[None]
keyword[return] identifier[environ] . identifier[paths] . identifier[clean] ( identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[_project] . identifier[source_directory] ,
* identifier[args]
))
|
def path(self, *args: typing.List[str]) -> typing.Union[None, str]:
"""
Creates an absolute path in the project source directory from the
relative path components.
:param args:
Relative components for creating a path within the project source
directory
:return:
An absolute path to the specified file or directory within the
project source directory.
"""
if not self._project:
return None # depends on [control=['if'], data=[]]
return environ.paths.clean(os.path.join(self._project.source_directory, *args))
|
def ng_dissim(a, b, X=None, membship=None):
    """Ng et al.'s dissimilarity measure, as presented in
    Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
    Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
    Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
    January, 2007

    This function can potentially speed up training convergence.

    Note that membship must be a rectangular array such that
    len(membship) = len(a) and len(membship[i]) = X.shape[0]
    (one row per centroid in 'a', one column per data point in 'X').

    In case of missing membship, this function reverts back to
    matching dissimilarity (e.g., when predicting).
    """
    # Without membership, revert to matching dissimilarity
    if membship is None:
        return matching_dissim(a, b)

    def calc_cjr(b, X, memj, idr):
        """Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
        xcids = np.where(memj == 1)
        return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))

    def calc_dissim(b, X, memj, idr):
        # Size of jth cluster
        cj = float(np.sum(memj))
        # Empty clusters contribute no dissimilarity (avoids division by zero).
        return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0

    # Validate the rectangular shape of 'membship'.  NOTE(fix): the original
    # joined the two conditions with 'and' (so it only raised when *both*
    # dimensions were wrong) and compared against X.shape[1] (number of
    # attributes) instead of X.shape[0] (number of points), which is the
    # axis actually indexed by np.where/np.take above and what the error
    # message describes.
    if len(membship) != a.shape[0] or len(membship[0]) != X.shape[0]:
        raise ValueError("'membship' must be a rectangular array where "
                         "the number of rows in 'membship' equals the "
                         "number of rows in 'a' and the number of "
                         "columns in 'membship' equals the number of rows in 'X'.")

    return np.array([np.array([calc_dissim(b, X, membship[idj], idr)
                               if b[idr] == t else 1.0
                               for idr, t in enumerate(val_a)]).sum(0)
                     for idj, val_a in enumerate(a)])
|
def function[ng_dissim, parameter[a, b, X, membship]]:
constant[Ng et al.'s dissimilarity measure, as presented in
Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
January, 2007
This function can potentially speed up training convergence.
Note that membship must be a rectangular array such that the
len(membship) = len(a) and len(membship[i]) = X.shape[1]
In case of missing membship, this function reverts back to
matching dissimilarity (e.g., when predicting).
]
if compare[name[membship] is constant[None]] begin[:]
return[call[name[matching_dissim], parameter[name[a], name[b]]]]
def function[calc_cjr, parameter[b, X, memj, idr]]:
constant[Num objects w/ category value x_{i,r} for rth attr in jth cluster]
variable[xcids] assign[=] call[name[np].where, parameter[compare[name[memj] equal[==] constant[1]]]]
return[call[name[float], parameter[call[compare[call[call[call[name[np].take, parameter[name[X], name[xcids]]]][constant[0]]][tuple[[<ast.Slice object at 0x7da1b18a37c0>, <ast.Name object at 0x7da1b18a3c10>]]] equal[==] call[name[b]][name[idr]]].sum, parameter[constant[0]]]]]]
def function[calc_dissim, parameter[b, X, memj, idr]]:
variable[cj] assign[=] call[name[float], parameter[call[name[np].sum, parameter[name[memj]]]]]
return[<ast.IfExp object at 0x7da1b18a1e70>]
if <ast.BoolOp object at 0x7da1b18a01c0> begin[:]
<ast.Raise object at 0x7da1b18a2560>
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b18a2fb0>]]]
|
keyword[def] identifier[ng_dissim] ( identifier[a] , identifier[b] , identifier[X] = keyword[None] , identifier[membship] = keyword[None] ):
literal[string]
keyword[if] identifier[membship] keyword[is] keyword[None] :
keyword[return] identifier[matching_dissim] ( identifier[a] , identifier[b] )
keyword[def] identifier[calc_cjr] ( identifier[b] , identifier[X] , identifier[memj] , identifier[idr] ):
literal[string]
identifier[xcids] = identifier[np] . identifier[where] ( identifier[memj] == literal[int] )
keyword[return] identifier[float] (( identifier[np] . identifier[take] ( identifier[X] , identifier[xcids] , identifier[axis] = literal[int] )[ literal[int] ][:, identifier[idr] ]== identifier[b] [ identifier[idr] ]). identifier[sum] ( literal[int] ))
keyword[def] identifier[calc_dissim] ( identifier[b] , identifier[X] , identifier[memj] , identifier[idr] ):
identifier[cj] = identifier[float] ( identifier[np] . identifier[sum] ( identifier[memj] ))
keyword[return] ( literal[int] -( identifier[calc_cjr] ( identifier[b] , identifier[X] , identifier[memj] , identifier[idr] )/ identifier[cj] )) keyword[if] identifier[cj] != literal[int] keyword[else] literal[int]
keyword[if] identifier[len] ( identifier[membship] )!= identifier[a] . identifier[shape] [ literal[int] ] keyword[and] identifier[len] ( identifier[membship] [ literal[int] ])!= identifier[X] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
literal[string] )
keyword[return] identifier[np] . identifier[array] ([ identifier[np] . identifier[array] ([ identifier[calc_dissim] ( identifier[b] , identifier[X] , identifier[membship] [ identifier[idj] ], identifier[idr] )
keyword[if] identifier[b] [ identifier[idr] ]== identifier[t] keyword[else] literal[int]
keyword[for] identifier[idr] , identifier[t] keyword[in] identifier[enumerate] ( identifier[val_a] )]). identifier[sum] ( literal[int] )
keyword[for] identifier[idj] , identifier[val_a] keyword[in] identifier[enumerate] ( identifier[a] )])
|
def ng_dissim(a, b, X=None, membship=None):
"""Ng et al.'s dissimilarity measure, as presented in
Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
January, 2007
This function can potentially speed up training convergence.
Note that membship must be a rectangular array such that the
len(membship) = len(a) and len(membship[i]) = X.shape[1]
In case of missing membship, this function reverts back to
matching dissimilarity (e.g., when predicting).
"""
# Without membership, revert to matching dissimilarity
if membship is None:
return matching_dissim(a, b) # depends on [control=['if'], data=[]]
def calc_cjr(b, X, memj, idr):
"""Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
xcids = np.where(memj == 1)
return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))
def calc_dissim(b, X, memj, idr):
# Size of jth cluster
cj = float(np.sum(memj))
return 1.0 - calc_cjr(b, X, memj, idr) / cj if cj != 0.0 else 0.0
if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]:
raise ValueError("'membship' must be a rectangular array where the number of rows in 'membship' equals the number of rows in 'a' and the number of columns in 'membship' equals the number of rows in 'X'.") # depends on [control=['if'], data=[]]
return np.array([np.array([calc_dissim(b, X, membship[idj], idr) if b[idr] == t else 1.0 for (idr, t) in enumerate(val_a)]).sum(0) for (idj, val_a) in enumerate(a)])
|
def get_pfam_accession_numbers_from_pdb_id(self, pdb_id):
    '''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
    # Case-insensitive lookup done once; return a defensive copy so callers
    # cannot mutate the cached mapping.  Unknown (or empty) entries fall
    # through to an implicit None, as before.
    mapping = self.pdb_chain_to_pfam_mapping.get(pdb_id.lower())
    if mapping:
        return mapping.copy()
|
def function[get_pfam_accession_numbers_from_pdb_id, parameter[self, pdb_id]]:
constant[Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.]
variable[pdb_id] assign[=] call[name[pdb_id].lower, parameter[]]
if call[name[self].pdb_chain_to_pfam_mapping.get, parameter[name[pdb_id]]] begin[:]
return[call[call[name[self].pdb_chain_to_pfam_mapping][name[pdb_id]].copy, parameter[]]]
|
keyword[def] identifier[get_pfam_accession_numbers_from_pdb_id] ( identifier[self] , identifier[pdb_id] ):
literal[string]
identifier[pdb_id] = identifier[pdb_id] . identifier[lower] ()
keyword[if] identifier[self] . identifier[pdb_chain_to_pfam_mapping] . identifier[get] ( identifier[pdb_id] ):
keyword[return] identifier[self] . identifier[pdb_chain_to_pfam_mapping] [ identifier[pdb_id] ]. identifier[copy] ()
|
def get_pfam_accession_numbers_from_pdb_id(self, pdb_id):
"""Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg."""
pdb_id = pdb_id.lower()
if self.pdb_chain_to_pfam_mapping.get(pdb_id):
return self.pdb_chain_to_pfam_mapping[pdb_id].copy() # depends on [control=['if'], data=[]]
|
def path_to_node(tree, path):
    """Return the FST node reached by walking *path* from *tree*.

    A ``None`` path yields ``None``; an empty path yields *tree* itself.
    """
    if path is None:
        return None
    current = tree
    for step in path:
        # descend one level per path component
        current = child_by_key(current, step)
    return current
|
def function[path_to_node, parameter[tree, path]]:
constant[FST node located at the given path]
if compare[name[path] is constant[None]] begin[:]
return[constant[None]]
variable[node] assign[=] name[tree]
for taget[name[key]] in starred[name[path]] begin[:]
variable[node] assign[=] call[name[child_by_key], parameter[name[node], name[key]]]
return[name[node]]
|
keyword[def] identifier[path_to_node] ( identifier[tree] , identifier[path] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[node] = identifier[tree]
keyword[for] identifier[key] keyword[in] identifier[path] :
identifier[node] = identifier[child_by_key] ( identifier[node] , identifier[key] )
keyword[return] identifier[node]
|
def path_to_node(tree, path):
"""FST node located at the given path"""
if path is None:
return None # depends on [control=['if'], data=[]]
node = tree
for key in path:
node = child_by_key(node, key) # depends on [control=['for'], data=['key']]
return node
|
def gradient_lonlat(self, data, nit=3, tol=1.0e-3, guarantee_convergence=False):
    """
    Return the lon / lat components of the gradient
    of a scalar field on the surface of the sphere.

    The method consists of minimizing a quadratic functional Q(G) over
    gradient vectors, where Q is an approximation to the linearized
    curvature over the triangulation of a C-1 bivariate function F(x,y)
    which interpolates the nodal values and gradients.

    Parameters
    ----------
    data : array of floats, shape (n,)
        field over which to evaluate the gradient
    nit: int (default: 3)
        number of iterations to reach a convergence tolerance, tol
        nit >= 1
    tol: float (default: 1e-3)
        maximum change in gradient between iterations.
        convergence is reached when this condition is met.

    Returns
    -------
    dfdlon : array of floats, shape (n,)
        derivative of f in the longitudinal direction
    dfdlat : array of floats, shape (n,)
        derivative of f in the lattitudinal direction

    Notes
    -----
    The gradient is computed via the Cartesian components using
    spherical.sTriangulation.gradient_xyz and the iteration parameters
    controling the spline interpolation are passed directly to this
    routine (See notes for gradient_xyz for more details).

    The gradient operator in this geometry is not well defined at the poles
    even if the scalar field is smooth and the Cartesian gradient is well defined.

    The routine spherical.dxyz2dlonlat is available to convert the Cartesian
    to lon/lat coordinates at any point on the unit sphere. This is helpful
    to avoid recalculation if you need both forms.
    """
    # Cartesian gradient on the sphere; the spline-iteration parameters are
    # forwarded unchanged to gradient_xyz.
    dfxs, dfys, dfzs = self.gradient_xyz(data, nit=nit, tol=tol, guarantee_convergence=guarantee_convergence)
    # get deshuffled versions
    lons = self.lons
    lats = self.lats
    z = self.z
    # Chain rule: project the Cartesian gradient onto the local lon/lat
    # unit vectors.
    dlon = -dfxs * np.cos(lats) * np.sin(lons) + dfys * np.cos(lats) * np.cos(lons) # no z dependence
    dlat = -dfxs * np.sin(lats) * np.cos(lons) - dfys * np.sin(lats) * np.sin(lons) + dfzs * np.cos(lats)
    # On the unit sphere cos(lat) = sqrt(1 - z**2); this is the 1/cos(lat)
    # scaling of the longitudinal derivative.
    corr = np.sqrt((1.0-z**2))
    # Skip points at (or numerically indistinguishable from) the poles,
    # where cos(lat) ~ 0 and the longitudinal derivative is undefined.
    valid = ~np.isclose(corr,0.0)
    dlon[valid] = dlon[valid] / corr[valid]
    return dlon, dlat
|
def function[gradient_lonlat, parameter[self, data, nit, tol, guarantee_convergence]]:
constant[
Return the lon / lat components of the gradient
of a scalar field on the surface of the sphere.
The method consists of minimizing a quadratic functional Q(G) over
gradient vectors, where Q is an approximation to the linearized
curvature over the triangulation of a C-1 bivariate function F(x,y)
which interpolates the nodal values and gradients.
Parameters
----------
data : array of floats, shape (n,)
field over which to evaluate the gradient
nit: int (default: 3)
number of iterations to reach a convergence tolerance, tol
nit >= 1
tol: float (default: 1e-3)
maximum change in gradient between iterations.
convergence is reached when this condition is met.
Returns
-------
dfdlon : array of floats, shape (n,)
derivative of f in the longitudinal direction
dfdlat : array of floats, shape (n,)
derivative of f in the lattitudinal direction
Notes
-----
The gradient is computed via the Cartesian components using
spherical.sTriangulation.gradient_xyz and the iteration parameters
controling the spline interpolation are passed directly to this
routine (See notes for gradient_xyz for more details).
The gradient operator in this geometry is not well defined at the poles
even if the scalar field is smooth and the Cartesian gradient is well defined.
The routine spherical.dxyz2dlonlat is available to convert the Cartesian
to lon/lat coordinates at any point on the unit sphere. This is helpful
to avoid recalculation if you need both forms.
]
<ast.Tuple object at 0x7da207f038b0> assign[=] call[name[self].gradient_xyz, parameter[name[data]]]
variable[lons] assign[=] name[self].lons
variable[lats] assign[=] name[self].lats
variable[z] assign[=] name[self].z
variable[dlon] assign[=] binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da207f01660> * call[name[np].cos, parameter[name[lats]]]] * call[name[np].sin, parameter[name[lons]]]] + binary_operation[binary_operation[name[dfys] * call[name[np].cos, parameter[name[lats]]]] * call[name[np].cos, parameter[name[lons]]]]]
variable[dlat] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b23ee110> * call[name[np].sin, parameter[name[lats]]]] * call[name[np].cos, parameter[name[lons]]]] - binary_operation[binary_operation[name[dfys] * call[name[np].sin, parameter[name[lats]]]] * call[name[np].sin, parameter[name[lons]]]]] + binary_operation[name[dfzs] * call[name[np].cos, parameter[name[lats]]]]]
variable[corr] assign[=] call[name[np].sqrt, parameter[binary_operation[constant[1.0] - binary_operation[name[z] ** constant[2]]]]]
variable[valid] assign[=] <ast.UnaryOp object at 0x7da1b23efee0>
call[name[dlon]][name[valid]] assign[=] binary_operation[call[name[dlon]][name[valid]] / call[name[corr]][name[valid]]]
return[tuple[[<ast.Name object at 0x7da1b23ec4c0>, <ast.Name object at 0x7da1b23ee2f0>]]]
|
keyword[def] identifier[gradient_lonlat] ( identifier[self] , identifier[data] , identifier[nit] = literal[int] , identifier[tol] = literal[int] , identifier[guarantee_convergence] = keyword[False] ):
literal[string]
identifier[dfxs] , identifier[dfys] , identifier[dfzs] = identifier[self] . identifier[gradient_xyz] ( identifier[data] , identifier[nit] = identifier[nit] , identifier[tol] = identifier[tol] , identifier[guarantee_convergence] = identifier[guarantee_convergence] )
identifier[lons] = identifier[self] . identifier[lons]
identifier[lats] = identifier[self] . identifier[lats]
identifier[z] = identifier[self] . identifier[z]
identifier[dlon] =- identifier[dfxs] * identifier[np] . identifier[cos] ( identifier[lats] )* identifier[np] . identifier[sin] ( identifier[lons] )+ identifier[dfys] * identifier[np] . identifier[cos] ( identifier[lats] )* identifier[np] . identifier[cos] ( identifier[lons] )
identifier[dlat] =- identifier[dfxs] * identifier[np] . identifier[sin] ( identifier[lats] )* identifier[np] . identifier[cos] ( identifier[lons] )- identifier[dfys] * identifier[np] . identifier[sin] ( identifier[lats] )* identifier[np] . identifier[sin] ( identifier[lons] )+ identifier[dfzs] * identifier[np] . identifier[cos] ( identifier[lats] )
identifier[corr] = identifier[np] . identifier[sqrt] (( literal[int] - identifier[z] ** literal[int] ))
identifier[valid] =~ identifier[np] . identifier[isclose] ( identifier[corr] , literal[int] )
identifier[dlon] [ identifier[valid] ]= identifier[dlon] [ identifier[valid] ]/ identifier[corr] [ identifier[valid] ]
keyword[return] identifier[dlon] , identifier[dlat]
|
def gradient_lonlat(self, data, nit=3, tol=0.001, guarantee_convergence=False):
"""
Return the lon / lat components of the gradient
of a scalar field on the surface of the sphere.
The method consists of minimizing a quadratic functional Q(G) over
gradient vectors, where Q is an approximation to the linearized
curvature over the triangulation of a C-1 bivariate function F(x,y)
which interpolates the nodal values and gradients.
Parameters
----------
data : array of floats, shape (n,)
field over which to evaluate the gradient
nit: int (default: 3)
number of iterations to reach a convergence tolerance, tol
nit >= 1
tol: float (default: 1e-3)
maximum change in gradient between iterations.
convergence is reached when this condition is met.
Returns
-------
dfdlon : array of floats, shape (n,)
derivative of f in the longitudinal direction
dfdlat : array of floats, shape (n,)
derivative of f in the lattitudinal direction
Notes
-----
The gradient is computed via the Cartesian components using
spherical.sTriangulation.gradient_xyz and the iteration parameters
controling the spline interpolation are passed directly to this
routine (See notes for gradient_xyz for more details).
The gradient operator in this geometry is not well defined at the poles
even if the scalar field is smooth and the Cartesian gradient is well defined.
The routine spherical.dxyz2dlonlat is available to convert the Cartesian
to lon/lat coordinates at any point on the unit sphere. This is helpful
to avoid recalculation if you need both forms.
"""
(dfxs, dfys, dfzs) = self.gradient_xyz(data, nit=nit, tol=tol, guarantee_convergence=guarantee_convergence)
# get deshuffled versions
lons = self.lons
lats = self.lats
z = self.z
dlon = -dfxs * np.cos(lats) * np.sin(lons) + dfys * np.cos(lats) * np.cos(lons) # no z dependence
dlat = -dfxs * np.sin(lats) * np.cos(lons) - dfys * np.sin(lats) * np.sin(lons) + dfzs * np.cos(lats)
corr = np.sqrt(1.0 - z ** 2)
valid = ~np.isclose(corr, 0.0)
dlon[valid] = dlon[valid] / corr[valid]
return (dlon, dlat)
|
def encode(self, key):
    """Encodes a user key into a particular format. The result of this method
    will be used by swauth for storing user credentials.

    If salt is not manually set in conf file, a random salt will be
    generated and used.

    :param key: User's secret key
    :returns: A string representing user credentials
    """
    if self.salt:
        salt = self.salt
    else:
        # ``bytes.encode('base64')`` is a Python 2-only codec call (bytes
        # objects have no .encode() on Python 3); the base64 module works on
        # both.  b64encode emits no trailing newline, so no rstrip of
        # whitespace is required.
        import base64
        salt = base64.b64encode(os.urandom(32)).decode('ascii')
    return self.encode_w_salt(salt, key)
|
def function[encode, parameter[self, key]]:
constant[Encodes a user key into a particular format. The result of this method
will be used by swauth for storing user credentials.
If salt is not manually set in conf file, a random salt will be
generated and used.
:param key: User's secret key
:returns: A string representing user credentials
]
variable[salt] assign[=] <ast.BoolOp object at 0x7da1b04320b0>
return[call[name[self].encode_w_salt, parameter[name[salt], name[key]]]]
|
keyword[def] identifier[encode] ( identifier[self] , identifier[key] ):
literal[string]
identifier[salt] = identifier[self] . identifier[salt] keyword[or] identifier[os] . identifier[urandom] ( literal[int] ). identifier[encode] ( literal[string] ). identifier[rstrip] ()
keyword[return] identifier[self] . identifier[encode_w_salt] ( identifier[salt] , identifier[key] )
|
def encode(self, key):
"""Encodes a user key into a particular format. The result of this method
will be used by swauth for storing user credentials.
If salt is not manually set in conf file, a random salt will be
generated and used.
:param key: User's secret key
:returns: A string representing user credentials
"""
salt = self.salt or os.urandom(32).encode('base64').rstrip()
return self.encode_w_salt(salt, key)
|
def on_result(self, task, result):
    '''Called every result'''
    if not result:
        return
    # A well-formed task carries all three identifying fields.
    if all(field in task for field in ('taskid', 'project', 'url')):
        logger.info('result %s:%s %s -> %.30r' % (
            task['project'], task['taskid'], task['url'], result))
        record = {
            'taskid': task['taskid'],
            'project': task['project'],
            'url': task['url'],
            'result': result,
            'updatetime': time.time()
        }
        print(json.dumps(record))
    else:
        logger.warning('result UNKNOW -> %.30r' % result)
|
def function[on_result, parameter[self, task, result]]:
constant[Called every result]
if <ast.UnaryOp object at 0x7da207f9ae60> begin[:]
return[None]
if <ast.BoolOp object at 0x7da207f9a680> begin[:]
call[name[logger].info, parameter[binary_operation[constant[result %s:%s %s -> %.30r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da207f9b850>, <ast.Subscript object at 0x7da207f98a00>, <ast.Subscript object at 0x7da207f9a860>, <ast.Name object at 0x7da207f9a1d0>]]]]]
call[name[print], parameter[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da207f992d0>, <ast.Constant object at 0x7da207f999c0>, <ast.Constant object at 0x7da207f99c90>, <ast.Constant object at 0x7da207f9b640>, <ast.Constant object at 0x7da207f9b910>], [<ast.Subscript object at 0x7da207f9bf70>, <ast.Subscript object at 0x7da207f9b730>, <ast.Subscript object at 0x7da207f98940>, <ast.Name object at 0x7da207f9aa10>, <ast.Call object at 0x7da207f98910>]]]]]]
|
keyword[def] identifier[on_result] ( identifier[self] , identifier[task] , identifier[result] ):
literal[string]
keyword[if] keyword[not] identifier[result] :
keyword[return]
keyword[if] literal[string] keyword[in] identifier[task] keyword[and] literal[string] keyword[in] identifier[task] keyword[and] literal[string] keyword[in] identifier[task] :
identifier[logger] . identifier[info] ( literal[string] %(
identifier[task] [ literal[string] ], identifier[task] [ literal[string] ], identifier[task] [ literal[string] ], identifier[result] ))
identifier[print] ( identifier[json] . identifier[dumps] ({
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[result] ,
literal[string] : identifier[time] . identifier[time] ()
}))
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] % identifier[result] )
keyword[return]
|
def on_result(self, task, result):
"""Called every result"""
if not result:
return # depends on [control=['if'], data=[]]
if 'taskid' in task and 'project' in task and ('url' in task):
logger.info('result %s:%s %s -> %.30r' % (task['project'], task['taskid'], task['url'], result))
print(json.dumps({'taskid': task['taskid'], 'project': task['project'], 'url': task['url'], 'result': result, 'updatetime': time.time()})) # depends on [control=['if'], data=[]]
else:
logger.warning('result UNKNOW -> %.30r' % result)
return
|
def sym_log_map(cls, q, p):
    """Quaternion symmetrized logarithm map.

    Find the symmetrized logarithm map on the quaternion Riemannian manifold,
    i.e. log(q**-0.5 * p * q**-0.5) where q is the base point.

    Params:
        q: the base point at which the logarithm is computed, i.e.
           a Quaternion object
        p: the argument of the quaternion map, a Quaternion object

    Returns:
        A tangent vector corresponding to the symmetrized geodesic curve
        formulation.

    Note:
        Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
    """
    half_inverse = q ** (-0.5)
    symmetrized = half_inverse * p * half_inverse
    return Quaternion.log(symmetrized)
|
def function[sym_log_map, parameter[cls, q, p]]:
constant[Quaternion symmetrized logarithm map.
Find the symmetrized logarithm map on the quaternion Riemannian manifold.
Params:
q: the base point at which the logarithm is computed, i.e.
a Quaternion object
p: the argument of the quaternion map, a Quaternion object
Returns:
A tangent vector corresponding to the symmetrized geodesic curve formulation.
Note:
Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
]
variable[inv_sqrt_q] assign[=] binary_operation[name[q] ** <ast.UnaryOp object at 0x7da1b08b09a0>]
return[call[name[Quaternion].log, parameter[binary_operation[binary_operation[name[inv_sqrt_q] * name[p]] * name[inv_sqrt_q]]]]]
|
keyword[def] identifier[sym_log_map] ( identifier[cls] , identifier[q] , identifier[p] ):
literal[string]
identifier[inv_sqrt_q] =( identifier[q] **(- literal[int] ))
keyword[return] identifier[Quaternion] . identifier[log] ( identifier[inv_sqrt_q] * identifier[p] * identifier[inv_sqrt_q] )
|
def sym_log_map(cls, q, p):
"""Quaternion symmetrized logarithm map.
Find the symmetrized logarithm map on the quaternion Riemannian manifold.
Params:
q: the base point at which the logarithm is computed, i.e.
a Quaternion object
p: the argument of the quaternion map, a Quaternion object
Returns:
A tangent vector corresponding to the symmetrized geodesic curve formulation.
Note:
Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
"""
inv_sqrt_q = q ** (-0.5)
return Quaternion.log(inv_sqrt_q * p * inv_sqrt_q)
|
def _find_source(method):
''' find source code of a given method
Find and extract the source code of a given method in a module.
Uses inspect.findsource to get all source code and performs some
selection magic to identify method source code. Doing it this way
because inspect.getsource returns wrong method.
Parameters:
method (obj):
A method object
Returns:
A string containing the source code of a given method
Example:
>>> from sdss_access.path import Path
>>> path = Path()
>>> path._find_source(path.full)
'''
# get source code lines of entire module method is in
source = inspect.findsource(method)
is_method = inspect.ismethod(method)
# create single source code string
source_str = '\n'.join(source[0])
# define search pattern
if is_method:
pattern = r'def\s{0}\(self'.format(method.__name__)
# search for pattern within the string
start = re.search(pattern, source_str)
if start:
# find start and end positions of source code
startpos = start.start()
endpos = source_str.find('def ', startpos + 1)
code = source_str[startpos:endpos]
else:
code = None
return code
|
def function[_find_source, parameter[method]]:
constant[ find source code of a given method
Find and extract the source code of a given method in a module.
Uses inspect.findsource to get all source code and performs some
selection magic to identify method source code. Doing it this way
because inspect.getsource returns wrong method.
Parameters:
method (obj):
A method object
Returns:
A string containing the source code of a given method
Example:
>>> from sdss_access.path import Path
>>> path = Path()
>>> path._find_source(path.full)
]
variable[source] assign[=] call[name[inspect].findsource, parameter[name[method]]]
variable[is_method] assign[=] call[name[inspect].ismethod, parameter[name[method]]]
variable[source_str] assign[=] call[constant[
].join, parameter[call[name[source]][constant[0]]]]
if name[is_method] begin[:]
variable[pattern] assign[=] call[constant[def\s{0}\(self].format, parameter[name[method].__name__]]
variable[start] assign[=] call[name[re].search, parameter[name[pattern], name[source_str]]]
if name[start] begin[:]
variable[startpos] assign[=] call[name[start].start, parameter[]]
variable[endpos] assign[=] call[name[source_str].find, parameter[constant[def ], binary_operation[name[startpos] + constant[1]]]]
variable[code] assign[=] call[name[source_str]][<ast.Slice object at 0x7da1b19065f0>]
return[name[code]]
|
keyword[def] identifier[_find_source] ( identifier[method] ):
literal[string]
identifier[source] = identifier[inspect] . identifier[findsource] ( identifier[method] )
identifier[is_method] = identifier[inspect] . identifier[ismethod] ( identifier[method] )
identifier[source_str] = literal[string] . identifier[join] ( identifier[source] [ literal[int] ])
keyword[if] identifier[is_method] :
identifier[pattern] = literal[string] . identifier[format] ( identifier[method] . identifier[__name__] )
identifier[start] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[source_str] )
keyword[if] identifier[start] :
identifier[startpos] = identifier[start] . identifier[start] ()
identifier[endpos] = identifier[source_str] . identifier[find] ( literal[string] , identifier[startpos] + literal[int] )
identifier[code] = identifier[source_str] [ identifier[startpos] : identifier[endpos] ]
keyword[else] :
identifier[code] = keyword[None]
keyword[return] identifier[code]
|
def _find_source(method):
""" find source code of a given method
Find and extract the source code of a given method in a module.
Uses inspect.findsource to get all source code and performs some
selection magic to identify method source code. Doing it this way
because inspect.getsource returns wrong method.
Parameters:
method (obj):
A method object
Returns:
A string containing the source code of a given method
Example:
>>> from sdss_access.path import Path
>>> path = Path()
>>> path._find_source(path.full)
"""
# get source code lines of entire module method is in
source = inspect.findsource(method)
is_method = inspect.ismethod(method)
# create single source code string
source_str = '\n'.join(source[0])
# define search pattern
if is_method:
pattern = 'def\\s{0}\\(self'.format(method.__name__) # depends on [control=['if'], data=[]]
# search for pattern within the string
start = re.search(pattern, source_str)
if start:
# find start and end positions of source code
startpos = start.start()
endpos = source_str.find('def ', startpos + 1)
code = source_str[startpos:endpos] # depends on [control=['if'], data=[]]
else:
code = None
return code
|
def assignValue(cfg, playerValue, otherValue):
    """artificially determine match results given match circumstances.
    WARNING: cheating will be detected and your player will be banned from server"""
    me = cfg.whoAmI()
    # our own player gets playerValue; every other player gets otherValue
    return {
        p.name: (playerValue if p.name == me.name else otherValue)
        for p in cfg.players
    }
|
def function[assignValue, parameter[cfg, playerValue, otherValue]]:
constant[artificially determine match results given match circumstances.
WARNING: cheating will be detected and your player will be banned from server]
variable[player] assign[=] call[name[cfg].whoAmI, parameter[]]
variable[result] assign[=] dictionary[[], []]
for taget[name[p]] in starred[name[cfg].players] begin[:]
if compare[name[p].name equal[==] name[player].name] begin[:]
variable[val] assign[=] name[playerValue]
call[name[result]][name[p].name] assign[=] name[val]
return[name[result]]
|
keyword[def] identifier[assignValue] ( identifier[cfg] , identifier[playerValue] , identifier[otherValue] ):
literal[string]
identifier[player] = identifier[cfg] . identifier[whoAmI] ()
identifier[result] ={}
keyword[for] identifier[p] keyword[in] identifier[cfg] . identifier[players] :
keyword[if] identifier[p] . identifier[name] == identifier[player] . identifier[name] : identifier[val] = identifier[playerValue]
keyword[else] : identifier[val] = identifier[otherValue]
identifier[result] [ identifier[p] . identifier[name] ]= identifier[val]
keyword[return] identifier[result]
|
def assignValue(cfg, playerValue, otherValue):
"""artificially determine match results given match circumstances.
WARNING: cheating will be detected and your player will be banned from server"""
player = cfg.whoAmI()
result = {}
for p in cfg.players:
if p.name == player.name:
val = playerValue # depends on [control=['if'], data=[]]
else:
val = otherValue
result[p.name] = val # depends on [control=['for'], data=['p']]
return result
|
def f(self, y, t):
    """Deterministic drift term f of the complete network system.

    Evaluates f in dy = f(y, t)dt + G(y, t).dot(dW) (for an ODE network
    without noise this is simply dy/dt = f(y, t)).

    Args:
        y (array of shape (d,)): full network state, d being the combined
            dimension of all node subsystems.
        t: current time.

    Returns:
        array of shape (d,): drift for the whole network, i.e. each node's
        intrinsic dynamics plus the summed coupling input from its sources.
    """
    couple = self.coupling_function[0]
    drift = np.empty_like(self.y0)
    bounds = self._si  # cumulative state-slice boundaries per node
    for node, submodel in enumerate(self.submodels):
        seg = slice(bounds[node], bounds[node + 1])
        node_state = y[seg]
        # Intrinsic dynamics of this node's own subsystem.
        drift[seg] = submodel.f(node_state, t)
        # Add coupling from every source node with a nonzero link weight
        # (column `node` of the adjacency matrix lists inputs to this node).
        for src in np.nonzero(self.network[:, node])[0]:
            src_state = y[bounds[src]:bounds[src + 1]]
            drift[seg] = drift[seg] + couple(src_state, node_state,
                                             self.network[src, node])
    return drift
|
def function[f, parameter[self, y, t]]:
constant[Deterministic term f of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(or for an ODE network system without noise, dy/dt = f(y, t))
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
f (array of shape (d,)): Defines the deterministic term of the
complete network system
]
variable[coupling] assign[=] call[name[self].coupling_function][constant[0]]
variable[res] assign[=] call[name[np].empty_like, parameter[name[self].y0]]
for taget[tuple[[<ast.Name object at 0x7da18f09e4d0>, <ast.Name object at 0x7da18f09ff40>]]] in starred[call[name[enumerate], parameter[name[self].submodels]]] begin[:]
variable[slicej] assign[=] call[name[slice], parameter[call[name[self]._si][name[j]], call[name[self]._si][binary_operation[name[j] + constant[1]]]]]
variable[target_y] assign[=] call[name[y]][name[slicej]]
call[name[res]][name[slicej]] assign[=] call[name[m].f, parameter[name[target_y], name[t]]]
variable[sources] assign[=] call[call[name[np].nonzero, parameter[call[name[self].network][tuple[[<ast.Slice object at 0x7da18f09f6a0>, <ast.Name object at 0x7da18f09e3e0>]]]]]][constant[0]]
for taget[name[i]] in starred[name[sources]] begin[:]
variable[weight] assign[=] call[name[self].network][tuple[[<ast.Name object at 0x7da18f09d090>, <ast.Name object at 0x7da18f09c370>]]]
variable[source_y] assign[=] call[name[y]][call[name[slice], parameter[call[name[self]._si][name[i]], call[name[self]._si][binary_operation[name[i] + constant[1]]]]]]
<ast.AugAssign object at 0x7da18f09fa60>
return[name[res]]
|
keyword[def] identifier[f] ( identifier[self] , identifier[y] , identifier[t] ):
literal[string]
identifier[coupling] = identifier[self] . identifier[coupling_function] [ literal[int] ]
identifier[res] = identifier[np] . identifier[empty_like] ( identifier[self] . identifier[y0] )
keyword[for] identifier[j] , identifier[m] keyword[in] identifier[enumerate] ( identifier[self] . identifier[submodels] ):
identifier[slicej] = identifier[slice] ( identifier[self] . identifier[_si] [ identifier[j] ], identifier[self] . identifier[_si] [ identifier[j] + literal[int] ])
identifier[target_y] = identifier[y] [ identifier[slicej] ]
identifier[res] [ identifier[slicej] ]= identifier[m] . identifier[f] ( identifier[target_y] , identifier[t] )
identifier[sources] = identifier[np] . identifier[nonzero] ( identifier[self] . identifier[network] [:, identifier[j] ])[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[sources] :
identifier[weight] = identifier[self] . identifier[network] [ identifier[i] , identifier[j] ]
identifier[source_y] = identifier[y] [ identifier[slice] ( identifier[self] . identifier[_si] [ identifier[i] ], identifier[self] . identifier[_si] [ identifier[i] + literal[int] ])]
identifier[res] [ identifier[slicej] ]+= identifier[coupling] ( identifier[source_y] , identifier[target_y] , identifier[weight] )
keyword[return] identifier[res]
|
def f(self, y, t):
"""Deterministic term f of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(or for an ODE network system without noise, dy/dt = f(y, t))
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
f (array of shape (d,)): Defines the deterministic term of the
complete network system
"""
coupling = self.coupling_function[0]
res = np.empty_like(self.y0)
for (j, m) in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j + 1])
target_y = y[slicej] # target node state
res[slicej] = m.f(target_y, t) # deterministic part of submodel j
# get indices of all source nodes that provide input to node j:
sources = np.nonzero(self.network[:, j])[0]
for i in sources:
weight = self.network[i, j]
source_y = y[slice(self._si[i], self._si[i + 1])] # source state
res[slicej] += coupling(source_y, target_y, weight) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]]
return res
|
def decode(enc):
    '''Decode a base58 string (ex: a Monero address) into hexidecimal form.

    The input is consumed in fixed-size encoded blocks; a shorter trailing
    block is allowed only if its length maps to a valid decoded size.
    '''
    encoded = bytearray(enc, encoding='ascii')
    total_len = len(encoded)
    if not total_len:
        return ""

    # Split into full encoded blocks plus an optional shorter tail block.
    full_blocks, tail_len = divmod(total_len, __fullEncodedBlockSize)
    try:
        tail_decoded_len = __encodedBlockSizes.index(tail_len)
    except ValueError:
        raise ValueError("Invalid encoded length: %d" % total_len)

    decoded = bytearray(full_blocks * __fullBlockSize + tail_decoded_len)
    for block_idx in range(full_blocks):
        offset = block_idx * __fullEncodedBlockSize
        decoded = decode_block(encoded[offset:offset + __fullEncodedBlockSize],
                               decoded, block_idx * __fullBlockSize)
    if tail_len:
        offset = full_blocks * __fullEncodedBlockSize
        decoded = decode_block(encoded[offset:offset + tail_len],
                               decoded, full_blocks * __fullBlockSize)
    return _binToHex(decoded)
|
def function[decode, parameter[enc]]:
constant[Decode a base58 string (ex: a Monero address) into hexidecimal form.]
variable[enc] assign[=] call[name[bytearray], parameter[name[enc]]]
variable[l_enc] assign[=] call[name[len], parameter[name[enc]]]
if compare[name[l_enc] equal[==] constant[0]] begin[:]
return[constant[]]
variable[full_block_count] assign[=] binary_operation[name[l_enc] <ast.FloorDiv object at 0x7da2590d6bc0> name[__fullEncodedBlockSize]]
variable[last_block_size] assign[=] binary_operation[name[l_enc] <ast.Mod object at 0x7da2590d6920> name[__fullEncodedBlockSize]]
<ast.Try object at 0x7da1b01428c0>
variable[data_size] assign[=] binary_operation[binary_operation[name[full_block_count] * name[__fullBlockSize]] + name[last_block_decoded_size]]
variable[data] assign[=] call[name[bytearray], parameter[name[data_size]]]
for taget[name[i]] in starred[call[name[range], parameter[name[full_block_count]]]] begin[:]
variable[data] assign[=] call[name[decode_block], parameter[call[name[enc]][<ast.Slice object at 0x7da1b01413c0>], name[data], binary_operation[name[i] * name[__fullBlockSize]]]]
if compare[name[last_block_size] greater[>] constant[0]] begin[:]
variable[data] assign[=] call[name[decode_block], parameter[call[name[enc]][<ast.Slice object at 0x7da1b0140790>], name[data], binary_operation[name[full_block_count] * name[__fullBlockSize]]]]
return[call[name[_binToHex], parameter[name[data]]]]
|
keyword[def] identifier[decode] ( identifier[enc] ):
literal[string]
identifier[enc] = identifier[bytearray] ( identifier[enc] , identifier[encoding] = literal[string] )
identifier[l_enc] = identifier[len] ( identifier[enc] )
keyword[if] identifier[l_enc] == literal[int] :
keyword[return] literal[string]
identifier[full_block_count] = identifier[l_enc] // identifier[__fullEncodedBlockSize]
identifier[last_block_size] = identifier[l_enc] % identifier[__fullEncodedBlockSize]
keyword[try] :
identifier[last_block_decoded_size] = identifier[__encodedBlockSizes] . identifier[index] ( identifier[last_block_size] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[l_enc] )
identifier[data_size] = identifier[full_block_count] * identifier[__fullBlockSize] + identifier[last_block_decoded_size]
identifier[data] = identifier[bytearray] ( identifier[data_size] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[full_block_count] ):
identifier[data] = identifier[decode_block] ( identifier[enc] [( identifier[i] * identifier[__fullEncodedBlockSize] ):( identifier[i] * identifier[__fullEncodedBlockSize] + identifier[__fullEncodedBlockSize] )], identifier[data] , identifier[i] * identifier[__fullBlockSize] )
keyword[if] identifier[last_block_size] > literal[int] :
identifier[data] = identifier[decode_block] ( identifier[enc] [( identifier[full_block_count] * identifier[__fullEncodedBlockSize] ):( identifier[full_block_count] * identifier[__fullEncodedBlockSize] + identifier[last_block_size] )], identifier[data] , identifier[full_block_count] * identifier[__fullBlockSize] )
keyword[return] identifier[_binToHex] ( identifier[data] )
|
def decode(enc):
"""Decode a base58 string (ex: a Monero address) into hexidecimal form."""
enc = bytearray(enc, encoding='ascii')
l_enc = len(enc)
if l_enc == 0:
return '' # depends on [control=['if'], data=[]]
full_block_count = l_enc // __fullEncodedBlockSize
last_block_size = l_enc % __fullEncodedBlockSize
try:
last_block_decoded_size = __encodedBlockSizes.index(last_block_size) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Invalid encoded length: %d' % l_enc) # depends on [control=['except'], data=[]]
data_size = full_block_count * __fullBlockSize + last_block_decoded_size
data = bytearray(data_size)
for i in range(full_block_count):
data = decode_block(enc[i * __fullEncodedBlockSize:i * __fullEncodedBlockSize + __fullEncodedBlockSize], data, i * __fullBlockSize) # depends on [control=['for'], data=['i']]
if last_block_size > 0:
data = decode_block(enc[full_block_count * __fullEncodedBlockSize:full_block_count * __fullEncodedBlockSize + last_block_size], data, full_block_count * __fullBlockSize) # depends on [control=['if'], data=['last_block_size']]
return _binToHex(data)
|
def get_collection(self, session, query, api_key):
    """
    Fetch a collection of resources of a specified type.

    Builds a (optionally sorted and paginated) JSON-API response for all
    instances of the model registered under ``api_key`` that the caller
    is permitted to view.

    :param session: SQLAlchemy session
    :param query: Dict of query args (``include``, sparse fieldsets,
        ``sort`` and pagination arguments are honoured)
    :param api_key: The key identifying the model type to fetch
    :return: a JSONAPIResponse on success, or a NotSortableError instance
        (returned, not raised) when an unsupported sort attribute is given
    """
    model = self._fetch_model(api_key)
    # Relationship paths to side-load, e.g. ?include=author,comments.
    include = self._parse_include(query.get('include', '').split(','))
    # Sparse fieldsets requested per type.
    fields = self._parse_fields(query)
    # Side-loaded resources, keyed to de-duplicate across instances.
    included = {}
    sorts = query.get('sort', '').split(',')
    order_by = []
    collection = session.query(model)
    for attr in sorts:
        # NOTE(review): an empty segment stops processing of ALL remaining
        # sort keys (break, not continue) -- presumably only hit when no
        # ?sort= was supplied at all, so the loop is skipped entirely.
        if attr == '':
            break
        # A leading '-' requests descending order for that attribute.
        attr_name, is_asc = [attr[1:], False]\
            if attr[0] == '-'\
            else [attr, True]
        # Only plain ORM-mapped attributes are sortable: reject unknown
        # names and relationships.
        if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
                or not hasattr(model, attr_name)\
                or attr_name in model.__mapper__.relationships.keys():
            return NotSortableError(model, attr_name)
        attr = getattr(model, attr_name)
        if not hasattr(attr, 'asc'):
            # pragma: no cover
            return NotSortableError(model, attr_name)
        # Viewing permission on the sort column is required.
        check_permission(model, attr_name, Permissions.VIEW)
        order_by.append(attr.asc() if is_asc else attr.desc())
    if len(order_by) > 0:
        collection = collection.order_by(*order_by)
    # Position counter over *visible* rows only: pagination is applied
    # after permission filtering, so hidden rows do not consume page slots.
    pos = -1
    start, end = self._parse_page(query)
    response = JSONAPIResponse()
    response.data['data'] = []
    for instance in collection:
        try:
            check_permission(instance, None, Permissions.VIEW)
        except PermissionDeniedError:
            # Skip rows the caller may not see.
            continue
        pos += 1
        # Outside the requested page window (end is None => no pagination).
        if end is not None and (pos < start or pos > end):
            continue
        built = self._render_full_resource(instance, include, fields)
        # Pull side-loaded resources out of the rendered item and merge
        # them into the shared, de-duplicated pool.
        included.update(built.pop('included'))
        response.data['data'].append(built)
    response.data['included'] = list(included.values())
    return response
|
def function[get_collection, parameter[self, session, query, api_key]]:
constant[
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
]
variable[model] assign[=] call[name[self]._fetch_model, parameter[name[api_key]]]
variable[include] assign[=] call[name[self]._parse_include, parameter[call[call[name[query].get, parameter[constant[include], constant[]]].split, parameter[constant[,]]]]]
variable[fields] assign[=] call[name[self]._parse_fields, parameter[name[query]]]
variable[included] assign[=] dictionary[[], []]
variable[sorts] assign[=] call[call[name[query].get, parameter[constant[sort], constant[]]].split, parameter[constant[,]]]
variable[order_by] assign[=] list[[]]
variable[collection] assign[=] call[name[session].query, parameter[name[model]]]
for taget[name[attr]] in starred[name[sorts]] begin[:]
if compare[name[attr] equal[==] constant[]] begin[:]
break
<ast.Tuple object at 0x7da1b0ca6200> assign[=] <ast.IfExp object at 0x7da1b0ca7640>
if <ast.BoolOp object at 0x7da1b0ca4ee0> begin[:]
return[call[name[NotSortableError], parameter[name[model], name[attr_name]]]]
variable[attr] assign[=] call[name[getattr], parameter[name[model], name[attr_name]]]
if <ast.UnaryOp object at 0x7da1b0ca4820> begin[:]
return[call[name[NotSortableError], parameter[name[model], name[attr_name]]]]
call[name[check_permission], parameter[name[model], name[attr_name], name[Permissions].VIEW]]
call[name[order_by].append, parameter[<ast.IfExp object at 0x7da1b0ca7220>]]
if compare[call[name[len], parameter[name[order_by]]] greater[>] constant[0]] begin[:]
variable[collection] assign[=] call[name[collection].order_by, parameter[<ast.Starred object at 0x7da1b0ca6380>]]
variable[pos] assign[=] <ast.UnaryOp object at 0x7da1b0ca6bf0>
<ast.Tuple object at 0x7da1b0ca4070> assign[=] call[name[self]._parse_page, parameter[name[query]]]
variable[response] assign[=] call[name[JSONAPIResponse], parameter[]]
call[name[response].data][constant[data]] assign[=] list[[]]
for taget[name[instance]] in starred[name[collection]] begin[:]
<ast.Try object at 0x7da1b0ca4370>
<ast.AugAssign object at 0x7da1b0c53940>
if <ast.BoolOp object at 0x7da1b0c51d20> begin[:]
continue
variable[built] assign[=] call[name[self]._render_full_resource, parameter[name[instance], name[include], name[fields]]]
call[name[included].update, parameter[call[name[built].pop, parameter[constant[included]]]]]
call[call[name[response].data][constant[data]].append, parameter[name[built]]]
call[name[response].data][constant[included]] assign[=] call[name[list], parameter[call[name[included].values, parameter[]]]]
return[name[response]]
|
keyword[def] identifier[get_collection] ( identifier[self] , identifier[session] , identifier[query] , identifier[api_key] ):
literal[string]
identifier[model] = identifier[self] . identifier[_fetch_model] ( identifier[api_key] )
identifier[include] = identifier[self] . identifier[_parse_include] ( identifier[query] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] ))
identifier[fields] = identifier[self] . identifier[_parse_fields] ( identifier[query] )
identifier[included] ={}
identifier[sorts] = identifier[query] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[order_by] =[]
identifier[collection] = identifier[session] . identifier[query] ( identifier[model] )
keyword[for] identifier[attr] keyword[in] identifier[sorts] :
keyword[if] identifier[attr] == literal[string] :
keyword[break]
identifier[attr_name] , identifier[is_asc] =[ identifier[attr] [ literal[int] :], keyword[False] ] keyword[if] identifier[attr] [ literal[int] ]== literal[string] keyword[else] [ identifier[attr] , keyword[True] ]
keyword[if] identifier[attr_name] keyword[not] keyword[in] identifier[model] . identifier[__mapper__] . identifier[all_orm_descriptors] . identifier[keys] () keyword[or] keyword[not] identifier[hasattr] ( identifier[model] , identifier[attr_name] ) keyword[or] identifier[attr_name] keyword[in] identifier[model] . identifier[__mapper__] . identifier[relationships] . identifier[keys] ():
keyword[return] identifier[NotSortableError] ( identifier[model] , identifier[attr_name] )
identifier[attr] = identifier[getattr] ( identifier[model] , identifier[attr_name] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[attr] , literal[string] ):
keyword[return] identifier[NotSortableError] ( identifier[model] , identifier[attr_name] )
identifier[check_permission] ( identifier[model] , identifier[attr_name] , identifier[Permissions] . identifier[VIEW] )
identifier[order_by] . identifier[append] ( identifier[attr] . identifier[asc] () keyword[if] identifier[is_asc] keyword[else] identifier[attr] . identifier[desc] ())
keyword[if] identifier[len] ( identifier[order_by] )> literal[int] :
identifier[collection] = identifier[collection] . identifier[order_by] (* identifier[order_by] )
identifier[pos] =- literal[int]
identifier[start] , identifier[end] = identifier[self] . identifier[_parse_page] ( identifier[query] )
identifier[response] = identifier[JSONAPIResponse] ()
identifier[response] . identifier[data] [ literal[string] ]=[]
keyword[for] identifier[instance] keyword[in] identifier[collection] :
keyword[try] :
identifier[check_permission] ( identifier[instance] , keyword[None] , identifier[Permissions] . identifier[VIEW] )
keyword[except] identifier[PermissionDeniedError] :
keyword[continue]
identifier[pos] += literal[int]
keyword[if] identifier[end] keyword[is] keyword[not] keyword[None] keyword[and] ( identifier[pos] < identifier[start] keyword[or] identifier[pos] > identifier[end] ):
keyword[continue]
identifier[built] = identifier[self] . identifier[_render_full_resource] ( identifier[instance] , identifier[include] , identifier[fields] )
identifier[included] . identifier[update] ( identifier[built] . identifier[pop] ( literal[string] ))
identifier[response] . identifier[data] [ literal[string] ]. identifier[append] ( identifier[built] )
identifier[response] . identifier[data] [ literal[string] ]= identifier[list] ( identifier[included] . identifier[values] ())
keyword[return] identifier[response]
|
def get_collection(self, session, query, api_key):
"""
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
"""
model = self._fetch_model(api_key)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
included = {}
sorts = query.get('sort', '').split(',')
order_by = []
collection = session.query(model)
for attr in sorts:
if attr == '':
break # depends on [control=['if'], data=[]]
(attr_name, is_asc) = [attr[1:], False] if attr[0] == '-' else [attr, True]
if attr_name not in model.__mapper__.all_orm_descriptors.keys() or not hasattr(model, attr_name) or attr_name in model.__mapper__.relationships.keys():
return NotSortableError(model, attr_name) # depends on [control=['if'], data=[]]
attr = getattr(model, attr_name)
if not hasattr(attr, 'asc'):
# pragma: no cover
return NotSortableError(model, attr_name) # depends on [control=['if'], data=[]]
check_permission(model, attr_name, Permissions.VIEW)
order_by.append(attr.asc() if is_asc else attr.desc()) # depends on [control=['for'], data=['attr']]
if len(order_by) > 0:
collection = collection.order_by(*order_by) # depends on [control=['if'], data=[]]
pos = -1
(start, end) = self._parse_page(query)
response = JSONAPIResponse()
response.data['data'] = []
for instance in collection:
try:
check_permission(instance, None, Permissions.VIEW) # depends on [control=['try'], data=[]]
except PermissionDeniedError:
continue # depends on [control=['except'], data=[]]
pos += 1
if end is not None and (pos < start or pos > end):
continue # depends on [control=['if'], data=[]]
built = self._render_full_resource(instance, include, fields)
included.update(built.pop('included'))
response.data['data'].append(built) # depends on [control=['for'], data=['instance']]
response.data['included'] = list(included.values())
return response
|
def _new_mock_response(self, response, file_path):
    '''Return a new mock Response whose body streams from *file_path*.

    The original response is shallow-copied, its header fields are cloned,
    and the Content-Type is forced to HTML.
    '''
    mocked = copy.copy(response)
    mocked.body = Body(open(file_path, 'rb'))
    # Rebuild the header record instead of sharing the original's.
    mocked.fields = NameValueRecord()
    for field_name, field_value in response.fields.get_all():
        mocked.fields.add(field_name, field_value)
    mocked.fields['Content-Type'] = 'text/html; charset="utf-8"'
    return mocked
|
def function[_new_mock_response, parameter[self, response, file_path]]:
constant[Return a new mock Response with the content.]
variable[mock_response] assign[=] call[name[copy].copy, parameter[name[response]]]
name[mock_response].body assign[=] call[name[Body], parameter[call[name[open], parameter[name[file_path], constant[rb]]]]]
name[mock_response].fields assign[=] call[name[NameValueRecord], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2043477f0>, <ast.Name object at 0x7da204347430>]]] in starred[call[name[response].fields.get_all, parameter[]]] begin[:]
call[name[mock_response].fields.add, parameter[name[name], name[value]]]
call[name[mock_response].fields][constant[Content-Type]] assign[=] constant[text/html; charset="utf-8"]
return[name[mock_response]]
|
keyword[def] identifier[_new_mock_response] ( identifier[self] , identifier[response] , identifier[file_path] ):
literal[string]
identifier[mock_response] = identifier[copy] . identifier[copy] ( identifier[response] )
identifier[mock_response] . identifier[body] = identifier[Body] ( identifier[open] ( identifier[file_path] , literal[string] ))
identifier[mock_response] . identifier[fields] = identifier[NameValueRecord] ()
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[response] . identifier[fields] . identifier[get_all] ():
identifier[mock_response] . identifier[fields] . identifier[add] ( identifier[name] , identifier[value] )
identifier[mock_response] . identifier[fields] [ literal[string] ]= literal[string]
keyword[return] identifier[mock_response]
|
def _new_mock_response(self, response, file_path):
"""Return a new mock Response with the content."""
mock_response = copy.copy(response)
mock_response.body = Body(open(file_path, 'rb'))
mock_response.fields = NameValueRecord()
for (name, value) in response.fields.get_all():
mock_response.fields.add(name, value) # depends on [control=['for'], data=[]]
mock_response.fields['Content-Type'] = 'text/html; charset="utf-8"'
return mock_response
|
def save(self, filename):
    """
    saves the instance of the script to a file using pickle

    Args:
        filename: target filename; when None, a default name with the
            '.b26s' extension is derived via self.filename()
    """
    if filename is None:
        filename = self.filename('.b26s')
    # pickle produces bytes, so the file must be opened in binary mode
    # (the original 'w' text mode raises TypeError on Python 3).
    with open(filename, 'wb') as outfile:
        pickle.dump(self.__dict__, outfile)
|
def function[save, parameter[self, filename]]:
constant[
saves the instance of the script to a file using pickle
Args:
filename: target filename
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[name[self].filename, parameter[constant[.b26s]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[outfile].write, parameter[call[name[pickle].dumps, parameter[name[self].__dict__]]]]
|
keyword[def] identifier[save] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[self] . identifier[filename] ( literal[string] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[pickle] . identifier[dumps] ( identifier[self] . identifier[__dict__] ))
|
def save(self, filename):
"""
saves the instance of the script to a file using pickle
Args:
filename: target filename
"""
if filename is None:
filename = self.filename('.b26s') # depends on [control=['if'], data=['filename']]
# if len(filename.split('\\\\?\\')) == 1:
# filename = '\\\\?\\' + filename
with open(filename, 'w') as outfile:
outfile.write(pickle.dumps(self.__dict__)) # depends on [control=['with'], data=['outfile']]
|
def GetConsoleOriginalTitle() -> str:
    """
    GetConsoleOriginalTitle from Win32.
    Return str.
    Only available on Windows Vista or higher.
    """
    # Guard clause: the underlying Win32 API only exists on NT 6.0+.
    if not IsNT6orHigher:
        raise RuntimeError('GetConsoleOriginalTitle is not supported on Windows XP or lower.')
    title_buffer = (ctypes.c_wchar * MAX_PATH)()
    ctypes.windll.kernel32.GetConsoleOriginalTitleW(title_buffer, MAX_PATH)
    return title_buffer.value
|
def function[GetConsoleOriginalTitle, parameter[]]:
constant[
GetConsoleOriginalTitle from Win32.
Return str.
Only available on Windows Vista or higher.
]
if name[IsNT6orHigher] begin[:]
variable[arrayType] assign[=] binary_operation[name[ctypes].c_wchar * name[MAX_PATH]]
variable[values] assign[=] call[name[arrayType], parameter[]]
call[name[ctypes].windll.kernel32.GetConsoleOriginalTitleW, parameter[name[values], name[MAX_PATH]]]
return[name[values].value]
|
keyword[def] identifier[GetConsoleOriginalTitle] ()-> identifier[str] :
literal[string]
keyword[if] identifier[IsNT6orHigher] :
identifier[arrayType] = identifier[ctypes] . identifier[c_wchar] * identifier[MAX_PATH]
identifier[values] = identifier[arrayType] ()
identifier[ctypes] . identifier[windll] . identifier[kernel32] . identifier[GetConsoleOriginalTitleW] ( identifier[values] , identifier[MAX_PATH] )
keyword[return] identifier[values] . identifier[value]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
|
def GetConsoleOriginalTitle() -> str:
"""
GetConsoleOriginalTitle from Win32.
Return str.
Only available on Windows Vista or higher.
"""
if IsNT6orHigher:
arrayType = ctypes.c_wchar * MAX_PATH
values = arrayType()
ctypes.windll.kernel32.GetConsoleOriginalTitleW(values, MAX_PATH)
return values.value # depends on [control=['if'], data=[]]
else:
raise RuntimeError('GetConsoleOriginalTitle is not supported on Windows XP or lower.')
|
def refresh(self):
    """Rebuild the cache: drop any existing one, then populate and open
    a fresh copy.
    """
    # Remove the stale cache first so populate() starts from a clean slate.
    if self.exists:
        self.delete()
    self.populate()
    self.open()
|
def function[refresh, parameter[self]]:
constant[Refresh the cache by deleting the old one and creating a new one.
]
if name[self].exists begin[:]
call[name[self].delete, parameter[]]
call[name[self].populate, parameter[]]
call[name[self].open, parameter[]]
|
keyword[def] identifier[refresh] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[exists] :
identifier[self] . identifier[delete] ()
identifier[self] . identifier[populate] ()
identifier[self] . identifier[open] ()
|
def refresh(self):
"""Refresh the cache by deleting the old one and creating a new one.
"""
if self.exists:
self.delete() # depends on [control=['if'], data=[]]
self.populate()
self.open()
|
def _calculate(cls, start=None, end=None):
    """
    calculate the difference between starting and ending time.

    :param start: A starting time.
    :type start: int|str
    :param end: A ending time.
    :type end: int|str

    :return:
        A dict with :code:`days`, :code:`hours`, :code:`minutes` and
        :code:`seconds` as index.
    :rtype: dict
    """
    if start and end:
        # Explicit boundaries were given: use them.
        elapsed = int(end) - int(start)
    else:
        # Fall back to the globally recorded start/end timestamps.
        elapsed = PyFunceble.INTERN["end"] - PyFunceble.INTERN["start"]

    # An ordered mapping keeps days/hours/minutes/seconds in display order.
    formatted = PyFunceble.OrderedDict()

    # Peel off each unit with divmod; each quotient/remainder pair matches
    # the direct //-and-% formulas for the corresponding unit.
    days, remainder = divmod(elapsed, 24 * 60 * 60)
    hours, remainder = divmod(remainder, 60 * 60)
    minutes, seconds = divmod(remainder, 60)

    # Zero-pad every component to (at least) two digits.
    formatted["days"] = str(days).zfill(2)
    formatted["hours"] = str(hours).zfill(2)
    formatted["minutes"] = str(minutes).zfill(2)
    formatted["seconds"] = str(seconds).zfill(2)

    return formatted
|
def function[_calculate, parameter[cls, start, end]]:
constant[
calculate the difference between starting and ending time.
:param start: A starting time.
:type start: int|str
:param stop: A ending time.
:type stop: int|str
:return:
A dict with following as index.
* :code:`days`
* :code:`hours`
* :code:`minutes`
* :code:`seconds`
as index.
:rtype: dict
]
if <ast.BoolOp object at 0x7da20c9921d0> begin[:]
variable[time_difference] assign[=] binary_operation[call[name[int], parameter[name[end]]] - call[name[int], parameter[name[start]]]]
variable[data] assign[=] call[name[PyFunceble].OrderedDict, parameter[]]
call[name[data]][constant[days]] assign[=] call[call[name[str], parameter[binary_operation[name[time_difference] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[binary_operation[constant[24] * constant[60]] * constant[60]]]]].zfill, parameter[constant[2]]]
call[name[data]][constant[hours]] assign[=] call[call[name[str], parameter[binary_operation[binary_operation[name[time_difference] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[constant[60] * constant[60]]] <ast.Mod object at 0x7da2590d6920> constant[24]]]].zfill, parameter[constant[2]]]
call[name[data]][constant[minutes]] assign[=] call[call[name[str], parameter[binary_operation[binary_operation[name[time_difference] <ast.Mod object at 0x7da2590d6920> constant[3600]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[60]]]].zfill, parameter[constant[2]]]
call[name[data]][constant[seconds]] assign[=] call[call[name[str], parameter[binary_operation[name[time_difference] <ast.Mod object at 0x7da2590d6920> constant[60]]]].zfill, parameter[constant[2]]]
return[name[data]]
|
keyword[def] identifier[_calculate] ( identifier[cls] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
keyword[if] identifier[start] keyword[and] identifier[end] :
identifier[time_difference] = identifier[int] ( identifier[end] )- identifier[int] ( identifier[start] )
keyword[else] :
identifier[time_difference] = identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]- identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]
identifier[data] = identifier[PyFunceble] . identifier[OrderedDict] ()
identifier[data] [ literal[string] ]= identifier[str] ( identifier[time_difference] //( literal[int] * literal[int] * literal[int] )). identifier[zfill] ( literal[int] )
identifier[data] [ literal[string] ]= identifier[str] (( identifier[time_difference] //( literal[int] * literal[int] ))% literal[int] ). identifier[zfill] ( literal[int] )
identifier[data] [ literal[string] ]= identifier[str] (( identifier[time_difference] % literal[int] )// literal[int] ). identifier[zfill] ( literal[int] )
identifier[data] [ literal[string] ]= identifier[str] ( identifier[time_difference] % literal[int] ). identifier[zfill] ( literal[int] )
keyword[return] identifier[data]
|
def _calculate(cls, start=None, end=None):
"""
calculate the difference between starting and ending time.
:param start: A starting time.
:type start: int|str
:param stop: A ending time.
:type stop: int|str
:return:
A dict with following as index.
* :code:`days`
* :code:`hours`
* :code:`minutes`
* :code:`seconds`
as index.
:rtype: dict
"""
if start and end:
# The start and end time is explicitly given.
# We get the difference between the ending and the starting time.
time_difference = int(end) - int(start) # depends on [control=['if'], data=[]]
else:
# The start and end time is not explicitly given.
# We get the difference between the ending and the starting time.
time_difference = PyFunceble.INTERN['end'] - PyFunceble.INTERN['start']
# We initiate an OrderedDict.
# Indeed, we use an ordered dict because we want the structuration and the
# order to stay always the same.
# As a dictionnary is always unordered, we can use it. Otherwise the time will
# not be shown correctly.
data = PyFunceble.OrderedDict()
# We calculate and append the day to our data.
data['days'] = str(time_difference // (24 * 60 * 60)).zfill(2)
# We calculate and append the hours to our data.
data['hours'] = str(time_difference // (60 * 60) % 24).zfill(2)
# We calculate and append the minutes to our data.
data['minutes'] = str(time_difference % 3600 // 60).zfill(2)
# We calculate and append the minutes to our data.
data['seconds'] = str(time_difference % 60).zfill(2)
# We finaly return our data.
return data
|
def get_runs_by_id(self, config_id):
	"""
	Return a list of runs for a given config id.

	The runs are sorted by ascending budget, so '-1' will give
	the longest run for this config.

	:param config_id: key into ``self.data`` identifying the configuration
	:return: list of ``Run`` objects, sorted by ascending budget
	"""
	d = self.data[config_id]
	runs = []
	# The original wrapped this loop body in ``try: ... except: raise``,
	# which is a no-op; any exception propagates unchanged either way.
	for b in d.results.keys():
		err_logs = d.exceptions.get(b, None)
		result = d.results[b]
		if result is None:
			# The run produced no result (e.g. it crashed): loss/info unknown.
			r = Run(config_id, b, None, None, d.time_stamps[b], err_logs)
		else:
			r = Run(config_id, b, result['loss'], result['info'],
			        d.time_stamps[b], err_logs)
		runs.append(r)
	runs.sort(key=lambda r: r.budget)
	return(runs)
|
def function[get_runs_by_id, parameter[self, config_id]]:
constant[
returns a list of runs for a given config id
The runs are sorted by ascending budget, so '-1' will give
the longest run for this config.
]
variable[d] assign[=] call[name[self].data][name[config_id]]
variable[runs] assign[=] list[[]]
for taget[name[b]] in starred[call[name[d].results.keys, parameter[]]] begin[:]
<ast.Try object at 0x7da1b17163e0>
call[name[runs].sort, parameter[]]
return[name[runs]]
|
keyword[def] identifier[get_runs_by_id] ( identifier[self] , identifier[config_id] ):
literal[string]
identifier[d] = identifier[self] . identifier[data] [ identifier[config_id] ]
identifier[runs] =[]
keyword[for] identifier[b] keyword[in] identifier[d] . identifier[results] . identifier[keys] ():
keyword[try] :
identifier[err_logs] = identifier[d] . identifier[exceptions] . identifier[get] ( identifier[b] , keyword[None] )
keyword[if] identifier[d] . identifier[results] [ identifier[b] ] keyword[is] keyword[None] :
identifier[r] = identifier[Run] ( identifier[config_id] , identifier[b] , keyword[None] , keyword[None] , identifier[d] . identifier[time_stamps] [ identifier[b] ], identifier[err_logs] )
keyword[else] :
identifier[r] = identifier[Run] ( identifier[config_id] , identifier[b] , identifier[d] . identifier[results] [ identifier[b] ][ literal[string] ], identifier[d] . identifier[results] [ identifier[b] ][ literal[string] ], identifier[d] . identifier[time_stamps] [ identifier[b] ], identifier[err_logs] )
identifier[runs] . identifier[append] ( identifier[r] )
keyword[except] :
keyword[raise]
identifier[runs] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[r] : identifier[r] . identifier[budget] )
keyword[return] ( identifier[runs] )
|
def get_runs_by_id(self, config_id):
"""
returns a list of runs for a given config id
The runs are sorted by ascending budget, so '-1' will give
the longest run for this config.
"""
d = self.data[config_id]
runs = []
for b in d.results.keys():
try:
err_logs = d.exceptions.get(b, None)
if d.results[b] is None:
r = Run(config_id, b, None, None, d.time_stamps[b], err_logs) # depends on [control=['if'], data=[]]
else:
r = Run(config_id, b, d.results[b]['loss'], d.results[b]['info'], d.time_stamps[b], err_logs)
runs.append(r) # depends on [control=['try'], data=[]]
except:
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['b']]
runs.sort(key=lambda r: r.budget)
return runs
|
def write_geoff(discoursegraph, output_file):
    """
    Convert a DiscourseDocumentGraph into Geoff format and write the
    result to ``output_file``, which may be a file path (str) or an
    already-open, writable file-like object.
    """
    if not isinstance(output_file, str):
        # output_file is an open file object: write to it directly
        output_file.write(convert_to_geoff(discoursegraph))
    else:
        # output_file is a path: open it ourselves and write the conversion
        with open(output_file, 'w') as geoff_file:
            geoff_file.write(convert_to_geoff(discoursegraph))
|
def function[write_geoff, parameter[discoursegraph, output_file]]:
constant[
converts a DiscourseDocumentGraph into a Geoff file and
writes it to the given file (or file path).
]
if call[name[isinstance], parameter[name[output_file], name[str]]] begin[:]
with call[name[open], parameter[name[output_file], constant[w]]] begin[:]
call[name[outfile].write, parameter[call[name[convert_to_geoff], parameter[name[discoursegraph]]]]]
|
keyword[def] identifier[write_geoff] ( identifier[discoursegraph] , identifier[output_file] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[output_file] , identifier[str] ):
keyword[with] identifier[open] ( identifier[output_file] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[convert_to_geoff] ( identifier[discoursegraph] ))
keyword[else] :
identifier[output_file] . identifier[write] ( identifier[convert_to_geoff] ( identifier[discoursegraph] ))
|
def write_geoff(discoursegraph, output_file):
"""
converts a DiscourseDocumentGraph into a Geoff file and
writes it to the given file (or file path).
"""
if isinstance(output_file, str):
with open(output_file, 'w') as outfile:
outfile.write(convert_to_geoff(discoursegraph)) # depends on [control=['with'], data=['outfile']] # depends on [control=['if'], data=[]]
else: # output_file is a file object
output_file.write(convert_to_geoff(discoursegraph))
|
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.
    True if on, False otherwise.
    Loudness is a complicated topic. You can find a nice summary about this
    feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    # Query the rendering-control service for the master channel's
    # loudness flag; the service reports it as a numeric string.
    result = self.renderingControl.GetLoudness(
        [('InstanceID', 0), ('Channel', 'Master')])
    return bool(int(result["CurrentLoudness"]))
|
def function[loudness, parameter[self]]:
constant[bool: The Sonos speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
]
variable[response] assign[=] call[name[self].renderingControl.GetLoudness, parameter[list[[<ast.Tuple object at 0x7da18eb55960>, <ast.Tuple object at 0x7da18eb564a0>]]]]
variable[loudness] assign[=] call[name[response]][constant[CurrentLoudness]]
return[call[name[bool], parameter[call[name[int], parameter[name[loudness]]]]]]
|
keyword[def] identifier[loudness] ( identifier[self] ):
literal[string]
identifier[response] = identifier[self] . identifier[renderingControl] . identifier[GetLoudness] ([
( literal[string] , literal[int] ),
( literal[string] , literal[string] ),
])
identifier[loudness] = identifier[response] [ literal[string] ]
keyword[return] identifier[bool] ( identifier[int] ( identifier[loudness] ))
|
def loudness(self):
"""bool: The Sonos speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
"""
response = self.renderingControl.GetLoudness([('InstanceID', 0), ('Channel', 'Master')])
loudness = response['CurrentLoudness']
return bool(int(loudness))
|
def id_generator(size=15, random_state=None):
    """Generate a random alphanumeric id of ``size`` characters.

    This is useful for embedding HTML into ipython notebooks, where each
    embedded div needs a unique id.

    :param size: number of characters in the generated id (default 15)
    :param random_state: a ``numpy.random.RandomState`` (or any object with
        a compatible ``choice`` method).  If None, a fresh ``RandomState``
        is created; previously passing None raised ``AttributeError``.
    :return: a random string of uppercase ASCII letters and digits
    """
    if random_state is None:
        # Local import keeps the fallback self-contained; the rest of the
        # function only needs the ``choice`` method of the state object.
        import numpy as np
        random_state = np.random.RandomState()
    chars = list(string.ascii_uppercase + string.digits)
    return ''.join(random_state.choice(chars, size, replace=True))
|
def function[id_generator, parameter[size, random_state]]:
constant[Helper function to generate random div ids. This is useful for embedding
HTML into ipython notebooks.]
variable[chars] assign[=] call[name[list], parameter[binary_operation[name[string].ascii_uppercase + name[string].digits]]]
return[call[constant[].join, parameter[call[name[random_state].choice, parameter[name[chars], name[size]]]]]]
|
keyword[def] identifier[id_generator] ( identifier[size] = literal[int] , identifier[random_state] = keyword[None] ):
literal[string]
identifier[chars] = identifier[list] ( identifier[string] . identifier[ascii_uppercase] + identifier[string] . identifier[digits] )
keyword[return] literal[string] . identifier[join] ( identifier[random_state] . identifier[choice] ( identifier[chars] , identifier[size] , identifier[replace] = keyword[True] ))
|
def id_generator(size=15, random_state=None):
"""Helper function to generate random div ids. This is useful for embedding
HTML into ipython notebooks."""
chars = list(string.ascii_uppercase + string.digits)
return ''.join(random_state.choice(chars, size, replace=True))
|
def red(self, value):
    """Set the red channel value.

    The stored value is only updated when the new value actually differs
    from the current one and is an int; anything else is silently ignored.
    """
    changed = value != self._red
    if changed and isinstance(value, int):
        self._red = value
|
def function[red, parameter[self, value]]:
constant[gets/sets the red value]
if <ast.BoolOp object at 0x7da18dc9ba30> begin[:]
name[self]._red assign[=] name[value]
|
keyword[def] identifier[red] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] != identifier[self] . identifier[_red] keyword[and] identifier[isinstance] ( identifier[value] , identifier[int] ):
identifier[self] . identifier[_red] = identifier[value]
|
def red(self, value):
"""gets/sets the red value"""
if value != self._red and isinstance(value, int):
self._red = value # depends on [control=['if'], data=[]]
|
def auto_complete_paths(current, completion_type):
    """If ``completion_type`` is ``file`` or ``path``, list all regular files
    and directories starting with ``current``; otherwise only list directories
    starting with ``current``.
    :param current: The word to be completed
    :param completion_type: path completion type(`file`, `path` or `dir`)i
    :return: A generator of regular files and/or directories
    """
    directory, prefix = os.path.split(current)
    base_dir = os.path.abspath(directory)
    # Don't complete paths if they can't be accessed
    if not os.access(base_dir, os.R_OK):
        return
    prefix = os.path.normcase(prefix)
    # Walk the directory and offer every entry whose normalized name
    # starts with the typed prefix.
    for entry in os.listdir(base_dir):
        if not os.path.normcase(entry).startswith(prefix):
            continue
        full_path = os.path.join(base_dir, entry)
        candidate = os.path.normcase(os.path.join(directory, entry))
        # Regular files are only offered when the completion type allows
        # them; directories are always offered, with a trailing separator
        # so the shell can keep completing into them.
        if os.path.isfile(full_path):
            if completion_type != 'dir':
                yield candidate
        elif os.path.isdir(full_path):
            yield os.path.join(candidate, '')
|
def function[auto_complete_paths, parameter[current, completion_type]]:
constant[If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
:param completion_type: path completion type(`file`, `path` or `dir`)i
:return: A generator of regular files and/or directories
]
<ast.Tuple object at 0x7da20e74bc70> assign[=] call[name[os].path.split, parameter[name[current]]]
variable[current_path] assign[=] call[name[os].path.abspath, parameter[name[directory]]]
if <ast.UnaryOp object at 0x7da2041d8730> begin[:]
return[None]
variable[filename] assign[=] call[name[os].path.normcase, parameter[name[filename]]]
variable[file_list] assign[=] <ast.GeneratorExp object at 0x7da2041dbfa0>
for taget[name[f]] in starred[name[file_list]] begin[:]
variable[opt] assign[=] call[name[os].path.join, parameter[name[current_path], name[f]]]
variable[comp_file] assign[=] call[name[os].path.normcase, parameter[call[name[os].path.join, parameter[name[directory], name[f]]]]]
if <ast.BoolOp object at 0x7da2041db4f0> begin[:]
<ast.Yield object at 0x7da2041d8f70>
|
keyword[def] identifier[auto_complete_paths] ( identifier[current] , identifier[completion_type] ):
literal[string]
identifier[directory] , identifier[filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[current] )
identifier[current_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] )
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[current_path] , identifier[os] . identifier[R_OK] ):
keyword[return]
identifier[filename] = identifier[os] . identifier[path] . identifier[normcase] ( identifier[filename] )
identifier[file_list] =( identifier[x] keyword[for] identifier[x] keyword[in] identifier[os] . identifier[listdir] ( identifier[current_path] )
keyword[if] identifier[os] . identifier[path] . identifier[normcase] ( identifier[x] ). identifier[startswith] ( identifier[filename] ))
keyword[for] identifier[f] keyword[in] identifier[file_list] :
identifier[opt] = identifier[os] . identifier[path] . identifier[join] ( identifier[current_path] , identifier[f] )
identifier[comp_file] = identifier[os] . identifier[path] . identifier[normcase] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[f] ))
keyword[if] identifier[completion_type] != literal[string] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[opt] ):
keyword[yield] identifier[comp_file]
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[opt] ):
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[comp_file] , literal[string] )
|
def auto_complete_paths(current, completion_type):
"""If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
:param completion_type: path completion type(`file`, `path` or `dir`)i
:return: A generator of regular files and/or directories
"""
(directory, filename) = os.path.split(current)
current_path = os.path.abspath(directory)
# Don't complete paths if they can't be accessed
if not os.access(current_path, os.R_OK):
return # depends on [control=['if'], data=[]]
filename = os.path.normcase(filename)
# list all files that start with ``filename``
file_list = (x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename))
for f in file_list:
opt = os.path.join(current_path, f)
comp_file = os.path.normcase(os.path.join(directory, f))
# complete regular files when there is not ``<dir>`` after option
# complete directories when there is ``<file>``, ``<path>`` or
# ``<dir>``after option
if completion_type != 'dir' and os.path.isfile(opt):
yield comp_file # depends on [control=['if'], data=[]]
elif os.path.isdir(opt):
yield os.path.join(comp_file, '') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
|
def request(self, *args, **kwargs):
    """
    Decorator that registers a function to be called before a request.

    eg: @middleware.request

    The decorated middleware is appended to ``self.request_middleware``
    and returned unchanged, so it remains directly callable.
    """
    middleware = args[0]
    # The previous implementation built (and immediately invoked) a
    # wraps-decorated closure whose only effect was this append plus
    # returning the middleware — the indirection added nothing.
    self.request_middleware.append(middleware)
    return middleware
|
def function[request, parameter[self]]:
constant[
Define a Decorate to be called before a request.
eg: @middleware.request
]
variable[middleware] assign[=] call[name[args]][constant[0]]
def function[register_middleware, parameter[]]:
call[name[self].request_middleware.append, parameter[name[middleware]]]
return[name[middleware]]
return[call[name[register_middleware], parameter[]]]
|
keyword[def] identifier[request] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[middleware] = identifier[args] [ literal[int] ]
@ identifier[wraps] ( identifier[middleware] )
keyword[def] identifier[register_middleware] (* identifier[args] ,** identifier[kwargs] ):
identifier[self] . identifier[request_middleware] . identifier[append] ( identifier[middleware] )
keyword[return] identifier[middleware]
keyword[return] identifier[register_middleware] ()
|
def request(self, *args, **kwargs):
"""
Define a Decorate to be called before a request.
eg: @middleware.request
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.request_middleware.append(middleware)
return middleware
return register_middleware()
|
def parse_file(infile, exit_on_error=True):
    """Parse a comma-separated file with columns "ra,dec,magnitude".
    """
    try:
        # Read the three columns and transpose so each variable is a row;
        # atleast_2d keeps single-line files from collapsing to 1-D.
        columns = np.atleast_2d(
            np.genfromtxt(infile, usecols=[0, 1, 2], delimiter=',')
        ).T
        a, b, mag = columns
    except IOError as e:
        if not exit_on_error:
            raise e
        logger.error("There seems to be a problem with the input file, "
                     "the format should be: RA_degrees (J2000), Dec_degrees (J2000), "
                     "Magnitude. There should be no header, columns should be "
                     "separated by a comma")
        sys.exit(1)
    return a, b, mag
|
def function[parse_file, parameter[infile, exit_on_error]]:
constant[Parse a comma-separated file with columns "ra,dec,magnitude".
]
<ast.Try object at 0x7da1b0b82c50>
return[tuple[[<ast.Name object at 0x7da1b0a22290>, <ast.Name object at 0x7da1b0a22bc0>, <ast.Name object at 0x7da1b0a22110>]]]
|
keyword[def] identifier[parse_file] ( identifier[infile] , identifier[exit_on_error] = keyword[True] ):
literal[string]
keyword[try] :
identifier[a] , identifier[b] , identifier[mag] = identifier[np] . identifier[atleast_2d] (
identifier[np] . identifier[genfromtxt] (
identifier[infile] ,
identifier[usecols] =[ literal[int] , literal[int] , literal[int] ],
identifier[delimiter] = literal[string]
)
). identifier[T]
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[exit_on_error] :
identifier[logger] . identifier[error] ( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
keyword[raise] identifier[e]
keyword[return] identifier[a] , identifier[b] , identifier[mag]
|
def parse_file(infile, exit_on_error=True):
"""Parse a comma-separated file with columns "ra,dec,magnitude".
"""
try:
(a, b, mag) = np.atleast_2d(np.genfromtxt(infile, usecols=[0, 1, 2], delimiter=',')).T # depends on [control=['try'], data=[]]
except IOError as e:
if exit_on_error:
logger.error('There seems to be a problem with the input file, the format should be: RA_degrees (J2000), Dec_degrees (J2000), Magnitude. There should be no header, columns should be separated by a comma')
sys.exit(1) # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['except'], data=['e']]
return (a, b, mag)
|
def as_dict(self):
    """
    A JSON serializable dict representation of self.
    """
    # The "@module"/"@class" keys record which class produced the dict so
    # it can be reconstructed later.
    d = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "operation": self.operation,
        "title": self.title,
    }
    # Each nested component knows how to serialize itself.
    for key, component in (("xc", self.xc), ("basis_set", self.basis_set),
                           ("units", self.units), ("scf", self.scf),
                           ("geo", self.geo)):
        d[key] = component.as_dict()
    d["others"] = [directive.as_dict() for directive in self.other_directives]
    return d
|
def function[as_dict, parameter[self]]:
constant[
A JSON serializable dict representation of self.
]
return[dictionary[[<ast.Constant object at 0x7da18bc700d0>, <ast.Constant object at 0x7da18bc73d90>, <ast.Constant object at 0x7da18bc72a10>, <ast.Constant object at 0x7da18bc73070>, <ast.Constant object at 0x7da18bc72560>, <ast.Constant object at 0x7da18bc714e0>, <ast.Constant object at 0x7da18bc711e0>, <ast.Constant object at 0x7da18bc73b50>, <ast.Constant object at 0x7da18bc72380>, <ast.Constant object at 0x7da18bc72890>], [<ast.Attribute object at 0x7da18bc706a0>, <ast.Attribute object at 0x7da18bc70df0>, <ast.Attribute object at 0x7da18bc70220>, <ast.Attribute object at 0x7da18bc728c0>, <ast.Call object at 0x7da20c6c7850>, <ast.Call object at 0x7da20c6c5ff0>, <ast.Call object at 0x7da20c6c6980>, <ast.Call object at 0x7da20c6c5c90>, <ast.Call object at 0x7da20c6c4e20>, <ast.ListComp object at 0x7da20c6c7610>]]]
|
keyword[def] identifier[as_dict] ( identifier[self] ):
literal[string]
keyword[return] { literal[string] : identifier[self] . identifier[__class__] . identifier[__module__] ,
literal[string] : identifier[self] . identifier[__class__] . identifier[__name__] ,
literal[string] : identifier[self] . identifier[operation] , literal[string] : identifier[self] . identifier[title] ,
literal[string] : identifier[self] . identifier[xc] . identifier[as_dict] (), literal[string] : identifier[self] . identifier[basis_set] . identifier[as_dict] (),
literal[string] : identifier[self] . identifier[units] . identifier[as_dict] (), literal[string] : identifier[self] . identifier[scf] . identifier[as_dict] (),
literal[string] : identifier[self] . identifier[geo] . identifier[as_dict] (),
literal[string] :[ identifier[k] . identifier[as_dict] () keyword[for] identifier[k] keyword[in] identifier[self] . identifier[other_directives] ]}
|
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
return {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'operation': self.operation, 'title': self.title, 'xc': self.xc.as_dict(), 'basis_set': self.basis_set.as_dict(), 'units': self.units.as_dict(), 'scf': self.scf.as_dict(), 'geo': self.geo.as_dict(), 'others': [k.as_dict() for k in self.other_directives]}
|
async def commission(
        self, *, enable_ssh: bool = None, skip_networking: bool = None,
        skip_storage: bool = None,
        commissioning_scripts: typing.Sequence[str] = None,
        testing_scripts: typing.Sequence[str] = None,
        wait: bool = False, wait_interval: int = 5):
    """Commission this machine.
    :param enable_ssh: Prevent the machine from powering off after running
        commissioning scripts and enable your user to SSH into the machine.
    :type enable_ssh: `bool`
    :param skip_networking: Skip updating the MAAS interfaces for the
        machine.
    :type skip_networking: `bool`
    :param skip_storage: Skip update the MAAS block devices for the
        machine.
    :type skip_storage: `bool`
    :param commissioning_scripts: List of extra commisisoning scripts
        to run. If the name of the commissioning scripts match a tag, then
        all commissioning scripts with that tag will be used.
    :type commissioning_scripts: sequence of `str`
    :param testing_scripts: List of testing scripts to run after
        commissioning. By default a small set of testing scripts will run
        by default. Passing empty list will disable running any testing
        scripts during commissioning. If the name of the testing scripts
        match a tag, then all testing scripts with that tag will be used.
    :type testing_scripts: sequence of `str`
    :param wait: If specified, wait until the commissioning is complete.
    :param wait_interval: How often to poll, defaults to 5 seconds
    :return: this machine (with refreshed data when ``wait`` is True)
    :raises FailedCommissioning: if ``wait`` is True and commissioning fails
    :raises FailedTesting: if ``wait`` is True and post-commission testing fails
    """
    # Only forward parameters the caller actually supplied, so the server
    # applies its own defaults for anything left as None.
    params = {"system_id": self.system_id}
    if enable_ssh is not None:
        params["enable_ssh"] = enable_ssh
    if skip_networking is not None:
        params["skip_networking"] = skip_networking
    if skip_storage is not None:
        params["skip_storage"] = skip_storage
    if (commissioning_scripts is not None and
            len(commissioning_scripts) > 0):
        # The API expects a single comma-separated string, not a list.
        params["commissioning_scripts"] = ",".join(commissioning_scripts)
    if testing_scripts is not None:
        # An empty list (or the literal string "none") disables testing
        # scripts entirely; otherwise join them into one CSV string.
        if len(testing_scripts) == 0 or testing_scripts == "none":
            params["testing_scripts"] = ["none"]
        else:
            params["testing_scripts"] = ",".join(testing_scripts)
    self._data = await self._handler.commission(**params)
    if not wait:
        return self
    else:
        # Wait for the machine to be fully commissioned.
        # Poll until the machine leaves both in-progress states, refreshing
        # our cached data on every iteration.
        while self.status in [
                NodeStatus.COMMISSIONING, NodeStatus.TESTING]:
            await asyncio.sleep(wait_interval)
            self._data = await self._handler.read(system_id=self.system_id)
        if self.status == NodeStatus.FAILED_COMMISSIONING:
            msg = "{hostname} failed to commission.".format(
                hostname=self.hostname)
            raise FailedCommissioning(msg, self)
        if self.status == NodeStatus.FAILED_TESTING:
            msg = "{hostname} failed testing.".format(
                hostname=self.hostname)
            raise FailedTesting(msg, self)
        return self
|
<ast.AsyncFunctionDef object at 0x7da20c6c4c40>
|
keyword[async] keyword[def] identifier[commission] (
identifier[self] ,*, identifier[enable_ssh] : identifier[bool] = keyword[None] , identifier[skip_networking] : identifier[bool] = keyword[None] ,
identifier[skip_storage] : identifier[bool] = keyword[None] ,
identifier[commissioning_scripts] : identifier[typing] . identifier[Sequence] [ identifier[str] ]= keyword[None] ,
identifier[testing_scripts] : identifier[typing] . identifier[Sequence] [ identifier[str] ]= keyword[None] ,
identifier[wait] : identifier[bool] = keyword[False] , identifier[wait_interval] : identifier[int] = literal[int] ):
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[system_id] }
keyword[if] identifier[enable_ssh] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[enable_ssh]
keyword[if] identifier[skip_networking] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[skip_networking]
keyword[if] identifier[skip_storage] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[skip_storage]
keyword[if] ( identifier[commissioning_scripts] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[len] ( identifier[commissioning_scripts] )> literal[int] ):
identifier[params] [ literal[string] ]= literal[string] . identifier[join] ( identifier[commissioning_scripts] )
keyword[if] identifier[testing_scripts] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[testing_scripts] )== literal[int] keyword[or] identifier[testing_scripts] == literal[string] :
identifier[params] [ literal[string] ]=[ literal[string] ]
keyword[else] :
identifier[params] [ literal[string] ]= literal[string] . identifier[join] ( identifier[testing_scripts] )
identifier[self] . identifier[_data] = keyword[await] identifier[self] . identifier[_handler] . identifier[commission] (** identifier[params] )
keyword[if] keyword[not] identifier[wait] :
keyword[return] identifier[self]
keyword[else] :
keyword[while] identifier[self] . identifier[status] keyword[in] [
identifier[NodeStatus] . identifier[COMMISSIONING] , identifier[NodeStatus] . identifier[TESTING] ]:
keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[wait_interval] )
identifier[self] . identifier[_data] = keyword[await] identifier[self] . identifier[_handler] . identifier[read] ( identifier[system_id] = identifier[self] . identifier[system_id] )
keyword[if] identifier[self] . identifier[status] == identifier[NodeStatus] . identifier[FAILED_COMMISSIONING] :
identifier[msg] = literal[string] . identifier[format] (
identifier[hostname] = identifier[self] . identifier[hostname] )
keyword[raise] identifier[FailedCommissioning] ( identifier[msg] , identifier[self] )
keyword[if] identifier[self] . identifier[status] == identifier[NodeStatus] . identifier[FAILED_TESTING] :
identifier[msg] = literal[string] . identifier[format] (
identifier[hostname] = identifier[self] . identifier[hostname] )
keyword[raise] identifier[FailedTesting] ( identifier[msg] , identifier[self] )
keyword[return] identifier[self]
|
async def commission(self, *, enable_ssh: bool=None, skip_networking: bool=None, skip_storage: bool=None, commissioning_scripts: typing.Sequence[str]=None, testing_scripts: typing.Sequence[str]=None, wait: bool=False, wait_interval: int=5):
"""Commission this machine.
:param enable_ssh: Prevent the machine from powering off after running
commissioning scripts and enable your user to SSH into the machine.
:type enable_ssh: `bool`
:param skip_networking: Skip updating the MAAS interfaces for the
machine.
:type skip_networking: `bool`
:param skip_storage: Skip update the MAAS block devices for the
machine.
:type skip_storage: `bool`
:param commissioning_scripts: List of extra commisisoning scripts
to run. If the name of the commissioning scripts match a tag, then
all commissioning scripts with that tag will be used.
:type commissioning_scripts: sequence of `str`
:param testing_scripts: List of testing scripts to run after
commissioning. By default a small set of testing scripts will run
by default. Passing empty list will disable running any testing
scripts during commissioning. If the name of the testing scripts
match a tag, then all testing scripts with that tag will be used.
:type testing_scripts: sequence of `str`
:param wait: If specified, wait until the commissioning is complete.
:param wait_interval: How often to poll, defaults to 5 seconds
"""
params = {'system_id': self.system_id}
if enable_ssh is not None:
params['enable_ssh'] = enable_ssh # depends on [control=['if'], data=['enable_ssh']]
if skip_networking is not None:
params['skip_networking'] = skip_networking # depends on [control=['if'], data=['skip_networking']]
if skip_storage is not None:
params['skip_storage'] = skip_storage # depends on [control=['if'], data=['skip_storage']]
if commissioning_scripts is not None and len(commissioning_scripts) > 0:
params['commissioning_scripts'] = ','.join(commissioning_scripts) # depends on [control=['if'], data=[]]
if testing_scripts is not None:
if len(testing_scripts) == 0 or testing_scripts == 'none':
params['testing_scripts'] = ['none'] # depends on [control=['if'], data=[]]
else:
params['testing_scripts'] = ','.join(testing_scripts) # depends on [control=['if'], data=['testing_scripts']]
self._data = await self._handler.commission(**params)
if not wait:
return self # depends on [control=['if'], data=[]]
else:
# Wait for the machine to be fully commissioned.
while self.status in [NodeStatus.COMMISSIONING, NodeStatus.TESTING]:
await asyncio.sleep(wait_interval)
self._data = await self._handler.read(system_id=self.system_id) # depends on [control=['while'], data=[]]
if self.status == NodeStatus.FAILED_COMMISSIONING:
msg = '{hostname} failed to commission.'.format(hostname=self.hostname)
raise FailedCommissioning(msg, self) # depends on [control=['if'], data=[]]
if self.status == NodeStatus.FAILED_TESTING:
msg = '{hostname} failed testing.'.format(hostname=self.hostname)
raise FailedTesting(msg, self) # depends on [control=['if'], data=[]]
return self
|
def download_links(self, dir_path):
    """Download web pages or images from search result links.

    Each successful (HTTP 200) link is streamed to a file inside
    ``dir_path`` named ``<query>_<index><ext>``, where the query's spaces
    are replaced by underscores and ``ext`` comes from the ``fileType``
    search argument ('.html' when ``fileType`` is empty).

    Args:
        dir_path (str):
            Path of directory to save downloads of :class:`api.results`.links
    """
    links = self.links
    # Create the target directory if it does not exist yet.
    if not path.exists(dir_path):
        makedirs(dir_path)
    for i, url in enumerate(links):
        # Offset the index by the search's 'start' parameter so file names
        # line up with the absolute result position, not the page-local one.
        if 'start' in self.cseargs:
            i += int(self.cseargs['start'])
        ext = self.cseargs['fileType']
        ext = '.html' if ext == '' else '.' + ext
        file_name = self.cseargs['q'].replace(' ', '_') + '_' + str(i) + ext
        file_path = path.join(dir_path, file_name)
        # NOTE(review): the streamed response is never explicitly closed;
        # presumably connection reuse/GC covers it — consider r.close().
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(file_path, 'wb') as f:
                # Ensure gzip/deflate content is decoded while copying
                # the raw stream to disk.
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
|
def function[download_links, parameter[self, dir_path]]:
constant[Download web pages or images from search result links.
Args:
dir_path (str):
Path of directory to save downloads of :class:`api.results`.links
]
variable[links] assign[=] name[self].links
if <ast.UnaryOp object at 0x7da1b267c7f0> begin[:]
call[name[makedirs], parameter[name[dir_path]]]
for taget[tuple[[<ast.Name object at 0x7da1b267c6d0>, <ast.Name object at 0x7da1b267c700>]]] in starred[call[name[enumerate], parameter[name[links]]]] begin[:]
if compare[constant[start] in name[self].cseargs] begin[:]
<ast.AugAssign object at 0x7da1b267e290>
variable[ext] assign[=] call[name[self].cseargs][constant[fileType]]
variable[ext] assign[=] <ast.IfExp object at 0x7da1b267e3b0>
variable[file_name] assign[=] binary_operation[binary_operation[binary_operation[call[call[name[self].cseargs][constant[q]].replace, parameter[constant[ ], constant[_]]] + constant[_]] + call[name[str], parameter[name[i]]]] + name[ext]]
variable[file_path] assign[=] call[name[path].join, parameter[name[dir_path], name[file_name]]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[r].status_code equal[==] constant[200]] begin[:]
with call[name[open], parameter[name[file_path], constant[wb]]] begin[:]
name[r].raw.decode_content assign[=] constant[True]
call[name[shutil].copyfileobj, parameter[name[r].raw, name[f]]]
|
keyword[def] identifier[download_links] ( identifier[self] , identifier[dir_path] ):
literal[string]
identifier[links] = identifier[self] . identifier[links]
keyword[if] keyword[not] identifier[path] . identifier[exists] ( identifier[dir_path] ):
identifier[makedirs] ( identifier[dir_path] )
keyword[for] identifier[i] , identifier[url] keyword[in] identifier[enumerate] ( identifier[links] ):
keyword[if] literal[string] keyword[in] identifier[self] . identifier[cseargs] :
identifier[i] += identifier[int] ( identifier[self] . identifier[cseargs] [ literal[string] ])
identifier[ext] = identifier[self] . identifier[cseargs] [ literal[string] ]
identifier[ext] = literal[string] keyword[if] identifier[ext] == literal[string] keyword[else] literal[string] + identifier[ext]
identifier[file_name] = identifier[self] . identifier[cseargs] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )+ literal[string] + identifier[str] ( identifier[i] )+ identifier[ext]
identifier[file_path] = identifier[path] . identifier[join] ( identifier[dir_path] , identifier[file_name] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[stream] = keyword[True] )
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[r] . identifier[raw] . identifier[decode_content] = keyword[True]
identifier[shutil] . identifier[copyfileobj] ( identifier[r] . identifier[raw] , identifier[f] )
|
def download_links(self, dir_path):
"""Download web pages or images from search result links.
Args:
dir_path (str):
Path of directory to save downloads of :class:`api.results`.links
"""
links = self.links
if not path.exists(dir_path):
makedirs(dir_path) # depends on [control=['if'], data=[]]
for (i, url) in enumerate(links):
if 'start' in self.cseargs:
i += int(self.cseargs['start']) # depends on [control=['if'], data=[]]
ext = self.cseargs['fileType']
ext = '.html' if ext == '' else '.' + ext
file_name = self.cseargs['q'].replace(' ', '_') + '_' + str(i) + ext
file_path = path.join(dir_path, file_name)
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(file_path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def get_security_file(filename, profile='default'):
    """Return the absolute path of a security file given by filename and profile.

    This allows users and developers to find security files without
    knowledge of the IPython directory structure. The search path
    will be ['.', profile.security_dir].

    Parameters
    ----------
    filename : str
        The file to be found. If it is passed as an absolute path, it will
        simply be returned.
    profile : str [default: 'default']
        The name of the profile to search.

    Returns
    -------
    Raises :exc:`IOError` if file not found or returns absolute path to file.
    """
    # import here, because profiledir also imports from utils.path
    from IPython.core.profiledir import ProfileDir
    try:
        pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
    except Exception:
        # will raise ProfileDirError if no such profile
        # Bug fix: the original message lacked the % argument, so the
        # literal "%r" appeared in the error text instead of the profile name.
        raise IOError("Profile %r not found" % profile)
    return filefind(filename, ['.', pd.security_dir])
|
def function[get_security_file, parameter[filename, profile]]:
constant[Return the absolute path of a security file given by filename and profile
This allows users and developers to find security files without
knowledge of the IPython directory structure. The search path
will be ['.', profile.security_dir]
Parameters
----------
filename : str
The file to be found. If it is passed as an absolute path, it will
simply be returned.
profile : str [default: 'default']
The name of the profile to search. Leaving this unspecified
The file to be found. If it is passed as an absolute path, fname will
simply be returned.
Returns
-------
Raises :exc:`IOError` if file not found or returns absolute path to file.
]
from relative_module[IPython.core.profiledir] import module[ProfileDir]
<ast.Try object at 0x7da20c6abc70>
return[call[name[filefind], parameter[name[filename], list[[<ast.Constant object at 0x7da20c6aa380>, <ast.Attribute object at 0x7da20c6aacb0>]]]]]
|
keyword[def] identifier[get_security_file] ( identifier[filename] , identifier[profile] = literal[string] ):
literal[string]
keyword[from] identifier[IPython] . identifier[core] . identifier[profiledir] keyword[import] identifier[ProfileDir]
keyword[try] :
identifier[pd] = identifier[ProfileDir] . identifier[find_profile_dir_by_name] ( identifier[get_ipython_dir] (), identifier[profile] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[return] identifier[filefind] ( identifier[filename] ,[ literal[string] , identifier[pd] . identifier[security_dir] ])
|
def get_security_file(filename, profile='default'):
"""Return the absolute path of a security file given by filename and profile
This allows users and developers to find security files without
knowledge of the IPython directory structure. The search path
will be ['.', profile.security_dir]
Parameters
----------
filename : str
The file to be found. If it is passed as an absolute path, it will
simply be returned.
profile : str [default: 'default']
The name of the profile to search. Leaving this unspecified
The file to be found. If it is passed as an absolute path, fname will
simply be returned.
Returns
-------
Raises :exc:`IOError` if file not found or returns absolute path to file.
"""
# import here, because profiledir also imports from utils.path
from IPython.core.profiledir import ProfileDir
try:
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile) # depends on [control=['try'], data=[]]
except Exception:
# will raise ProfileDirError if no such profile
raise IOError('Profile %r not found') # depends on [control=['except'], data=[]]
return filefind(filename, ['.', pd.security_dir])
|
def _populate_bunch_with_element(element):
"""
Helper function to recursively populates a Bunch from an XML tree.
Returns leaf XML elements as a simple value, branch elements are returned
as Bunches containing their subelements as value or recursively generated
Bunch members.
"""
if 'value' in element.attrib:
return element.get('value')
current_bunch = Bunch()
if element.get('id'):
current_bunch['nextra_element_id'] = element.get('id')
for subelement in element.getchildren():
current_bunch[subelement.tag] = _populate_bunch_with_element(
subelement)
return current_bunch
|
def function[_populate_bunch_with_element, parameter[element]]:
constant[
Helper function to recursively populates a Bunch from an XML tree.
Returns leaf XML elements as a simple value, branch elements are returned
as Bunches containing their subelements as value or recursively generated
Bunch members.
]
if compare[constant[value] in name[element].attrib] begin[:]
return[call[name[element].get, parameter[constant[value]]]]
variable[current_bunch] assign[=] call[name[Bunch], parameter[]]
if call[name[element].get, parameter[constant[id]]] begin[:]
call[name[current_bunch]][constant[nextra_element_id]] assign[=] call[name[element].get, parameter[constant[id]]]
for taget[name[subelement]] in starred[call[name[element].getchildren, parameter[]]] begin[:]
call[name[current_bunch]][name[subelement].tag] assign[=] call[name[_populate_bunch_with_element], parameter[name[subelement]]]
return[name[current_bunch]]
|
keyword[def] identifier[_populate_bunch_with_element] ( identifier[element] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[element] . identifier[attrib] :
keyword[return] identifier[element] . identifier[get] ( literal[string] )
identifier[current_bunch] = identifier[Bunch] ()
keyword[if] identifier[element] . identifier[get] ( literal[string] ):
identifier[current_bunch] [ literal[string] ]= identifier[element] . identifier[get] ( literal[string] )
keyword[for] identifier[subelement] keyword[in] identifier[element] . identifier[getchildren] ():
identifier[current_bunch] [ identifier[subelement] . identifier[tag] ]= identifier[_populate_bunch_with_element] (
identifier[subelement] )
keyword[return] identifier[current_bunch]
|
def _populate_bunch_with_element(element):
"""
Helper function to recursively populates a Bunch from an XML tree.
Returns leaf XML elements as a simple value, branch elements are returned
as Bunches containing their subelements as value or recursively generated
Bunch members.
"""
if 'value' in element.attrib:
return element.get('value') # depends on [control=['if'], data=[]]
current_bunch = Bunch()
if element.get('id'):
current_bunch['nextra_element_id'] = element.get('id') # depends on [control=['if'], data=[]]
for subelement in element.getchildren():
current_bunch[subelement.tag] = _populate_bunch_with_element(subelement) # depends on [control=['for'], data=['subelement']]
return current_bunch
|
def compare_nouns(self, word1, word2):
    """
    Compare word1 and word2 for equality regardless of plurality;
    both words are treated as nouns.

    Return values:
        'eq'  - the strings are equal
        'p:s' - word1 is the plural of word2
        's:p' - word2 is the plural of word1
        'p:p' - word1 and word2 are two different plural forms of the one word
        False - otherwise

    Delegates to ``_plequal`` with ``plural_noun`` as the pluralizer.
    """
    return self._plequal(word1, word2, self.plural_noun)
|
def function[compare_nouns, parameter[self, word1, word2]]:
constant[
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
]
return[call[name[self]._plequal, parameter[name[word1], name[word2], name[self].plural_noun]]]
|
keyword[def] identifier[compare_nouns] ( identifier[self] , identifier[word1] , identifier[word2] ):
literal[string]
keyword[return] identifier[self] . identifier[_plequal] ( identifier[word1] , identifier[word2] , identifier[self] . identifier[plural_noun] )
|
def compare_nouns(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_noun)
|
def loss(self, xs, ys):
    """Computes the loss of the network.

    Runs the cross-entropy op with ``xs``/``ys`` fed into the input
    placeholders and returns the result as a plain Python float.
    """
    feed = {self.x: xs, self.y_: ys}
    cross_entropy_value = self.sess.run(self.cross_entropy, feed_dict=feed)
    return float(cross_entropy_value)
|
def function[loss, parameter[self, xs, ys]]:
constant[Computes the loss of the network.]
return[call[name[float], parameter[call[name[self].sess.run, parameter[name[self].cross_entropy]]]]]
|
keyword[def] identifier[loss] ( identifier[self] , identifier[xs] , identifier[ys] ):
literal[string]
keyword[return] identifier[float] (
identifier[self] . identifier[sess] . identifier[run] (
identifier[self] . identifier[cross_entropy] , identifier[feed_dict] ={
identifier[self] . identifier[x] : identifier[xs] ,
identifier[self] . identifier[y_] : identifier[ys]
}))
|
def loss(self, xs, ys):
"""Computes the loss of the network."""
return float(self.sess.run(self.cross_entropy, feed_dict={self.x: xs, self.y_: ys}))
|
def main():
    """
    Command line interface.

    Parses the monoseq options and pretty-prints the input sequence(s).
    """
    arg_parser = argparse.ArgumentParser(
        description='monoseq: pretty-printing DNA and protein sequences',
        epilog='If INPUT is in FASTA format, each record is pretty-printed '
        'after printing its name and ANNOTATION (if supplied) is used by '
        'matching chromosome/record name. If INPUT contains a raw sequence, '
        'only the first chromosome in ANNOTATION is used.')
    arg_parser.add_argument(
        'sequence_file', metavar='INPUT', nargs='?', default=sys.stdin,
        type=argparse.FileType('r'), help='file to read sequence(s) from, '
        'can be in FASTA format (default: standard input)')
    arg_parser.add_argument(
        '-b', '--block-length', metavar='LENGTH', dest='block_length',
        type=int, default=10, help='block length in letters (default: 10)')
    arg_parser.add_argument(
        '-l', '--blocks-per-line', metavar='BLOCKS', dest='blocks_per_line',
        type=int, default=6, help='blocks per line (default: 6)')
    arg_parser.add_argument(
        '-a', '--annotation', metavar='POS', dest='annotation', nargs=2,
        action='append', type=int, help='first and last positions of '
        'subsequence to annotate (allowed more than once)')
    arg_parser.add_argument(
        '-e', '--bed', metavar='ANNOTATION', dest='annotation_file',
        type=argparse.FileType('r'), help='file to read annotation from in '
        'BED format')
    options = arg_parser.parse_args()

    # Stream the input until EOF and hand everything to the pretty-printer.
    pprint(
        _until_eof(options.sequence_file),
        annotation=options.annotation,
        annotation_file=options.annotation_file,
        block_length=options.block_length,
        blocks_per_line=options.blocks_per_line)
|
def function[main, parameter[]]:
constant[
Command line interface.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[sequence_file]]]
call[name[parser].add_argument, parameter[constant[-b], constant[--block-length]]]
call[name[parser].add_argument, parameter[constant[-l], constant[--blocks-per-line]]]
call[name[parser].add_argument, parameter[constant[-a], constant[--annotation]]]
call[name[parser].add_argument, parameter[constant[-e], constant[--bed]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
call[name[pprint], parameter[call[name[_until_eof], parameter[name[args].sequence_file]]]]
|
keyword[def] identifier[main] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] = literal[string] ,
identifier[epilog] = literal[string]
literal[string]
literal[string]
literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[metavar] = literal[string] , identifier[nargs] = literal[string] , identifier[default] = identifier[sys] . identifier[stdin] ,
identifier[type] = identifier[argparse] . identifier[FileType] ( literal[string] ), identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[nargs] = literal[int] ,
identifier[action] = literal[string] , identifier[type] = identifier[int] , identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[argparse] . identifier[FileType] ( literal[string] ), identifier[help] = literal[string]
literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[pprint] ( identifier[_until_eof] ( identifier[args] . identifier[sequence_file] ), identifier[annotation] = identifier[args] . identifier[annotation] ,
identifier[annotation_file] = identifier[args] . identifier[annotation_file] ,
identifier[block_length] = identifier[args] . identifier[block_length] ,
identifier[blocks_per_line] = identifier[args] . identifier[blocks_per_line] )
|
def main():
"""
Command line interface.
"""
parser = argparse.ArgumentParser(description='monoseq: pretty-printing DNA and protein sequences', epilog='If INPUT is in FASTA format, each record is pretty-printed after printing its name and ANNOTATION (if supplied) is used by matching chromosome/record name. If INPUT contains a raw sequence, only the first chromosome in ANNOTATION is used.')
parser.add_argument('sequence_file', metavar='INPUT', nargs='?', default=sys.stdin, type=argparse.FileType('r'), help='file to read sequence(s) from, can be in FASTA format (default: standard input)')
parser.add_argument('-b', '--block-length', metavar='LENGTH', dest='block_length', type=int, default=10, help='block length in letters (default: 10)')
parser.add_argument('-l', '--blocks-per-line', metavar='BLOCKS', dest='blocks_per_line', type=int, default=6, help='blocks per line (default: 6)')
parser.add_argument('-a', '--annotation', metavar='POS', dest='annotation', nargs=2, action='append', type=int, help='first and last positions of subsequence to annotate (allowed more than once)')
parser.add_argument('-e', '--bed', metavar='ANNOTATION', dest='annotation_file', type=argparse.FileType('r'), help='file to read annotation from in BED format')
args = parser.parse_args()
pprint(_until_eof(args.sequence_file), annotation=args.annotation, annotation_file=args.annotation_file, block_length=args.block_length, blocks_per_line=args.blocks_per_line)
|
def _dereference(cls, documents, references):
    """Dereference one or more documents.

    ``references`` maps a (dotted) path within each document to a
    projection dict; when that projection contains a ``$ref`` entry, the
    Ids found at the path are looked up in one bulk query and replaced
    in-place with the referenced frames.

    NOTE: this mutates both ``documents`` (reference Ids are replaced
    with frames) and ``references`` (``$ref`` is popped from each
    projection).
    """
    # Dereference each reference
    for path, projection in references.items():
        # Check there is a $ref in the projection, else skip it
        if '$ref' not in projection:
            continue
        # Collect Ids of documents to dereference
        ids = set()
        for document in documents:
            value = cls._path_to_value(path, document)
            if not value:
                continue
            if isinstance(value, list):
                # List of Ids
                ids.update(value)
            elif isinstance(value, dict):
                # Mapping of key -> Id; only the Ids are looked up
                ids.update(value.values())
            else:
                # Single Id
                ids.add(value)
        # Find the referenced documents
        # (pop mutates the projection so the remaining keys can be
        # passed straight through as the query projection)
        ref = projection.pop('$ref')
        frames = ref.many(
            {'_id': {'$in': list(ids)}},
            projection=projection
        )
        frames = {f._id: f for f in frames}
        # Add dereferenced frames to the document
        for document in documents:
            value = cls._path_to_value(path, document)
            if not value:
                continue
            if isinstance(value, list):
                # List of references
                # (Ids with no matching frame are silently dropped)
                value = [frames[id] for id in value if id in frames]
            elif isinstance(value, dict):
                # Dictionary of references
                # (Ids with no matching frame map to None)
                value = {key: frames.get(id) for key, id in value.items()}
            else:
                value = frames.get(value, None)
            # Walk down to the parent container of the final key and
            # overwrite the reference value in place.
            child_document = document
            keys = cls._path_to_keys(path)
            for key in keys[:-1]:
                child_document = child_document[key]
            child_document[keys[-1]] = value
|
def function[_dereference, parameter[cls, documents, references]]:
constant[Dereference one or more documents]
for taget[tuple[[<ast.Name object at 0x7da1b0c77f70>, <ast.Name object at 0x7da1b0c77b50>]]] in starred[call[name[references].items, parameter[]]] begin[:]
if compare[constant[$ref] <ast.NotIn object at 0x7da2590d7190> name[projection]] begin[:]
continue
variable[ids] assign[=] call[name[set], parameter[]]
for taget[name[document]] in starred[name[documents]] begin[:]
variable[value] assign[=] call[name[cls]._path_to_value, parameter[name[path], name[document]]]
if <ast.UnaryOp object at 0x7da1b0c775b0> begin[:]
continue
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
call[name[ids].update, parameter[name[value]]]
variable[ref] assign[=] call[name[projection].pop, parameter[constant[$ref]]]
variable[frames] assign[=] call[name[ref].many, parameter[dictionary[[<ast.Constant object at 0x7da1b0c91f90>], [<ast.Dict object at 0x7da1b0c917b0>]]]]
variable[frames] assign[=] <ast.DictComp object at 0x7da1b0c91b40>
for taget[name[document]] in starred[name[documents]] begin[:]
variable[value] assign[=] call[name[cls]._path_to_value, parameter[name[path], name[document]]]
if <ast.UnaryOp object at 0x7da1b0c93af0> begin[:]
continue
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
variable[value] assign[=] <ast.ListComp object at 0x7da1b0c90a90>
variable[child_document] assign[=] name[document]
variable[keys] assign[=] call[name[cls]._path_to_keys, parameter[name[path]]]
for taget[name[key]] in starred[call[name[keys]][<ast.Slice object at 0x7da1b0c935e0>]] begin[:]
variable[child_document] assign[=] call[name[child_document]][name[key]]
call[name[child_document]][call[name[keys]][<ast.UnaryOp object at 0x7da1b0c90640>]] assign[=] name[value]
|
keyword[def] identifier[_dereference] ( identifier[cls] , identifier[documents] , identifier[references] ):
literal[string]
keyword[for] identifier[path] , identifier[projection] keyword[in] identifier[references] . identifier[items] ():
keyword[if] literal[string] keyword[not] keyword[in] identifier[projection] :
keyword[continue]
identifier[ids] = identifier[set] ()
keyword[for] identifier[document] keyword[in] identifier[documents] :
identifier[value] = identifier[cls] . identifier[_path_to_value] ( identifier[path] , identifier[document] )
keyword[if] keyword[not] identifier[value] :
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[ids] . identifier[update] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[ids] . identifier[update] ( identifier[value] . identifier[values] ())
keyword[else] :
identifier[ids] . identifier[add] ( identifier[value] )
identifier[ref] = identifier[projection] . identifier[pop] ( literal[string] )
identifier[frames] = identifier[ref] . identifier[many] (
{ literal[string] :{ literal[string] : identifier[list] ( identifier[ids] )}},
identifier[projection] = identifier[projection]
)
identifier[frames] ={ identifier[f] . identifier[_id] : identifier[f] keyword[for] identifier[f] keyword[in] identifier[frames] }
keyword[for] identifier[document] keyword[in] identifier[documents] :
identifier[value] = identifier[cls] . identifier[_path_to_value] ( identifier[path] , identifier[document] )
keyword[if] keyword[not] identifier[value] :
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[value] =[ identifier[frames] [ identifier[id] ] keyword[for] identifier[id] keyword[in] identifier[value] keyword[if] identifier[id] keyword[in] identifier[frames] ]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[value] ={ identifier[key] : identifier[frames] . identifier[get] ( identifier[id] ) keyword[for] identifier[key] , identifier[id] keyword[in] identifier[value] . identifier[items] ()}
keyword[else] :
identifier[value] = identifier[frames] . identifier[get] ( identifier[value] , keyword[None] )
identifier[child_document] = identifier[document]
identifier[keys] = identifier[cls] . identifier[_path_to_keys] ( identifier[path] )
keyword[for] identifier[key] keyword[in] identifier[keys] [:- literal[int] ]:
identifier[child_document] = identifier[child_document] [ identifier[key] ]
identifier[child_document] [ identifier[keys] [- literal[int] ]]= identifier[value]
|
def _dereference(cls, documents, references):
"""Dereference one or more documents"""
# Dereference each reference
for (path, projection) in references.items():
# Check there is a $ref in the projection, else skip it
if '$ref' not in projection:
continue # depends on [control=['if'], data=[]]
# Collect Ids of documents to dereference
ids = set()
for document in documents:
value = cls._path_to_value(path, document)
if not value:
continue # depends on [control=['if'], data=[]]
if isinstance(value, list):
ids.update(value) # depends on [control=['if'], data=[]]
elif isinstance(value, dict):
ids.update(value.values()) # depends on [control=['if'], data=[]]
else:
ids.add(value) # depends on [control=['for'], data=['document']]
# Find the referenced documents
ref = projection.pop('$ref')
frames = ref.many({'_id': {'$in': list(ids)}}, projection=projection)
frames = {f._id: f for f in frames}
# Add dereferenced frames to the document
for document in documents:
value = cls._path_to_value(path, document)
if not value:
continue # depends on [control=['if'], data=[]]
if isinstance(value, list):
# List of references
value = [frames[id] for id in value if id in frames] # depends on [control=['if'], data=[]]
elif isinstance(value, dict):
# Dictionary of references
value = {key: frames.get(id) for (key, id) in value.items()} # depends on [control=['if'], data=[]]
else:
value = frames.get(value, None)
child_document = document
keys = cls._path_to_keys(path)
for key in keys[:-1]:
child_document = child_document[key] # depends on [control=['for'], data=['key']]
child_document[keys[-1]] = value # depends on [control=['for'], data=['document']] # depends on [control=['for'], data=[]]
|
def block_specification_to_number(block: BlockSpecification, web3: Web3) -> BlockNumber:
    """Convert a block specification to an actual block number.

    Accepts the strings ``'latest'``/``'pending'``, a block hash, or a
    block number, and resolves the first two via the connected node.

    Raises AssertionError for an unsupported string or an unknown type.
    """
    if isinstance(block, str):
        # Only the two symbolic specifications are supported.
        if block not in ('latest', 'pending'):
            raise AssertionError(f"string block specification can't contain {block}")
        number = web3.eth.getBlock(block)['number']
    elif isinstance(block, T_BlockHash):
        number = web3.eth.getBlock(block)['number']
    elif isinstance(block, T_BlockNumber):
        number = block
    else:
        # Bug fix: this was guarded by ``if __debug__``, so running with
        # ``python -O`` fell through with ``number`` unbound and raised a
        # confusing NameError. Raise unconditionally instead.
        raise AssertionError(f'Unknown type {type(block)} given for block specification')
    return BlockNumber(number)
|
def function[block_specification_to_number, parameter[block, web3]]:
constant[ Converts a block specification to an actual block number ]
if call[name[isinstance], parameter[name[block], name[str]]] begin[:]
variable[msg] assign[=] <ast.JoinedStr object at 0x7da1b19b80d0>
assert[compare[name[block] in tuple[[<ast.Constant object at 0x7da1b19bb400>, <ast.Constant object at 0x7da1b19b8250>]]]]
variable[number] assign[=] call[call[name[web3].eth.getBlock, parameter[name[block]]]][constant[number]]
return[call[name[BlockNumber], parameter[name[number]]]]
|
keyword[def] identifier[block_specification_to_number] ( identifier[block] : identifier[BlockSpecification] , identifier[web3] : identifier[Web3] )-> identifier[BlockNumber] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[block] , identifier[str] ):
identifier[msg] = literal[string]
keyword[assert] identifier[block] keyword[in] ( literal[string] , literal[string] ), identifier[msg]
identifier[number] = identifier[web3] . identifier[eth] . identifier[getBlock] ( identifier[block] )[ literal[string] ]
keyword[elif] identifier[isinstance] ( identifier[block] , identifier[T_BlockHash] ):
identifier[number] = identifier[web3] . identifier[eth] . identifier[getBlock] ( identifier[block] )[ literal[string] ]
keyword[elif] identifier[isinstance] ( identifier[block] , identifier[T_BlockNumber] ):
identifier[number] = identifier[block]
keyword[else] :
keyword[if] identifier[__debug__] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[return] identifier[BlockNumber] ( identifier[number] )
|
def block_specification_to_number(block: BlockSpecification, web3: Web3) -> BlockNumber:
""" Converts a block specification to an actual block number """
if isinstance(block, str):
msg = f"string block specification can't contain {block}"
assert block in ('latest', 'pending'), msg
number = web3.eth.getBlock(block)['number'] # depends on [control=['if'], data=[]]
elif isinstance(block, T_BlockHash):
number = web3.eth.getBlock(block)['number'] # depends on [control=['if'], data=[]]
elif isinstance(block, T_BlockNumber):
number = block # depends on [control=['if'], data=[]]
elif __debug__:
raise AssertionError(f'Unknown type {type(block)} given for block specification') # depends on [control=['if'], data=[]]
return BlockNumber(number)
|
def select_option(self):
    """ Select this node if it is an option element inside a select tag. """
    if self.disabled:
        label = self.value or self.text
        warn("Attempt to select disabled option: {}".format(label))
    self.base.select_option()
|
def function[select_option, parameter[self]]:
constant[ Select this node if it is an option element inside a select tag. ]
if name[self].disabled begin[:]
call[name[warn], parameter[call[constant[Attempt to select disabled option: {}].format, parameter[<ast.BoolOp object at 0x7da1b0216860>]]]]
call[name[self].base.select_option, parameter[]]
|
keyword[def] identifier[select_option] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[disabled] :
identifier[warn] ( literal[string] . identifier[format] ( identifier[self] . identifier[value] keyword[or] identifier[self] . identifier[text] ))
identifier[self] . identifier[base] . identifier[select_option] ()
|
def select_option(self):
""" Select this node if it is an option element inside a select tag. """
if self.disabled:
warn('Attempt to select disabled option: {}'.format(self.value or self.text)) # depends on [control=['if'], data=[]]
self.base.select_option()
|
def modem_configuration(self, host, port):
    """Set the host:port for the Cellular device to send data to.

    Returns True if the command was successful.
    """
    # Build the command payload and let the shared helper validate the
    # device's response status.
    return self._check_command_response_status({
        'command': 'modem_configuration',
        'host': host,
        'port': port,
    })
|
def function[modem_configuration, parameter[self, host, port]]:
constant[Set the host:port for the Cellular device to send data to.
Returns True if the command was successful.
]
variable[request] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b84730>, <ast.Constant object at 0x7da1b1b857e0>, <ast.Constant object at 0x7da1b1b876d0>], [<ast.Constant object at 0x7da1b1b853f0>, <ast.Name object at 0x7da1b1b86800>, <ast.Name object at 0x7da1b1b87c10>]]
variable[status] assign[=] call[name[self]._check_command_response_status, parameter[name[request]]]
return[name[status]]
|
keyword[def] identifier[modem_configuration] ( identifier[self] , identifier[host] , identifier[port] ):
literal[string]
identifier[request] ={
literal[string] : literal[string] ,
literal[string] : identifier[host] ,
literal[string] : identifier[port]
}
identifier[status] = identifier[self] . identifier[_check_command_response_status] ( identifier[request] )
keyword[return] identifier[status]
|
def modem_configuration(self, host, port):
"""Set the host:port for the Cellular device to send data to.
Returns True if the command was successful.
"""
request = {'command': 'modem_configuration', 'host': host, 'port': port}
status = self._check_command_response_status(request)
return status
|
def to_chunks(self, df, chunk_size='D', func=None, **kwargs):
    """
    chunks the dataframe/series by dates
    Parameters
    ----------
    df: pandas dataframe or series
        Must be datetime indexed (index level named 'date') or have a
        column named 'date'. May be re-sorted by date internally.
    chunk_size: str
        any valid Pandas frequency string
    func: function
        func will be applied to each `chunk` generated by the chunker.
        This function CANNOT modify the date column of the dataframe!
    Returns
    -------
    generator that produces tuples: (start date, end date,
    chunk_size, dataframe/series)
    """
    # Locate the dates: either an index level or a 'date' column, and
    # make sure rows are in chronological order before chunking.
    if 'date' in df.index.names:
        dates = df.index.get_level_values('date')
        if not df.index.is_monotonic_increasing:
            df = df.sort_index()
    elif 'date' in df.columns:
        dates = pd.DatetimeIndex(df.date)
        if not dates.is_monotonic_increasing:
            # providing support for pandas 0.16.2 to 0.20.x
            # neither sort method exists in both
            try:
                df = df.sort_values('date')
            except AttributeError:
                df = df.sort(columns='date')
            # re-derive dates from the now-sorted frame
            dates = pd.DatetimeIndex(df.date)
    else:
        raise Exception("Data must be datetime indexed or have a column named 'date'")
    # Bucket each date into a period of the requested frequency; the
    # de-duplicated index gives one entry per emitted chunk, in order.
    period_obj = dates.to_period(chunk_size)
    period_obj_reduced = period_obj.drop_duplicates()
    count = 0
    # NOTE(review): groups on the private ``_data`` of the PeriodIndex —
    # presumably the raw period values; confirm against the supported
    # pandas versions before upgrading.
    for _, g in df.groupby(period_obj._data):
        # Period bounds for this chunk, as naive datetimes
        # (warn=False suppresses the nanosecond-loss warning).
        start = period_obj_reduced[count].start_time.to_pydatetime(warn=False)
        end = period_obj_reduced[count].end_time.to_pydatetime(warn=False)
        count += 1
        if func:
            yield start, end, chunk_size, func(g)
        else:
            yield start, end, chunk_size, g
|
def function[to_chunks, parameter[self, df, chunk_size, func]]:
constant[
chunks the dataframe/series by dates
Parameters
----------
df: pandas dataframe or series
chunk_size: str
any valid Pandas frequency string
func: function
func will be applied to each `chunk` generated by the chunker.
This function CANNOT modify the date column of the dataframe!
Returns
-------
generator that produces tuples: (start date, end date,
chunk_size, dataframe/series)
]
if compare[constant[date] in name[df].index.names] begin[:]
variable[dates] assign[=] call[name[df].index.get_level_values, parameter[constant[date]]]
if <ast.UnaryOp object at 0x7da2041dad70> begin[:]
variable[df] assign[=] call[name[df].sort_index, parameter[]]
variable[period_obj] assign[=] call[name[dates].to_period, parameter[name[chunk_size]]]
variable[period_obj_reduced] assign[=] call[name[period_obj].drop_duplicates, parameter[]]
variable[count] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da2041db580>, <ast.Name object at 0x7da2041d91e0>]]] in starred[call[name[df].groupby, parameter[name[period_obj]._data]]] begin[:]
variable[start] assign[=] call[call[name[period_obj_reduced]][name[count]].start_time.to_pydatetime, parameter[]]
variable[end] assign[=] call[call[name[period_obj_reduced]][name[count]].end_time.to_pydatetime, parameter[]]
<ast.AugAssign object at 0x7da2041da710>
if name[func] begin[:]
<ast.Yield object at 0x7da2041da590>
|
keyword[def] identifier[to_chunks] ( identifier[self] , identifier[df] , identifier[chunk_size] = literal[string] , identifier[func] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[df] . identifier[index] . identifier[names] :
identifier[dates] = identifier[df] . identifier[index] . identifier[get_level_values] ( literal[string] )
keyword[if] keyword[not] identifier[df] . identifier[index] . identifier[is_monotonic_increasing] :
identifier[df] = identifier[df] . identifier[sort_index] ()
keyword[elif] literal[string] keyword[in] identifier[df] . identifier[columns] :
identifier[dates] = identifier[pd] . identifier[DatetimeIndex] ( identifier[df] . identifier[date] )
keyword[if] keyword[not] identifier[dates] . identifier[is_monotonic_increasing] :
keyword[try] :
identifier[df] = identifier[df] . identifier[sort_values] ( literal[string] )
keyword[except] identifier[AttributeError] :
identifier[df] = identifier[df] . identifier[sort] ( identifier[columns] = literal[string] )
identifier[dates] = identifier[pd] . identifier[DatetimeIndex] ( identifier[df] . identifier[date] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[period_obj] = identifier[dates] . identifier[to_period] ( identifier[chunk_size] )
identifier[period_obj_reduced] = identifier[period_obj] . identifier[drop_duplicates] ()
identifier[count] = literal[int]
keyword[for] identifier[_] , identifier[g] keyword[in] identifier[df] . identifier[groupby] ( identifier[period_obj] . identifier[_data] ):
identifier[start] = identifier[period_obj_reduced] [ identifier[count] ]. identifier[start_time] . identifier[to_pydatetime] ( identifier[warn] = keyword[False] )
identifier[end] = identifier[period_obj_reduced] [ identifier[count] ]. identifier[end_time] . identifier[to_pydatetime] ( identifier[warn] = keyword[False] )
identifier[count] += literal[int]
keyword[if] identifier[func] :
keyword[yield] identifier[start] , identifier[end] , identifier[chunk_size] , identifier[func] ( identifier[g] )
keyword[else] :
keyword[yield] identifier[start] , identifier[end] , identifier[chunk_size] , identifier[g]
|
def to_chunks(self, df, chunk_size='D', func=None, **kwargs):
"""
chunks the dataframe/series by dates
Parameters
----------
df: pandas dataframe or series
chunk_size: str
any valid Pandas frequency string
func: function
func will be applied to each `chunk` generated by the chunker.
This function CANNOT modify the date column of the dataframe!
Returns
-------
generator that produces tuples: (start date, end date,
chunk_size, dataframe/series)
"""
if 'date' in df.index.names:
dates = df.index.get_level_values('date')
if not df.index.is_monotonic_increasing:
df = df.sort_index() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'date' in df.columns:
dates = pd.DatetimeIndex(df.date)
if not dates.is_monotonic_increasing:
# providing support for pandas 0.16.2 to 0.20.x
# neither sort method exists in both
try:
df = df.sort_values('date') # depends on [control=['try'], data=[]]
except AttributeError:
df = df.sort(columns='date') # depends on [control=['except'], data=[]]
dates = pd.DatetimeIndex(df.date) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise Exception("Data must be datetime indexed or have a column named 'date'")
period_obj = dates.to_period(chunk_size)
period_obj_reduced = period_obj.drop_duplicates()
count = 0
for (_, g) in df.groupby(period_obj._data):
start = period_obj_reduced[count].start_time.to_pydatetime(warn=False)
end = period_obj_reduced[count].end_time.to_pydatetime(warn=False)
count += 1
if func:
yield (start, end, chunk_size, func(g)) # depends on [control=['if'], data=[]]
else:
yield (start, end, chunk_size, g) # depends on [control=['for'], data=[]]
|
def errinfo(msmt):
    """Return ``(limtype, repval, errval1, errval2)`` for a measurement.

    Like ``m_liminfo``, but additionally reports error-bar information for
    measurement kinds that carry it. Plain scalars and ``Lval`` limits have
    no independent error bars, so the representative value is repeated in
    the error slots.

    Raises ``ValueError`` for objects that are not recognized measurements.
    """
    # Textual measurements are a wrapper; work with the unwrapped value.
    value = msmt.unwrap() if isinstance(msmt, Textual) else msmt

    if np.isscalar(value):
        # Exact scalar: zero limit code, no spread.
        return 0, value, value, value

    if isinstance(value, Uval):
        # Uncertain value: representative plus one-sigma-style bounds,
        # as chosen by the module-wide default representation method.
        rep, upper, lower = value.repvals(uval_default_repval_method)
        return 0, rep, upper, lower

    if isinstance(value, Lval):
        # Limit value: the limit code conveys the direction; the nominal
        # value stands in for both error slots.
        nominal = value.value
        return limtype(value), nominal, nominal, nominal

    raise ValueError("don't know how to treat %r as a measurement" % value)
|
def function[errinfo, parameter[msmt]]:
constant[Return (limtype, repval, errval1, errval2). Like m_liminfo, but also
provides error bar information for values that have it.]
if call[name[isinstance], parameter[name[msmt], name[Textual]]] begin[:]
variable[msmt] assign[=] call[name[msmt].unwrap, parameter[]]
if call[name[np].isscalar, parameter[name[msmt]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b2795bd0>, <ast.Name object at 0x7da1b2795300>, <ast.Name object at 0x7da1b2795990>, <ast.Name object at 0x7da1b2797940>]]]
if call[name[isinstance], parameter[name[msmt], name[Uval]]] begin[:]
<ast.Tuple object at 0x7da1b2797130> assign[=] call[name[msmt].repvals, parameter[name[uval_default_repval_method]]]
return[tuple[[<ast.Constant object at 0x7da18f58ce50>, <ast.Name object at 0x7da18f58e800>, <ast.Name object at 0x7da18f58f5e0>, <ast.Name object at 0x7da18f58dd20>]]]
if call[name[isinstance], parameter[name[msmt], name[Lval]]] begin[:]
return[tuple[[<ast.Call object at 0x7da204565ae0>, <ast.Attribute object at 0x7da204567640>, <ast.Attribute object at 0x7da204564be0>, <ast.Attribute object at 0x7da204566920>]]]
<ast.Raise object at 0x7da1b27abc10>
|
keyword[def] identifier[errinfo] ( identifier[msmt] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[msmt] , identifier[Textual] ):
identifier[msmt] = identifier[msmt] . identifier[unwrap] ()
keyword[if] identifier[np] . identifier[isscalar] ( identifier[msmt] ):
keyword[return] literal[int] , identifier[msmt] , identifier[msmt] , identifier[msmt]
keyword[if] identifier[isinstance] ( identifier[msmt] , identifier[Uval] ):
identifier[rep] , identifier[plus1] , identifier[minus1] = identifier[msmt] . identifier[repvals] ( identifier[uval_default_repval_method] )
keyword[return] literal[int] , identifier[rep] , identifier[plus1] , identifier[minus1]
keyword[if] identifier[isinstance] ( identifier[msmt] , identifier[Lval] ):
keyword[return] identifier[limtype] ( identifier[msmt] ), identifier[msmt] . identifier[value] , identifier[msmt] . identifier[value] , identifier[msmt] . identifier[value]
keyword[raise] identifier[ValueError] ( literal[string] % identifier[msmt] )
|
def errinfo(msmt):
"""Return (limtype, repval, errval1, errval2). Like m_liminfo, but also
provides error bar information for values that have it."""
if isinstance(msmt, Textual):
msmt = msmt.unwrap() # depends on [control=['if'], data=[]]
if np.isscalar(msmt):
return (0, msmt, msmt, msmt) # depends on [control=['if'], data=[]]
if isinstance(msmt, Uval):
(rep, plus1, minus1) = msmt.repvals(uval_default_repval_method)
return (0, rep, plus1, minus1) # depends on [control=['if'], data=[]]
if isinstance(msmt, Lval):
return (limtype(msmt), msmt.value, msmt.value, msmt.value) # depends on [control=['if'], data=[]]
raise ValueError("don't know how to treat %r as a measurement" % msmt)
|
def from_passphrase(cls, passphrase=None):
    """Create a keypair from a passphrase (a "brain wallet" keypair).

    When *passphrase* is omitted (or empty/falsy), a fresh passphrase with
    160 bits of entropy is generated; rejection sampling repeats generation
    until the SHA-256 digest, read as an integer, is a valid private
    exponent (strictly less than the curve order). When a passphrase is
    supplied, the same digest-to-exponent derivation is applied once, and
    ``ValueError`` is raised if it falls outside the valid range.

    The resulting keypair records the passphrase on ``_passphrase``.
    """
    if passphrase:
        # Caller-supplied phrase: derive once and validate the exponent.
        hex_private_key = hashlib.sha256(passphrase).hexdigest()
        if not int(hex_private_key, 16) < cls._curve.order:
            raise ValueError(_errors["PHRASE_YIELDS_INVALID_EXPONENT"])
    else:
        # Rejection sampling: keep drawing fresh passphrases until the
        # derived integer is below the curve order.
        while True:
            passphrase = create_passphrase(bits_of_entropy=160)
            hex_private_key = hashlib.sha256(passphrase).hexdigest()
            if int(hex_private_key, 16) < cls._curve.order:
                break
    keypair = cls(hex_private_key)
    keypair._passphrase = passphrase
    return keypair
|
def function[from_passphrase, parameter[cls, passphrase]]:
constant[ Create keypair from a passphrase input (a brain wallet keypair).]
if <ast.UnaryOp object at 0x7da1b1042650> begin[:]
while constant[True] begin[:]
variable[passphrase] assign[=] call[name[create_passphrase], parameter[]]
variable[hex_private_key] assign[=] call[call[name[hashlib].sha256, parameter[name[passphrase]]].hexdigest, parameter[]]
if compare[call[name[int], parameter[name[hex_private_key], constant[16]]] less[<] name[cls]._curve.order] begin[:]
break
variable[keypair] assign[=] call[name[cls], parameter[name[hex_private_key]]]
name[keypair]._passphrase assign[=] name[passphrase]
return[name[keypair]]
|
keyword[def] identifier[from_passphrase] ( identifier[cls] , identifier[passphrase] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[passphrase] :
keyword[while] keyword[True] :
identifier[passphrase] = identifier[create_passphrase] ( identifier[bits_of_entropy] = literal[int] )
identifier[hex_private_key] = identifier[hashlib] . identifier[sha256] ( identifier[passphrase] ). identifier[hexdigest] ()
keyword[if] identifier[int] ( identifier[hex_private_key] , literal[int] )< identifier[cls] . identifier[_curve] . identifier[order] :
keyword[break]
keyword[else] :
identifier[hex_private_key] = identifier[hashlib] . identifier[sha256] ( identifier[passphrase] ). identifier[hexdigest] ()
keyword[if] keyword[not] ( identifier[int] ( identifier[hex_private_key] , literal[int] )< identifier[cls] . identifier[_curve] . identifier[order] ):
keyword[raise] identifier[ValueError] ( identifier[_errors] [ literal[string] ])
identifier[keypair] = identifier[cls] ( identifier[hex_private_key] )
identifier[keypair] . identifier[_passphrase] = identifier[passphrase]
keyword[return] identifier[keypair]
|
def from_passphrase(cls, passphrase=None):
""" Create keypair from a passphrase input (a brain wallet keypair)."""
if not passphrase:
# run a rejection sampling algorithm to ensure the private key is
# less than the curve order
while True:
passphrase = create_passphrase(bits_of_entropy=160)
hex_private_key = hashlib.sha256(passphrase).hexdigest()
if int(hex_private_key, 16) < cls._curve.order:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
hex_private_key = hashlib.sha256(passphrase).hexdigest()
if not int(hex_private_key, 16) < cls._curve.order:
raise ValueError(_errors['PHRASE_YIELDS_INVALID_EXPONENT']) # depends on [control=['if'], data=[]]
keypair = cls(hex_private_key)
keypair._passphrase = passphrase
return keypair
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.