code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _gate_pre_offset(self, gate):
"""
Return the offset to use before placing this gate.
:param string gate: The name of the gate whose pre-offset is desired.
:return: Offset to use before the gate.
:rtype: float
"""
try:
gates = self.settings['gates']
delta_pos = gates[gate.__class__.__name__]['pre_offset']
except KeyError:
delta_pos = self._gate_offset(gate)
return delta_pos
|
def function[_gate_pre_offset, parameter[self, gate]]:
constant[
Return the offset to use before placing this gate.
:param string gate: The name of the gate whose pre-offset is desired.
:return: Offset to use before the gate.
:rtype: float
]
<ast.Try object at 0x7da1b1bc0ee0>
return[name[delta_pos]]
|
keyword[def] identifier[_gate_pre_offset] ( identifier[self] , identifier[gate] ):
literal[string]
keyword[try] :
identifier[gates] = identifier[self] . identifier[settings] [ literal[string] ]
identifier[delta_pos] = identifier[gates] [ identifier[gate] . identifier[__class__] . identifier[__name__] ][ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[delta_pos] = identifier[self] . identifier[_gate_offset] ( identifier[gate] )
keyword[return] identifier[delta_pos]
|
def _gate_pre_offset(self, gate):
"""
Return the offset to use before placing this gate.
:param string gate: The name of the gate whose pre-offset is desired.
:return: Offset to use before the gate.
:rtype: float
"""
try:
gates = self.settings['gates']
delta_pos = gates[gate.__class__.__name__]['pre_offset'] # depends on [control=['try'], data=[]]
except KeyError:
delta_pos = self._gate_offset(gate) # depends on [control=['except'], data=[]]
return delta_pos
|
def decode_obj_table(table_entries, plugin):
    """Return root of obj table. Converts user-class objects"""
    decoded = []
    for raw in table_entries:
        if isinstance(raw, Container):
            # Generic Containers from the parser must not be recursive here.
            assert not hasattr(raw, '__recursion_lock__')
            obj_def = plugin.user_objects[raw.classID]
            assert raw.version == obj_def.version
            # Re-wrap as a named Container, pairing the class definition's
            # field names with the parsed values.
            fields = dict(zip(obj_def.defaults.keys(), raw.values))
            raw = Container(class_name=raw.classID, **fields)
        decoded.append(raw)
    return decode_network(decoded)
|
def function[decode_obj_table, parameter[table_entries, plugin]]:
constant[Return root of obj table. Converts user-class objects]
variable[entries] assign[=] list[[]]
for taget[name[entry]] in starred[name[table_entries]] begin[:]
if call[name[isinstance], parameter[name[entry], name[Container]]] begin[:]
assert[<ast.UnaryOp object at 0x7da20c9931f0>]
variable[user_obj_def] assign[=] call[name[plugin].user_objects][name[entry].classID]
assert[compare[name[entry].version equal[==] name[user_obj_def].version]]
variable[entry] assign[=] call[name[Container], parameter[]]
call[name[entries].append, parameter[name[entry]]]
return[call[name[decode_network], parameter[name[entries]]]]
|
keyword[def] identifier[decode_obj_table] ( identifier[table_entries] , identifier[plugin] ):
literal[string]
identifier[entries] =[]
keyword[for] identifier[entry] keyword[in] identifier[table_entries] :
keyword[if] identifier[isinstance] ( identifier[entry] , identifier[Container] ):
keyword[assert] keyword[not] identifier[hasattr] ( identifier[entry] , literal[string] )
identifier[user_obj_def] = identifier[plugin] . identifier[user_objects] [ identifier[entry] . identifier[classID] ]
keyword[assert] identifier[entry] . identifier[version] == identifier[user_obj_def] . identifier[version]
identifier[entry] = identifier[Container] ( identifier[class_name] = identifier[entry] . identifier[classID] ,
** identifier[dict] ( identifier[zip] ( identifier[user_obj_def] . identifier[defaults] . identifier[keys] (),
identifier[entry] . identifier[values] )))
identifier[entries] . identifier[append] ( identifier[entry] )
keyword[return] identifier[decode_network] ( identifier[entries] )
|
def decode_obj_table(table_entries, plugin):
"""Return root of obj table. Converts user-class objects"""
entries = []
for entry in table_entries:
if isinstance(entry, Container):
assert not hasattr(entry, '__recursion_lock__')
user_obj_def = plugin.user_objects[entry.classID]
assert entry.version == user_obj_def.version
entry = Container(class_name=entry.classID, **dict(zip(user_obj_def.defaults.keys(), entry.values))) # depends on [control=['if'], data=[]]
entries.append(entry) # depends on [control=['for'], data=['entry']]
return decode_network(entries)
|
def get_transactions(self, include_investment=False):
    """Return the transaction data as a Pandas DataFrame.

    :param bool include_investment: forwarded to ``get_transactions_csv``;
        include investment transactions when True.
    :return: one row per transaction, columns lower-cased with spaces
        replaced by underscores; ``category`` is lower-cased and
        'uncategorized' is mapped to NaN.
    :rtype: pandas.DataFrame
    """
    assert_pd()
    s = StringIO(self.get_transactions_csv(
        include_investment=include_investment))
    s.seek(0)
    df = pd.read_csv(s, parse_dates=['Date'])
    df.columns = [c.lower().replace(' ', '_') for c in df.columns]
    # NOTE: the original used pd.np.nan; the `pandas.np` alias was
    # deprecated in pandas 0.25 and removed in pandas 2.0, so use a plain
    # float NaN (identical value) instead.
    df.category = (df.category.str.lower()
                   .replace('uncategorized', float('nan')))
    return df
|
def function[get_transactions, parameter[self, include_investment]]:
constant[Returns the transaction data as a Pandas DataFrame.]
call[name[assert_pd], parameter[]]
variable[s] assign[=] call[name[StringIO], parameter[call[name[self].get_transactions_csv, parameter[]]]]
call[name[s].seek, parameter[constant[0]]]
variable[df] assign[=] call[name[pd].read_csv, parameter[name[s]]]
name[df].columns assign[=] <ast.ListComp object at 0x7da2054a5480>
name[df].category assign[=] call[call[name[df].category.str.lower, parameter[]].replace, parameter[constant[uncategorized], name[pd].np.nan]]
return[name[df]]
|
keyword[def] identifier[get_transactions] ( identifier[self] , identifier[include_investment] = keyword[False] ):
literal[string]
identifier[assert_pd] ()
identifier[s] = identifier[StringIO] ( identifier[self] . identifier[get_transactions_csv] (
identifier[include_investment] = identifier[include_investment] ))
identifier[s] . identifier[seek] ( literal[int] )
identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[s] , identifier[parse_dates] =[ literal[string] ])
identifier[df] . identifier[columns] =[ identifier[c] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[c] keyword[in] identifier[df] . identifier[columns] ]
identifier[df] . identifier[category] =( identifier[df] . identifier[category] . identifier[str] . identifier[lower] ()
. identifier[replace] ( literal[string] , identifier[pd] . identifier[np] . identifier[nan] ))
keyword[return] identifier[df]
|
def get_transactions(self, include_investment=False):
"""Returns the transaction data as a Pandas DataFrame."""
assert_pd()
s = StringIO(self.get_transactions_csv(include_investment=include_investment))
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = df.category.str.lower().replace('uncategorized', pd.np.nan)
return df
|
def serialize(script_string):
    '''
    str -> bytearray
    '''
    serialized = bytearray()
    for token in script_string.split():
        # These opcodes are deliberately unsupported.
        if token in ('OP_CODESEPARATOR', 'OP_PUSHDATA4'):
            raise NotImplementedError('{} is a bad idea.'.format(token))
        if token in riemann.network.CODE_TO_INT_OVERWRITE:
            # Network-specific opcode table takes precedence.
            serialized.append(riemann.network.CODE_TO_INT_OVERWRITE[token])
        elif token in CODE_TO_INT:
            serialized.append(CODE_TO_INT[token])
        else:
            # Not an opcode: treat the token as hex data and emit the
            # appropriate push operation for its length.
            blob = bytes.fromhex(token)
            length = len(blob)
            if length <= 75:
                serialized.append(CODE_TO_INT['OP_PUSH_{}'.format(length)])
                serialized.extend(blob)
            elif length <= 255:
                serialized.append(CODE_TO_INT['OP_PUSHDATA1'])
                serialized.extend(utils.i2le(length))
                serialized.extend(blob)
            elif length <= 1000:
                serialized.append(CODE_TO_INT['OP_PUSHDATA2'])
                serialized.extend(utils.i2le_padded(length, 2))
                serialized.extend(blob)
            else:
                raise NotImplementedError(
                    'Hex string too long to serialize.')
    return serialized
|
def function[serialize, parameter[script_string]]:
constant[
str -> bytearray
]
variable[string_tokens] assign[=] call[name[script_string].split, parameter[]]
variable[serialized_script] assign[=] call[name[bytearray], parameter[]]
for taget[name[token]] in starred[name[string_tokens]] begin[:]
if <ast.BoolOp object at 0x7da1b06a1600> begin[:]
<ast.Raise object at 0x7da1b06a2a10>
if compare[name[token] in name[riemann].network.CODE_TO_INT_OVERWRITE] begin[:]
call[name[serialized_script].extend, parameter[list[[<ast.Subscript object at 0x7da1b06a06a0>]]]]
return[name[serialized_script]]
|
keyword[def] identifier[serialize] ( identifier[script_string] ):
literal[string]
identifier[string_tokens] = identifier[script_string] . identifier[split] ()
identifier[serialized_script] = identifier[bytearray] ()
keyword[for] identifier[token] keyword[in] identifier[string_tokens] :
keyword[if] identifier[token] == literal[string] keyword[or] identifier[token] == literal[string] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[token] ))
keyword[if] identifier[token] keyword[in] identifier[riemann] . identifier[network] . identifier[CODE_TO_INT_OVERWRITE] :
identifier[serialized_script] . identifier[extend] (
[ identifier[riemann] . identifier[network] . identifier[CODE_TO_INT_OVERWRITE] [ identifier[token] ]])
keyword[elif] identifier[token] keyword[in] identifier[CODE_TO_INT] :
identifier[serialized_script] . identifier[extend] ([ identifier[CODE_TO_INT] [ identifier[token] ]])
keyword[else] :
identifier[token_bytes] = identifier[bytes] . identifier[fromhex] ( identifier[token] )
keyword[if] identifier[len] ( identifier[token_bytes] )<= literal[int] :
identifier[op] = literal[string] . identifier[format] ( identifier[len] ( identifier[token_bytes] ))
identifier[serialized_script] . identifier[extend] ([ identifier[CODE_TO_INT] [ identifier[op] ]])
identifier[serialized_script] . identifier[extend] ( identifier[token_bytes] )
keyword[elif] identifier[len] ( identifier[token_bytes] )> literal[int] keyword[and] identifier[len] ( identifier[token_bytes] )<= literal[int] :
identifier[op] = literal[string]
identifier[serialized_script] . identifier[extend] ([ identifier[CODE_TO_INT] [ identifier[op] ]])
identifier[serialized_script] . identifier[extend] ( identifier[utils] . identifier[i2le] ( identifier[len] ( identifier[token_bytes] )))
identifier[serialized_script] . identifier[extend] ( identifier[token_bytes] )
keyword[elif] identifier[len] ( identifier[token_bytes] )> literal[int] keyword[and] identifier[len] ( identifier[token_bytes] )<= literal[int] :
identifier[op] = literal[string]
identifier[serialized_script] . identifier[extend] ([ identifier[CODE_TO_INT] [ identifier[op] ]])
identifier[serialized_script] . identifier[extend] (
identifier[utils] . identifier[i2le_padded] ( identifier[len] ( identifier[token_bytes] ), literal[int] ))
identifier[serialized_script] . identifier[extend] ( identifier[token_bytes] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] (
literal[string] )
keyword[return] identifier[serialized_script]
|
def serialize(script_string):
"""
str -> bytearray
"""
string_tokens = script_string.split()
serialized_script = bytearray()
for token in string_tokens:
if token == 'OP_CODESEPARATOR' or token == 'OP_PUSHDATA4':
raise NotImplementedError('{} is a bad idea.'.format(token)) # depends on [control=['if'], data=[]]
if token in riemann.network.CODE_TO_INT_OVERWRITE:
serialized_script.extend([riemann.network.CODE_TO_INT_OVERWRITE[token]]) # depends on [control=['if'], data=['token']]
elif token in CODE_TO_INT:
serialized_script.extend([CODE_TO_INT[token]]) # depends on [control=['if'], data=['token', 'CODE_TO_INT']]
else:
token_bytes = bytes.fromhex(token)
if len(token_bytes) <= 75:
op = 'OP_PUSH_{}'.format(len(token_bytes))
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(token_bytes) # depends on [control=['if'], data=[]]
elif len(token_bytes) > 75 and len(token_bytes) <= 255:
op = 'OP_PUSHDATA1'
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(utils.i2le(len(token_bytes)))
serialized_script.extend(token_bytes) # depends on [control=['if'], data=[]]
elif len(token_bytes) > 255 and len(token_bytes) <= 1000:
op = 'OP_PUSHDATA2'
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(utils.i2le_padded(len(token_bytes), 2))
serialized_script.extend(token_bytes) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Hex string too long to serialize.') # depends on [control=['for'], data=['token']]
return serialized_script
|
def _set_esp_auth(self, v, load=False):
    """
    Setter method for esp_auth, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp_auth (algorithm-type-ah)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_esp_auth is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_esp_auth() directly.
    """
    # NOTE(review): auto-generated (pyangbind-style) setter — presumably not
    # hand-edited; keep changes to comments only.
    if hasattr(v, "_utype"):
        # Unwrap a typed value into its underlying representation first.
        v = v._utype(v)
    try:
        # Validate and coerce against the YANG restriction: only the keys
        # 'hmac-sha1' (2) and 'hmac-md5' (1) are accepted.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'hmac-sha1': {'value': 2}, u'hmac-md5': {'value': 1}},), is_leaf=True, yang_name="esp-auth", rest_name="esp-auth", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Use Authentication Algorithm', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-ah', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
          'error-string': """esp_auth must be of a type compatible with algorithm-type-ah""",
          'defined-type': "brocade-ospfv3:algorithm-type-ah",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'hmac-sha1': {'value': 2}, u'hmac-md5': {'value': 1}},), is_leaf=True, yang_name="esp-auth", rest_name="esp-auth", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Use Authentication Algorithm', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-ah', is_config=True)""",
        })
    self.__esp_auth = t
    if hasattr(self, '_set'):
        # Notify the parent object that a child value changed.
        self._set()
|
def function[_set_esp_auth, parameter[self, v, load]]:
constant[
Setter method for esp_auth, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp_auth (algorithm-type-ah)
If this variable is read-only (config: false) in the
source YANG file, then _set_esp_auth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_esp_auth() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c76dcf0>
name[self].__esp_auth assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_esp_auth] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__esp_auth] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_esp_auth(self, v, load=False):
"""
Setter method for esp_auth, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp_auth (algorithm-type-ah)
If this variable is read-only (config: false) in the
source YANG file, then _set_esp_auth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_esp_auth() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'hmac-sha1': {'value': 2}, u'hmac-md5': {'value': 1}}), is_leaf=True, yang_name='esp-auth', rest_name='esp-auth', parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Use Authentication Algorithm', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-ah', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'esp_auth must be of a type compatible with algorithm-type-ah', 'defined-type': 'brocade-ospfv3:algorithm-type-ah', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'hmac-sha1\': {\'value\': 2}, u\'hmac-md5\': {\'value\': 1}},), is_leaf=True, yang_name="esp-auth", rest_name="esp-auth", parent=self, choice=(u\'ch-algorithm\', u\'ca-esp-algorithm\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Use Authentication Algorithm\', u\'cli-drop-node-name\': None, u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ospfv3\', defining_module=\'brocade-ospfv3\', yang_type=\'algorithm-type-ah\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__esp_auth = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def retrieveVals(self):
    """Retrieve values for graphs."""
    error_fields = ('rxerrs', 'txerrs', 'rxframe', 'txcarrier',
                    'rxdrop', 'txdrop', 'rxfifo', 'txfifo')
    for iface in self._ifaceList:
        stats = self._ifaceStats.get(iface)
        traffic_graph = 'netiface_traffic_%s' % iface
        if self.hasGraph(traffic_graph):
            # Counters are in bytes; the traffic graph expects bits, hence * 8.
            self.setGraphVal(traffic_graph, 'rx', stats.get('rxbytes') * 8)
            self.setGraphVal(traffic_graph, 'tx', stats.get('txbytes') * 8)
        error_graph = 'netiface_errors_%s' % iface
        if self.hasGraph(error_graph):
            for field in error_fields:
                self.setGraphVal(error_graph, field, stats.get(field))
|
def function[retrieveVals, parameter[self]]:
constant[Retrieve values for graphs.]
for taget[name[iface]] in starred[name[self]._ifaceList] begin[:]
variable[stats] assign[=] call[name[self]._ifaceStats.get, parameter[name[iface]]]
variable[graph_name] assign[=] binary_operation[constant[netiface_traffic_%s] <ast.Mod object at 0x7da2590d6920> name[iface]]
if call[name[self].hasGraph, parameter[name[graph_name]]] begin[:]
call[name[self].setGraphVal, parameter[name[graph_name], constant[rx], binary_operation[call[name[stats].get, parameter[constant[rxbytes]]] * constant[8]]]]
call[name[self].setGraphVal, parameter[name[graph_name], constant[tx], binary_operation[call[name[stats].get, parameter[constant[txbytes]]] * constant[8]]]]
variable[graph_name] assign[=] binary_operation[constant[netiface_errors_%s] <ast.Mod object at 0x7da2590d6920> name[iface]]
if call[name[self].hasGraph, parameter[name[graph_name]]] begin[:]
for taget[name[field]] in starred[tuple[[<ast.Constant object at 0x7da1b1026f80>, <ast.Constant object at 0x7da1b1026410>, <ast.Constant object at 0x7da1b1024070>, <ast.Constant object at 0x7da1b1024880>, <ast.Constant object at 0x7da1b1026110>, <ast.Constant object at 0x7da1b1026a40>, <ast.Constant object at 0x7da1b10269e0>, <ast.Constant object at 0x7da1b1026560>]]] begin[:]
call[name[self].setGraphVal, parameter[name[graph_name], name[field], call[name[stats].get, parameter[name[field]]]]]
|
keyword[def] identifier[retrieveVals] ( identifier[self] ):
literal[string]
keyword[for] identifier[iface] keyword[in] identifier[self] . identifier[_ifaceList] :
identifier[stats] = identifier[self] . identifier[_ifaceStats] . identifier[get] ( identifier[iface] )
identifier[graph_name] = literal[string] % identifier[iface]
keyword[if] identifier[self] . identifier[hasGraph] ( identifier[graph_name] ):
identifier[self] . identifier[setGraphVal] ( identifier[graph_name] , literal[string] , identifier[stats] . identifier[get] ( literal[string] )* literal[int] )
identifier[self] . identifier[setGraphVal] ( identifier[graph_name] , literal[string] , identifier[stats] . identifier[get] ( literal[string] )* literal[int] )
identifier[graph_name] = literal[string] % identifier[iface]
keyword[if] identifier[self] . identifier[hasGraph] ( identifier[graph_name] ):
keyword[for] identifier[field] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[self] . identifier[setGraphVal] ( identifier[graph_name] , identifier[field] , identifier[stats] . identifier[get] ( identifier[field] ))
|
def retrieveVals(self):
"""Retrieve values for graphs."""
for iface in self._ifaceList:
stats = self._ifaceStats.get(iface)
graph_name = 'netiface_traffic_%s' % iface
if self.hasGraph(graph_name):
self.setGraphVal(graph_name, 'rx', stats.get('rxbytes') * 8)
self.setGraphVal(graph_name, 'tx', stats.get('txbytes') * 8) # depends on [control=['if'], data=[]]
graph_name = 'netiface_errors_%s' % iface
if self.hasGraph(graph_name):
for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier', 'rxdrop', 'txdrop', 'rxfifo', 'txfifo'):
self.setGraphVal(graph_name, field, stats.get(field)) # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['iface']]
|
def sens_batmon_encode(self, temperature: float, voltage: int, current: int, SoC: int, batterystatus: int, serialnumber: int, hostfetcontrol: int, cellvoltage1: int, cellvoltage2: int, cellvoltage3: int, cellvoltage4: int, cellvoltage5: int, cellvoltage6: int):
    '''
    Battery pack monitoring data for Li-Ion batteries
    temperature : Battery pack temperature in [deg C] (float)
    voltage : Battery pack voltage in [mV] (uint16_t)
    current : Battery pack current in [mA] (int16_t)
    SoC : Battery pack state-of-charge (uint8_t)
    batterystatus : Battery monitor status report bits in Hex (uint16_t)
    serialnumber : Battery monitor serial number in Hex (uint16_t)
    hostfetcontrol : Battery monitor sensor host FET control in Hex (uint16_t)
    cellvoltage1 : Battery pack cell 1 voltage in [mV] (uint16_t)
    cellvoltage2 : Battery pack cell 2 voltage in [mV] (uint16_t)
    cellvoltage3 : Battery pack cell 3 voltage in [mV] (uint16_t)
    cellvoltage4 : Battery pack cell 4 voltage in [mV] (uint16_t)
    cellvoltage5 : Battery pack cell 5 voltage in [mV] (uint16_t)
    cellvoltage6 : Battery pack cell 6 voltage in [mV] (uint16_t)
    '''
    # Thin generated wrapper: packs the fields positionally into a
    # SENS_BATMON MAVLink message object (no validation is done here).
    return MAVLink_sens_batmon_message(temperature, voltage, current, SoC, batterystatus, serialnumber, hostfetcontrol, cellvoltage1, cellvoltage2, cellvoltage3, cellvoltage4, cellvoltage5, cellvoltage6)
|
def function[sens_batmon_encode, parameter[self, temperature, voltage, current, SoC, batterystatus, serialnumber, hostfetcontrol, cellvoltage1, cellvoltage2, cellvoltage3, cellvoltage4, cellvoltage5, cellvoltage6]]:
constant[
Battery pack monitoring data for Li-Ion batteries
temperature : Battery pack temperature in [deg C] (float)
voltage : Battery pack voltage in [mV] (uint16_t)
current : Battery pack current in [mA] (int16_t)
SoC : Battery pack state-of-charge (uint8_t)
batterystatus : Battery monitor status report bits in Hex (uint16_t)
serialnumber : Battery monitor serial number in Hex (uint16_t)
hostfetcontrol : Battery monitor sensor host FET control in Hex (uint16_t)
cellvoltage1 : Battery pack cell 1 voltage in [mV] (uint16_t)
cellvoltage2 : Battery pack cell 2 voltage in [mV] (uint16_t)
cellvoltage3 : Battery pack cell 3 voltage in [mV] (uint16_t)
cellvoltage4 : Battery pack cell 4 voltage in [mV] (uint16_t)
cellvoltage5 : Battery pack cell 5 voltage in [mV] (uint16_t)
cellvoltage6 : Battery pack cell 6 voltage in [mV] (uint16_t)
]
return[call[name[MAVLink_sens_batmon_message], parameter[name[temperature], name[voltage], name[current], name[SoC], name[batterystatus], name[serialnumber], name[hostfetcontrol], name[cellvoltage1], name[cellvoltage2], name[cellvoltage3], name[cellvoltage4], name[cellvoltage5], name[cellvoltage6]]]]
|
keyword[def] identifier[sens_batmon_encode] ( identifier[self] , identifier[temperature] , identifier[voltage] , identifier[current] , identifier[SoC] , identifier[batterystatus] , identifier[serialnumber] , identifier[hostfetcontrol] , identifier[cellvoltage1] , identifier[cellvoltage2] , identifier[cellvoltage3] , identifier[cellvoltage4] , identifier[cellvoltage5] , identifier[cellvoltage6] ):
literal[string]
keyword[return] identifier[MAVLink_sens_batmon_message] ( identifier[temperature] , identifier[voltage] , identifier[current] , identifier[SoC] , identifier[batterystatus] , identifier[serialnumber] , identifier[hostfetcontrol] , identifier[cellvoltage1] , identifier[cellvoltage2] , identifier[cellvoltage3] , identifier[cellvoltage4] , identifier[cellvoltage5] , identifier[cellvoltage6] )
|
def sens_batmon_encode(self, temperature, voltage, current, SoC, batterystatus, serialnumber, hostfetcontrol, cellvoltage1, cellvoltage2, cellvoltage3, cellvoltage4, cellvoltage5, cellvoltage6):
"""
Battery pack monitoring data for Li-Ion batteries
temperature : Battery pack temperature in [deg C] (float)
voltage : Battery pack voltage in [mV] (uint16_t)
current : Battery pack current in [mA] (int16_t)
SoC : Battery pack state-of-charge (uint8_t)
batterystatus : Battery monitor status report bits in Hex (uint16_t)
serialnumber : Battery monitor serial number in Hex (uint16_t)
hostfetcontrol : Battery monitor sensor host FET control in Hex (uint16_t)
cellvoltage1 : Battery pack cell 1 voltage in [mV] (uint16_t)
cellvoltage2 : Battery pack cell 2 voltage in [mV] (uint16_t)
cellvoltage3 : Battery pack cell 3 voltage in [mV] (uint16_t)
cellvoltage4 : Battery pack cell 4 voltage in [mV] (uint16_t)
cellvoltage5 : Battery pack cell 5 voltage in [mV] (uint16_t)
cellvoltage6 : Battery pack cell 6 voltage in [mV] (uint16_t)
"""
return MAVLink_sens_batmon_message(temperature, voltage, current, SoC, batterystatus, serialnumber, hostfetcontrol, cellvoltage1, cellvoltage2, cellvoltage3, cellvoltage4, cellvoltage5, cellvoltage6)
|
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value):
    """Set this key of this edge to this value."""
    btt = (branch, turn, tick)
    # Each (branch, turn, tick) may only be written once.
    if btt in self._btts:
        raise TimeError
    self._btts.add(btt)
    pack = self.pack
    # Pack the serializable fields; idx and the time triple stay raw.
    self._edgevals2set.append(
        (pack(graph), pack(orig), pack(dest), idx, pack(key),
         branch, turn, tick, pack(value))
    )
|
def function[edge_val_set, parameter[self, graph, orig, dest, idx, key, branch, turn, tick, value]]:
constant[Set this key of this edge to this value.]
if compare[tuple[[<ast.Name object at 0x7da2047ea530>, <ast.Name object at 0x7da2047eb190>, <ast.Name object at 0x7da2047e8a60>]] in name[self]._btts] begin[:]
<ast.Raise object at 0x7da20c7968c0>
call[name[self]._btts.add, parameter[tuple[[<ast.Name object at 0x7da20c7959c0>, <ast.Name object at 0x7da20c794eb0>, <ast.Name object at 0x7da20c795ba0>]]]]
<ast.Tuple object at 0x7da20c794e80> assign[=] call[name[map], parameter[name[self].pack, tuple[[<ast.Name object at 0x7da20c796410>, <ast.Name object at 0x7da20c794dc0>, <ast.Name object at 0x7da20c796290>, <ast.Name object at 0x7da20c7951b0>, <ast.Name object at 0x7da20c796530>]]]]
call[name[self]._edgevals2set.append, parameter[tuple[[<ast.Name object at 0x7da20c796080>, <ast.Name object at 0x7da20c7952d0>, <ast.Name object at 0x7da20c796a40>, <ast.Name object at 0x7da20c796a70>, <ast.Name object at 0x7da20c795390>, <ast.Name object at 0x7da20c795660>, <ast.Name object at 0x7da20c796650>, <ast.Name object at 0x7da20c7955a0>, <ast.Name object at 0x7da20c796230>]]]]
|
keyword[def] identifier[edge_val_set] ( identifier[self] , identifier[graph] , identifier[orig] , identifier[dest] , identifier[idx] , identifier[key] , identifier[branch] , identifier[turn] , identifier[tick] , identifier[value] ):
literal[string]
keyword[if] ( identifier[branch] , identifier[turn] , identifier[tick] ) keyword[in] identifier[self] . identifier[_btts] :
keyword[raise] identifier[TimeError]
identifier[self] . identifier[_btts] . identifier[add] (( identifier[branch] , identifier[turn] , identifier[tick] ))
identifier[graph] , identifier[orig] , identifier[dest] , identifier[key] , identifier[value] = identifier[map] ( identifier[self] . identifier[pack] ,( identifier[graph] , identifier[orig] , identifier[dest] , identifier[key] , identifier[value] ))
identifier[self] . identifier[_edgevals2set] . identifier[append] (
( identifier[graph] , identifier[orig] , identifier[dest] , identifier[idx] , identifier[key] , identifier[branch] , identifier[turn] , identifier[tick] , identifier[value] )
)
|
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value):
"""Set this key of this edge to this value."""
if (branch, turn, tick) in self._btts:
raise TimeError # depends on [control=['if'], data=[]]
self._btts.add((branch, turn, tick))
(graph, orig, dest, key, value) = map(self.pack, (graph, orig, dest, key, value))
self._edgevals2set.append((graph, orig, dest, idx, key, branch, turn, tick, value))
|
def set_store_to(self, store_to):
    """Update store_to in Cache and backend.

    :param store_to: the next Cache towards main memory, or None when this
        is the last level that stores to memory directly.
    """
    assert store_to is None or isinstance(store_to, Cache), \
        "store_to needs to be None or a Cache object."
    assert store_to is None or store_to.cl_size <= self.cl_size, \
        "cl_size may only increase towards main memory."
    self.store_to = store_to
    # BUG FIX: the asserts explicitly allow store_to to be None, but the
    # original then unconditionally read store_to.backend, raising
    # AttributeError. Mirror None into the backend instead.
    self.backend.store_to = store_to.backend if store_to is not None else None
|
def function[set_store_to, parameter[self, store_to]]:
constant[Update store_to in Cache and backend.]
assert[<ast.BoolOp object at 0x7da207f985b0>]
assert[<ast.BoolOp object at 0x7da207f99990>]
name[self].store_to assign[=] name[store_to]
name[self].backend.store_to assign[=] name[store_to].backend
|
keyword[def] identifier[set_store_to] ( identifier[self] , identifier[store_to] ):
literal[string]
keyword[assert] identifier[store_to] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[store_to] , identifier[Cache] ), literal[string]
keyword[assert] identifier[store_to] keyword[is] keyword[None] keyword[or] identifier[store_to] . identifier[cl_size] <= identifier[self] . identifier[cl_size] , literal[string]
identifier[self] . identifier[store_to] = identifier[store_to]
identifier[self] . identifier[backend] . identifier[store_to] = identifier[store_to] . identifier[backend]
|
def set_store_to(self, store_to):
"""Update store_to in Cache and backend."""
assert store_to is None or isinstance(store_to, Cache), 'store_to needs to be None or a Cache object.'
assert store_to is None or store_to.cl_size <= self.cl_size, 'cl_size may only increase towards main memory.'
self.store_to = store_to
self.backend.store_to = store_to.backend
|
def get_nearest_entry(self, entry, type_measurement):
    """!
    @brief Find nearest entry of node for the specified entry.
    @param[in] entry (cfentry): Entry that is used for calculation distance.
    @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.
    @return (cfentry) Nearest entry of node for the specified entry.
    """
    # Idiom cleanup: drop trailing semicolons and the named lambda (PEP 8
    # E731); pass the distance key directly to min().
    return min(self.__entries,
               key=lambda candidate: candidate.get_distance(entry, type_measurement))
|
def function[get_nearest_entry, parameter[self, entry, type_measurement]]:
constant[!
@brief Find nearest entry of node for the specified entry.
@param[in] entry (cfentry): Entry that is used for calculation distance.
@param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.
@return (cfentry) Nearest entry of node for the specified entry.
]
variable[min_key] assign[=] <ast.Lambda object at 0x7da1b01b13f0>
return[call[name[min], parameter[name[self].__entries]]]
|
keyword[def] identifier[get_nearest_entry] ( identifier[self] , identifier[entry] , identifier[type_measurement] ):
literal[string]
identifier[min_key] = keyword[lambda] identifier[cur_entity] : identifier[cur_entity] . identifier[get_distance] ( identifier[entry] , identifier[type_measurement] );
keyword[return] identifier[min] ( identifier[self] . identifier[__entries] , identifier[key] = identifier[min_key] );
|
def get_nearest_entry(self, entry, type_measurement):
"""!
@brief Find nearest entry of node for the specified entry.
@param[in] entry (cfentry): Entry that is used for calculation distance.
@param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.
@return (cfentry) Nearest entry of node for the specified entry.
"""
min_key = lambda cur_entity: cur_entity.get_distance(entry, type_measurement)
return min(self.__entries, key=min_key)
|
def checkFITSFormat(filelist, ivmlist=None):
    """
    This code will check whether or not files are GEIS or WAIVER FITS and
    convert them to MEF if found. It also keeps the IVMLIST consistent with
    the input filelist, in the case that some inputs get dropped during
    the check/conversion.
    :param filelist: list of input science file names
    :param ivmlist: optional list of IVM file names, parallel to *filelist*
    :return: (newfilelist, ivmlist) after conversion/removal
    """
    if ivmlist is None:
        # One None placeholder per science file keeps the lists parallel.
        ivmlist = [None] * len(filelist)
    sci_ivm = list(zip(filelist, ivmlist))
    removed_files, translated_names, newivmlist = convert2fits(sci_ivm)
    newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)
    # Nothing survived the conversion step: report empty results.
    if not newfilelist and not translated_names:
        return [], []
    if translated_names:
        # Converted (translated) files re-join the input lists.
        newfilelist.extend(translated_names)
        ivmlist.extend(newivmlist)
    return newfilelist, ivmlist
|
def function[checkFITSFormat, parameter[filelist, ivmlist]]:
constant[
This code will check whether or not files are GEIS or WAIVER FITS and
convert them to MEF if found. It also keeps the IVMLIST consistent with
the input filelist, in the case that some inputs get dropped during
the check/conversion.
]
if compare[name[ivmlist] is constant[None]] begin[:]
variable[ivmlist] assign[=] <ast.ListComp object at 0x7da1b0f055a0>
variable[sci_ivm] assign[=] call[name[list], parameter[call[name[zip], parameter[name[filelist], name[ivmlist]]]]]
<ast.Tuple object at 0x7da1b0f064d0> assign[=] call[name[convert2fits], parameter[name[sci_ivm]]]
<ast.Tuple object at 0x7da1b0ede050> assign[=] call[name[update_input], parameter[name[filelist], name[ivmlist], name[removed_files]]]
if <ast.BoolOp object at 0x7da1b0edf1c0> begin[:]
return[tuple[[<ast.List object at 0x7da1b0edc520>, <ast.List object at 0x7da1b0edc8b0>]]]
return[tuple[[<ast.Name object at 0x7da1b0ede620>, <ast.Name object at 0x7da1b0ede740>]]]
|
keyword[def] identifier[checkFITSFormat] ( identifier[filelist] , identifier[ivmlist] = keyword[None] ):
literal[string]
keyword[if] identifier[ivmlist] keyword[is] keyword[None] :
identifier[ivmlist] =[ keyword[None] keyword[for] identifier[l] keyword[in] identifier[filelist] ]
identifier[sci_ivm] = identifier[list] ( identifier[zip] ( identifier[filelist] , identifier[ivmlist] ))
identifier[removed_files] , identifier[translated_names] , identifier[newivmlist] = identifier[convert2fits] ( identifier[sci_ivm] )
identifier[newfilelist] , identifier[ivmlist] = identifier[update_input] ( identifier[filelist] , identifier[ivmlist] , identifier[removed_files] )
keyword[if] identifier[newfilelist] ==[] keyword[and] identifier[translated_names] ==[]:
keyword[return] [],[]
keyword[elif] identifier[translated_names] !=[]:
identifier[newfilelist] . identifier[extend] ( identifier[translated_names] )
identifier[ivmlist] . identifier[extend] ( identifier[newivmlist] )
keyword[return] identifier[newfilelist] , identifier[ivmlist]
|
def checkFITSFormat(filelist, ivmlist=None):
"""
This code will check whether or not files are GEIS or WAIVER FITS and
convert them to MEF if found. It also keeps the IVMLIST consistent with
the input filelist, in the case that some inputs get dropped during
the check/conversion.
"""
if ivmlist is None:
ivmlist = [None for l in filelist] # depends on [control=['if'], data=['ivmlist']]
sci_ivm = list(zip(filelist, ivmlist))
(removed_files, translated_names, newivmlist) = convert2fits(sci_ivm)
(newfilelist, ivmlist) = update_input(filelist, ivmlist, removed_files)
if newfilelist == [] and translated_names == []:
return ([], []) # depends on [control=['if'], data=[]]
elif translated_names != []:
newfilelist.extend(translated_names)
ivmlist.extend(newivmlist) # depends on [control=['if'], data=['translated_names']]
return (newfilelist, ivmlist)
|
def _valid(m, comment=VALID_RESPONSE, out=None):
    '''
    Return valid status.
    Thin wrapper around ``_set_status`` that forces ``status=True``.
    '''
    return _set_status(m, status=True, out=out, comment=comment)
|
def function[_valid, parameter[m, comment, out]]:
constant[
Return valid status.
]
return[call[name[_set_status], parameter[name[m]]]]
|
keyword[def] identifier[_valid] ( identifier[m] , identifier[comment] = identifier[VALID_RESPONSE] , identifier[out] = keyword[None] ):
literal[string]
keyword[return] identifier[_set_status] ( identifier[m] , identifier[status] = keyword[True] , identifier[comment] = identifier[comment] , identifier[out] = identifier[out] )
|
def _valid(m, comment=VALID_RESPONSE, out=None):
"""
Return valid status.
"""
return _set_status(m, status=True, comment=comment, out=out)
|
def get_connection_string(connection=None):
    """return SQLAlchemy connection string if it is set
    If no string is supplied, it is read from the configuration file;
    on first use the file is created with the default connection string.
    :param connection: optional explicit SQLAlchemy connection string
    :rtype: str
    """
    if connection:
        return connection
    config = configparser.ConfigParser()
    cfp = defaults.config_file_path
    if os.path.exists(cfp):
        # Existing config file: read the stored connection string.
        log.info('fetch database configuration from %s', cfp)
        config.read(cfp)
        connection = config['database']['sqlalchemy_connection_string']
        log.info('load connection string from %s: %s', cfp, connection)
        return connection
    # First run: persist the default connection string for next time.
    connection = defaults.sqlalchemy_connection_string_default
    config['database'] = {'sqlalchemy_connection_string': connection}
    with open(cfp, 'w') as config_file:
        config.write(config_file)
    log.info('create configuration file %s', cfp)
    return connection
|
def function[get_connection_string, parameter[connection]]:
constant[return SQLAlchemy connection string if it is set
:param connection: get the SQLAlchemy connection string #TODO
:rtype: str
]
if <ast.UnaryOp object at 0x7da18f7221a0> begin[:]
variable[config] assign[=] call[name[configparser].ConfigParser, parameter[]]
variable[cfp] assign[=] name[defaults].config_file_path
if call[name[os].path.exists, parameter[name[cfp]]] begin[:]
call[name[log].info, parameter[constant[fetch database configuration from %s], name[cfp]]]
call[name[config].read, parameter[name[cfp]]]
variable[connection] assign[=] call[call[name[config]][constant[database]]][constant[sqlalchemy_connection_string]]
call[name[log].info, parameter[constant[load connection string from %s: %s], name[cfp], name[connection]]]
return[name[connection]]
|
keyword[def] identifier[get_connection_string] ( identifier[connection] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[connection] :
identifier[config] = identifier[configparser] . identifier[ConfigParser] ()
identifier[cfp] = identifier[defaults] . identifier[config_file_path]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[cfp] ):
identifier[log] . identifier[info] ( literal[string] , identifier[cfp] )
identifier[config] . identifier[read] ( identifier[cfp] )
identifier[connection] = identifier[config] [ literal[string] ][ literal[string] ]
identifier[log] . identifier[info] ( literal[string] , identifier[cfp] , identifier[connection] )
keyword[else] :
keyword[with] identifier[open] ( identifier[cfp] , literal[string] ) keyword[as] identifier[config_file] :
identifier[connection] = identifier[defaults] . identifier[sqlalchemy_connection_string_default]
identifier[config] [ literal[string] ]={ literal[string] : identifier[connection] }
identifier[config] . identifier[write] ( identifier[config_file] )
identifier[log] . identifier[info] ( literal[string] , identifier[cfp] )
keyword[return] identifier[connection]
|
def get_connection_string(connection=None):
"""return SQLAlchemy connection string if it is set
:param connection: get the SQLAlchemy connection string #TODO
:rtype: str
"""
if not connection:
config = configparser.ConfigParser()
cfp = defaults.config_file_path
if os.path.exists(cfp):
log.info('fetch database configuration from %s', cfp)
config.read(cfp)
connection = config['database']['sqlalchemy_connection_string']
log.info('load connection string from %s: %s', cfp, connection) # depends on [control=['if'], data=[]]
else:
with open(cfp, 'w') as config_file:
connection = defaults.sqlalchemy_connection_string_default
config['database'] = {'sqlalchemy_connection_string': connection}
config.write(config_file)
log.info('create configuration file %s', cfp) # depends on [control=['with'], data=['config_file']] # depends on [control=['if'], data=[]]
return connection
|
def _markup(p_todo, p_focus):
"""
Returns an attribute spec for the colors that correspond to the given todo
item.
"""
pri = p_todo.priority()
pri = 'pri_' + pri if pri else PaletteItem.DEFAULT
if not p_focus:
attr_dict = {None: pri}
else:
# use '_focus' palette entries instead of standard ones
attr_dict = {None: pri + '_focus'}
attr_dict[PaletteItem.PROJECT] = PaletteItem.PROJECT_FOCUS
attr_dict[PaletteItem.CONTEXT] = PaletteItem.CONTEXT_FOCUS
attr_dict[PaletteItem.METADATA] = PaletteItem.METADATA_FOCUS
attr_dict[PaletteItem.LINK] = PaletteItem.LINK_FOCUS
return attr_dict
|
def function[_markup, parameter[p_todo, p_focus]]:
constant[
Returns an attribute spec for the colors that correspond to the given todo
item.
]
variable[pri] assign[=] call[name[p_todo].priority, parameter[]]
variable[pri] assign[=] <ast.IfExp object at 0x7da207f03820>
if <ast.UnaryOp object at 0x7da207f03df0> begin[:]
variable[attr_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f03ca0>], [<ast.Name object at 0x7da207f01390>]]
return[name[attr_dict]]
|
keyword[def] identifier[_markup] ( identifier[p_todo] , identifier[p_focus] ):
literal[string]
identifier[pri] = identifier[p_todo] . identifier[priority] ()
identifier[pri] = literal[string] + identifier[pri] keyword[if] identifier[pri] keyword[else] identifier[PaletteItem] . identifier[DEFAULT]
keyword[if] keyword[not] identifier[p_focus] :
identifier[attr_dict] ={ keyword[None] : identifier[pri] }
keyword[else] :
identifier[attr_dict] ={ keyword[None] : identifier[pri] + literal[string] }
identifier[attr_dict] [ identifier[PaletteItem] . identifier[PROJECT] ]= identifier[PaletteItem] . identifier[PROJECT_FOCUS]
identifier[attr_dict] [ identifier[PaletteItem] . identifier[CONTEXT] ]= identifier[PaletteItem] . identifier[CONTEXT_FOCUS]
identifier[attr_dict] [ identifier[PaletteItem] . identifier[METADATA] ]= identifier[PaletteItem] . identifier[METADATA_FOCUS]
identifier[attr_dict] [ identifier[PaletteItem] . identifier[LINK] ]= identifier[PaletteItem] . identifier[LINK_FOCUS]
keyword[return] identifier[attr_dict]
|
def _markup(p_todo, p_focus):
"""
Returns an attribute spec for the colors that correspond to the given todo
item.
"""
pri = p_todo.priority()
pri = 'pri_' + pri if pri else PaletteItem.DEFAULT
if not p_focus:
attr_dict = {None: pri} # depends on [control=['if'], data=[]]
else:
# use '_focus' palette entries instead of standard ones
attr_dict = {None: pri + '_focus'}
attr_dict[PaletteItem.PROJECT] = PaletteItem.PROJECT_FOCUS
attr_dict[PaletteItem.CONTEXT] = PaletteItem.CONTEXT_FOCUS
attr_dict[PaletteItem.METADATA] = PaletteItem.METADATA_FOCUS
attr_dict[PaletteItem.LINK] = PaletteItem.LINK_FOCUS
return attr_dict
|
def to_csv(self, *args, **kwargs):
    """overload of pandas.DataFrame.to_csv() to account
    for parameter transformation so that the saved
    ParameterEnsemble csv is not in Log10 space
    Parameters
    ----------
    *args : list
        positional arguments to pass to pandas.DataFrame.to_csv()
    **kwargs : dict
        keyword arguments to pass to pandas.DataFrame.to_csv()
    Note
    ----
    this function back-transforms inplace with respect to
    log10 before writing
    """
    # Remember the transform state so it can be restored after writing.
    was_transformed = self.istransformed
    if was_transformed:
        self._back_transform(inplace=True)
    if self.isnull().values.any():
        warnings.warn("NaN in par ensemble", PyemuWarning)
    super(ParameterEnsemble, self).to_csv(*args, **kwargs)
    if was_transformed:
        self._transform(inplace=True)
|
def function[to_csv, parameter[self]]:
constant[overload of pandas.DataFrame.to_csv() to account
for parameter transformation so that the saved
ParameterEnsemble csv is not in Log10 space
Parameters
----------
*args : list
positional arguments to pass to pandas.DataFrame.to_csv()
**kwrags : dict
keyword arguments to pass to pandas.DataFrame.to_csv()
Note
----
this function back-transforms inplace with respect to
log10 before writing
]
variable[retrans] assign[=] constant[False]
if name[self].istransformed begin[:]
call[name[self]._back_transform, parameter[]]
variable[retrans] assign[=] constant[True]
if call[call[name[self].isnull, parameter[]].values.any, parameter[]] begin[:]
call[name[warnings].warn, parameter[constant[NaN in par ensemble], name[PyemuWarning]]]
call[call[name[super], parameter[name[ParameterEnsemble], name[self]]].to_csv, parameter[<ast.Starred object at 0x7da1b2390a30>]]
if name[retrans] begin[:]
call[name[self]._transform, parameter[]]
|
keyword[def] identifier[to_csv] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[retrans] = keyword[False]
keyword[if] identifier[self] . identifier[istransformed] :
identifier[self] . identifier[_back_transform] ( identifier[inplace] = keyword[True] )
identifier[retrans] = keyword[True]
keyword[if] identifier[self] . identifier[isnull] (). identifier[values] . identifier[any] ():
identifier[warnings] . identifier[warn] ( literal[string] , identifier[PyemuWarning] )
identifier[super] ( identifier[ParameterEnsemble] , identifier[self] ). identifier[to_csv] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[retrans] :
identifier[self] . identifier[_transform] ( identifier[inplace] = keyword[True] )
|
def to_csv(self, *args, **kwargs):
"""overload of pandas.DataFrame.to_csv() to account
for parameter transformation so that the saved
ParameterEnsemble csv is not in Log10 space
Parameters
----------
*args : list
positional arguments to pass to pandas.DataFrame.to_csv()
**kwrags : dict
keyword arguments to pass to pandas.DataFrame.to_csv()
Note
----
this function back-transforms inplace with respect to
log10 before writing
"""
retrans = False
if self.istransformed:
self._back_transform(inplace=True)
retrans = True # depends on [control=['if'], data=[]]
if self.isnull().values.any():
warnings.warn('NaN in par ensemble', PyemuWarning) # depends on [control=['if'], data=[]]
super(ParameterEnsemble, self).to_csv(*args, **kwargs)
if retrans:
self._transform(inplace=True) # depends on [control=['if'], data=[]]
|
def _prepare_discharge_hook(req, client):
    ''' Return the hook function (called when the response is received.)
    This allows us to intercept the response and do any necessary
    macaroon discharge before returning.
    :param req: the prepared request; it is re-sent (with refreshed
        cookies) after a successful discharge.
    :param client: the bakery client used to resolve discharge errors.
    '''
    class Retry:
        # Define a local class so that we can use its class variable as
        # mutable state accessed by the closures below.
        count = 0
    def hook(response, *args, **kwargs):
        ''' Requests hooks system, this is the hook for the response.
        '''
        status_code = response.status_code
        # Only 401/407 can signal a discharge-required error; everything
        # else passes through untouched.
        # NOTE(review): 407 is normally proxy-auth; presumably older bakery
        # protocol versions used it for discharge -- confirm.
        if status_code != 407 and status_code != 401:
            return response
        # A plain 401 that is not macaroon-based auth is not ours to handle.
        if (status_code == 401 and response.headers.get('WWW-Authenticate') !=
                'Macaroon'):
            return response
        # Discharge-required errors carry a JSON body.
        if response.headers.get('Content-Type') != 'application/json':
            return response
        errorJSON = response.json()
        if errorJSON.get('Code') != ERR_DISCHARGE_REQUIRED:
            return response
        error = Error.from_dict(errorJSON)
        # Bound the number of discharge round-trips so a misbehaving server
        # cannot trap us in an infinite retry loop.
        Retry.count += 1
        if Retry.count >= MAX_DISCHARGE_RETRIES:
            raise BakeryException('too many ({}) discharge requests'.format(
                Retry.count)
            )
        # Let the client resolve the discharge; presumably the resulting
        # macaroons end up in client.cookies (re-applied below).
        client.handle_error(error, req.url)
        # Drop any stale Cookie header and re-send the original request
        # with the refreshed cookie jar.
        req.headers.pop('Cookie', None)
        req.prepare_cookies(client.cookies)
        req.headers[BAKERY_PROTOCOL_HEADER] = \
            str(bakery.LATEST_VERSION)
        with requests.Session() as s:
            return s.send(req)
    return hook
|
def function[_prepare_discharge_hook, parameter[req, client]]:
constant[ Return the hook function (called when the response is received.)
This allows us to intercept the response and do any necessary
macaroon discharge before returning.
]
class class[Retry, parameter[]] begin[:]
variable[count] assign[=] constant[0]
def function[hook, parameter[response]]:
constant[ Requests hooks system, this is the hook for the response.
]
variable[status_code] assign[=] name[response].status_code
if <ast.BoolOp object at 0x7da1b247d090> begin[:]
return[name[response]]
if <ast.BoolOp object at 0x7da1b247c2e0> begin[:]
return[name[response]]
if compare[call[name[response].headers.get, parameter[constant[Content-Type]]] not_equal[!=] constant[application/json]] begin[:]
return[name[response]]
variable[errorJSON] assign[=] call[name[response].json, parameter[]]
if compare[call[name[errorJSON].get, parameter[constant[Code]]] not_equal[!=] name[ERR_DISCHARGE_REQUIRED]] begin[:]
return[name[response]]
variable[error] assign[=] call[name[Error].from_dict, parameter[name[errorJSON]]]
<ast.AugAssign object at 0x7da1b257f580>
if compare[name[Retry].count greater_or_equal[>=] name[MAX_DISCHARGE_RETRIES]] begin[:]
<ast.Raise object at 0x7da1b257e4a0>
call[name[client].handle_error, parameter[name[error], name[req].url]]
call[name[req].headers.pop, parameter[constant[Cookie], constant[None]]]
call[name[req].prepare_cookies, parameter[name[client].cookies]]
call[name[req].headers][name[BAKERY_PROTOCOL_HEADER]] assign[=] call[name[str], parameter[name[bakery].LATEST_VERSION]]
with call[name[requests].Session, parameter[]] begin[:]
return[call[name[s].send, parameter[name[req]]]]
return[name[hook]]
|
keyword[def] identifier[_prepare_discharge_hook] ( identifier[req] , identifier[client] ):
literal[string]
keyword[class] identifier[Retry] :
identifier[count] = literal[int]
keyword[def] identifier[hook] ( identifier[response] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[status_code] = identifier[response] . identifier[status_code]
keyword[if] identifier[status_code] != literal[int] keyword[and] identifier[status_code] != literal[int] :
keyword[return] identifier[response]
keyword[if] ( identifier[status_code] == literal[int] keyword[and] identifier[response] . identifier[headers] . identifier[get] ( literal[string] )!=
literal[string] ):
keyword[return] identifier[response]
keyword[if] identifier[response] . identifier[headers] . identifier[get] ( literal[string] )!= literal[string] :
keyword[return] identifier[response]
identifier[errorJSON] = identifier[response] . identifier[json] ()
keyword[if] identifier[errorJSON] . identifier[get] ( literal[string] )!= identifier[ERR_DISCHARGE_REQUIRED] :
keyword[return] identifier[response]
identifier[error] = identifier[Error] . identifier[from_dict] ( identifier[errorJSON] )
identifier[Retry] . identifier[count] += literal[int]
keyword[if] identifier[Retry] . identifier[count] >= identifier[MAX_DISCHARGE_RETRIES] :
keyword[raise] identifier[BakeryException] ( literal[string] . identifier[format] (
identifier[Retry] . identifier[count] )
)
identifier[client] . identifier[handle_error] ( identifier[error] , identifier[req] . identifier[url] )
identifier[req] . identifier[headers] . identifier[pop] ( literal[string] , keyword[None] )
identifier[req] . identifier[prepare_cookies] ( identifier[client] . identifier[cookies] )
identifier[req] . identifier[headers] [ identifier[BAKERY_PROTOCOL_HEADER] ]= identifier[str] ( identifier[bakery] . identifier[LATEST_VERSION] )
keyword[with] identifier[requests] . identifier[Session] () keyword[as] identifier[s] :
keyword[return] identifier[s] . identifier[send] ( identifier[req] )
keyword[return] identifier[hook]
|
def _prepare_discharge_hook(req, client):
""" Return the hook function (called when the response is received.)
This allows us to intercept the response and do any necessary
macaroon discharge before returning.
"""
class Retry:
# Define a local class so that we can use its class variable as
# mutable state accessed by the closures below.
count = 0
def hook(response, *args, **kwargs):
""" Requests hooks system, this is the hook for the response.
"""
status_code = response.status_code
if status_code != 407 and status_code != 401:
return response # depends on [control=['if'], data=[]]
if status_code == 401 and response.headers.get('WWW-Authenticate') != 'Macaroon':
return response # depends on [control=['if'], data=[]]
if response.headers.get('Content-Type') != 'application/json':
return response # depends on [control=['if'], data=[]]
errorJSON = response.json()
if errorJSON.get('Code') != ERR_DISCHARGE_REQUIRED:
return response # depends on [control=['if'], data=[]]
error = Error.from_dict(errorJSON)
Retry.count += 1
if Retry.count >= MAX_DISCHARGE_RETRIES:
raise BakeryException('too many ({}) discharge requests'.format(Retry.count)) # depends on [control=['if'], data=[]]
client.handle_error(error, req.url)
req.headers.pop('Cookie', None)
req.prepare_cookies(client.cookies)
req.headers[BAKERY_PROTOCOL_HEADER] = str(bakery.LATEST_VERSION)
with requests.Session() as s:
return s.send(req) # depends on [control=['with'], data=['s']]
return hook
|
def is_valid(self):
    """ True when the record has a single-base ref and exactly one
    single-base alt allele (i.e. bi-allelic single-nucleotide variant).
    """
    if len(self.ref) != 1:
        return False
    if len(self.alt) != 1:
        return False
    return len(self.alt[0]) == 1
|
def function[is_valid, parameter[self]]:
constant[ Only retain SNPs or single indels, and are bi-allelic
]
return[<ast.BoolOp object at 0x7da2044c1b40>]
|
keyword[def] identifier[is_valid] ( identifier[self] ):
literal[string]
keyword[return] identifier[len] ( identifier[self] . identifier[ref] )== literal[int] keyword[and] identifier[len] ( identifier[self] . identifier[alt] )== literal[int] keyword[and] identifier[len] ( identifier[self] . identifier[alt] [ literal[int] ])== literal[int]
|
def is_valid(self):
""" Only retain SNPs or single indels, and are bi-allelic
"""
return len(self.ref) == 1 and len(self.alt) == 1 and (len(self.alt[0]) == 1)
|
def fill_dataset_tree(self, tree, data_sets):
    """
    Fill *tree* with one row (time, name, type) per data set.
    Args:
        tree: Qt tree view whose model is cleared and repopulated
        data_sets: mapping of time -> script object; the row name comes
            from ``script.settings['tag']`` and the type from ``script.name``
    """
    # tree.model() is assumed to return the same model object on every
    # call (standard Qt behavior) -- cache it once.
    model = tree.model()
    model.removeRows(0, model.rowCount())
    # Original used enumerate() but never used the index; iterate items
    # directly. 'type' renamed to avoid shadowing the builtin.
    for time, script in data_sets.items():
        name = script.settings['tag']
        script_type = script.name
        item_time = QtGui.QStandardItem(str(time))
        item_name = QtGui.QStandardItem(str(name))
        item_type = QtGui.QStandardItem(str(script_type))
        # Time and type columns are read-only; the name column stays
        # selectable/editable (matches original behavior).
        for item in (item_time, item_type):
            item.setSelectable(False)
            item.setEditable(False)
        model.appendRow([item_time, item_name, item_type])
|
def function[fill_dataset_tree, parameter[self, tree, data_sets]]:
constant[
fills the tree with data sets where datasets is a dictionary of the form
Args:
tree:
data_sets: a dataset
Returns:
]
call[call[name[tree].model, parameter[]].removeRows, parameter[constant[0], call[call[name[tree].model, parameter[]].rowCount, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b236cc10>, <ast.Tuple object at 0x7da1b236cdf0>]]] in starred[call[name[enumerate], parameter[call[name[data_sets].items, parameter[]]]]] begin[:]
variable[name] assign[=] call[name[script].settings][constant[tag]]
variable[type] assign[=] name[script].name
variable[item_time] assign[=] call[name[QtGui].QStandardItem, parameter[call[name[str], parameter[name[time]]]]]
variable[item_name] assign[=] call[name[QtGui].QStandardItem, parameter[call[name[str], parameter[name[name]]]]]
variable[item_type] assign[=] call[name[QtGui].QStandardItem, parameter[call[name[str], parameter[name[type]]]]]
call[name[item_time].setSelectable, parameter[constant[False]]]
call[name[item_time].setEditable, parameter[constant[False]]]
call[name[item_type].setSelectable, parameter[constant[False]]]
call[name[item_type].setEditable, parameter[constant[False]]]
call[call[name[tree].model, parameter[]].appendRow, parameter[list[[<ast.Name object at 0x7da1b236c8b0>, <ast.Name object at 0x7da1b236c7f0>, <ast.Name object at 0x7da1b236c7c0>]]]]
|
keyword[def] identifier[fill_dataset_tree] ( identifier[self] , identifier[tree] , identifier[data_sets] ):
literal[string]
identifier[tree] . identifier[model] (). identifier[removeRows] ( literal[int] , identifier[tree] . identifier[model] (). identifier[rowCount] ())
keyword[for] identifier[index] ,( identifier[time] , identifier[script] ) keyword[in] identifier[enumerate] ( identifier[data_sets] . identifier[items] ()):
identifier[name] = identifier[script] . identifier[settings] [ literal[string] ]
identifier[type] = identifier[script] . identifier[name]
identifier[item_time] = identifier[QtGui] . identifier[QStandardItem] ( identifier[str] ( identifier[time] ))
identifier[item_name] = identifier[QtGui] . identifier[QStandardItem] ( identifier[str] ( identifier[name] ))
identifier[item_type] = identifier[QtGui] . identifier[QStandardItem] ( identifier[str] ( identifier[type] ))
identifier[item_time] . identifier[setSelectable] ( keyword[False] )
identifier[item_time] . identifier[setEditable] ( keyword[False] )
identifier[item_type] . identifier[setSelectable] ( keyword[False] )
identifier[item_type] . identifier[setEditable] ( keyword[False] )
identifier[tree] . identifier[model] (). identifier[appendRow] ([ identifier[item_time] , identifier[item_name] , identifier[item_type] ])
|
def fill_dataset_tree(self, tree, data_sets):
"""
fills the tree with data sets where datasets is a dictionary of the form
Args:
tree:
data_sets: a dataset
Returns:
"""
tree.model().removeRows(0, tree.model().rowCount())
for (index, (time, script)) in enumerate(data_sets.items()):
name = script.settings['tag']
type = script.name
item_time = QtGui.QStandardItem(str(time))
item_name = QtGui.QStandardItem(str(name))
item_type = QtGui.QStandardItem(str(type))
item_time.setSelectable(False)
item_time.setEditable(False)
item_type.setSelectable(False)
item_type.setEditable(False)
tree.model().appendRow([item_time, item_name, item_type]) # depends on [control=['for'], data=[]]
|
def complete_offer(self, offer_id, complete_dict):
    """
    Completes an offer
    :param complete_dict: the complete dict with the template id
    :param offer_id: the offer id
    :return: Response
    """
    put_kwargs = {
        'resource': OFFERS,
        'billomat_id': offer_id,
        'command': COMPLETE,
        'send_data': complete_dict,
    }
    return self._create_put_request(**put_kwargs)
|
def function[complete_offer, parameter[self, offer_id, complete_dict]]:
constant[
Completes an offer
:param complete_dict: the complete dict with the template id
:param offer_id: the offer id
:return: Response
]
return[call[name[self]._create_put_request, parameter[]]]
|
keyword[def] identifier[complete_offer] ( identifier[self] , identifier[offer_id] , identifier[complete_dict] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_put_request] (
identifier[resource] = identifier[OFFERS] ,
identifier[billomat_id] = identifier[offer_id] ,
identifier[command] = identifier[COMPLETE] ,
identifier[send_data] = identifier[complete_dict]
)
|
def complete_offer(self, offer_id, complete_dict):
"""
Completes an offer
:param complete_dict: the complete dict with the template id
:param offer_id: the offer id
:return: Response
"""
return self._create_put_request(resource=OFFERS, billomat_id=offer_id, command=COMPLETE, send_data=complete_dict)
|
def v1_stream_id_associations(tags, stream_id):
    '''Retrieve associations for a given stream_id.
    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    # stream_id arrives as raw bytes; normalize to a trimmed string.
    sid = stream_id.decode('utf-8').strip()
    assocs = tags.assocs_by_stream_id(sid)
    return {'associations': assocs}
|
def function[v1_stream_id_associations, parameter[tags, stream_id]]:
constant[Retrieve associations for a given stream_id.
The associations returned have the exact same structure as defined
in the ``v1_tag_associate`` route with one addition: a ``tag``
field contains the full tag name for the association.
]
variable[stream_id] assign[=] call[call[name[stream_id].decode, parameter[constant[utf-8]]].strip, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da1b15c6b90>], [<ast.Call object at 0x7da1b15c76d0>]]]
|
keyword[def] identifier[v1_stream_id_associations] ( identifier[tags] , identifier[stream_id] ):
literal[string]
identifier[stream_id] = identifier[stream_id] . identifier[decode] ( literal[string] ). identifier[strip] ()
keyword[return] { literal[string] : identifier[tags] . identifier[assocs_by_stream_id] ( identifier[stream_id] )}
|
def v1_stream_id_associations(tags, stream_id):
"""Retrieve associations for a given stream_id.
The associations returned have the exact same structure as defined
in the ``v1_tag_associate`` route with one addition: a ``tag``
field contains the full tag name for the association.
"""
stream_id = stream_id.decode('utf-8').strip()
return {'associations': tags.assocs_by_stream_id(stream_id)}
|
def LoadPlugins(cls):
    """Load all registered iotile.update_record plugins (idempotent)."""
    if not cls.PLUGINS_LOADED:
        registry = ComponentRegistry()
        for _name, record_class in registry.load_extensions('iotile.update_record'):
            cls.RegisterRecordType(record_class)
        cls.PLUGINS_LOADED = True
|
def function[LoadPlugins, parameter[cls]]:
constant[Load all registered iotile.update_record plugins.]
if name[cls].PLUGINS_LOADED begin[:]
return[None]
variable[reg] assign[=] call[name[ComponentRegistry], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c7a30>, <ast.Name object at 0x7da20c6c6530>]]] in starred[call[name[reg].load_extensions, parameter[constant[iotile.update_record]]]] begin[:]
call[name[cls].RegisterRecordType, parameter[name[record]]]
name[cls].PLUGINS_LOADED assign[=] constant[True]
|
keyword[def] identifier[LoadPlugins] ( identifier[cls] ):
literal[string]
keyword[if] identifier[cls] . identifier[PLUGINS_LOADED] :
keyword[return]
identifier[reg] = identifier[ComponentRegistry] ()
keyword[for] identifier[_] , identifier[record] keyword[in] identifier[reg] . identifier[load_extensions] ( literal[string] ):
identifier[cls] . identifier[RegisterRecordType] ( identifier[record] )
identifier[cls] . identifier[PLUGINS_LOADED] = keyword[True]
|
def LoadPlugins(cls):
"""Load all registered iotile.update_record plugins."""
if cls.PLUGINS_LOADED:
return # depends on [control=['if'], data=[]]
reg = ComponentRegistry()
for (_, record) in reg.load_extensions('iotile.update_record'):
cls.RegisterRecordType(record) # depends on [control=['for'], data=[]]
cls.PLUGINS_LOADED = True
|
def hexdump(src, length=16, sep='.', start=0):
    '''
    @brief Return {src} in hex dump.
    @param[in] src   {Bytes|Str} Data to dump (str items are converted via ord()).
    @param[in] length {Int} Nb Bytes by row.
    @param[in] sep {Char} For the text part, {sep} will be used for non ASCII char.
    @param[in] start {Int} Base offset; when non-zero each row is rendered as
                     ``absolute(+relative)`` instead of a plain offset.
    @return {Str} The hexdump
    @note Full support for python2 and python3 !
    '''
    result = []
    # Hex column width: 3 chars per byte ("xx ") plus 1 for the mid-row gap.
    hex_width = length * 3 + 1
    for i in range(0, len(src), length):
        row = src[i:i + length]
        hex_parts = []
        for pos, byte in enumerate(row):
            if pos == length / 2:
                # Empty part doubles the separator at the middle of the row.
                # Float compare kept on purpose: odd lengths get no gap,
                # matching the historical output.
                hex_parts.append('')
            if not isinstance(byte, int):
                # Iterating a str (or py2 bytes) yields 1-char strings.
                byte = ord(byte)
            hex_parts.append('%02x' % byte)
        hexa = ' '.join(hex_parts)
        text = ''
        for char in row:
            if not isinstance(char, int):
                char = ord(char)
            # Printable ASCII passes through; everything else becomes `sep`.
            text += chr(char) if 0x20 <= char < 0x7F else sep
        if start == 0:
            result.append('%08x: %-*s |%s|' % (i, hex_width, hexa, text))
        else:
            result.append('%08x(+%04x): %-*s |%s|'
                          % (start + i, i, hex_width, hexa, text))
    return '\n'.join(result)
|
def function[hexdump, parameter[src, length, sep, start]]:
constant[
@brief Return {src} in hex dump.
@param[in] length {Int} Nb Bytes by row.
@param[in] sep {Char} For the text part, {sep} will be used for non ASCII char.
@return {Str} The hexdump
@note Full support for python2 and python3 !
]
variable[result] assign[=] list[[]]
<ast.Try object at 0x7da1b100c070>
for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], call[name[len], parameter[name[src]]], name[length]]]] begin[:]
variable[subSrc] assign[=] call[name[src]][<ast.Slice object at 0x7da1b100f4f0>]
variable[hexa] assign[=] constant[]
variable[isMiddle] assign[=] constant[False]
for taget[name[h]] in starred[call[name[xrange], parameter[constant[0], call[name[len], parameter[name[subSrc]]]]]] begin[:]
if compare[name[h] equal[==] binary_operation[name[length] / constant[2]]] begin[:]
<ast.AugAssign object at 0x7da1b100db40>
variable[h] assign[=] call[name[subSrc]][name[h]]
if <ast.UnaryOp object at 0x7da1b100ec50> begin[:]
variable[h] assign[=] call[name[ord], parameter[name[h]]]
variable[h] assign[=] call[call[name[hex], parameter[name[h]]].replace, parameter[constant[0x], constant[]]]
if compare[call[name[len], parameter[name[h]]] equal[==] constant[1]] begin[:]
variable[h] assign[=] binary_operation[constant[0] + name[h]]
<ast.AugAssign object at 0x7da1b100f9d0>
variable[hexa] assign[=] call[name[hexa].strip, parameter[constant[ ]]]
variable[text] assign[=] constant[]
for taget[name[c]] in starred[name[subSrc]] begin[:]
if <ast.UnaryOp object at 0x7da1b100e0e0> begin[:]
variable[c] assign[=] call[name[ord], parameter[name[c]]]
if compare[constant[32] less_or_equal[<=] name[c]] begin[:]
<ast.AugAssign object at 0x7da1b100cf70>
if compare[name[start] equal[==] constant[0]] begin[:]
call[name[result].append, parameter[binary_operation[binary_operation[binary_operation[constant[%08x: %-] + call[name[str], parameter[binary_operation[binary_operation[name[length] * binary_operation[constant[2] + constant[1]]] + constant[1]]]]] + constant[s |%s|]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b102be50>, <ast.Name object at 0x7da1b1029510>, <ast.Name object at 0x7da1b102ba60>]]]]]
return[call[constant[
].join, parameter[name[result]]]]
|
keyword[def] identifier[hexdump] ( identifier[src] , identifier[length] = literal[int] , identifier[sep] = literal[string] , identifier[start] = literal[int] ):
literal[string]
identifier[result] =[];
keyword[try] :
identifier[xrange] ( literal[int] , literal[int] );
keyword[except] identifier[NameError] :
identifier[xrange] = identifier[range] ;
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[src] ), identifier[length] ):
identifier[subSrc] = identifier[src] [ identifier[i] : identifier[i] + identifier[length] ];
identifier[hexa] = literal[string] ;
identifier[isMiddle] = keyword[False] ;
keyword[for] identifier[h] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[subSrc] )):
keyword[if] identifier[h] == identifier[length] / literal[int] :
identifier[hexa] += literal[string] ;
identifier[h] = identifier[subSrc] [ identifier[h] ];
keyword[if] keyword[not] identifier[isinstance] ( identifier[h] , identifier[int] ):
identifier[h] = identifier[ord] ( identifier[h] );
identifier[h] = identifier[hex] ( identifier[h] ). identifier[replace] ( literal[string] , literal[string] );
keyword[if] identifier[len] ( identifier[h] )== literal[int] :
identifier[h] = literal[string] + identifier[h] ;
identifier[hexa] += identifier[h] + literal[string] ;
identifier[hexa] = identifier[hexa] . identifier[strip] ( literal[string] );
identifier[text] = literal[string] ;
keyword[for] identifier[c] keyword[in] identifier[subSrc] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[c] , identifier[int] ):
identifier[c] = identifier[ord] ( identifier[c] );
keyword[if] literal[int] <= identifier[c] < literal[int] :
identifier[text] += identifier[chr] ( identifier[c] );
keyword[else] :
identifier[text] += identifier[sep] ;
keyword[if] identifier[start] == literal[int] :
identifier[result] . identifier[append] (( literal[string] + identifier[str] ( identifier[length] *( literal[int] + literal[int] )+ literal[int] )+ literal[string] )%( identifier[i] , identifier[hexa] , identifier[text] ));
keyword[else] :
identifier[result] . identifier[append] (( literal[string] + identifier[str] ( identifier[length] *( literal[int] + literal[int] )+ literal[int] )+ literal[string] )%( identifier[start] + identifier[i] , identifier[i] , identifier[hexa] , identifier[text] ));
keyword[return] literal[string] . identifier[join] ( identifier[result] );
|
def hexdump(src, length=16, sep='.', start=0):
"""
@brief Return {src} in hex dump.
@param[in] length {Int} Nb Bytes by row.
@param[in] sep {Char} For the text part, {sep} will be used for non ASCII char.
@return {Str} The hexdump
@note Full support for python2 and python3 !
"""
result = [] # Python3 support
try:
xrange(0, 1) # depends on [control=['try'], data=[]]
except NameError:
xrange = range # depends on [control=['except'], data=[]]
for i in xrange(0, len(src), length):
subSrc = src[i:i + length]
hexa = ''
isMiddle = False
for h in xrange(0, len(subSrc)):
if h == length / 2:
hexa += ' ' # depends on [control=['if'], data=[]]
h = subSrc[h]
if not isinstance(h, int):
h = ord(h) # depends on [control=['if'], data=[]]
h = hex(h).replace('0x', '')
if len(h) == 1:
h = '0' + h # depends on [control=['if'], data=[]]
hexa += h + ' ' # depends on [control=['for'], data=['h']]
hexa = hexa.strip(' ')
text = ''
for c in subSrc:
if not isinstance(c, int):
c = ord(c) # depends on [control=['if'], data=[]]
if 32 <= c < 127:
text += chr(c) # depends on [control=['if'], data=['c']]
else:
text += sep # depends on [control=['for'], data=['c']]
if start == 0:
result.append(('%08x: %-' + str(length * (2 + 1) + 1) + 's |%s|') % (i, hexa, text)) # depends on [control=['if'], data=[]]
else:
result.append(('%08x(+%04x): %-' + str(length * (2 + 1) + 1) + 's |%s|') % (start + i, i, hexa, text)) # depends on [control=['for'], data=['i']]
return '\n'.join(result)
|
def json_qs_parser(body):
    """
    Parses response body from JSON, XML or query string.
    :param body:
        string
    :returns:
        :class:`dict`, :class:`list` if input is JSON or query string,
        :class:`xml.etree.ElementTree.Element` if XML.
    """
    # Parser attempts in priority order: JSON, then XML; each with the
    # exception set that marks "not this format, try the next one".
    attempts = (
        (json.loads, (OverflowError, TypeError, ValueError)),
        (ElementTree.fromstring, (ElementTree.ParseError, TypeError, ValueError)),
    )
    for parse_fn, soft_errors in attempts:
        try:
            return parse_fn(body)
        except soft_errors:
            continue
    # Last resort: treat the body as a URL query string.
    return dict(parse.parse_qsl(body))
|
def function[json_qs_parser, parameter[body]]:
constant[
Parses response body from JSON, XML or query string.
:param body:
string
:returns:
:class:`dict`, :class:`list` if input is JSON or query string,
:class:`xml.etree.ElementTree.Element` if XML.
]
<ast.Try object at 0x7da1b26af400>
<ast.Try object at 0x7da1b26ad9c0>
return[call[name[dict], parameter[call[name[parse].parse_qsl, parameter[name[body]]]]]]
|
keyword[def] identifier[json_qs_parser] ( identifier[body] ):
literal[string]
keyword[try] :
keyword[return] identifier[json] . identifier[loads] ( identifier[body] )
keyword[except] ( identifier[OverflowError] , identifier[TypeError] , identifier[ValueError] ):
keyword[pass]
keyword[try] :
keyword[return] identifier[ElementTree] . identifier[fromstring] ( identifier[body] )
keyword[except] ( identifier[ElementTree] . identifier[ParseError] , identifier[TypeError] , identifier[ValueError] ):
keyword[pass]
keyword[return] identifier[dict] ( identifier[parse] . identifier[parse_qsl] ( identifier[body] ))
|
def json_qs_parser(body):
"""
Parses response body from JSON, XML or query string.
:param body:
string
:returns:
:class:`dict`, :class:`list` if input is JSON or query string,
:class:`xml.etree.ElementTree.Element` if XML.
"""
try:
# Try JSON first.
return json.loads(body) # depends on [control=['try'], data=[]]
except (OverflowError, TypeError, ValueError):
pass # depends on [control=['except'], data=[]]
try:
# Then XML.
return ElementTree.fromstring(body) # depends on [control=['try'], data=[]]
except (ElementTree.ParseError, TypeError, ValueError):
pass # depends on [control=['except'], data=[]]
# Finally query string.
return dict(parse.parse_qsl(body))
|
def available_dataset_ids(self, reader_name=None, composites=False):
    """Get names of available datasets, globally or just for *reader_name*
    if specified, that can be loaded.
    Available dataset names are determined by what each individual reader
    can load. This is normally determined by what files are needed to load
    a dataset and what files have been provided to the scene/reader.
    :return: list of available dataset names
    """
    try:
        # Either the single requested reader or all of them.
        readers = [self.readers[reader_name]] if reader_name else self.readers.values()
    except (AttributeError, KeyError):
        raise KeyError("No reader '%s' found in scene" % reader_name)
    dataset_ids = []
    for reader in readers:
        dataset_ids.extend(reader.available_dataset_ids)
    available_datasets = sorted(dataset_ids)
    if composites:
        available_datasets += sorted(
            self.available_composite_ids(available_datasets))
    return available_datasets
|
def function[available_dataset_ids, parameter[self, reader_name, composites]]:
constant[Get names of available datasets, globally or just for *reader_name*
if specified, that can be loaded.
Available dataset names are determined by what each individual reader
can load. This is normally determined by what files are needed to load
a dataset and what files have been provided to the scene/reader.
:return: list of available dataset names
]
<ast.Try object at 0x7da1b1d5d3c0>
variable[available_datasets] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1d6d7b0>]]
if name[composites] begin[:]
<ast.AugAssign object at 0x7da1b1d6ff10>
return[name[available_datasets]]
|
keyword[def] identifier[available_dataset_ids] ( identifier[self] , identifier[reader_name] = keyword[None] , identifier[composites] = keyword[False] ):
literal[string]
keyword[try] :
keyword[if] identifier[reader_name] :
identifier[readers] =[ identifier[self] . identifier[readers] [ identifier[reader_name] ]]
keyword[else] :
identifier[readers] = identifier[self] . identifier[readers] . identifier[values] ()
keyword[except] ( identifier[AttributeError] , identifier[KeyError] ):
keyword[raise] identifier[KeyError] ( literal[string] % identifier[reader_name] )
identifier[available_datasets] = identifier[sorted] ([ identifier[dataset_id]
keyword[for] identifier[reader] keyword[in] identifier[readers]
keyword[for] identifier[dataset_id] keyword[in] identifier[reader] . identifier[available_dataset_ids] ])
keyword[if] identifier[composites] :
identifier[available_datasets] += identifier[sorted] ( identifier[self] . identifier[available_composite_ids] (
identifier[available_datasets] ))
keyword[return] identifier[available_datasets]
|
def available_dataset_ids(self, reader_name=None, composites=False):
"""Get names of available datasets, globally or just for *reader_name*
if specified, that can be loaded.
Available dataset names are determined by what each individual reader
can load. This is normally determined by what files are needed to load
a dataset and what files have been provided to the scene/reader.
:return: list of available dataset names
"""
try:
if reader_name:
readers = [self.readers[reader_name]] # depends on [control=['if'], data=[]]
else:
readers = self.readers.values() # depends on [control=['try'], data=[]]
except (AttributeError, KeyError):
raise KeyError("No reader '%s' found in scene" % reader_name) # depends on [control=['except'], data=[]]
available_datasets = sorted([dataset_id for reader in readers for dataset_id in reader.available_dataset_ids])
if composites:
available_datasets += sorted(self.available_composite_ids(available_datasets)) # depends on [control=['if'], data=[]]
return available_datasets
|
def permissions_for(self, member):
    """Handles permission resolution for the current :class:`Member`.
    This function takes into consideration the following cases:
    - Guild owner
    - Guild roles
    - Channel overrides
    - Member overrides
    Parameters
    ----------
    member: :class:`Member`
        The member to resolve permissions for.
    Returns
    -------
    :class:`Permissions`
        The resolved permissions for the member.
    """
    # The current cases can be explained as:
    # Guild owner get all permissions -- no questions asked. Otherwise...
    # The @everyone role gets the first application.
    # After that, the applied roles that the user has in the channel
    # (or otherwise) are then OR'd together.
    # After the role permissions are resolved, the member permissions
    # have to take into effect.
    # After all that is done.. you have to do the following:
    # If manage permissions is True, then all permissions are set to True.
    # The operation first takes into consideration the denied
    # and then the allowed.
    # Owner short-circuit: the guild owner bypasses every override below.
    o = self.guild.owner
    if o is not None and member.id == o.id:
        return Permissions.all()
    # Start from the guild-wide @everyone role's raw permission bits.
    default = self.guild.default_role
    base = Permissions(default.permissions.value)
    roles = member.roles
    # Apply guild roles that the member has.
    for role in roles:
        base.value |= role.permissions.value
    # Guild-wide Administrator -> True for everything
    # Bypass all channel-specific overrides
    if base.administrator:
        return Permissions.all()
    # Apply @everyone allow/deny first since it's special
    # NOTE(review): assumes the @everyone overwrite, when present, is stored
    # at index 0 of self._overwrites -- confirm upstream ordering guarantee.
    try:
        maybe_everyone = self._overwrites[0]
        if maybe_everyone.id == self.guild.id:
            base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
            remaining_overwrites = self._overwrites[1:]
        else:
            remaining_overwrites = self._overwrites
    except IndexError:
        # No overwrites at all on this channel.
        remaining_overwrites = self._overwrites
    # not sure if doing member._roles.get(...) is better than the
    # set approach. While this is O(N) to re-create into a set for O(1)
    # the direct approach would just be O(log n) for searching with no
    # extra memory overhead. For now, I'll keep the set cast
    # Note that the member.roles accessor up top also creates a
    # temporary list
    member_role_ids = {r.id for r in roles}
    denies = 0
    allows = 0
    # Apply channel specific role permission overwrites
    # All matching role overwrites are OR'd together BEFORE being applied,
    # so order among role overwrites does not matter; deny is applied first
    # inside handle_overwrite.
    for overwrite in remaining_overwrites:
        if overwrite.type == 'role' and overwrite.id in member_role_ids:
            denies |= overwrite.deny
            allows |= overwrite.allow
    base.handle_overwrite(allow=allows, deny=denies)
    # Apply member specific permission overwrites
    # Member overwrites take precedence over role overwrites; at most one
    # overwrite can match this member, hence the early break.
    for overwrite in remaining_overwrites:
        if overwrite.type == 'member' and overwrite.id == member.id:
            base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
            break
    # if you can't send a message in a channel then you can't have certain
    # permissions as well
    if not base.send_messages:
        base.send_tts_messages = False
        base.mention_everyone = False
        base.embed_links = False
        base.attach_files = False
    # if you can't read a channel then you have no permissions there
    if not base.read_messages:
        denied = Permissions.all_channel()
        base.value &= ~denied.value
    return base
|
def function[permissions_for, parameter[self, member]]:
constant[Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
]
variable[o] assign[=] name[self].guild.owner
if <ast.BoolOp object at 0x7da1b1f8a530> begin[:]
return[call[name[Permissions].all, parameter[]]]
variable[default] assign[=] name[self].guild.default_role
variable[base] assign[=] call[name[Permissions], parameter[name[default].permissions.value]]
variable[roles] assign[=] name[member].roles
for taget[name[role]] in starred[name[roles]] begin[:]
<ast.AugAssign object at 0x7da1b1f8b550>
if name[base].administrator begin[:]
return[call[name[Permissions].all, parameter[]]]
<ast.Try object at 0x7da1b1ea0b20>
variable[member_role_ids] assign[=] <ast.SetComp object at 0x7da1b1fe4f10>
variable[denies] assign[=] constant[0]
variable[allows] assign[=] constant[0]
for taget[name[overwrite]] in starred[name[remaining_overwrites]] begin[:]
if <ast.BoolOp object at 0x7da1b1fe5330> begin[:]
<ast.AugAssign object at 0x7da1b1fe55d0>
<ast.AugAssign object at 0x7da1b1fe6080>
call[name[base].handle_overwrite, parameter[]]
for taget[name[overwrite]] in starred[name[remaining_overwrites]] begin[:]
if <ast.BoolOp object at 0x7da1b2045a80> begin[:]
call[name[base].handle_overwrite, parameter[]]
break
if <ast.UnaryOp object at 0x7da1b1f8b6a0> begin[:]
name[base].send_tts_messages assign[=] constant[False]
name[base].mention_everyone assign[=] constant[False]
name[base].embed_links assign[=] constant[False]
name[base].attach_files assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b1f89ae0> begin[:]
variable[denied] assign[=] call[name[Permissions].all_channel, parameter[]]
<ast.AugAssign object at 0x7da1b1f887f0>
return[name[base]]
|
keyword[def] identifier[permissions_for] ( identifier[self] , identifier[member] ):
literal[string]
identifier[o] = identifier[self] . identifier[guild] . identifier[owner]
keyword[if] identifier[o] keyword[is] keyword[not] keyword[None] keyword[and] identifier[member] . identifier[id] == identifier[o] . identifier[id] :
keyword[return] identifier[Permissions] . identifier[all] ()
identifier[default] = identifier[self] . identifier[guild] . identifier[default_role]
identifier[base] = identifier[Permissions] ( identifier[default] . identifier[permissions] . identifier[value] )
identifier[roles] = identifier[member] . identifier[roles]
keyword[for] identifier[role] keyword[in] identifier[roles] :
identifier[base] . identifier[value] |= identifier[role] . identifier[permissions] . identifier[value]
keyword[if] identifier[base] . identifier[administrator] :
keyword[return] identifier[Permissions] . identifier[all] ()
keyword[try] :
identifier[maybe_everyone] = identifier[self] . identifier[_overwrites] [ literal[int] ]
keyword[if] identifier[maybe_everyone] . identifier[id] == identifier[self] . identifier[guild] . identifier[id] :
identifier[base] . identifier[handle_overwrite] ( identifier[allow] = identifier[maybe_everyone] . identifier[allow] , identifier[deny] = identifier[maybe_everyone] . identifier[deny] )
identifier[remaining_overwrites] = identifier[self] . identifier[_overwrites] [ literal[int] :]
keyword[else] :
identifier[remaining_overwrites] = identifier[self] . identifier[_overwrites]
keyword[except] identifier[IndexError] :
identifier[remaining_overwrites] = identifier[self] . identifier[_overwrites]
identifier[member_role_ids] ={ identifier[r] . identifier[id] keyword[for] identifier[r] keyword[in] identifier[roles] }
identifier[denies] = literal[int]
identifier[allows] = literal[int]
keyword[for] identifier[overwrite] keyword[in] identifier[remaining_overwrites] :
keyword[if] identifier[overwrite] . identifier[type] == literal[string] keyword[and] identifier[overwrite] . identifier[id] keyword[in] identifier[member_role_ids] :
identifier[denies] |= identifier[overwrite] . identifier[deny]
identifier[allows] |= identifier[overwrite] . identifier[allow]
identifier[base] . identifier[handle_overwrite] ( identifier[allow] = identifier[allows] , identifier[deny] = identifier[denies] )
keyword[for] identifier[overwrite] keyword[in] identifier[remaining_overwrites] :
keyword[if] identifier[overwrite] . identifier[type] == literal[string] keyword[and] identifier[overwrite] . identifier[id] == identifier[member] . identifier[id] :
identifier[base] . identifier[handle_overwrite] ( identifier[allow] = identifier[overwrite] . identifier[allow] , identifier[deny] = identifier[overwrite] . identifier[deny] )
keyword[break]
keyword[if] keyword[not] identifier[base] . identifier[send_messages] :
identifier[base] . identifier[send_tts_messages] = keyword[False]
identifier[base] . identifier[mention_everyone] = keyword[False]
identifier[base] . identifier[embed_links] = keyword[False]
identifier[base] . identifier[attach_files] = keyword[False]
keyword[if] keyword[not] identifier[base] . identifier[read_messages] :
identifier[denied] = identifier[Permissions] . identifier[all_channel] ()
identifier[base] . identifier[value] &=~ identifier[denied] . identifier[value]
keyword[return] identifier[base]
|
def permissions_for(self, member):
"""Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
"""
# The current cases can be explained as:
# Guild owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the user has in the channel
# (or otherwise) are then OR'd together.
# After the role permissions are resolved, the member permissions
# have to take into effect.
# After all that is done.. you have to do the following:
# If manage permissions is True, then all permissions are set to True.
# The operation first takes into consideration the denied
# and then the allowed.
o = self.guild.owner
if o is not None and member.id == o.id:
return Permissions.all() # depends on [control=['if'], data=[]]
default = self.guild.default_role
base = Permissions(default.permissions.value)
roles = member.roles
# Apply guild roles that the member has.
for role in roles:
base.value |= role.permissions.value # depends on [control=['for'], data=['role']]
# Guild-wide Administrator -> True for everything
# Bypass all channel-specific overrides
if base.administrator:
return Permissions.all() # depends on [control=['if'], data=[]]
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
remaining_overwrites = self._overwrites[1:] # depends on [control=['if'], data=[]]
else:
remaining_overwrites = self._overwrites # depends on [control=['try'], data=[]]
except IndexError:
remaining_overwrites = self._overwrites # depends on [control=['except'], data=[]]
# not sure if doing member._roles.get(...) is better than the
# set approach. While this is O(N) to re-create into a set for O(1)
# the direct approach would just be O(log n) for searching with no
# extra memory overhead. For now, I'll keep the set cast
# Note that the member.roles accessor up top also creates a
# temporary list
member_role_ids = {r.id for r in roles}
denies = 0
allows = 0
# Apply channel specific role permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'role' and overwrite.id in member_role_ids:
denies |= overwrite.deny
allows |= overwrite.allow # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['overwrite']]
base.handle_overwrite(allow=allows, deny=denies)
# Apply member specific permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'member' and overwrite.id == member.id:
base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['overwrite']]
# if you can't send a message in a channel then you can't have certain
# permissions as well
if not base.send_messages:
base.send_tts_messages = False
base.mention_everyone = False
base.embed_links = False
base.attach_files = False # depends on [control=['if'], data=[]]
# if you can't read a channel then you have no permissions there
if not base.read_messages:
denied = Permissions.all_channel()
base.value &= ~denied.value # depends on [control=['if'], data=[]]
return base
|
def predict(inputs_list, problem, request_fn):
    """Encodes inputs, makes request to deployed TF model, and decodes outputs."""
    assert isinstance(inputs_list, list)
    # Problems without an "inputs" feature encode into the targets feature.
    feature_name = "inputs" if problem.has_inputs else "targets"
    encoder = problem.feature_info[feature_name].encoder
    examples = []
    for inputs in inputs_list:
        ids = _encode(inputs, encoder, add_eos=problem.has_inputs)
        examples.append(_make_example(ids, problem, feature_name))
    predictions = request_fn(examples)
    # Outputs are always decoded with the targets vocabulary.
    decoder = problem.feature_info["targets"].encoder
    return [
        (_decode(prediction["outputs"], decoder), prediction["scores"])
        for prediction in predictions
    ]
|
def function[predict, parameter[inputs_list, problem, request_fn]]:
constant[Encodes inputs, makes request to deployed TF model, and decodes outputs.]
assert[call[name[isinstance], parameter[name[inputs_list], name[list]]]]
variable[fname] assign[=] <ast.IfExp object at 0x7da1b20e4fd0>
variable[input_encoder] assign[=] call[name[problem].feature_info][name[fname]].encoder
variable[input_ids_list] assign[=] <ast.ListComp object at 0x7da1b20e4d30>
variable[examples] assign[=] <ast.ListComp object at 0x7da1b20e5ed0>
variable[predictions] assign[=] call[name[request_fn], parameter[name[examples]]]
variable[output_decoder] assign[=] call[name[problem].feature_info][constant[targets]].encoder
variable[outputs] assign[=] <ast.ListComp object at 0x7da1b20e5ae0>
return[name[outputs]]
|
keyword[def] identifier[predict] ( identifier[inputs_list] , identifier[problem] , identifier[request_fn] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[inputs_list] , identifier[list] )
identifier[fname] = literal[string] keyword[if] identifier[problem] . identifier[has_inputs] keyword[else] literal[string]
identifier[input_encoder] = identifier[problem] . identifier[feature_info] [ identifier[fname] ]. identifier[encoder]
identifier[input_ids_list] =[
identifier[_encode] ( identifier[inputs] , identifier[input_encoder] , identifier[add_eos] = identifier[problem] . identifier[has_inputs] )
keyword[for] identifier[inputs] keyword[in] identifier[inputs_list]
]
identifier[examples] =[ identifier[_make_example] ( identifier[input_ids] , identifier[problem] , identifier[fname] )
keyword[for] identifier[input_ids] keyword[in] identifier[input_ids_list] ]
identifier[predictions] = identifier[request_fn] ( identifier[examples] )
identifier[output_decoder] = identifier[problem] . identifier[feature_info] [ literal[string] ]. identifier[encoder]
identifier[outputs] =[
( identifier[_decode] ( identifier[prediction] [ literal[string] ], identifier[output_decoder] ),
identifier[prediction] [ literal[string] ])
keyword[for] identifier[prediction] keyword[in] identifier[predictions]
]
keyword[return] identifier[outputs]
|
def predict(inputs_list, problem, request_fn):
"""Encodes inputs, makes request to deployed TF model, and decodes outputs."""
assert isinstance(inputs_list, list)
fname = 'inputs' if problem.has_inputs else 'targets'
input_encoder = problem.feature_info[fname].encoder
input_ids_list = [_encode(inputs, input_encoder, add_eos=problem.has_inputs) for inputs in inputs_list]
examples = [_make_example(input_ids, problem, fname) for input_ids in input_ids_list]
predictions = request_fn(examples)
output_decoder = problem.feature_info['targets'].encoder
outputs = [(_decode(prediction['outputs'], output_decoder), prediction['scores']) for prediction in predictions]
return outputs
|
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
|
def function[_find_local_handlers, parameter[cls, handlers, namespace, configs]]:
constant[Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
]
for taget[tuple[[<ast.Name object at 0x7da1b1047310>, <ast.Name object at 0x7da1b10473a0>]]] in starred[call[name[namespace].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b10473d0> assign[=] call[name[cls]._is_handler, parameter[name[aname], name[avalue]]]
if name[sig_name] begin[:]
call[name[configs]][name[aname]] assign[=] name[config]
call[name[handlers]][name[aname]] assign[=] name[sig_name]
|
keyword[def] identifier[_find_local_handlers] ( identifier[cls] , identifier[handlers] , identifier[namespace] , identifier[configs] ):
literal[string]
keyword[for] identifier[aname] , identifier[avalue] keyword[in] identifier[namespace] . identifier[items] ():
identifier[sig_name] , identifier[config] = identifier[cls] . identifier[_is_handler] ( identifier[aname] , identifier[avalue] )
keyword[if] identifier[sig_name] :
identifier[configs] [ identifier[aname] ]= identifier[config]
identifier[handlers] [ identifier[aname] ]= identifier[sig_name]
|
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for (aname, avalue) in namespace.items():
(sig_name, config) = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def container(dec):
    """Meta-decorator (for decorating decorators)
    Keeps around original decorated function as a property ``orig_func``
    :param dec: Decorator to decorate
    :type dec: function
    :returns: Decorated decorator
    """
    # Credits: http://stackoverflow.com/a/1167248/1798683
    @wraps(dec)
    def meta_decorator(func):
        # Delegate to the wrapped decorator, then stash the undecorated
        # function on the result so callers can still reach it.
        decorated = dec(func)
        decorated.orig_func = func
        return decorated
    return meta_decorator
|
def function[container, parameter[dec]]:
constant[Meta-decorator (for decorating decorators)
Keeps around original decorated function as a property ``orig_func``
:param dec: Decorator to decorate
:type dec: function
:returns: Decorated decorator
]
def function[meta_decorator, parameter[f]]:
variable[decorator] assign[=] call[name[dec], parameter[name[f]]]
name[decorator].orig_func assign[=] name[f]
return[name[decorator]]
return[name[meta_decorator]]
|
keyword[def] identifier[container] ( identifier[dec] ):
literal[string]
@ identifier[wraps] ( identifier[dec] )
keyword[def] identifier[meta_decorator] ( identifier[f] ):
identifier[decorator] = identifier[dec] ( identifier[f] )
identifier[decorator] . identifier[orig_func] = identifier[f]
keyword[return] identifier[decorator]
keyword[return] identifier[meta_decorator]
|
def container(dec):
"""Meta-decorator (for decorating decorators)
Keeps around original decorated function as a property ``orig_func``
:param dec: Decorator to decorate
:type dec: function
:returns: Decorated decorator
"""
# Credits: http://stackoverflow.com/a/1167248/1798683
@wraps(dec)
def meta_decorator(f):
decorator = dec(f)
decorator.orig_func = f
return decorator
return meta_decorator
|
def resolve_heron_suffix_issue(abs_pex_path, class_path):
    """Resolves duplicate package suffix problems
    When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
    if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
    with this Heron Instance pex package (heron.instance.src.python...), making the Python
    interpreter unable to find the target class in a given pex file.
    This function resolves this issue by individually loading packages with suffix `heron` to
    avoid this issue.
    However, if a dependent module/class that is not directly specified under ``class_path``
    and has conflicts with other native heron packages, there is a possibility that
    such a class/module might not be imported correctly. For example, if a given ``class_path`` was
    ``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
    ``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
    ``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
    explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
    package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
    The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
    name is ``heron``. Note that this method is included because some of the example topologies and
    tests have to have a pex with its top level package name of ``heron``.
    """
    # import top-level package named `heron` of a given pex file
    importer = zipimport.zipimporter(abs_pex_path)
    # NOTE(review): zipimporter.load_module() is deprecated upstream; it also
    # registers the module in sys.modules as a side effect, which is what lets
    # the subsequent sub-package imports resolve against this pex's `heron`.
    importer.load_module("heron")
    # remove 'heron' and the classname
    to_load_lst = class_path.split('.')[1:-1]
    # Path components imported so far, starting at the root package.
    loaded = ['heron']
    loaded_mod = None
    # Import each intermediate package one level at a time, pointing a fresh
    # zipimporter at the corresponding sub-directory *inside* the pex archive
    # so the lookup cannot collide with the natively installed heron packages.
    for to_load in to_load_lst:
        sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded)))
        loaded_mod = sub_importer.load_module(to_load)
        loaded.append(to_load)
    # Module object for the deepest package (the one that contains the class),
    # or None when class_path has no intermediate packages.
    return loaded_mod
|
def function[resolve_heron_suffix_issue, parameter[abs_pex_path, class_path]]:
constant[Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``.
]
variable[importer] assign[=] call[name[zipimport].zipimporter, parameter[name[abs_pex_path]]]
call[name[importer].load_module, parameter[constant[heron]]]
variable[to_load_lst] assign[=] call[call[name[class_path].split, parameter[constant[.]]]][<ast.Slice object at 0x7da204346b00>]
variable[loaded] assign[=] list[[<ast.Constant object at 0x7da2043469b0>]]
variable[loaded_mod] assign[=] constant[None]
for taget[name[to_load]] in starred[name[to_load_lst]] begin[:]
variable[sub_importer] assign[=] call[name[zipimport].zipimporter, parameter[call[name[os].path.join, parameter[name[abs_pex_path], call[constant[/].join, parameter[name[loaded]]]]]]]
variable[loaded_mod] assign[=] call[name[sub_importer].load_module, parameter[name[to_load]]]
call[name[loaded].append, parameter[name[to_load]]]
return[name[loaded_mod]]
|
keyword[def] identifier[resolve_heron_suffix_issue] ( identifier[abs_pex_path] , identifier[class_path] ):
literal[string]
identifier[importer] = identifier[zipimport] . identifier[zipimporter] ( identifier[abs_pex_path] )
identifier[importer] . identifier[load_module] ( literal[string] )
identifier[to_load_lst] = identifier[class_path] . identifier[split] ( literal[string] )[ literal[int] :- literal[int] ]
identifier[loaded] =[ literal[string] ]
identifier[loaded_mod] = keyword[None]
keyword[for] identifier[to_load] keyword[in] identifier[to_load_lst] :
identifier[sub_importer] = identifier[zipimport] . identifier[zipimporter] ( identifier[os] . identifier[path] . identifier[join] ( identifier[abs_pex_path] , literal[string] . identifier[join] ( identifier[loaded] )))
identifier[loaded_mod] = identifier[sub_importer] . identifier[load_module] ( identifier[to_load] )
identifier[loaded] . identifier[append] ( identifier[to_load] )
keyword[return] identifier[loaded_mod]
|
def resolve_heron_suffix_issue(abs_pex_path, class_path):
"""Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``.
"""
# import top-level package named `heron` of a given pex file
importer = zipimport.zipimporter(abs_pex_path)
importer.load_module('heron')
# remove 'heron' and the classname
to_load_lst = class_path.split('.')[1:-1]
loaded = ['heron']
loaded_mod = None
for to_load in to_load_lst:
sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded)))
loaded_mod = sub_importer.load_module(to_load)
loaded.append(to_load) # depends on [control=['for'], data=['to_load']]
return loaded_mod
|
def read_files(files, readers, **kwargs):
    """Read every file in `files` with each reader in `readers`.

    Parameters
    ----------
    files : list [str]
        Paths of the files to be read. Supported files are limited to text
        and nxml files.
    readers : list [Reader instances]
        Reader objects used to process the file contents.
    **kwargs :
        Forwarded to each reader's `read` method.

    Returns
    -------
    output_list : list [ReadingData]
        ReadingData objects holding the contents of the readings.
    """
    # Load each file once up front; every reader consumes the same content.
    content = [Content.from_file(path) for path in files]
    output_list = []
    for reader in readers:
        res_list = reader.read(content, **kwargs)
        if res_list is None:
            logger.info("Nothing read by %s." % reader.name)
            continue
        logger.info("Successfully read %d content entries with %s."
                    % (len(res_list), reader.name))
        output_list.extend(res_list)
    logger.info("Read %s text content entries in all." % len(output_list))
    return output_list
|
def function[read_files, parameter[files, readers]]:
constant[Read the files in `files` with the reader objects in `readers`.
Parameters
----------
files : list [str]
A list of file paths to be read by the readers. Supported files are
limited to text and nxml files.
readers : list [Reader instances]
A list of Reader objects to be used reading the files.
**kwargs :
Other keyword arguments are passed to the `read` method of the readers.
Returns
-------
output_list : list [ReadingData]
A list of ReadingData objects with the contents of the readings.
]
variable[reading_content] assign[=] <ast.ListComp object at 0x7da18fe924d0>
variable[output_list] assign[=] list[[]]
for taget[name[reader]] in starred[name[readers]] begin[:]
variable[res_list] assign[=] call[name[reader].read, parameter[name[reading_content]]]
if compare[name[res_list] is constant[None]] begin[:]
call[name[logger].info, parameter[binary_operation[constant[Nothing read by %s.] <ast.Mod object at 0x7da2590d6920> name[reader].name]]]
call[name[logger].info, parameter[binary_operation[constant[Read %s text content entries in all.] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[output_list]]]]]]
return[name[output_list]]
|
keyword[def] identifier[read_files] ( identifier[files] , identifier[readers] ,** identifier[kwargs] ):
literal[string]
identifier[reading_content] =[ identifier[Content] . identifier[from_file] ( identifier[filepath] ) keyword[for] identifier[filepath] keyword[in] identifier[files] ]
identifier[output_list] =[]
keyword[for] identifier[reader] keyword[in] identifier[readers] :
identifier[res_list] = identifier[reader] . identifier[read] ( identifier[reading_content] ,** identifier[kwargs] )
keyword[if] identifier[res_list] keyword[is] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] % identifier[reader] . identifier[name] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string]
%( identifier[len] ( identifier[res_list] ), identifier[reader] . identifier[name] ))
identifier[output_list] += identifier[res_list]
identifier[logger] . identifier[info] ( literal[string] % identifier[len] ( identifier[output_list] ))
keyword[return] identifier[output_list]
|
def read_files(files, readers, **kwargs):
"""Read the files in `files` with the reader objects in `readers`.
Parameters
----------
files : list [str]
A list of file paths to be read by the readers. Supported files are
limited to text and nxml files.
readers : list [Reader instances]
A list of Reader objects to be used reading the files.
**kwargs :
Other keyword arguments are passed to the `read` method of the readers.
Returns
-------
output_list : list [ReadingData]
A list of ReadingData objects with the contents of the readings.
"""
reading_content = [Content.from_file(filepath) for filepath in files]
output_list = []
for reader in readers:
res_list = reader.read(reading_content, **kwargs)
if res_list is None:
logger.info('Nothing read by %s.' % reader.name) # depends on [control=['if'], data=[]]
else:
logger.info('Successfully read %d content entries with %s.' % (len(res_list), reader.name))
output_list += res_list # depends on [control=['for'], data=['reader']]
logger.info('Read %s text content entries in all.' % len(output_list))
return output_list
|
async def issue_events(self):
    """ This function will be automatically run from main.py and triggers the following functions:
    - on_unit_created
    - on_unit_destroyed
    - on_building_construction_complete
    """
    # Dead-unit events are flushed first, then newly added units.
    # NOTE(review): the significance of this ordering is inferred from the
    # call sequence only — confirm against the event-handler contract in main.py.
    await self._issue_unit_dead_events()
    await self._issue_unit_added_events()
    # Completion events are only checked for structures, not mobile units.
    for unit in self.units.structure:
        await self._issue_building_complete_event(unit)
|
<ast.AsyncFunctionDef object at 0x7da18dc993f0>
|
keyword[async] keyword[def] identifier[issue_events] ( identifier[self] ):
literal[string]
keyword[await] identifier[self] . identifier[_issue_unit_dead_events] ()
keyword[await] identifier[self] . identifier[_issue_unit_added_events] ()
keyword[for] identifier[unit] keyword[in] identifier[self] . identifier[units] . identifier[structure] :
keyword[await] identifier[self] . identifier[_issue_building_complete_event] ( identifier[unit] )
|
async def issue_events(self):
""" This function will be automatically run from main.py and triggers the following functions:
- on_unit_created
- on_unit_destroyed
- on_building_construction_complete
"""
await self._issue_unit_dead_events()
await self._issue_unit_added_events()
for unit in self.units.structure:
await self._issue_building_complete_event(unit) # depends on [control=['for'], data=['unit']]
|
def save_context(self) -> bool:
    """Push the cursor's current position onto the context stack.

    Always returns True.
    """
    position = self._cursor.position
    self._contexts.append(position)
    return True
|
def function[save_context, parameter[self]]:
constant[Save current position.]
call[name[self]._contexts.append, parameter[name[self]._cursor.position]]
return[constant[True]]
|
keyword[def] identifier[save_context] ( identifier[self] )-> identifier[bool] :
literal[string]
identifier[self] . identifier[_contexts] . identifier[append] ( identifier[self] . identifier[_cursor] . identifier[position] )
keyword[return] keyword[True]
|
def save_context(self) -> bool:
"""Save current position."""
self._contexts.append(self._cursor.position)
return True
|
def create_pw(length=8, digits=2, upper=2, lower=2):
    """Create a random password.

    Builds a password with at least the requested number of digits,
    upper-case and lower-case letters, pads it with random letters up to
    `length` characters, and shuffles the result.

    Security fix: uses the ``secrets`` module (CSPRNG) instead of the
    seedable ``random`` module. The previous implementation reseeded with
    ``seed(time())`` on every call, which makes passwords predictable from
    the call timestamp.

    :param length: Target no. of characters in the password
    :type length: int
    :param digits: Minimum no. of digits in the password
    :type digits: int
    :param upper: Minimum no. of upper case letters in the password
    :type upper: int
    :param lower: Minimum no. of lower case letters in the password
    :type lower: int
    :returns: A random password with the above constraints. As before, if
        digits + upper + lower exceeds length the result is longer than
        `length` (the filler-letter count is simply zero).
    :rtype: str
    """
    # Function-scope import keeps the module's public namespace unchanged.
    import secrets

    # Number of unconstrained filler letters; clamped at zero so an
    # over-constrained call never produces a negative range.
    n_letters = max(length - digits - upper - lower, 0)
    chars = list(
        chain(
            (secrets.choice(string.ascii_uppercase) for _ in range(upper)),
            (secrets.choice(string.ascii_lowercase) for _ in range(lower)),
            (secrets.choice(string.digits) for _ in range(digits)),
            (secrets.choice(string.ascii_letters) for _ in range(n_letters)),
        )
    )
    # Shuffle with a CSPRNG so the category ordering leaks nothing.
    secrets.SystemRandom().shuffle(chars)
    return "".join(chars)
|
def function[create_pw, parameter[length, digits, upper, lower]]:
constant[Create a random password
From Stackoverflow:
http://stackoverflow.com/questions/7479442/high-quality-simple-random-password-generator
Create a random password with the specified length and no. of
digit, upper and lower case letters.
:param length: Maximum no. of characters in the password
:type length: int
:param digits: Minimum no. of digits in the password
:type digits: int
:param upper: Minimum no. of upper case letters in the password
:type upper: int
:param lower: Minimum no. of lower case letters in the password
:type lower: int
:returns: A random password with the above constaints
:rtype: str
]
call[name[seed], parameter[call[name[time], parameter[]]]]
variable[lowercase] assign[=] name[string].ascii_lowercase
variable[uppercase] assign[=] name[string].ascii_uppercase
variable[letters] assign[=] name[string].ascii_letters
variable[password] assign[=] call[name[list], parameter[call[name[chain], parameter[<ast.GeneratorExp object at 0x7da2054a7010>, <ast.GeneratorExp object at 0x7da2054a5ba0>, <ast.GeneratorExp object at 0x7da2054a6c80>, <ast.GeneratorExp object at 0x7da2054a6710>]]]]
return[call[constant[].join, parameter[call[name[sample], parameter[name[password], call[name[len], parameter[name[password]]]]]]]]
|
keyword[def] identifier[create_pw] ( identifier[length] = literal[int] , identifier[digits] = literal[int] , identifier[upper] = literal[int] , identifier[lower] = literal[int] ):
literal[string]
identifier[seed] ( identifier[time] ())
identifier[lowercase] = identifier[string] . identifier[ascii_lowercase]
identifier[uppercase] = identifier[string] . identifier[ascii_uppercase]
identifier[letters] = identifier[string] . identifier[ascii_letters]
identifier[password] = identifier[list] (
identifier[chain] (
( identifier[choice] ( identifier[uppercase] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[upper] )),
( identifier[choice] ( identifier[lowercase] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[lower] )),
( identifier[choice] ( identifier[string] . identifier[digits] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[digits] )),
( identifier[choice] ( identifier[letters] ) keyword[for] identifier[_] keyword[in] identifier[range] (( identifier[length] - identifier[digits] - identifier[upper] - identifier[lower] )))
)
)
keyword[return] literal[string] . identifier[join] ( identifier[sample] ( identifier[password] , identifier[len] ( identifier[password] )))
|
def create_pw(length=8, digits=2, upper=2, lower=2):
"""Create a random password
From Stackoverflow:
http://stackoverflow.com/questions/7479442/high-quality-simple-random-password-generator
Create a random password with the specified length and no. of
digit, upper and lower case letters.
:param length: Maximum no. of characters in the password
:type length: int
:param digits: Minimum no. of digits in the password
:type digits: int
:param upper: Minimum no. of upper case letters in the password
:type upper: int
:param lower: Minimum no. of lower case letters in the password
:type lower: int
:returns: A random password with the above constaints
:rtype: str
"""
seed(time())
lowercase = string.ascii_lowercase
uppercase = string.ascii_uppercase
letters = string.ascii_letters
password = list(chain((choice(uppercase) for _ in range(upper)), (choice(lowercase) for _ in range(lower)), (choice(string.digits) for _ in range(digits)), (choice(letters) for _ in range(length - digits - upper - lower))))
return ''.join(sample(password, len(password)))
|
def reads(s, format, **kwargs):
    """Parse a notebook from a string and return the NotebookNode object.

    Notebooks of any version are handled; the returned notebook is always
    in the current version's format.

    Parameters
    ----------
    s : unicode
        The raw unicode string to read the notebook from.
    format : (u'json', u'ipynb', u'py')
        The format that the string is in.

    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    format = unicode(format)
    if format in (u'json', u'ipynb'):
        return reads_json(s, **kwargs)
    if format == u'py':
        return reads_py(s, **kwargs)
    raise NBFormatError('Unsupported format: %s' % format)
|
def function[reads, parameter[s, format]]:
constant[Read a notebook from a string and return the NotebookNode object.
This function properly handles notebooks of any version. The notebook
returned will always be in the current version's format.
Parameters
----------
s : unicode
The raw unicode string to read the notebook from.
format : (u'json', u'ipynb', u'py')
The format that the string is in.
Returns
-------
nb : NotebookNode
The notebook that was read.
]
variable[format] assign[=] call[name[unicode], parameter[name[format]]]
if <ast.BoolOp object at 0x7da18fe92410> begin[:]
return[call[name[reads_json], parameter[name[s]]]]
|
keyword[def] identifier[reads] ( identifier[s] , identifier[format] ,** identifier[kwargs] ):
literal[string]
identifier[format] = identifier[unicode] ( identifier[format] )
keyword[if] identifier[format] == literal[string] keyword[or] identifier[format] == literal[string] :
keyword[return] identifier[reads_json] ( identifier[s] ,** identifier[kwargs] )
keyword[elif] identifier[format] == literal[string] :
keyword[return] identifier[reads_py] ( identifier[s] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[NBFormatError] ( literal[string] % identifier[format] )
|
def reads(s, format, **kwargs):
"""Read a notebook from a string and return the NotebookNode object.
This function properly handles notebooks of any version. The notebook
returned will always be in the current version's format.
Parameters
----------
s : unicode
The raw unicode string to read the notebook from.
format : (u'json', u'ipynb', u'py')
The format that the string is in.
Returns
-------
nb : NotebookNode
The notebook that was read.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return reads_json(s, **kwargs) # depends on [control=['if'], data=[]]
elif format == u'py':
return reads_py(s, **kwargs) # depends on [control=['if'], data=[]]
else:
raise NBFormatError('Unsupported format: %s' % format)
|
def _N_lines():
    '''Number of items that fit on one 'screen-ful' of the terminal.

    Estimates how many terminal lines the shell prompt itself consumes and
    subtracts that from the terminal height. On Windows, the '$_' sequence
    in the PROMPT environment variable marks a prompt newline; elsewhere a
    newline in PS1 does.
    '''
    # The prompt occupies at least one line even without any newlines in it.
    prompt_lines = 1
    if "win32" in sys.platform:
        # Windows typically prints a free line after the program output.
        prompt_lines += 1
        prompt_lines += len(re.findall(r'\$_', os.getenv('PROMPT', '')))
    else:
        prompt_lines += len(re.findall('\\n', os.getenv('PS1', '')))
    # Print a minimum of one item regardless of terminal size.
    return max(get_terminal_size().lines - prompt_lines, 1)
|
def function[_N_lines, parameter[]]:
constant[ Determine how many lines to print, such that the number of items
displayed will fit on the terminal (i.e one 'screen-ful' of items)
This looks at the environmental prompt variable, and tries to determine
how many lines it takes up.
On Windows, it does this by looking for the '$_' sequence, which indicates
a new line, in the environmental variable PROMPT.
Otherwise, it looks for a newline ('
') in the environmental variable
PS1.
]
variable[lines_in_prompt] assign[=] constant[1]
if compare[constant[win32] in name[sys].platform] begin[:]
<ast.AugAssign object at 0x7da1b2345a80>
variable[a] assign[=] call[name[re].findall, parameter[constant[\$_], call[name[os].getenv, parameter[constant[PROMPT], constant[]]]]]
<ast.AugAssign object at 0x7da1b2345300>
variable[n_lines] assign[=] binary_operation[call[name[get_terminal_size], parameter[]].lines - name[lines_in_prompt]]
variable[n_lines] assign[=] call[name[max], parameter[name[n_lines], constant[1]]]
return[name[n_lines]]
|
keyword[def] identifier[_N_lines] ():
literal[string]
identifier[lines_in_prompt] = literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[platform] :
identifier[lines_in_prompt] += literal[int]
identifier[a] = identifier[re] . identifier[findall] ( literal[string] , identifier[os] . identifier[getenv] ( literal[string] , literal[string] ))
identifier[lines_in_prompt] += identifier[len] ( identifier[a] )
keyword[else] :
identifier[a] = identifier[re] . identifier[findall] ( literal[string] , identifier[os] . identifier[getenv] ( literal[string] , literal[string] ))
identifier[lines_in_prompt] += identifier[len] ( identifier[a] )
identifier[n_lines] = identifier[get_terminal_size] (). identifier[lines] - identifier[lines_in_prompt]
identifier[n_lines] = identifier[max] ( identifier[n_lines] , literal[int] )
keyword[return] identifier[n_lines]
|
def _N_lines():
""" Determine how many lines to print, such that the number of items
displayed will fit on the terminal (i.e one 'screen-ful' of items)
This looks at the environmental prompt variable, and tries to determine
how many lines it takes up.
On Windows, it does this by looking for the '$_' sequence, which indicates
a new line, in the environmental variable PROMPT.
Otherwise, it looks for a newline ('
') in the environmental variable
PS1.
"""
lines_in_prompt = 1 # prompt is assumed to take up one line, even
# without any newlines in it
if 'win32' in sys.platform:
lines_in_prompt += 1 # Windows will typically print a free line after
# the program output
a = re.findall('\\$_', os.getenv('PROMPT', ''))
lines_in_prompt += len(a) # depends on [control=['if'], data=[]]
else:
a = re.findall('\\n', os.getenv('PS1', ''))
lines_in_prompt += len(a)
n_lines = get_terminal_size().lines - lines_in_prompt
# print a minimum of one item
n_lines = max(n_lines, 1)
return n_lines
|
def graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True):
    """
    Convert a graph into node and/or edge GeoDataFrames

    Parameters
    ----------
    G : networkx multidigraph
    nodes : bool
        if True, convert graph nodes to a GeoDataFrame and return it
    edges : bool
        if True, convert graph edges to a GeoDataFrame and return it
    node_geometry : bool
        if True, create a geometry column from node x and y data
    fill_edge_geometry : bool
        if True, fill in missing edge geometry fields using origin and
        destination nodes

    Returns
    -------
    GeoDataFrame or tuple
        gdf_nodes or gdf_edges or both as a tuple (nodes first)
    """
    if not (nodes or edges):
        raise ValueError('You must request nodes or edges, or both.')

    to_return = []
    if nodes:
        to_return.append(_graph_nodes_to_gdf(G, node_geometry))
    if edges:
        to_return.append(_graph_edges_to_gdf(G, fill_edge_geometry))

    # Return a bare GeoDataFrame when only one of nodes/edges was requested.
    if len(to_return) > 1:
        return tuple(to_return)
    return to_return[0]


def _graph_nodes_to_gdf(G, node_geometry):
    """Build the node GeoDataFrame for graph_to_gdfs (helper).

    Index is the node ids; columns come from the node attribute dicts, plus
    an optional 'geometry' Point column built from the 'x'/'y' attributes.
    """
    start_time = time.time()
    node_ids, data = zip(*G.nodes(data=True))
    gdf_nodes = gpd.GeoDataFrame(list(data), index=node_ids)
    if node_geometry:
        gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
    gdf_nodes.crs = G.graph['crs']
    gdf_nodes.gdf_name = '{}_nodes'.format(G.graph['name'])
    log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_nodes.gdf_name, time.time() - start_time))
    return gdf_nodes


def _graph_edges_to_gdf(G, fill_edge_geometry):
    """Build the edge GeoDataFrame for graph_to_gdfs (helper).

    One row per (u, v, key) edge with all edge attributes as columns. Edges
    lacking a 'geometry' attribute get a straight LineString between their
    endpoint nodes when fill_edge_geometry is True, or NaN otherwise.
    """
    start_time = time.time()
    edge_rows = []
    for u, v, key, data in G.edges(keys=True, data=True):
        # For each edge, record endpoints/key plus every attribute in data.
        # update() preserves the historical behaviour that an attribute named
        # 'u', 'v' or 'key' in data overrides the structural value.
        edge_details = {'u': u, 'v': v, 'key': key}
        edge_details.update(data)
        if 'geometry' not in data:
            if fill_edge_geometry:
                point_u = Point((G.nodes[u]['x'], G.nodes[u]['y']))
                point_v = Point((G.nodes[v]['x'], G.nodes[v]['y']))
                edge_details['geometry'] = LineString([point_u, point_v])
            else:
                edge_details['geometry'] = np.nan
        edge_rows.append(edge_details)
    gdf_edges = gpd.GeoDataFrame(edge_rows)
    gdf_edges.crs = G.graph['crs']
    gdf_edges.gdf_name = '{}_edges'.format(G.graph['name'])
    log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_edges.gdf_name, time.time() - start_time))
    return gdf_edges
|
def function[graph_to_gdfs, parameter[G, nodes, edges, node_geometry, fill_edge_geometry]]:
constant[
Convert a graph into node and/or edge GeoDataFrames
Parameters
----------
G : networkx multidigraph
nodes : bool
if True, convert graph nodes to a GeoDataFrame and return it
edges : bool
if True, convert graph edges to a GeoDataFrame and return it
node_geometry : bool
if True, create a geometry column from node x and y data
fill_edge_geometry : bool
if True, fill in missing edge geometry fields using origin and
destination nodes
Returns
-------
GeoDataFrame or tuple
gdf_nodes or gdf_edges or both as a tuple
]
if <ast.UnaryOp object at 0x7da1b1b12e60> begin[:]
<ast.Raise object at 0x7da1b1b11ab0>
variable[to_return] assign[=] list[[]]
if name[nodes] begin[:]
variable[start_time] assign[=] call[name[time].time, parameter[]]
<ast.Tuple object at 0x7da1b1b12b90> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b1b115d0>]]
variable[gdf_nodes] assign[=] call[name[gpd].GeoDataFrame, parameter[call[name[list], parameter[name[data]]]]]
if name[node_geometry] begin[:]
call[name[gdf_nodes]][constant[geometry]] assign[=] call[name[gdf_nodes].apply, parameter[<ast.Lambda object at 0x7da1b1b12020>]]
name[gdf_nodes].crs assign[=] call[name[G].graph][constant[crs]]
name[gdf_nodes].gdf_name assign[=] call[constant[{}_nodes].format, parameter[call[name[G].graph][constant[name]]]]
call[name[to_return].append, parameter[name[gdf_nodes]]]
call[name[log], parameter[call[constant[Created GeoDataFrame "{}" from graph in {:,.2f} seconds].format, parameter[name[gdf_nodes].gdf_name, binary_operation[call[name[time].time, parameter[]] - name[start_time]]]]]]
if name[edges] begin[:]
variable[start_time] assign[=] call[name[time].time, parameter[]]
variable[edges] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1b10280>, <ast.Name object at 0x7da1b1b13bb0>, <ast.Name object at 0x7da1b1b129b0>, <ast.Name object at 0x7da1b1b11330>]]] in starred[call[name[G].edges, parameter[]]] begin[:]
variable[edge_details] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b11780>, <ast.Constant object at 0x7da1b1b10a60>, <ast.Constant object at 0x7da1b1b115a0>], [<ast.Name object at 0x7da1b1b124a0>, <ast.Name object at 0x7da1b1b10310>, <ast.Name object at 0x7da1b1b10c70>]]
for taget[name[attr_key]] in starred[name[data]] begin[:]
call[name[edge_details]][name[attr_key]] assign[=] call[name[data]][name[attr_key]]
if compare[constant[geometry] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
if name[fill_edge_geometry] begin[:]
variable[point_u] assign[=] call[name[Point], parameter[tuple[[<ast.Subscript object at 0x7da1b1b10b20>, <ast.Subscript object at 0x7da1b1b136d0>]]]]
variable[point_v] assign[=] call[name[Point], parameter[tuple[[<ast.Subscript object at 0x7da1b1b102b0>, <ast.Subscript object at 0x7da1b1b12c50>]]]]
call[name[edge_details]][constant[geometry]] assign[=] call[name[LineString], parameter[list[[<ast.Name object at 0x7da1b1b13f70>, <ast.Name object at 0x7da1b1b10df0>]]]]
call[name[edges].append, parameter[name[edge_details]]]
variable[gdf_edges] assign[=] call[name[gpd].GeoDataFrame, parameter[name[edges]]]
name[gdf_edges].crs assign[=] call[name[G].graph][constant[crs]]
name[gdf_edges].gdf_name assign[=] call[constant[{}_edges].format, parameter[call[name[G].graph][constant[name]]]]
call[name[to_return].append, parameter[name[gdf_edges]]]
call[name[log], parameter[call[constant[Created GeoDataFrame "{}" from graph in {:,.2f} seconds].format, parameter[name[gdf_edges].gdf_name, binary_operation[call[name[time].time, parameter[]] - name[start_time]]]]]]
if compare[call[name[len], parameter[name[to_return]]] greater[>] constant[1]] begin[:]
return[call[name[tuple], parameter[name[to_return]]]]
|
keyword[def] identifier[graph_to_gdfs] ( identifier[G] , identifier[nodes] = keyword[True] , identifier[edges] = keyword[True] , identifier[node_geometry] = keyword[True] , identifier[fill_edge_geometry] = keyword[True] ):
literal[string]
keyword[if] keyword[not] ( identifier[nodes] keyword[or] identifier[edges] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[to_return] =[]
keyword[if] identifier[nodes] :
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[nodes] , identifier[data] = identifier[zip] (* identifier[G] . identifier[nodes] ( identifier[data] = keyword[True] ))
identifier[gdf_nodes] = identifier[gpd] . identifier[GeoDataFrame] ( identifier[list] ( identifier[data] ), identifier[index] = identifier[nodes] )
keyword[if] identifier[node_geometry] :
identifier[gdf_nodes] [ literal[string] ]= identifier[gdf_nodes] . identifier[apply] ( keyword[lambda] identifier[row] : identifier[Point] ( identifier[row] [ literal[string] ], identifier[row] [ literal[string] ]), identifier[axis] = literal[int] )
identifier[gdf_nodes] . identifier[crs] = identifier[G] . identifier[graph] [ literal[string] ]
identifier[gdf_nodes] . identifier[gdf_name] = literal[string] . identifier[format] ( identifier[G] . identifier[graph] [ literal[string] ])
identifier[to_return] . identifier[append] ( identifier[gdf_nodes] )
identifier[log] ( literal[string] . identifier[format] ( identifier[gdf_nodes] . identifier[gdf_name] , identifier[time] . identifier[time] ()- identifier[start_time] ))
keyword[if] identifier[edges] :
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[edges] =[]
keyword[for] identifier[u] , identifier[v] , identifier[key] , identifier[data] keyword[in] identifier[G] . identifier[edges] ( identifier[keys] = keyword[True] , identifier[data] = keyword[True] ):
identifier[edge_details] ={ literal[string] : identifier[u] , literal[string] : identifier[v] , literal[string] : identifier[key] }
keyword[for] identifier[attr_key] keyword[in] identifier[data] :
identifier[edge_details] [ identifier[attr_key] ]= identifier[data] [ identifier[attr_key] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
keyword[if] identifier[fill_edge_geometry] :
identifier[point_u] = identifier[Point] (( identifier[G] . identifier[nodes] [ identifier[u] ][ literal[string] ], identifier[G] . identifier[nodes] [ identifier[u] ][ literal[string] ]))
identifier[point_v] = identifier[Point] (( identifier[G] . identifier[nodes] [ identifier[v] ][ literal[string] ], identifier[G] . identifier[nodes] [ identifier[v] ][ literal[string] ]))
identifier[edge_details] [ literal[string] ]= identifier[LineString] ([ identifier[point_u] , identifier[point_v] ])
keyword[else] :
identifier[edge_details] [ literal[string] ]= identifier[np] . identifier[nan]
identifier[edges] . identifier[append] ( identifier[edge_details] )
identifier[gdf_edges] = identifier[gpd] . identifier[GeoDataFrame] ( identifier[edges] )
identifier[gdf_edges] . identifier[crs] = identifier[G] . identifier[graph] [ literal[string] ]
identifier[gdf_edges] . identifier[gdf_name] = literal[string] . identifier[format] ( identifier[G] . identifier[graph] [ literal[string] ])
identifier[to_return] . identifier[append] ( identifier[gdf_edges] )
identifier[log] ( literal[string] . identifier[format] ( identifier[gdf_edges] . identifier[gdf_name] , identifier[time] . identifier[time] ()- identifier[start_time] ))
keyword[if] identifier[len] ( identifier[to_return] )> literal[int] :
keyword[return] identifier[tuple] ( identifier[to_return] )
keyword[else] :
keyword[return] identifier[to_return] [ literal[int] ]
|
def graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True):
"""
Convert a graph into node and/or edge GeoDataFrames
Parameters
----------
G : networkx multidigraph
nodes : bool
if True, convert graph nodes to a GeoDataFrame and return it
edges : bool
if True, convert graph edges to a GeoDataFrame and return it
node_geometry : bool
if True, create a geometry column from node x and y data
fill_edge_geometry : bool
if True, fill in missing edge geometry fields using origin and
destination nodes
Returns
-------
GeoDataFrame or tuple
gdf_nodes or gdf_edges or both as a tuple
"""
if not (nodes or edges):
raise ValueError('You must request nodes or edges, or both.') # depends on [control=['if'], data=[]]
to_return = []
if nodes:
start_time = time.time()
(nodes, data) = zip(*G.nodes(data=True))
gdf_nodes = gpd.GeoDataFrame(list(data), index=nodes)
if node_geometry:
gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1) # depends on [control=['if'], data=[]]
gdf_nodes.crs = G.graph['crs']
gdf_nodes.gdf_name = '{}_nodes'.format(G.graph['name'])
to_return.append(gdf_nodes)
log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_nodes.gdf_name, time.time() - start_time)) # depends on [control=['if'], data=[]]
if edges:
start_time = time.time()
# create a list to hold our edges, then loop through each edge in the
# graph
edges = []
for (u, v, key, data) in G.edges(keys=True, data=True):
# for each edge, add key and all attributes in data dict to the
# edge_details
edge_details = {'u': u, 'v': v, 'key': key}
for attr_key in data:
edge_details[attr_key] = data[attr_key] # depends on [control=['for'], data=['attr_key']]
# if edge doesn't already have a geometry attribute, create one now
# if fill_edge_geometry==True
if 'geometry' not in data:
if fill_edge_geometry:
point_u = Point((G.nodes[u]['x'], G.nodes[u]['y']))
point_v = Point((G.nodes[v]['x'], G.nodes[v]['y']))
edge_details['geometry'] = LineString([point_u, point_v]) # depends on [control=['if'], data=[]]
else:
edge_details['geometry'] = np.nan # depends on [control=['if'], data=[]]
edges.append(edge_details) # depends on [control=['for'], data=[]]
# create a GeoDataFrame from the list of edges and set the CRS
gdf_edges = gpd.GeoDataFrame(edges)
gdf_edges.crs = G.graph['crs']
gdf_edges.gdf_name = '{}_edges'.format(G.graph['name'])
to_return.append(gdf_edges)
log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_edges.gdf_name, time.time() - start_time)) # depends on [control=['if'], data=[]]
if len(to_return) > 1:
return tuple(to_return) # depends on [control=['if'], data=[]]
else:
return to_return[0]
|
def write(self, proto):
    """Serialize this model's state into a capnp message builder.

    :param proto: capnp HTMPredictionModelProto message builder
    """
    # Serialize the base-model state first, then this subclass's fields.
    super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase)

    proto.numRunCalls = self.__numRunCalls
    proto.minLikelihoodThreshold = self._minLikelihoodThreshold
    proto.maxPredictionsPerStep = self._maxPredictionsPerStep

    self._netInfo.net.write(proto.network)
    proto.spLearningEnabled = self.__spLearningEnabled
    proto.tpLearningEnabled = self.__tpLearningEnabled

    # The following attributes are optional; the proto models each one as a
    # none/value union, so pick the branch according to whether it is set.
    optionalFields = (
        (proto.predictedFieldIdx, self._predictedFieldIdx),
        (proto.predictedFieldName, self._predictedFieldName),
        (proto.numFields, self._numFields),
    )
    for union, value in optionalFields:
        if value is None:
            union.none = None
        else:
            union.value = value

    proto.trainSPNetOnlyIfRequested = self.__trainSPNetOnlyIfRequested
    proto.finishedLearning = self.__finishedLearning
|
def function[write, parameter[self, proto]]:
constant[
:param proto: capnp HTMPredictionModelProto message builder
]
call[call[name[super], parameter[name[HTMPredictionModel], name[self]]].writeBaseToProto, parameter[name[proto].modelBase]]
name[proto].numRunCalls assign[=] name[self].__numRunCalls
name[proto].minLikelihoodThreshold assign[=] name[self]._minLikelihoodThreshold
name[proto].maxPredictionsPerStep assign[=] name[self]._maxPredictionsPerStep
call[name[self]._netInfo.net.write, parameter[name[proto].network]]
name[proto].spLearningEnabled assign[=] name[self].__spLearningEnabled
name[proto].tpLearningEnabled assign[=] name[self].__tpLearningEnabled
if compare[name[self]._predictedFieldIdx is constant[None]] begin[:]
name[proto].predictedFieldIdx.none assign[=] constant[None]
if compare[name[self]._predictedFieldName is constant[None]] begin[:]
name[proto].predictedFieldName.none assign[=] constant[None]
if compare[name[self]._numFields is constant[None]] begin[:]
name[proto].numFields.none assign[=] constant[None]
name[proto].trainSPNetOnlyIfRequested assign[=] name[self].__trainSPNetOnlyIfRequested
name[proto].finishedLearning assign[=] name[self].__finishedLearning
|
keyword[def] identifier[write] ( identifier[self] , identifier[proto] ):
literal[string]
identifier[super] ( identifier[HTMPredictionModel] , identifier[self] ). identifier[writeBaseToProto] ( identifier[proto] . identifier[modelBase] )
identifier[proto] . identifier[numRunCalls] = identifier[self] . identifier[__numRunCalls]
identifier[proto] . identifier[minLikelihoodThreshold] = identifier[self] . identifier[_minLikelihoodThreshold]
identifier[proto] . identifier[maxPredictionsPerStep] = identifier[self] . identifier[_maxPredictionsPerStep]
identifier[self] . identifier[_netInfo] . identifier[net] . identifier[write] ( identifier[proto] . identifier[network] )
identifier[proto] . identifier[spLearningEnabled] = identifier[self] . identifier[__spLearningEnabled]
identifier[proto] . identifier[tpLearningEnabled] = identifier[self] . identifier[__tpLearningEnabled]
keyword[if] identifier[self] . identifier[_predictedFieldIdx] keyword[is] keyword[None] :
identifier[proto] . identifier[predictedFieldIdx] . identifier[none] = keyword[None]
keyword[else] :
identifier[proto] . identifier[predictedFieldIdx] . identifier[value] = identifier[self] . identifier[_predictedFieldIdx]
keyword[if] identifier[self] . identifier[_predictedFieldName] keyword[is] keyword[None] :
identifier[proto] . identifier[predictedFieldName] . identifier[none] = keyword[None]
keyword[else] :
identifier[proto] . identifier[predictedFieldName] . identifier[value] = identifier[self] . identifier[_predictedFieldName]
keyword[if] identifier[self] . identifier[_numFields] keyword[is] keyword[None] :
identifier[proto] . identifier[numFields] . identifier[none] = keyword[None]
keyword[else] :
identifier[proto] . identifier[numFields] . identifier[value] = identifier[self] . identifier[_numFields]
identifier[proto] . identifier[trainSPNetOnlyIfRequested] = identifier[self] . identifier[__trainSPNetOnlyIfRequested]
identifier[proto] . identifier[finishedLearning] = identifier[self] . identifier[__finishedLearning]
|
def write(self, proto):
"""
:param proto: capnp HTMPredictionModelProto message builder
"""
super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase)
proto.numRunCalls = self.__numRunCalls
proto.minLikelihoodThreshold = self._minLikelihoodThreshold
proto.maxPredictionsPerStep = self._maxPredictionsPerStep
self._netInfo.net.write(proto.network)
proto.spLearningEnabled = self.__spLearningEnabled
proto.tpLearningEnabled = self.__tpLearningEnabled
if self._predictedFieldIdx is None:
proto.predictedFieldIdx.none = None # depends on [control=['if'], data=[]]
else:
proto.predictedFieldIdx.value = self._predictedFieldIdx
if self._predictedFieldName is None:
proto.predictedFieldName.none = None # depends on [control=['if'], data=[]]
else:
proto.predictedFieldName.value = self._predictedFieldName
if self._numFields is None:
proto.numFields.none = None # depends on [control=['if'], data=[]]
else:
proto.numFields.value = self._numFields
proto.trainSPNetOnlyIfRequested = self.__trainSPNetOnlyIfRequested
proto.finishedLearning = self.__finishedLearning
|
def toNumber(str, default=None):
    """toNumber(str[, default]) -> integer | float | default

    Converts the given string to a numeric value.  The string may be a
    hexadecimal, integer, or floating number.  If string could not be
    converted, default (None) is returned.

    Examples:

    >>> n = toNumber("0x2A")
    >>> assert type(n) is int and n == 42

    >>> n = toNumber("42")
    >>> assert type(n) is int and n == 42

    >>> n = toNumber("42.0")
    >>> assert type(n) is float and n == 42.0

    >>> n = toNumber("Foo", 42)
    >>> assert type(n) is int and n == 42

    >>> n = toNumber("Foo")
    >>> assert n is None
    """
    value = default

    try:
        # Accept both "0x" and "0X" hexadecimal prefixes (the previous
        # check only matched lowercase "0x", so "0X2A" wrongly returned
        # the default instead of 42).
        if str.startswith(("0x", "0X")):
            value = int(str, 16)
        else:
            # Try int first so "42" stays an integer; fall back to float
            # for values such as "42.0".
            try:
                value = int(str)
            except ValueError:
                value = float(str)
    except ValueError:
        # Not a recognizable number; keep the caller-supplied default.
        pass

    return value
|
def function[toNumber, parameter[str, default]]:
constant[toNumber(str[, default]) -> integer | float | default
Converts the given string to a numeric value. The string may be a
hexadecimal, integer, or floating number. If string could not be
converted, default (None) is returned.
Examples:
>>> n = toNumber("0x2A")
>>> assert type(n) is int and n == 42
>>> n = toNumber("42")
>>> assert type(n) is int and n == 42
>>> n = toNumber("42.0")
>>> assert type(n) is float and n == 42.0
>>> n = toNumber("Foo", 42)
>>> assert type(n) is int and n == 42
>>> n = toNumber("Foo")
>>> assert n is None
]
variable[value] assign[=] name[default]
<ast.Try object at 0x7da18f722590>
return[name[value]]
|
keyword[def] identifier[toNumber] ( identifier[str] , identifier[default] = keyword[None] ):
literal[string]
identifier[value] = identifier[default]
keyword[try] :
keyword[if] identifier[str] . identifier[startswith] ( literal[string] ):
identifier[value] = identifier[int] ( identifier[str] , literal[int] )
keyword[else] :
keyword[try] :
identifier[value] = identifier[int] ( identifier[str] )
keyword[except] identifier[ValueError] :
identifier[value] = identifier[float] ( identifier[str] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[value]
|
def toNumber(str, default=None):
"""toNumber(str[, default]) -> integer | float | default
Converts the given string to a numeric value. The string may be a
hexadecimal, integer, or floating number. If string could not be
converted, default (None) is returned.
Examples:
>>> n = toNumber("0x2A")
>>> assert type(n) is int and n == 42
>>> n = toNumber("42")
>>> assert type(n) is int and n == 42
>>> n = toNumber("42.0")
>>> assert type(n) is float and n == 42.0
>>> n = toNumber("Foo", 42)
>>> assert type(n) is int and n == 42
>>> n = toNumber("Foo")
>>> assert n is None
"""
value = default
try:
if str.startswith('0x'):
value = int(str, 16) # depends on [control=['if'], data=[]]
else:
try:
value = int(str) # depends on [control=['try'], data=[]]
except ValueError:
value = float(str) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
return value
|
def adjoint(self):
    """Return the adjoint operator."""
    # A nonzero padding constant makes the operator affine rather than
    # linear, and an affine operator has no adjoint.
    if not self.is_linear:
        raise ValueError('operator with nonzero pad_const ({}) is not'
                         ' linear and has no adjoint'.format(self.pad_const))

    # The adjoint maps range back to domain, uses the adjoint finite-
    # difference method and padding, and is negated.
    adj_deriv = PartialDerivative(self.range, self.axis, self.domain,
                                  _ADJ_METHOD[self.method],
                                  _ADJ_PADDING[self.pad_mode],
                                  self.pad_const)
    return -adj_deriv
|
def function[adjoint, parameter[self]]:
constant[Return the adjoint operator.]
if <ast.UnaryOp object at 0x7da1b1e46410> begin[:]
<ast.Raise object at 0x7da1b1e464a0>
return[<ast.UnaryOp object at 0x7da1b1e46950>]
|
keyword[def] identifier[adjoint] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_linear] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[pad_const] ))
keyword[return] - identifier[PartialDerivative] ( identifier[self] . identifier[range] , identifier[self] . identifier[axis] , identifier[self] . identifier[domain] ,
identifier[_ADJ_METHOD] [ identifier[self] . identifier[method] ],
identifier[_ADJ_PADDING] [ identifier[self] . identifier[pad_mode] ],
identifier[self] . identifier[pad_const] )
|
def adjoint(self):
"""Return the adjoint operator."""
if not self.is_linear:
raise ValueError('operator with nonzero pad_const ({}) is not linear and has no adjoint'.format(self.pad_const)) # depends on [control=['if'], data=[]]
return -PartialDerivative(self.range, self.axis, self.domain, _ADJ_METHOD[self.method], _ADJ_PADDING[self.pad_mode], self.pad_const)
|
def short(self):
    '''Short-form of the unit title, excluding any unit date, as an instance
    of :class:`~eulxml.xmlmap.eadmap.UnitTitle` . Can be used with formatting
    anywhere the full form of the unittitle can be used.'''
    # Without a unitdate there is nothing to strip; the full title already
    # is the short form.
    if not self.unitdate:
        return self

    # Deep-copy the node so child elements (e.g. title or emph) survive and
    # the original title is left untouched, then drop the unitdate node.
    shortened = UnitTitle(node=deepcopy(self.node))
    shortened.node.remove(shortened.unitdate.node)
    return shortened
|
def function[short, parameter[self]]:
constant[Short-form of the unit title, excluding any unit date, as an instance
of :class:`~eulxml.xmlmap.eadmap.UnitTitle` . Can be used with formatting
anywhere the full form of the unittitle can be used.]
if <ast.UnaryOp object at 0x7da1b28799c0> begin[:]
return[name[self]]
variable[ut] assign[=] call[name[UnitTitle], parameter[]]
call[name[ut].node.remove, parameter[name[ut].unitdate.node]]
return[name[ut]]
|
keyword[def] identifier[short] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[unitdate] :
keyword[return] identifier[self]
identifier[ut] = identifier[UnitTitle] ( identifier[node] = identifier[deepcopy] ( identifier[self] . identifier[node] ))
identifier[ut] . identifier[node] . identifier[remove] ( identifier[ut] . identifier[unitdate] . identifier[node] )
keyword[return] identifier[ut]
|
def short(self):
"""Short-form of the unit title, excluding any unit date, as an instance
of :class:`~eulxml.xmlmap.eadmap.UnitTitle` . Can be used with formatting
anywhere the full form of the unittitle can be used."""
# if there is no unitdate to remove, just return the current object
if not self.unitdate:
return self # depends on [control=['if'], data=[]]
# preserve any child elements (e.g., title or emph)
# initialize a unittitle with a *copy* of the current node
ut = UnitTitle(node=deepcopy(self.node))
# remove the unitdate node and return
ut.node.remove(ut.unitdate.node)
return ut
|
def send_os_command(self, os_command_text, is_priority=False):
    """
    Send a command to the operating system running in this partition.

    Parameters:

      os_command_text (string): The text of the operating system command.

      is_priority (bool):
        Boolean controlling whether this is a priority operating system
        command, as follows:

        * If `True`, this message is treated as a priority operating
          system command.

        * If `False`, this message is not treated as a priority
          operating system command. The default.

    Returns:

      None

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    # POST the command to the partition's send-os-cmd operation endpoint.
    operation_uri = self.uri + '/operations/send-os-cmd'
    request_body = {
        'is-priority': is_priority,
        'operating-system-command-text': os_command_text,
    }
    self.manager.session.post(operation_uri, request_body)
|
def function[send_os_command, parameter[self, os_command_text, is_priority]]:
constant[
Send a command to the operating system running in this partition.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f722800>, <ast.Constant object at 0x7da18f720c40>], [<ast.Name object at 0x7da18f722980>, <ast.Name object at 0x7da18f7226e0>]]
call[name[self].manager.session.post, parameter[binary_operation[name[self].uri + constant[/operations/send-os-cmd]], name[body]]]
|
keyword[def] identifier[send_os_command] ( identifier[self] , identifier[os_command_text] , identifier[is_priority] = keyword[False] ):
literal[string]
identifier[body] ={ literal[string] : identifier[is_priority] ,
literal[string] : identifier[os_command_text] }
identifier[self] . identifier[manager] . identifier[session] . identifier[post] (
identifier[self] . identifier[uri] + literal[string] , identifier[body] )
|
def send_os_command(self, os_command_text, is_priority=False):
"""
Send a command to the operating system running in this partition.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'is-priority': is_priority, 'operating-system-command-text': os_command_text}
self.manager.session.post(self.uri + '/operations/send-os-cmd', body)
|
def size(self):
    """
    Size of LazyCell: the size of the intension plus accounting for
    excluded and included additions.

    The exclusions are assumed to be part of the set
    The inclusions are assumed to NOT be part of the intension
    """
    # Exclusions are counted inside the intension (subtract them);
    # inclusions are outside it (add them).
    adjustment = len(self.include) - len(self.exclude)
    return self._size_full_intension + adjustment
|
def function[size, parameter[self]]:
constant[
Size of LazyCell: the size of the intension plus accounting for
excluded and included additions.
The exclusions are assumed to be part of the set
The inclusions are assumed to NOT be part of the intension
]
return[binary_operation[binary_operation[name[self]._size_full_intension - call[name[len], parameter[name[self].exclude]]] + call[name[len], parameter[name[self].include]]]]
|
keyword[def] identifier[size] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[_size_full_intension] - identifier[len] ( identifier[self] . identifier[exclude] )+ identifier[len] ( identifier[self] . identifier[include] )
|
def size(self):
"""
Size of LazyCell: the size of the intension plus accounting for
excluded and included additions.
The exclusions are assumed to be part of the set
The inclusions are assumed to NOT be part of the intension
"""
return self._size_full_intension - len(self.exclude) + len(self.include)
|
def pypy_json_encode(value, pretty=False):
    """
    Encode ``value`` as a JSON unicode string.

    pypy DOES NOT OPTIMIZE GENERATOR CODE WELL

    :param value: object to serialize
    :param pretty: if True, delegate to pretty_json() for indented output
    :return: JSON string built via UnicodeBuilder on the fast path
    """
    # Module-level guard flag: True while a failed encode is being retried,
    # so that a second failure escalates from warning to error.
    global _dealing_with_problem
    if pretty:
        return pretty_json(value)
    try:
        # Fast path: stream the value into a pre-sized unicode builder.
        _buffer = UnicodeBuilder(2048)
        _value2json(value, _buffer)
        output = _buffer.build()
        return output
    except Exception as e:
        # THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
        from mo_logs import Log

        if _dealing_with_problem:
            # Already inside a retry: a repeated failure is fatal
            # (Log.error is expected to raise).
            Log.error("Serialization of JSON problems", e)
        else:
            Log.warning("Serialization of JSON problems", e)
        _dealing_with_problem = True
        try:
            # Retry with the slower, more diagnostic serializer.
            return pretty_json(value)
        except Exception as f:
            Log.error("problem serializing object", f)
        finally:
            # Always clear the guard so later calls may retry again.
            _dealing_with_problem = False
|
def function[pypy_json_encode, parameter[value, pretty]]:
constant[
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
]
<ast.Global object at 0x7da1b20aa0e0>
if name[pretty] begin[:]
return[call[name[pretty_json], parameter[name[value]]]]
<ast.Try object at 0x7da1b20a83a0>
|
keyword[def] identifier[pypy_json_encode] ( identifier[value] , identifier[pretty] = keyword[False] ):
literal[string]
keyword[global] identifier[_dealing_with_problem]
keyword[if] identifier[pretty] :
keyword[return] identifier[pretty_json] ( identifier[value] )
keyword[try] :
identifier[_buffer] = identifier[UnicodeBuilder] ( literal[int] )
identifier[_value2json] ( identifier[value] , identifier[_buffer] )
identifier[output] = identifier[_buffer] . identifier[build] ()
keyword[return] identifier[output]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[from] identifier[mo_logs] keyword[import] identifier[Log]
keyword[if] identifier[_dealing_with_problem] :
identifier[Log] . identifier[error] ( literal[string] , identifier[e] )
keyword[else] :
identifier[Log] . identifier[warning] ( literal[string] , identifier[e] )
identifier[_dealing_with_problem] = keyword[True]
keyword[try] :
keyword[return] identifier[pretty_json] ( identifier[value] )
keyword[except] identifier[Exception] keyword[as] identifier[f] :
identifier[Log] . identifier[error] ( literal[string] , identifier[f] )
keyword[finally] :
identifier[_dealing_with_problem] = keyword[False]
|
def pypy_json_encode(value, pretty=False):
"""
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
"""
global _dealing_with_problem
if pretty:
return pretty_json(value) # depends on [control=['if'], data=[]]
try:
_buffer = UnicodeBuilder(2048)
_value2json(value, _buffer)
output = _buffer.build()
return output # depends on [control=['try'], data=[]]
except Exception as e:
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from mo_logs import Log
if _dealing_with_problem:
Log.error('Serialization of JSON problems', e) # depends on [control=['if'], data=[]]
else:
Log.warning('Serialization of JSON problems', e)
_dealing_with_problem = True
try:
return pretty_json(value) # depends on [control=['try'], data=[]]
except Exception as f:
Log.error('problem serializing object', f) # depends on [control=['except'], data=['f']]
finally:
_dealing_with_problem = False # depends on [control=['except'], data=['e']]
|
def setup_pilotpoints_grid(ml=None, sr=None, ibound=None, prefix_dict=None,
                           every_n_cell=4,
                           use_ibound_zones=False,
                           pp_dir='.', tpl_dir='.',
                           shapename="pp.shp"):
    """ setup regularly-spaced (gridded) pilot point parameterization

    Parameters
    ----------
    ml : flopy.mbase
        a flopy mbase dervied type.  If None, sr must not be None.
    sr : flopy.utils.reference.SpatialReference
        a spatial reference use to locate the model grid in space.  If None,
        ml must not be None.  Default is None
    ibound : numpy.ndarray
        the modflow ibound integer array.  Used to set pilot points only in active areas.
        If None and ml is None, then pilot points are set in all rows and columns according to
        every_n_cell.  Default is None.
    prefix_dict : dict
        a dictionary of pilot point parameter prefix, layer pairs.  example : {"hk":[0,1,2,3]} would
        setup pilot points with the prefix "hk" for model layers 1 - 4 (zero based). If None, a generic set
        of pilot points with the "pp" prefix are setup for a generic nrowXncol grid. Default is None
    use_ibound_zones : bool
        a flag to use the greater-than-zero values in the ibound as pilot point zones.  If False,ibound
        values greater than zero are treated as a single zone.  Default is False.
    pp_dir : str
        directory to write pilot point files to.  Default is '.'
    tpl_dir : str
        directory to write pilot point template file to.  Default is '.'
    shapename : str
        name of shapefile to write that contains pilot point information. Default is "pp.shp"

    Returns
    -------
    pp_df : pandas.DataFrame
        a dataframe summarizing pilot point information (same information
        written to shapename

    """
    from . import pp_utils
    # Deprecated location: the implementation now lives in pp_utils.
    # (The warning previously misspelled the function name as
    # "setup_pilotpoint_grid"; it is spelled correctly here.)
    warnings.warn("setup_pilotpoints_grid has moved to pp_utils...", PyemuWarning)
    # Forward all arguments unchanged to the new implementation.
    return pp_utils.setup_pilotpoints_grid(ml=ml, sr=sr, ibound=ibound,
                                           prefix_dict=prefix_dict,
                                           every_n_cell=every_n_cell,
                                           use_ibound_zones=use_ibound_zones,
                                           pp_dir=pp_dir, tpl_dir=tpl_dir,
                                           shapename=shapename)
|
def function[setup_pilotpoints_grid, parameter[ml, sr, ibound, prefix_dict, every_n_cell, use_ibound_zones, pp_dir, tpl_dir, shapename]]:
constant[ setup regularly-spaced (gridded) pilot point parameterization
Parameters
----------
ml : flopy.mbase
a flopy mbase dervied type. If None, sr must not be None.
sr : flopy.utils.reference.SpatialReference
a spatial reference use to locate the model grid in space. If None,
ml must not be None. Default is None
ibound : numpy.ndarray
the modflow ibound integer array. Used to set pilot points only in active areas.
If None and ml is None, then pilot points are set in all rows and columns according to
every_n_cell. Default is None.
prefix_dict : dict
a dictionary of pilot point parameter prefix, layer pairs. example : {"hk":[0,1,2,3]} would
setup pilot points with the prefix "hk" for model layers 1 - 4 (zero based). If None, a generic set
of pilot points with the "pp" prefix are setup for a generic nrowXncol grid. Default is None
use_ibound_zones : bool
a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
values greater than zero are treated as a single zone. Default is False.
pp_dir : str
directory to write pilot point files to. Default is '.'
tpl_dir : str
directory to write pilot point template file to. Default is '.'
shapename : str
name of shapefile to write that containts pilot point information. Default is "pp.shp"
Returns
-------
pp_df : pandas.DataFrame
a dataframe summarizing pilot point information (same information
written to shapename
]
from relative_module[None] import module[pp_utils]
call[name[warnings].warn, parameter[constant[setup_pilotpoint_grid has moved to pp_utils...], name[PyemuWarning]]]
return[call[name[pp_utils].setup_pilotpoints_grid, parameter[]]]
|
keyword[def] identifier[setup_pilotpoints_grid] ( identifier[ml] = keyword[None] , identifier[sr] = keyword[None] , identifier[ibound] = keyword[None] , identifier[prefix_dict] = keyword[None] ,
identifier[every_n_cell] = literal[int] ,
identifier[use_ibound_zones] = keyword[False] ,
identifier[pp_dir] = literal[string] , identifier[tpl_dir] = literal[string] ,
identifier[shapename] = literal[string] ):
literal[string]
keyword[from] . keyword[import] identifier[pp_utils]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[PyemuWarning] )
keyword[return] identifier[pp_utils] . identifier[setup_pilotpoints_grid] ( identifier[ml] = identifier[ml] , identifier[sr] = identifier[sr] , identifier[ibound] = identifier[ibound] ,
identifier[prefix_dict] = identifier[prefix_dict] ,
identifier[every_n_cell] = identifier[every_n_cell] ,
identifier[use_ibound_zones] = identifier[use_ibound_zones] ,
identifier[pp_dir] = identifier[pp_dir] , identifier[tpl_dir] = identifier[tpl_dir] ,
identifier[shapename] = identifier[shapename] )
|
def setup_pilotpoints_grid(ml=None, sr=None, ibound=None, prefix_dict=None, every_n_cell=4, use_ibound_zones=False, pp_dir='.', tpl_dir='.', shapename='pp.shp'):
""" setup regularly-spaced (gridded) pilot point parameterization
Parameters
----------
ml : flopy.mbase
a flopy mbase dervied type. If None, sr must not be None.
sr : flopy.utils.reference.SpatialReference
a spatial reference use to locate the model grid in space. If None,
ml must not be None. Default is None
ibound : numpy.ndarray
the modflow ibound integer array. Used to set pilot points only in active areas.
If None and ml is None, then pilot points are set in all rows and columns according to
every_n_cell. Default is None.
prefix_dict : dict
a dictionary of pilot point parameter prefix, layer pairs. example : {"hk":[0,1,2,3]} would
setup pilot points with the prefix "hk" for model layers 1 - 4 (zero based). If None, a generic set
of pilot points with the "pp" prefix are setup for a generic nrowXncol grid. Default is None
use_ibound_zones : bool
a flag to use the greater-than-zero values in the ibound as pilot point zones. If False,ibound
values greater than zero are treated as a single zone. Default is False.
pp_dir : str
directory to write pilot point files to. Default is '.'
tpl_dir : str
directory to write pilot point template file to. Default is '.'
shapename : str
name of shapefile to write that containts pilot point information. Default is "pp.shp"
Returns
-------
pp_df : pandas.DataFrame
a dataframe summarizing pilot point information (same information
written to shapename
"""
from . import pp_utils
warnings.warn('setup_pilotpoint_grid has moved to pp_utils...', PyemuWarning)
return pp_utils.setup_pilotpoints_grid(ml=ml, sr=sr, ibound=ibound, prefix_dict=prefix_dict, every_n_cell=every_n_cell, use_ibound_zones=use_ibound_zones, pp_dir=pp_dir, tpl_dir=tpl_dir, shapename=shapename)
|
def definite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the definite article (der/die/das/die) for a given word.
    """
    # The lookup table is keyed on (first letter of gender, first three
    # letters of role), both lowercased.
    key = (gender[:1].lower(), role[:3].lower())
    return article_definite.get(key)
|
def function[definite_article, parameter[word, gender, role]]:
constant[ Returns the definite article (der/die/das/die) for a given word.
]
return[call[name[article_definite].get, parameter[tuple[[<ast.Call object at 0x7da1b26aece0>, <ast.Call object at 0x7da1b26afa00>]]]]]
|
keyword[def] identifier[definite_article] ( identifier[word] , identifier[gender] = identifier[MALE] , identifier[role] = identifier[SUBJECT] ):
literal[string]
keyword[return] identifier[article_definite] . identifier[get] (( identifier[gender] [: literal[int] ]. identifier[lower] (), identifier[role] [: literal[int] ]. identifier[lower] ()))
|
def definite_article(word, gender=MALE, role=SUBJECT):
""" Returns the definite article (der/die/das/die) for a given word.
"""
return article_definite.get((gender[:1].lower(), role[:3].lower()))
|
def _updateInferenceStats(self, statistics, objectName=None):
    """
    Updates the inference statistics.

    Parameters:
    ----------------------------
    @param statistics (dict)
           Dictionary in which to write the statistics

    @param objectName (str)
           Name of the inferred object, if known. Otherwise, set to None.
    """
    # Snapshot the current representations once; each is indexed by column.
    l4Active = self.getL4Representations()
    l4Predicted = self.getL4PredictedCells()
    l4PredictedActive = self.getL4PredictedActiveCells()
    l2Active = self.getL2Representations()
    tmPredictedActive = self.getTMPredictedActiveCells()
    tmNextPredicted = self.getTMNextPredictedCells()
    tmActive = self.getTMRepresentations()

    for col in xrange(self.numColumns):
        suffix = " C" + str(col)

        statistics["L4 Representation" + suffix].append(len(l4Active[col]))
        statistics["L4 Predicted" + suffix].append(len(l4Predicted[col]))
        statistics["L4 PredictedActive" + suffix].append(
            len(l4PredictedActive[col]))
        statistics["L2 Representation" + suffix].append(len(l2Active[col]))
        statistics["L4 Apical Segments" + suffix].append(
            len(self.L4Columns[col]._tm.getActiveApicalSegments()))
        statistics["L4 Basal Segments" + suffix].append(
            len(self.L4Columns[col]._tm.getActiveBasalSegments()))
        statistics["TM Basal Segments" + suffix].append(
            len(self.TMColumns[col]._tm.getActiveBasalSegments()))
        statistics["TM PredictedActive" + suffix].append(
            len(tmPredictedActive[col]))

        # Number of cells put into the predictive state by this input.
        statistics["TM NextPredicted" + suffix].append(
            len(tmNextPredicted[col]))

        # Indices of all active cells in the TM.
        statistics["TM Full Representation" + suffix].append(tmActive[col])

        # Indices of all active cells in L2.
        statistics["L2 Full Representation" + suffix].append(l2Active[col])

        # Insert the exact TM representation into the classifier only when
        # the number of predicted-active cells is potentially unique
        # (otherwise we say it failed to correctly predict this step).
        numPredictedActive = len(tmPredictedActive[col])
        if 0.5 * self.numInputBits < numPredictedActive < 1.5 * self.numInputBits:
            sdr = sorted(tmPredictedActive[col])
            self.classifier.learn(sdr, objectName, isSparse=self.numTMCells)

        # Record true overlap with the known object representation, if the
        # caller supplied an objectName we have an L2 representation for.
        if objectName in self.objectL2Representations:
            objectRepresentation = self.objectL2Representations[objectName]
            statistics["Overlap L2 with object" + suffix].append(
                len(objectRepresentation[col] & l2Active[col]))
|
def function[_updateInferenceStats, parameter[self, statistics, objectName]]:
constant[
Updates the inference statistics.
Parameters:
----------------------------
@param statistics (dict)
Dictionary in which to write the statistics
@param objectName (str)
Name of the inferred object, if known. Otherwise, set to None.
]
variable[L4Representations] assign[=] call[name[self].getL4Representations, parameter[]]
variable[L4PredictedCells] assign[=] call[name[self].getL4PredictedCells, parameter[]]
variable[L4PredictedActiveCells] assign[=] call[name[self].getL4PredictedActiveCells, parameter[]]
variable[L2Representation] assign[=] call[name[self].getL2Representations, parameter[]]
variable[TMPredictedActive] assign[=] call[name[self].getTMPredictedActiveCells, parameter[]]
variable[TMNextPredicted] assign[=] call[name[self].getTMNextPredictedCells, parameter[]]
variable[TMRepresentation] assign[=] call[name[self].getTMRepresentations, parameter[]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[self].numColumns]]] begin[:]
call[call[name[statistics]][binary_operation[constant[L4 Representation C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[L4Representations]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[L4 Predicted C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[L4PredictedCells]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[L4 PredictedActive C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[L4PredictedActiveCells]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[L2 Representation C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[L2Representation]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[L4 Apical Segments C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[call[name[self].L4Columns][name[i]]._tm.getActiveApicalSegments, parameter[]]]]]]
call[call[name[statistics]][binary_operation[constant[L4 Basal Segments C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[call[name[self].L4Columns][name[i]]._tm.getActiveBasalSegments, parameter[]]]]]]
call[call[name[statistics]][binary_operation[constant[TM Basal Segments C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[call[name[self].TMColumns][name[i]]._tm.getActiveBasalSegments, parameter[]]]]]]
call[call[name[statistics]][binary_operation[constant[TM PredictedActive C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[TMPredictedActive]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[TM NextPredicted C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[call[name[TMNextPredicted]][name[i]]]]]]
call[call[name[statistics]][binary_operation[constant[TM Full Representation C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[TMRepresentation]][name[i]]]]
call[call[name[statistics]][binary_operation[constant[L2 Full Representation C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[L2Representation]][name[i]]]]
if <ast.BoolOp object at 0x7da1b09009d0> begin[:]
variable[sdr] assign[=] call[name[list], parameter[call[name[TMPredictedActive]][name[i]]]]
call[name[sdr].sort, parameter[]]
call[name[self].classifier.learn, parameter[name[sdr], name[objectName]]]
if compare[name[objectName] in name[self].objectL2Representations] begin[:]
variable[objectRepresentation] assign[=] call[name[self].objectL2Representations][name[objectName]]
call[call[name[statistics]][binary_operation[constant[Overlap L2 with object C] + call[name[str], parameter[name[i]]]]].append, parameter[call[name[len], parameter[binary_operation[call[name[objectRepresentation]][name[i]] <ast.BitAnd object at 0x7da2590d6b60> call[name[L2Representation]][name[i]]]]]]]
|
keyword[def] identifier[_updateInferenceStats] ( identifier[self] , identifier[statistics] , identifier[objectName] = keyword[None] ):
literal[string]
identifier[L4Representations] = identifier[self] . identifier[getL4Representations] ()
identifier[L4PredictedCells] = identifier[self] . identifier[getL4PredictedCells] ()
identifier[L4PredictedActiveCells] = identifier[self] . identifier[getL4PredictedActiveCells] ()
identifier[L2Representation] = identifier[self] . identifier[getL2Representations] ()
identifier[TMPredictedActive] = identifier[self] . identifier[getTMPredictedActiveCells] ()
identifier[TMNextPredicted] = identifier[self] . identifier[getTMNextPredictedCells] ()
identifier[TMRepresentation] = identifier[self] . identifier[getTMRepresentations] ()
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[numColumns] ):
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[L4Representations] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[L4PredictedCells] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[L4PredictedActiveCells] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[L2Representation] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[self] . identifier[L4Columns] [ identifier[i] ]. identifier[_tm] . identifier[getActiveApicalSegments] ())
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[self] . identifier[L4Columns] [ identifier[i] ]. identifier[_tm] . identifier[getActiveBasalSegments] ())
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[self] . identifier[TMColumns] [ identifier[i] ]. identifier[_tm] . identifier[getActiveBasalSegments] ())
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[TMPredictedActive] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[TMNextPredicted] [ identifier[i] ])
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[TMRepresentation] [ identifier[i] ]
)
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[L2Representation] [ identifier[i] ]
)
keyword[if] (( identifier[len] ( identifier[TMPredictedActive] [ identifier[i] ])< literal[int] * identifier[self] . identifier[numInputBits] ) keyword[and]
( identifier[len] ( identifier[TMPredictedActive] [ identifier[i] ])> literal[int] * identifier[self] . identifier[numInputBits] )):
identifier[sdr] = identifier[list] ( identifier[TMPredictedActive] [ identifier[i] ])
identifier[sdr] . identifier[sort] ()
identifier[self] . identifier[classifier] . identifier[learn] ( identifier[sdr] , identifier[objectName] , identifier[isSparse] = identifier[self] . identifier[numTMCells] )
keyword[if] identifier[objectName] keyword[in] identifier[self] . identifier[objectL2Representations] :
identifier[objectRepresentation] = identifier[self] . identifier[objectL2Representations] [ identifier[objectName] ]
identifier[statistics] [ literal[string] + identifier[str] ( identifier[i] )]. identifier[append] (
identifier[len] ( identifier[objectRepresentation] [ identifier[i] ]& identifier[L2Representation] [ identifier[i] ])
)
|
def _updateInferenceStats(self, statistics, objectName=None):
    """
    Update the per-column inference statistics after one sensation.

    Parameters:
    ----------------------------
    @param statistics (dict)
             Dictionary of lists to which the statistics are appended
    @param objectName (str)
             Name of the inferred object, if known. Otherwise, set to None.
    """
    l4Active = self.getL4Representations()
    l4Predicted = self.getL4PredictedCells()
    l4PredictedActive = self.getL4PredictedActiveCells()
    l2Active = self.getL2Representations()
    tmPredictedActive = self.getTMPredictedActiveCells()
    tmNextPredicted = self.getTMNextPredictedCells()
    tmActive = self.getTMRepresentations()
    for col in xrange(self.numColumns):
        tag = "C" + str(col)
        # Per-column sparsity counts for the L4 and L2 layers.
        statistics["L4 Representation " + tag].append(len(l4Active[col]))
        statistics["L4 Predicted " + tag].append(len(l4Predicted[col]))
        statistics["L4 PredictedActive " + tag].append(len(l4PredictedActive[col]))
        statistics["L2 Representation " + tag].append(len(l2Active[col]))
        statistics["L4 Apical Segments " + tag].append(
            len(self.L4Columns[col]._tm.getActiveApicalSegments()))
        statistics["L4 Basal Segments " + tag].append(
            len(self.L4Columns[col]._tm.getActiveBasalSegments()))
        statistics["TM Basal Segments " + tag].append(
            len(self.TMColumns[col]._tm.getActiveBasalSegments()))
        statistics["TM PredictedActive " + tag].append(len(tmPredictedActive[col]))
        # The number of cells that are in predictive state as a result of
        # this input.
        statistics["TM NextPredicted " + tag].append(len(tmNextPredicted[col]))
        # Full SDRs (indices of all active cells), not just counts.
        statistics["TM Full Representation " + tag].append(tmActive[col])
        statistics["L2 Full Representation " + tag].append(l2Active[col])
        # Insert the exact TM representation into the classifier only when
        # the number of predicted-active cells is close enough to the
        # expected sparsity to be potentially unique; otherwise treat this
        # step as a failed prediction.
        if 0.5 * self.numInputBits < len(tmPredictedActive[col]) < 1.5 * self.numInputBits:
            sdr = sorted(tmPredictedActive[col])
            self.classifier.learn(sdr, objectName, isSparse=self.numTMCells)
        # Record the true overlap with the learned object, when known.
        if objectName in self.objectL2Representations:
            target = self.objectL2Representations[objectName]
            statistics["Overlap L2 with object " + tag].append(
                len(target[col] & l2Active[col]))
|
def ipinfo_ip_check(ip, timeout=10):
    """Checks ipinfo.io for basic WHOIS-type data on an IP address.

    :param ip: IPv4 address to look up.
    :param timeout: seconds to wait for the HTTP response (new optional
        parameter; the previous behavior of no timeout could block forever).
    :return: parsed JSON dict from ipinfo.io, or None if ``ip`` is not a
        valid IPv4 address.
    """
    if not is_IPv4Address(ip):
        return None
    # Always pass a timeout: requests.get() with no timeout blocks
    # indefinitely if the remote host never answers.
    response = requests.get('http://ipinfo.io/%s/json' % ip, timeout=timeout)
    return response.json()
|
def function[ipinfo_ip_check, parameter[ip]]:
constant[Checks ipinfo.io for basic WHOIS-type data on an IP address]
if <ast.UnaryOp object at 0x7da1b28ae080> begin[:]
return[constant[None]]
variable[response] assign[=] call[name[requests].get, parameter[binary_operation[constant[http://ipinfo.io/%s/json] <ast.Mod object at 0x7da2590d6920> name[ip]]]]
return[call[name[response].json, parameter[]]]
|
keyword[def] identifier[ipinfo_ip_check] ( identifier[ip] ):
literal[string]
keyword[if] keyword[not] identifier[is_IPv4Address] ( identifier[ip] ):
keyword[return] keyword[None]
identifier[response] = identifier[requests] . identifier[get] ( literal[string] % identifier[ip] )
keyword[return] identifier[response] . identifier[json] ()
|
def ipinfo_ip_check(ip):
    """Query ipinfo.io and return basic WHOIS-type data for an IP address."""
    if is_IPv4Address(ip):
        # Fetch the JSON endpoint for this address and decode the body.
        resp = requests.get('http://ipinfo.io/%s/json' % ip)
        return resp.json()
    # Not a valid IPv4 address: nothing to look up.
    return None
|
def intercepts(self, joinpoint):
    """Self target interception if self is enabled
    :param joinpoint: advices executor
    """
    if not self.enable:
        # Interception disabled: just run the underlying joinpoint.
        return joinpoint.proceed()
    # Expose this interceptor through the execution context, then delegate
    # to the interception callable bound on this instance.
    handler = getattr(self, Interceptor.INTERCEPTION)
    joinpoint.exec_ctx[Interceptor.INTERCEPTION] = self
    return handler(joinpoint)
|
def function[intercepts, parameter[self, joinpoint]]:
constant[Self target interception if self is enabled
:param joinpoint: advices executor
]
variable[result] assign[=] constant[None]
if name[self].enable begin[:]
variable[interception] assign[=] call[name[getattr], parameter[name[self], name[Interceptor].INTERCEPTION]]
call[name[joinpoint].exec_ctx][name[Interceptor].INTERCEPTION] assign[=] name[self]
variable[result] assign[=] call[name[interception], parameter[name[joinpoint]]]
return[name[result]]
|
keyword[def] identifier[intercepts] ( identifier[self] , identifier[joinpoint] ):
literal[string]
identifier[result] = keyword[None]
keyword[if] identifier[self] . identifier[enable] :
identifier[interception] = identifier[getattr] ( identifier[self] , identifier[Interceptor] . identifier[INTERCEPTION] )
identifier[joinpoint] . identifier[exec_ctx] [ identifier[Interceptor] . identifier[INTERCEPTION] ]= identifier[self]
identifier[result] = identifier[interception] ( identifier[joinpoint] )
keyword[else] :
identifier[result] = identifier[joinpoint] . identifier[proceed] ()
keyword[return] identifier[result]
|
def intercepts(self, joinpoint):
    """Run this interceptor's interception on *joinpoint* if enabled.

    When ``self.enable`` is true, the interceptor registers itself in the
    joinpoint execution context and delegates to its interception callable;
    otherwise the joinpoint simply proceeds.

    :param joinpoint: advices executor
    :return: result of the interception, or of ``joinpoint.proceed()``
    """
    result = None
    if self.enable:
        # Resolve the interception callable bound on this interceptor.
        interception = getattr(self, Interceptor.INTERCEPTION)
        # Make this interceptor visible to the interception via the context.
        joinpoint.exec_ctx[Interceptor.INTERCEPTION] = self
        result = interception(joinpoint)
    else:
        # Interception disabled: execute the underlying joinpoint directly.
        result = joinpoint.proceed()
    return result
|
def to_python(self, value):
    """Convert *value* to a plain URL string.

    Accepts a string (returned unchanged), a dict with a string ``'url'``
    entry (the entry is returned), or None (falls through, returning None).

    :raises ValidationError: if a dict lacks an 'url' key, the 'url' entry
        is not a string, or the value has any other non-None type.
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, dict):
        try:
            value = value['url']
        except KeyError:
            raise ValidationError("dictionary must contain an 'url' element")
        if not isinstance(value, str):
            raise ValidationError("field's url element must be a string")
        return value
    elif value is not None:
        # BUG FIX: the original `isinstance(value, None)` raised TypeError
        # (None is not a type); the intent is to reject any value that is
        # neither str, dict, nor None.
        raise ValidationError("field must be a string or a dict")
|
def function[to_python, parameter[self, value]]:
constant[Convert value if needed.]
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
return[name[value]]
|
keyword[def] identifier[to_python] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
keyword[return] identifier[value]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[try] :
identifier[value] = identifier[value] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValidationError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[str] ):
keyword[raise] identifier[ValidationError] ( literal[string] )
keyword[return] identifier[value]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[value] , keyword[None] ):
keyword[raise] identifier[ValidationError] ( literal[string] )
|
def to_python(self, value):
    """Convert *value* to a plain URL string.

    Accepts a string (returned unchanged), a dict with a string ``'url'``
    entry (the entry is returned), or None (falls through, returning None).

    :raises ValidationError: if a dict lacks an 'url' key, the 'url' entry
        is not a string, or the value has any other non-None type.
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, dict):
        try:
            value = value['url']
        except KeyError:
            raise ValidationError("dictionary must contain an 'url' element")
        if not isinstance(value, str):
            raise ValidationError("field's url element must be a string")
        return value
    elif value is not None:
        # BUG FIX: the original `isinstance(value, None)` raised TypeError
        # (None is not a type); the intent is to reject any value that is
        # neither str, dict, nor None.
        raise ValidationError('field must be a string or a dict')
|
def ckgpav(inst, sclkdp, tol, ref):
    """
    Get pointing (attitude) and angular velocity
    for a specified spacecraft clock time.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html
    :param inst: NAIF ID of instrument, spacecraft, or structure.
    :type inst: int
    :param sclkdp: Encoded spacecraft clock time.
    :type sclkdp: float
    :param tol: Time tolerance.
    :type tol: float
    :param ref: Reference frame.
    :type ref: str
    :return:
        C-matrix pointing data,
        Angular velocity vector,
        Output encoded spacecraft clock time,
        and a boolean "found" flag.
    :rtype: tuple
    """
    # Marshal the Python inputs into the ctypes values CSPICE expects.
    instId = ctypes.c_int(inst)
    clock = ctypes.c_double(sclkdp)
    tolerance = ctypes.c_double(tol)
    frame = stypes.stringToCharP(ref)
    # Output buffers filled in by ckgpav_c.
    cmat = stypes.emptyDoubleMatrix()
    avOut = stypes.emptyDoubleVector(3)
    clkout = ctypes.c_double()
    found = ctypes.c_int()
    libspice.ckgpav_c(instId, clock, tolerance, frame, cmat, avOut,
                      ctypes.byref(clkout), ctypes.byref(found))
    return (stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython(avOut),
            clkout.value, bool(found.value))
|
def function[ckgpav, parameter[inst, sclkdp, tol, ref]]:
constant[
Get pointing (attitude) and angular velocity
for a specified spacecraft clock time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html
:param inst: NAIF ID of instrument, spacecraft, or structure.
:type inst: int
:param sclkdp: Encoded spacecraft clock time.
:type sclkdp: float
:param tol: Time tolerance.
:type tol: float
:param ref: Reference frame.
:type ref: str
:return:
C-matrix pointing data,
Angular velocity vector,
Output encoded spacecraft clock time.
:rtype: tuple
]
variable[inst] assign[=] call[name[ctypes].c_int, parameter[name[inst]]]
variable[sclkdp] assign[=] call[name[ctypes].c_double, parameter[name[sclkdp]]]
variable[tol] assign[=] call[name[ctypes].c_double, parameter[name[tol]]]
variable[ref] assign[=] call[name[stypes].stringToCharP, parameter[name[ref]]]
variable[cmat] assign[=] call[name[stypes].emptyDoubleMatrix, parameter[]]
variable[av] assign[=] call[name[stypes].emptyDoubleVector, parameter[constant[3]]]
variable[clkout] assign[=] call[name[ctypes].c_double, parameter[]]
variable[found] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].ckgpav_c, parameter[name[inst], name[sclkdp], name[tol], name[ref], name[cmat], name[av], call[name[ctypes].byref, parameter[name[clkout]]], call[name[ctypes].byref, parameter[name[found]]]]]
return[tuple[[<ast.Call object at 0x7da20c6ab3d0>, <ast.Call object at 0x7da20c6abf70>, <ast.Attribute object at 0x7da20c6ab670>, <ast.Call object at 0x7da20c6a97b0>]]]
|
keyword[def] identifier[ckgpav] ( identifier[inst] , identifier[sclkdp] , identifier[tol] , identifier[ref] ):
literal[string]
identifier[inst] = identifier[ctypes] . identifier[c_int] ( identifier[inst] )
identifier[sclkdp] = identifier[ctypes] . identifier[c_double] ( identifier[sclkdp] )
identifier[tol] = identifier[ctypes] . identifier[c_double] ( identifier[tol] )
identifier[ref] = identifier[stypes] . identifier[stringToCharP] ( identifier[ref] )
identifier[cmat] = identifier[stypes] . identifier[emptyDoubleMatrix] ()
identifier[av] = identifier[stypes] . identifier[emptyDoubleVector] ( literal[int] )
identifier[clkout] = identifier[ctypes] . identifier[c_double] ()
identifier[found] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[ckgpav_c] ( identifier[inst] , identifier[sclkdp] , identifier[tol] , identifier[ref] , identifier[cmat] , identifier[av] , identifier[ctypes] . identifier[byref] ( identifier[clkout] ),
identifier[ctypes] . identifier[byref] ( identifier[found] ))
keyword[return] identifier[stypes] . identifier[cMatrixToNumpy] ( identifier[cmat] ), identifier[stypes] . identifier[cVectorToPython] (
identifier[av] ), identifier[clkout] . identifier[value] , identifier[bool] ( identifier[found] . identifier[value] )
|
def ckgpav(inst, sclkdp, tol, ref):
    """
    Get pointing (attitude) and angular velocity
    for a specified spacecraft clock time.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html
    :param inst: NAIF ID of instrument, spacecraft, or structure.
    :type inst: int
    :param sclkdp: Encoded spacecraft clock time.
    :type sclkdp: float
    :param tol: Time tolerance.
    :type tol: float
    :param ref: Reference frame.
    :type ref: str
    :return:
        C-matrix pointing data,
        Angular velocity vector,
        Output encoded spacecraft clock time,
        and a boolean "found" flag.
    :rtype: tuple
    """
    # Convert the Python inputs into the ctypes values expected by CSPICE.
    inst = ctypes.c_int(inst)
    sclkdp = ctypes.c_double(sclkdp)
    tol = ctypes.c_double(tol)
    ref = stypes.stringToCharP(ref)
    # Output buffers populated in place by the C routine.
    cmat = stypes.emptyDoubleMatrix()
    av = stypes.emptyDoubleVector(3)
    clkout = ctypes.c_double()
    found = ctypes.c_int()
    libspice.ckgpav_c(inst, sclkdp, tol, ref, cmat, av, ctypes.byref(clkout), ctypes.byref(found))
    return (stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython(av), clkout.value, bool(found.value))
|
def next(self):
    """Return 'next' version. Eg, next(1.2) is 1.2_"""
    if not self.tokens:
        # No tokens left to bump: the next version is unbounded.
        return Version.inf
    # Work on a copy so this version is left untouched; bump its last token.
    bumped = self.copy()
    last = bumped.tokens.pop()
    bumped.tokens.append(last.next())
    return bumped
|
def function[next, parameter[self]]:
constant[Return 'next' version. Eg, next(1.2) is 1.2_]
if name[self].tokens begin[:]
variable[other] assign[=] call[name[self].copy, parameter[]]
variable[tok] assign[=] call[name[other].tokens.pop, parameter[]]
call[name[other].tokens.append, parameter[call[name[tok].next, parameter[]]]]
return[name[other]]
|
keyword[def] identifier[next] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[tokens] :
identifier[other] = identifier[self] . identifier[copy] ()
identifier[tok] = identifier[other] . identifier[tokens] . identifier[pop] ()
identifier[other] . identifier[tokens] . identifier[append] ( identifier[tok] . identifier[next] ())
keyword[return] identifier[other]
keyword[else] :
keyword[return] identifier[Version] . identifier[inf]
|
def next(self):
    """Return the 'next' version. Eg, next(1.2) is 1.2_

    The receiver is not modified: a copy is made and its final token is
    replaced by that token's own next(). A tokenless version has no finite
    successor, so Version.inf is returned instead.
    """
    if self.tokens:
        other = self.copy()
        tok = other.tokens.pop()
        other.tokens.append(tok.next())
        return other
    else:
        return Version.inf
|
def log(self, message):
    """
    Print a log entry, or append it to the log file.

    :params message: the log message to print or write
    """
    # NOTE: the format string keeps its original Chinese field labels
    # ("log name", "time", "content") — they are part of the emitted log
    # output, not documentation.
    theLog = '[日志名:%s] [时间:%s] \n[内容:\n%s]\n\n' % (
        self.startName, timestamp_to_time(get_current_timestamp()), message)
    if not self.fileName:
        print(theLog)
    else:
        # Many threads funnel through here, but (per the original author)
        # access is serialized by the surrounding thread lock, so rotating
        # and writing the file is safe.
        self.check_log_file_size()
        self.__f.write(theLog)
|
def function[log, parameter[self, message]]:
constant[
打印或写日志
:params message: 要打印或要写的日志
]
variable[theLog] assign[=] binary_operation[constant[[日志名:%s] [时间:%s]
[内容:
%s]
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b14d1e40>, <ast.Call object at 0x7da1b14d06d0>, <ast.Name object at 0x7da1b14d2ef0>]]]
if <ast.UnaryOp object at 0x7da1b14d2e30> begin[:]
call[name[print], parameter[name[theLog]]]
|
keyword[def] identifier[log] ( identifier[self] , identifier[message] ):
literal[string]
identifier[theLog] = literal[string] %(
identifier[self] . identifier[startName] , identifier[timestamp_to_time] ( identifier[get_current_timestamp] ()), identifier[message] )
keyword[if] keyword[not] identifier[self] . identifier[fileName] :
identifier[print] ( identifier[theLog] )
keyword[else] :
identifier[self] . identifier[check_log_file_size] ()
identifier[self] . identifier[__f] . identifier[write] ( identifier[theLog] )
|
def log(self, message):
    """
    Print a log entry, or append it to the log file.

    :params message: the log message to print or write
    """
    # NOTE: the format string keeps its original Chinese field labels
    # ("log name", "time", "content") — they are part of the emitted log
    # output, not documentation.
    theLog = '[日志名:%s] [时间:%s] \n[内容:\n%s]\n\n' % (self.startName, timestamp_to_time(get_current_timestamp()), message)
    if not self.fileName:
        print(theLog)
    else:
        # Many threads funnel through here, but (per the original author)
        # access is serialized by the surrounding thread lock, so rotating
        # and writing the file is safe.
        self.check_log_file_size()
        self.__f.write(theLog)
|
def command_line_arguments(command_line_parameters):
  """Defines the command line parameters that are accepted.

  Parses *command_line_parameters* (a list of argument strings, or None to
  fall back to sys.argv) with argparse, expands ``--all`` into the full
  algorithm list, configures logger verbosity, and returns the namespace.
  """
  # create parser
  parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  # add parameters
  # - the algorithm to execute
  parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
  parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
  # - the database to choose
  parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
  # - the sub-directory where baseline results are stored
  parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
  # - the directory to write
  parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
  # - use the Idiap grid -- option is only useful if you are at Idiap
  parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
  # - run in parallel on the local machine
  parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
  # - perform ZT-normalization
  # NOTE(review): action='store_false' means passing -z *disables* ZT-norm
  # (args.zt_norm defaults to True), which contradicts the help text —
  # confirm which behavior is intended before changing either.
  parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be availabe for all databases).')
  # - just print?
  parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
  # - evaluate the algorithm (after it has finished)
  parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
  # TODO: add MIN-DCT measure
  # - other parameters that are passed to the underlying script
  parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
  bob.core.log.add_command_line_option(parser)
  args = parser.parse_args(command_line_parameters)
  if args.all:
    # --all overrides any explicit -a selection.
    args.algorithms = all_algorithms
  bob.core.log.set_verbosity_level(logger, args.verbose)
  return args
|
def function[command_line_arguments, parameter[command_line_parameters]]:
constant[Defines the command line parameters that are accepted.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-a], constant[--algorithms]]]
call[name[parser].add_argument, parameter[constant[--all]]]
call[name[parser].add_argument, parameter[constant[-d], constant[--database]]]
call[name[parser].add_argument, parameter[constant[-b], constant[--baseline-directory]]]
call[name[parser].add_argument, parameter[constant[-f], constant[--directory]]]
call[name[parser].add_argument, parameter[constant[-g], constant[--grid]]]
call[name[parser].add_argument, parameter[constant[-l], constant[--parallel]]]
call[name[parser].add_argument, parameter[constant[-z], constant[--zt-norm]]]
call[name[parser].add_argument, parameter[constant[-q], constant[--dry-run]]]
call[name[parser].add_argument, parameter[constant[-e], constant[--evaluate]]]
call[name[parser].add_argument, parameter[constant[parameters]]]
call[name[bob].core.log.add_command_line_option, parameter[name[parser]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[command_line_parameters]]]
if name[args].all begin[:]
name[args].algorithms assign[=] name[all_algorithms]
call[name[bob].core.log.set_verbosity_level, parameter[name[logger], name[args].verbose]]
return[name[args]]
|
keyword[def] identifier[command_line_arguments] ( identifier[command_line_parameters] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[ArgumentDefaultsHelpFormatter] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[choices] = identifier[all_algorithms] , identifier[default] =( literal[string] ,), identifier[nargs] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[choices] = identifier[available_databases] , identifier[default] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[int] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[nargs] = literal[string] , identifier[choices] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ), identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = identifier[argparse] . identifier[REMAINDER] , identifier[help] = literal[string] )
identifier[bob] . identifier[core] . identifier[log] . identifier[add_command_line_option] ( identifier[parser] )
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[command_line_parameters] )
keyword[if] identifier[args] . identifier[all] :
identifier[args] . identifier[algorithms] = identifier[all_algorithms]
identifier[bob] . identifier[core] . identifier[log] . identifier[set_verbosity_level] ( identifier[logger] , identifier[args] . identifier[verbose] )
keyword[return] identifier[args]
|
def command_line_arguments(command_line_parameters):
    """Defines the command line parameters that are accepted.

    Parses *command_line_parameters* (a list of argument strings, or None to
    fall back to sys.argv) with argparse, expands ``--all`` into the full
    algorithm list, configures logger verbosity, and returns the namespace.
    """
    # create parser
    parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # add parameters
    # - the algorithm to execute
    parser.add_argument('-a', '--algorithms', choices=all_algorithms, default=('gmm-voxforge',), nargs='+', help='Select one (or more) algorithms that you want to execute.')
    parser.add_argument('--all', action='store_true', help='Select all algorithms.')
    # - the database to choose
    parser.add_argument('-d', '--database', choices=available_databases, default='voxforge', help='The database on which the baseline algorithm is executed.')
    # - the sub-directory where baseline results are stored
    parser.add_argument('-b', '--baseline-directory', default='baselines', help='The sub-directory, where the baseline results are stored.')
    # - the directory to write
    parser.add_argument('-f', '--directory', help='The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
    # - use the Idiap grid -- option is only useful if you are at Idiap
    parser.add_argument('-g', '--grid', action='store_true', help='Execute the algorithm in the SGE grid.')
    # - run in parallel on the local machine
    parser.add_argument('-l', '--parallel', type=int, help='Run the algorithms in parallel on the local machine, using the given number of parallel threads')
    # - perform ZT-normalization
    # NOTE(review): action='store_false' means passing -z *disables* ZT-norm
    # (args.zt_norm defaults to True), which contradicts the help text —
    # confirm which behavior is intended before changing either.
    parser.add_argument('-z', '--zt-norm', action='store_false', help='Compute the ZT norm for the files (might not be availabe for all databases).')
    # - just print?
    parser.add_argument('-q', '--dry-run', action='store_true', help='Just print the commands, but do not execute them.')
    # - evaluate the algorithm (after it has finished)
    parser.add_argument('-e', '--evaluate', nargs='+', choices=('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help='Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
    # TODO: add MIN-DCT measure
    # - other parameters that are passed to the underlying script
    parser.add_argument('parameters', nargs=argparse.REMAINDER, help='Parameters directly passed to the verify.py script.')
    bob.core.log.add_command_line_option(parser)
    args = parser.parse_args(command_line_parameters)
    if args.all:
        # --all overrides any explicit -a selection.
        args.algorithms = all_algorithms
    bob.core.log.set_verbosity_level(logger, args.verbose)
    return args
|
def save_freesurfer_morph(filename, obj, face_count=0):
    '''
    Write *obj* to *filename* as a FreeSurfer morphometry (curv) file.

    Thin wrapper around nibabel.freesurfer.io's write_morph_data; the
    face_count argument is forwarded as its fnum parameter. The given
    filename is returned so calls can be chained.
    '''
    fsio.write_morph_data(filename, obj, fnum=face_count)
    return filename
|
def function[save_freesurfer_morph, parameter[filename, obj, face_count]]:
constant[
save_freesurfer_morph(filename, obj) saves the given object using nibabel.freesurfer.io's
write_morph_data function, and returns the given filename.
]
call[name[fsio].write_morph_data, parameter[name[filename], name[obj]]]
return[name[filename]]
|
keyword[def] identifier[save_freesurfer_morph] ( identifier[filename] , identifier[obj] , identifier[face_count] = literal[int] ):
literal[string]
identifier[fsio] . identifier[write_morph_data] ( identifier[filename] , identifier[obj] , identifier[fnum] = identifier[face_count] )
keyword[return] identifier[filename]
|
def save_freesurfer_morph(filename, obj, face_count=0):
    """
    save_freesurfer_morph(filename, obj) saves the given object using
    nibabel.freesurfer.io's write_morph_data function, and returns the given
    filename. *face_count* is forwarded as write_morph_data's ``fnum``
    argument (defaults to 0).
    """
    fsio.write_morph_data(filename, obj, fnum=face_count)
    return filename
|
def mpl_rc_context(f):
    """
    Decorator for MPLPlot methods that applies the matplotlib rc params
    stored in the plot's ``fig_rcparams`` while the method runs.

    The wrapped method executes inside an ``_rc_context`` block so any rc
    settings it touches are restored afterwards.
    """
    # Local import keeps this block self-contained; resolved once per
    # decoration, not per call.
    from functools import wraps

    @wraps(f)  # preserve __name__/__doc__ of the decorated method
    def wrapper(self, *args, **kwargs):
        with _rc_context(self.fig_rcparams):
            return f(self, *args, **kwargs)
    return wrapper
|
def function[mpl_rc_context, parameter[f]]:
constant[
Decorator for MPLPlot methods applying the matplotlib rc params
in the plots fig_rcparams while when method is called.
]
def function[wrapper, parameter[self]]:
with call[name[_rc_context], parameter[name[self].fig_rcparams]] begin[:]
return[call[name[f], parameter[name[self], <ast.Starred object at 0x7da20c6aaf80>]]]
return[name[wrapper]]
|
keyword[def] identifier[mpl_rc_context] ( identifier[f] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[with] identifier[_rc_context] ( identifier[self] . identifier[fig_rcparams] ):
keyword[return] identifier[f] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
|
def mpl_rc_context(f):
    """
    Decorator for MPLPlot methods applying the matplotlib rc params
    in the plot's fig_rcparams while the method is called.
    """
    def wrapper(self, *args, **kwargs):
        # Run the method inside an rc-params context so any settings it
        # relies on are active only for the duration of the call.
        with _rc_context(self.fig_rcparams):
            return f(self, *args, **kwargs)
    return wrapper
|
def _pkcs1imify(self, data):
    """
    Wrap a 20-byte SHA-1 digest in an EMSA-PKCS1-v1_5 encoded block as
    large as the key's modulus N:  00 01 <FF padding> 00 DigestInfo digest.
    """
    # Total block length equals the byte length of the modulus.
    block_len = len(util.deflate_long(self.n, 0))
    # 3 accounts for the two leading marker bytes and the 00 separator.
    pad_len = block_len - len(SHA1_DIGESTINFO) - len(data) - 3
    padding = max_byte * pad_len
    return zero_byte + one_byte + padding + zero_byte + SHA1_DIGESTINFO + data
|
def function[_pkcs1imify, parameter[self, data]]:
constant[
turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's "emsa-pkcs1-v1_5" encoding. totally bizarre.
]
variable[size] assign[=] call[name[len], parameter[call[name[util].deflate_long, parameter[name[self].n, constant[0]]]]]
variable[filler] assign[=] binary_operation[name[max_byte] * binary_operation[binary_operation[binary_operation[name[size] - call[name[len], parameter[name[SHA1_DIGESTINFO]]]] - call[name[len], parameter[name[data]]]] - constant[3]]]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[zero_byte] + name[one_byte]] + name[filler]] + name[zero_byte]] + name[SHA1_DIGESTINFO]] + name[data]]]
|
keyword[def] identifier[_pkcs1imify] ( identifier[self] , identifier[data] ):
literal[string]
identifier[size] = identifier[len] ( identifier[util] . identifier[deflate_long] ( identifier[self] . identifier[n] , literal[int] ))
identifier[filler] = identifier[max_byte] *( identifier[size] - identifier[len] ( identifier[SHA1_DIGESTINFO] )- identifier[len] ( identifier[data] )- literal[int] )
keyword[return] identifier[zero_byte] + identifier[one_byte] + identifier[filler] + identifier[zero_byte] + identifier[SHA1_DIGESTINFO] + identifier[data]
|
def _pkcs1imify(self, data):
    """
    turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
    using PKCS1's "emsa-pkcs1-v1_5" encoding. totally bizarre.
    """
    # Length in bytes of the key modulus N; the padded block must match it.
    size = len(util.deflate_long(self.n, 0))
    # 3 = the 0x00/0x01 leading markers plus the 0x00 separator byte.
    filler = max_byte * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
    return zero_byte + one_byte + filler + zero_byte + SHA1_DIGESTINFO + data
|
def get_unavailable_brokers(zk, partition_metadata):
    """Return the brokers that should hold replicas of the given partition
    but are absent from its currently-available replica set.

    Computed as (replicas registered in ZooKeeper) - (replicas reported in
    the partition metadata).
    """
    topic = partition_metadata.topic
    partition = partition_metadata.partition
    topic_data = zk.get_topics(topic)
    # Replica assignment as registered in ZooKeeper for this partition.
    registered = topic_data[topic]['partitions'][str(partition)]['replicas']
    return set(registered) - set(partition_metadata.replicas)
|
def function[get_unavailable_brokers, parameter[zk, partition_metadata]]:
constant[Returns the set of unavailable brokers from the difference of replica
set of given partition to the set of available replicas.
]
variable[topic_data] assign[=] call[name[zk].get_topics, parameter[name[partition_metadata].topic]]
variable[topic] assign[=] name[partition_metadata].topic
variable[partition] assign[=] name[partition_metadata].partition
variable[expected_replicas] assign[=] call[name[set], parameter[call[call[call[call[name[topic_data]][name[topic]]][constant[partitions]]][call[name[str], parameter[name[partition]]]]][constant[replicas]]]]
variable[available_replicas] assign[=] call[name[set], parameter[name[partition_metadata].replicas]]
return[binary_operation[name[expected_replicas] - name[available_replicas]]]
|
keyword[def] identifier[get_unavailable_brokers] ( identifier[zk] , identifier[partition_metadata] ):
literal[string]
identifier[topic_data] = identifier[zk] . identifier[get_topics] ( identifier[partition_metadata] . identifier[topic] )
identifier[topic] = identifier[partition_metadata] . identifier[topic]
identifier[partition] = identifier[partition_metadata] . identifier[partition]
identifier[expected_replicas] = identifier[set] ( identifier[topic_data] [ identifier[topic] ][ literal[string] ][ identifier[str] ( identifier[partition] )][ literal[string] ])
identifier[available_replicas] = identifier[set] ( identifier[partition_metadata] . identifier[replicas] )
keyword[return] identifier[expected_replicas] - identifier[available_replicas]
|
def get_unavailable_brokers(zk, partition_metadata):
    """Returns the set of unavailable brokers from the difference of replica
    set of given partition to the set of available replicas.
    """
    topic_data = zk.get_topics(partition_metadata.topic)
    topic = partition_metadata.topic
    partition = partition_metadata.partition
    # Replica assignment registered in ZooKeeper (partition ids are string keys).
    expected_replicas = set(topic_data[topic]['partitions'][str(partition)]['replicas'])
    # Replicas currently reported as available in the partition metadata.
    available_replicas = set(partition_metadata.replicas)
    return expected_replicas - available_replicas
|
def update_app_icon(self):
    """
    Refresh the Qt application icon with a preview of the render window,
    unless the user appears to be resizing the window.
    """
    if os.name == 'nt' or not hasattr(self, '_last_window_size'):  # pragma: no cover
        # Icon updates are never attempted on Windows.
        return
    now = time.time()
    # Only refresh when the window size has been stable since the last
    # render (user is not resizing), enough time has passed, and the
    # camera has actually moved. Short-circuit order matters: the camera
    # position is read only when the cheaper checks pass.
    if (self._last_window_size == self.window_size
            and now - self._last_update_time > BackgroundPlotter.ICON_TIME_STEP
            and self._last_camera_pos != self.camera_position):  # pragma: no cover
        img = pad_image(self.image)
        qimage = QtGui.QImage(img.copy(), img.shape[1], img.shape[0],
                              QtGui.QImage.Format_RGB888)
        self.app.setWindowIcon(QtGui.QIcon(QtGui.QPixmap.fromImage(qimage)))
        # Remember when/where we last rendered the icon.
        self._last_update_time = now
        self._last_camera_pos = self.camera_position
    # Always record the current size so a resize can be detected next call.
    self._last_window_size = self.window_size
|
def function[update_app_icon, parameter[self]]:
constant[
Update the app icon if the user is not trying to resize the window.
]
if <ast.BoolOp object at 0x7da20c6e5a80> begin[:]
return[None]
variable[cur_time] assign[=] call[name[time].time, parameter[]]
if compare[name[self]._last_window_size not_equal[!=] name[self].window_size] begin[:]
pass
name[self]._last_window_size assign[=] name[self].window_size
|
keyword[def] identifier[update_app_icon] ( identifier[self] ):
literal[string]
keyword[if] identifier[os] . identifier[name] == literal[string] keyword[or] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return]
identifier[cur_time] = identifier[time] . identifier[time] ()
keyword[if] identifier[self] . identifier[_last_window_size] != identifier[self] . identifier[window_size] :
keyword[pass]
keyword[elif] (( identifier[cur_time] - identifier[self] . identifier[_last_update_time] > identifier[BackgroundPlotter] . identifier[ICON_TIME_STEP] )
keyword[and] identifier[self] . identifier[_last_camera_pos] != identifier[self] . identifier[camera_position] ):
identifier[img] = identifier[pad_image] ( identifier[self] . identifier[image] )
identifier[qimage] = identifier[QtGui] . identifier[QImage] ( identifier[img] . identifier[copy] (), identifier[img] . identifier[shape] [ literal[int] ],
identifier[img] . identifier[shape] [ literal[int] ], identifier[QtGui] . identifier[QImage] . identifier[Format_RGB888] )
identifier[icon] = identifier[QtGui] . identifier[QIcon] ( identifier[QtGui] . identifier[QPixmap] . identifier[fromImage] ( identifier[qimage] ))
identifier[self] . identifier[app] . identifier[setWindowIcon] ( identifier[icon] )
identifier[self] . identifier[_last_update_time] = identifier[cur_time]
identifier[self] . identifier[_last_camera_pos] = identifier[self] . identifier[camera_position]
identifier[self] . identifier[_last_window_size] = identifier[self] . identifier[window_size]
|
def update_app_icon(self):
    """
    Update the app icon if the user is not trying to resize the window.
    """
    if os.name == 'nt' or not hasattr(self, '_last_window_size'):  # pragma: no cover
        # DO NOT EVEN ATTEMPT TO UPDATE ICON ON WINDOWS
        return
    cur_time = time.time()
    if self._last_window_size != self.window_size:  # pragma: no cover
        # Window size hasn't remained constant since last render.
        # This means the user is resizing it so ignore update.
        pass
    elif cur_time - self._last_update_time > BackgroundPlotter.ICON_TIME_STEP and self._last_camera_pos != self.camera_position:
        # its been a while since last update OR
        # the camera position has changed and its been at least one second
        # Update app icon as preview of the window
        img = pad_image(self.image)
        qimage = QtGui.QImage(img.copy(), img.shape[1], img.shape[0], QtGui.QImage.Format_RGB888)
        icon = QtGui.QIcon(QtGui.QPixmap.fromImage(qimage))
        self.app.setWindowIcon(icon)
        # Update trackers for the icon refresh just performed.
        self._last_update_time = cur_time
        self._last_camera_pos = self.camera_position
    # Always record the current size so resizing is detected next call.
    self._last_window_size = self.window_size
|
def decode_payload(self, specialize=False):
    """Decode payload from the element passed to the stanza constructor.

    Iterates over stanza children and creates StanzaPayload objects for
    them. Called automatically by `get_payload()` and other methods that
    access the payload.

    For the `Stanza` class stanza namespace child elements will also be
    included as the payload. For subclasses these are not considered
    payload.

    :param specialize: when true, build payload objects via
        `payload_factory` instead of plain `XMLPayload` wrappers.
    :raises ValueError: if the stanza was not constructed from an element.
    """
    if self._payload is not None:
        # already decoded
        return
    if self._element is None:
        # Fixed: the original literal had a stray adjacent "" concatenated on.
        raise ValueError("This stanza has no element to decode")
    factory = payload_factory if specialize else XMLPayload
    payload = []
    for child in self._element:
        # For subclasses, children in the stanza namespace are structural,
        # not payload -- skip them.
        if self.__class__ is not Stanza and child.tag.startswith(self._ns_prefix):
            continue
        payload.append(factory(child))
    self._payload = payload
|
def function[decode_payload, parameter[self, specialize]]:
constant[Decode payload from the element passed to the stanza constructor.
Iterates over stanza children and creates StanzaPayload objects for
them. Called automatically by `get_payload()` and other methods that
access the payload.
For the `Stanza` class stanza namespace child elements will also be
included as the payload. For subclasses these are no considered
payload.]
if compare[name[self]._payload is_not constant[None]] begin[:]
return[None]
if compare[name[self]._element is constant[None]] begin[:]
<ast.Raise object at 0x7da1b004f6d0>
variable[payload] assign[=] list[[]]
if name[specialize] begin[:]
variable[factory] assign[=] name[payload_factory]
for taget[name[child]] in starred[name[self]._element] begin[:]
if compare[name[self].__class__ is_not name[Stanza]] begin[:]
if call[name[child].tag.startswith, parameter[name[self]._ns_prefix]] begin[:]
continue
call[name[payload].append, parameter[call[name[factory], parameter[name[child]]]]]
name[self]._payload assign[=] name[payload]
|
keyword[def] identifier[decode_payload] ( identifier[self] , identifier[specialize] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[_payload] keyword[is] keyword[not] keyword[None] :
keyword[return]
keyword[if] identifier[self] . identifier[_element] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] literal[string] )
identifier[payload] =[]
keyword[if] identifier[specialize] :
identifier[factory] = identifier[payload_factory]
keyword[else] :
identifier[factory] = identifier[XMLPayload]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[_element] :
keyword[if] identifier[self] . identifier[__class__] keyword[is] keyword[not] identifier[Stanza] :
keyword[if] identifier[child] . identifier[tag] . identifier[startswith] ( identifier[self] . identifier[_ns_prefix] ):
keyword[continue]
identifier[payload] . identifier[append] ( identifier[factory] ( identifier[child] ))
identifier[self] . identifier[_payload] = identifier[payload]
|
def decode_payload(self, specialize=False):
    """Decode payload from the element passed to the stanza constructor.
    Iterates over stanza children and creates StanzaPayload objects for
    them. Called automatically by `get_payload()` and other methods that
    access the payload.
    For the `Stanza` class stanza namespace child elements will also be
    included as the payload. For subclasses these are no considered
    payload."""
    if self._payload is not None:
        # already decoded
        return
    if self._element is None:
        raise ValueError('This stanza has no element to decode')
    payload = []
    if specialize:
        factory = payload_factory
    else:
        factory = XMLPayload
    for child in self._element:
        if self.__class__ is not Stanza:
            # Subclasses: children in the stanza's own namespace are
            # structural elements, not payload -- skip them.
            if child.tag.startswith(self._ns_prefix):
                continue
        payload.append(factory(child))
    self._payload = payload
|
def variable_map_items(variable_map):
    """Yields an iterator over (string, variable) pairs in the variable map.

    In general, variable maps map variable names to either a `tf.Variable`,
    or a list of `tf.Variable`s (in case of sliced variables); list/tuple
    values are flattened into one pair per contained variable.

    Args:
      variable_map: dict, variable map over which to iterate.

    Yields:
      (string, tf.Variable) pairs.
    """
    # dict.items() behaves identically on Python 2 and 3 here; the six
    # shim is unnecessary.
    for key, var_or_vars in variable_map.items():
        if isinstance(var_or_vars, (list, tuple)):
            for variable in var_or_vars:
                yield key, variable
        else:
            yield key, var_or_vars
|
def function[variable_map_items, parameter[variable_map]]:
constant[Yields an iterator over (string, variable) pairs in the variable map.
In general, variable maps map variable names to either a `tf.Variable`, or
list of `tf.Variable`s (in case of sliced variables).
Args:
variable_map: dict, variable map over which to iterate.
Yields:
(string, tf.Variable) pairs.
]
for taget[tuple[[<ast.Name object at 0x7da1b1c485e0>, <ast.Name object at 0x7da1b1c48640>]]] in starred[call[name[six].iteritems, parameter[name[variable_map]]]] begin[:]
if call[name[isinstance], parameter[name[var_or_vars], tuple[[<ast.Name object at 0x7da1b1c4b670>, <ast.Name object at 0x7da1b1c4a3b0>]]]] begin[:]
for taget[name[variable]] in starred[name[var_or_vars]] begin[:]
<ast.Yield object at 0x7da1b1c49150>
|
keyword[def] identifier[variable_map_items] ( identifier[variable_map] ):
literal[string]
keyword[for] identifier[key] , identifier[var_or_vars] keyword[in] identifier[six] . identifier[iteritems] ( identifier[variable_map] ):
keyword[if] identifier[isinstance] ( identifier[var_or_vars] ,( identifier[list] , identifier[tuple] )):
keyword[for] identifier[variable] keyword[in] identifier[var_or_vars] :
keyword[yield] identifier[key] , identifier[variable]
keyword[else] :
keyword[yield] identifier[key] , identifier[var_or_vars]
|
def variable_map_items(variable_map):
    """Yields an iterator over (string, variable) pairs in the variable map.
    In general, variable maps map variable names to either a `tf.Variable`, or
    list of `tf.Variable`s (in case of sliced variables).
    Args:
      variable_map: dict, variable map over which to iterate.
    Yields:
      (string, tf.Variable) pairs.
    """
    # NOTE(review): six.iteritems is a py2 compatibility shim; on py3-only
    # code this could simply be variable_map.items().
    for (key, var_or_vars) in six.iteritems(variable_map):
        if isinstance(var_or_vars, (list, tuple)):
            # Sliced variable: emit one pair per slice, same key.
            for variable in var_or_vars:
                yield (key, variable)
        else:
            yield (key, var_or_vars)
|
def convert_body_to_bytes(resp):
    """
    If the request body is a string, encode it to bytes (for python3 support).

    By default yaml serializes to utf-8 encoded bytestrings.
    When this cassette is loaded by python3, it's automatically decoded
    into unicode strings. This makes sure that it stays a bytestring, since
    that's what all the internal httplib machinery is expecting.
    For more info on py3 yaml:
    http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support

    The response dict is modified in place and also returned.
    """
    try:
        body = resp['body']['string']
        # `bytes` is an alias of `str` on py2 and the native type on py3,
        # so this matches six.binary_type on both majors.
        if body is not None and not isinstance(body, bytes):
            resp['body']['string'] = body.encode('utf-8')
    except (KeyError, TypeError, UnicodeEncodeError):
        # The thing we were converting either wasn't a dictionary or didn't
        # have the keys we were expecting. Some of the tests just serialize
        # and deserialize a string.
        # Also, sometimes the thing actually is binary, so if you can't
        # encode it, just give up.
        pass
    return resp
|
def function[convert_body_to_bytes, parameter[resp]]:
constant[
If the request body is a string, encode it to bytes (for python3 support)
By default yaml serializes to utf-8 encoded bytestrings.
When this cassette is loaded by python3, it's automatically decoded
into unicode strings. This makes sure that it stays a bytestring, since
that's what all the internal httplib machinery is expecting.
For more info on py3 yaml:
http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support
]
<ast.Try object at 0x7da1b1677df0>
return[name[resp]]
|
keyword[def] identifier[convert_body_to_bytes] ( identifier[resp] ):
literal[string]
keyword[try] :
keyword[if] identifier[resp] [ literal[string] ][ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[resp] [ literal[string] ][ literal[string] ], identifier[six] . identifier[binary_type] ):
identifier[resp] [ literal[string] ][ literal[string] ]= identifier[resp] [ literal[string] ][ literal[string] ]. identifier[encode] ( literal[string] )
keyword[except] ( identifier[KeyError] , identifier[TypeError] , identifier[UnicodeEncodeError] ):
keyword[pass]
keyword[return] identifier[resp]
|
def convert_body_to_bytes(resp):
    """
    If the request body is a string, encode it to bytes (for python3 support)
    By default yaml serializes to utf-8 encoded bytestrings.
    When this cassette is loaded by python3, it's automatically decoded
    into unicode strings. This makes sure that it stays a bytestring, since
    that's what all the internal httplib machinery is expecting.
    For more info on py3 yaml:
    http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support
    """
    try:
        # Only re-encode text bodies; leave None and existing bytes alone.
        if resp['body']['string'] is not None and (not isinstance(resp['body']['string'], six.binary_type)):
            resp['body']['string'] = resp['body']['string'].encode('utf-8')
    except (KeyError, TypeError, UnicodeEncodeError):
        # The thing we were converting either wasn't a dictionary or didn't
        # have the keys we were expecting. Some of the tests just serialize
        # and deserialize a string.
        # Also, sometimes the thing actually is binary, so if you can't encode
        # it, just give up.
        pass
    return resp
|
def dependency(self, node1, node2):
    """Record that *node1* depends on *node2* (adds edge node2 -> node1)."""
    for node in (node1, node2):
        self.graph.add_node(node)
    # Edge points from the dependency to the dependent node.
    self.graph.add_edge(node2, node1)
|
def function[dependency, parameter[self, node1, node2]]:
constant[indicate that node1 depends on node2]
call[name[self].graph.add_node, parameter[name[node1]]]
call[name[self].graph.add_node, parameter[name[node2]]]
call[name[self].graph.add_edge, parameter[name[node2], name[node1]]]
|
keyword[def] identifier[dependency] ( identifier[self] , identifier[node1] , identifier[node2] ):
literal[string]
identifier[self] . identifier[graph] . identifier[add_node] ( identifier[node1] )
identifier[self] . identifier[graph] . identifier[add_node] ( identifier[node2] )
identifier[self] . identifier[graph] . identifier[add_edge] ( identifier[node2] , identifier[node1] )
|
def dependency(self, node1, node2):
    """indicate that node1 depends on node2"""
    self.graph.add_node(node1)
    self.graph.add_node(node2)
    # Edge runs from the dependency (node2) to the dependent node (node1).
    self.graph.add_edge(node2, node1)
|
def update_clipboard(self, text):
    '''Copy *text* to the system clipboard and show a toast notification.'''
    clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    clipboard.set_text(text, -1)
    # Translate the template first, then format it. The original called
    # _('...'.format(text)), which looks up the already-formatted string in
    # the gettext catalog and therefore can never match a translation.
    self.toast(_('{0} copied to clipboard').format(text))
|
def function[update_clipboard, parameter[self, text]]:
constant[将文本复制到系统剪贴板里面]
variable[clipboard] assign[=] call[name[Gtk].Clipboard.get, parameter[name[Gdk].SELECTION_CLIPBOARD]]
call[name[clipboard].set_text, parameter[name[text], <ast.UnaryOp object at 0x7da1b1d52650>]]
call[name[self].toast, parameter[call[name[_], parameter[call[constant[{0} copied to clipboard].format, parameter[name[text]]]]]]]
|
keyword[def] identifier[update_clipboard] ( identifier[self] , identifier[text] ):
literal[string]
identifier[clipboard] = identifier[Gtk] . identifier[Clipboard] . identifier[get] ( identifier[Gdk] . identifier[SELECTION_CLIPBOARD] )
identifier[clipboard] . identifier[set_text] ( identifier[text] ,- literal[int] )
identifier[self] . identifier[toast] ( identifier[_] ( literal[string] . identifier[format] ( identifier[text] )))
|
def update_clipboard(self, text):
    """Copy the given text to the system clipboard and show a toast."""
    clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    clipboard.set_text(text, -1)
    # NOTE(review): formatting happens before the gettext lookup, so the
    # catalog entry '{0} copied to clipboard' can never match -- the
    # translation is likely a no-op here. Verify against the .po files.
    self.toast(_('{0} copied to clipboard'.format(text)))
|
def queue_declare(self, queue, durable, exclusive, auto_delete,
                  warn_if_exists=False, arguments=None):
    """Declare a named queue on the underlying channel.

    When *warn_if_exists* is true and the queue already exists, a
    QueueAlreadyExistsWarning is issued before declaring.
    """
    if warn_if_exists and self.queue_exists(queue):
        warnings.warn(
            QueueAlreadyExistsWarning(QueueAlreadyExistsWarning.__doc__))
    return self.channel.queue_declare(
        queue=queue,
        durable=durable,
        exclusive=exclusive,
        auto_delete=auto_delete,
        arguments=arguments,
    )
|
def function[queue_declare, parameter[self, queue, durable, exclusive, auto_delete, warn_if_exists, arguments]]:
constant[Declare a named queue.]
if <ast.BoolOp object at 0x7da1b0fc44c0> begin[:]
call[name[warnings].warn, parameter[call[name[QueueAlreadyExistsWarning], parameter[name[QueueAlreadyExistsWarning].__doc__]]]]
return[call[name[self].channel.queue_declare, parameter[]]]
|
keyword[def] identifier[queue_declare] ( identifier[self] , identifier[queue] , identifier[durable] , identifier[exclusive] , identifier[auto_delete] ,
identifier[warn_if_exists] = keyword[False] , identifier[arguments] = keyword[None] ):
literal[string]
keyword[if] identifier[warn_if_exists] keyword[and] identifier[self] . identifier[queue_exists] ( identifier[queue] ):
identifier[warnings] . identifier[warn] ( identifier[QueueAlreadyExistsWarning] (
identifier[QueueAlreadyExistsWarning] . identifier[__doc__] ))
keyword[return] identifier[self] . identifier[channel] . identifier[queue_declare] ( identifier[queue] = identifier[queue] ,
identifier[durable] = identifier[durable] ,
identifier[exclusive] = identifier[exclusive] ,
identifier[auto_delete] = identifier[auto_delete] ,
identifier[arguments] = identifier[arguments] )
|
def queue_declare(self, queue, durable, exclusive, auto_delete, warn_if_exists=False, arguments=None):
    """Declare a named queue on the underlying channel; optionally warn
    (QueueAlreadyExistsWarning) if the queue already exists."""
    if warn_if_exists and self.queue_exists(queue):
        warnings.warn(QueueAlreadyExistsWarning(QueueAlreadyExistsWarning.__doc__))
    return self.channel.queue_declare(queue=queue, durable=durable, exclusive=exclusive, auto_delete=auto_delete, arguments=arguments)
|
def get_signing_key(self, key_type="", owner="", kid=None, **kwargs):
    """
    Convenience wrapper around :meth:`get` restricted to signing ("sig") keys.

    :param key_type: Type of key (rsa, ec, oct, ..)
    :param owner: Who is the owner of the keys, "" == me (default)
    :param kid: A Key Identifier
    :param kwargs: Extra key word arguments forwarded to :meth:`get`
    :return: A possibly empty list of keys
    """
    return self.get("sig", key_type, owner, kid, **kwargs)
|
def function[get_signing_key, parameter[self, key_type, owner, kid]]:
constant[
Shortcut to use for signing keys only.
:param key_type: Type of key (rsa, ec, oct, ..)
:param owner: Who is the owner of the keys, "" == me (default)
:param kid: A Key Identifier
:param kwargs: Extra key word arguments
:return: A possibly empty list of keys
]
return[call[name[self].get, parameter[constant[sig], name[key_type], name[owner], name[kid]]]]
|
keyword[def] identifier[get_signing_key] ( identifier[self] , identifier[key_type] = literal[string] , identifier[owner] = literal[string] , identifier[kid] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( literal[string] , identifier[key_type] , identifier[owner] , identifier[kid] ,** identifier[kwargs] )
|
def get_signing_key(self, key_type='', owner='', kid=None, **kwargs):
    """
    Shortcut to use for signing keys only (delegates to ``get`` with
    use="sig").
    :param key_type: Type of key (rsa, ec, oct, ..)
    :param owner: Who is the owner of the keys, "" == me (default)
    :param kid: A Key Identifier
    :param kwargs: Extra key word arguments
    :return: A possibly empty list of keys
    """
    return self.get('sig', key_type, owner, kid, **kwargs)
|
def _write_fact(self, fact_tuple):
"""
Create new fact element and populate attributes.
Once the child is prepared append it to ``fact_list``.
"""
fact = self.document.createElement("fact")
fact.setAttribute('start', fact_tuple.start)
fact.setAttribute('end', fact_tuple.end)
fact.setAttribute('activity', fact_tuple.activity)
fact.setAttribute('duration', fact_tuple.duration)
fact.setAttribute('category', fact_tuple.category)
fact.setAttribute('description', fact_tuple.description)
self.fact_list.appendChild(fact)
|
def function[_write_fact, parameter[self, fact_tuple]]:
constant[
Create new fact element and populate attributes.
Once the child is prepared append it to ``fact_list``.
]
variable[fact] assign[=] call[name[self].document.createElement, parameter[constant[fact]]]
call[name[fact].setAttribute, parameter[constant[start], name[fact_tuple].start]]
call[name[fact].setAttribute, parameter[constant[end], name[fact_tuple].end]]
call[name[fact].setAttribute, parameter[constant[activity], name[fact_tuple].activity]]
call[name[fact].setAttribute, parameter[constant[duration], name[fact_tuple].duration]]
call[name[fact].setAttribute, parameter[constant[category], name[fact_tuple].category]]
call[name[fact].setAttribute, parameter[constant[description], name[fact_tuple].description]]
call[name[self].fact_list.appendChild, parameter[name[fact]]]
|
keyword[def] identifier[_write_fact] ( identifier[self] , identifier[fact_tuple] ):
literal[string]
identifier[fact] = identifier[self] . identifier[document] . identifier[createElement] ( literal[string] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[start] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[end] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[activity] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[duration] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[category] )
identifier[fact] . identifier[setAttribute] ( literal[string] , identifier[fact_tuple] . identifier[description] )
identifier[self] . identifier[fact_list] . identifier[appendChild] ( identifier[fact] )
|
def _write_fact(self, fact_tuple):
    """
    Create new fact element and populate attributes.
    Once the child is prepared append it to ``fact_list``.
    """
    fact = self.document.createElement('fact')
    # One attribute per field of the fact tuple.
    fact.setAttribute('start', fact_tuple.start)
    fact.setAttribute('end', fact_tuple.end)
    fact.setAttribute('activity', fact_tuple.activity)
    fact.setAttribute('duration', fact_tuple.duration)
    fact.setAttribute('category', fact_tuple.category)
    fact.setAttribute('description', fact_tuple.description)
    self.fact_list.appendChild(fact)
|
def stemmed(text):
    """
    Returns a list of simplified and stemmed down terms for the inputted text.
    This will remove common terms and words from the search and return only
    the important root terms. This is useful in searching algorithms.

    :param text | <str>
    :return [<str>, ..]
    """
    # Split on runs of whitespace. The original pattern '\s*' can match the
    # empty string; on Python 3.7+ re.split splits on those empty matches,
    # shattering the text into single characters.
    terms = re.split(r'\s+', toAscii(text))
    output = []
    for term in terms:
        if not term:
            # r'\s+' can still yield empty strings at the text edges.
            continue
        # ignore apostrophe's
        if term.endswith("'s"):
            stripped_term = term[:-2]
        else:
            stripped_term = term
        single_term = singularize(stripped_term)
        if term in COMMON_TERMS or stripped_term in COMMON_TERMS or single_term in COMMON_TERMS:
            continue
        output.append(single_term)
    return output
|
def function[stemmed, parameter[text]]:
constant[
Returns a list of simplified and stemmed down terms for the inputted text.
This will remove common terms and words from the search and return only
the important root terms. This is useful in searching algorithms.
:param text | <str>
:return [<str>, ..]
]
variable[terms] assign[=] call[name[re].split, parameter[constant[\s*], call[name[toAscii], parameter[name[text]]]]]
variable[output] assign[=] list[[]]
for taget[name[term]] in starred[name[terms]] begin[:]
if call[name[term].endswith, parameter[constant['s]]] begin[:]
variable[stripped_term] assign[=] call[name[term]][<ast.Slice object at 0x7da1b28fd420>]
variable[single_term] assign[=] call[name[singularize], parameter[name[stripped_term]]]
if <ast.BoolOp object at 0x7da1b28fc160> begin[:]
continue
call[name[output].append, parameter[name[single_term]]]
return[name[output]]
|
keyword[def] identifier[stemmed] ( identifier[text] ):
literal[string]
identifier[terms] = identifier[re] . identifier[split] ( literal[string] , identifier[toAscii] ( identifier[text] ))
identifier[output] =[]
keyword[for] identifier[term] keyword[in] identifier[terms] :
keyword[if] identifier[term] . identifier[endswith] ( literal[string] ):
identifier[stripped_term] = identifier[term] [:- literal[int] ]
keyword[else] :
identifier[stripped_term] = identifier[term]
identifier[single_term] = identifier[singularize] ( identifier[stripped_term] )
keyword[if] identifier[term] keyword[in] identifier[COMMON_TERMS] keyword[or] identifier[stripped_term] keyword[in] identifier[COMMON_TERMS] keyword[or] identifier[single_term] keyword[in] identifier[COMMON_TERMS] :
keyword[continue]
identifier[output] . identifier[append] ( identifier[single_term] )
keyword[return] identifier[output]
|
def stemmed(text):
"""
Returns a list of simplified and stemmed down terms for the inputted text.
This will remove common terms and words from the search and return only
the important root terms. This is useful in searching algorithms.
:param text | <str>
:return [<str>, ..]
"""
terms = re.split('\\s*', toAscii(text))
output = []
for term in terms:
# ignore apostrophe's
if term.endswith("'s"):
stripped_term = term[:-2] # depends on [control=['if'], data=[]]
else:
stripped_term = term
single_term = singularize(stripped_term)
if term in COMMON_TERMS or stripped_term in COMMON_TERMS or single_term in COMMON_TERMS:
continue # depends on [control=['if'], data=[]]
output.append(single_term) # depends on [control=['for'], data=['term']]
return output
|
def export_as_string(self):
    """
    Returns a CQL query string that can be used to recreate the entire keyspace,
    including user-defined types and tables.
    """
    # Assemble every statement (keyspace DDL, user types, functions,
    # aggregates, tables) and separate them with blank lines.
    statements = [self.as_cql_query() + ';']
    statements.extend(self.user_type_strings())
    statements.extend(f.export_as_string() for f in self.functions.values())
    statements.extend(a.export_as_string() for a in self.aggregates.values())
    statements.extend(t.export_as_string() for t in self.tables.values())
    cql = "\n\n".join(statements)
    if self._exc_info:
        # Metadata processing failed earlier; wrap the (possibly partial)
        # schema in a warning comment together with the captured traceback.
        import traceback
        header = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \
            (self.name)
        trace = "".join(traceback.format_exception(*self._exc_info))
        footer = "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % cql
        return header + trace + footer
    if self.virtual:
        # Virtual keyspaces have no CQL representation; emit a commented note.
        return ("/*\nWarning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.\n"
                "Structure, for reference:*/\n"
                "{cql}\n"
                "").format(ks=self.name, cql=cql)
    return cql
|
def function[export_as_string, parameter[self]]:
constant[
Returns a CQL query string that can be used to recreate the entire keyspace,
including user-defined types and tables.
]
variable[cql] assign[=] call[constant[
].join, parameter[binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.BinOp object at 0x7da1b1df9d20>]] + call[name[self].user_type_strings, parameter[]]] + <ast.ListComp object at 0x7da1b1dfa4d0>] + <ast.ListComp object at 0x7da1b1dfa590>] + <ast.ListComp object at 0x7da1b22b8880>]]]
if name[self]._exc_info begin[:]
import module[traceback]
variable[ret] assign[=] binary_operation[constant[/*
Warning: Keyspace %s is incomplete because of an error processing metadata.
] <ast.Mod object at 0x7da2590d6920> name[self].name]
for taget[name[line]] in starred[call[name[traceback].format_exception, parameter[<ast.Starred object at 0x7da1b1dfab60>]]] begin[:]
<ast.AugAssign object at 0x7da1b1df8fd0>
<ast.AugAssign object at 0x7da1b1df95d0>
return[name[ret]]
if name[self].virtual begin[:]
return[call[constant[/*
Warning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.
Structure, for reference:*/
{cql}
].format, parameter[]]]
return[name[cql]]
|
keyword[def] identifier[export_as_string] ( identifier[self] ):
literal[string]
identifier[cql] = literal[string] . identifier[join] ([ identifier[self] . identifier[as_cql_query] ()+ literal[string] ]+
identifier[self] . identifier[user_type_strings] ()+
[ identifier[f] . identifier[export_as_string] () keyword[for] identifier[f] keyword[in] identifier[self] . identifier[functions] . identifier[values] ()]+
[ identifier[a] . identifier[export_as_string] () keyword[for] identifier[a] keyword[in] identifier[self] . identifier[aggregates] . identifier[values] ()]+
[ identifier[t] . identifier[export_as_string] () keyword[for] identifier[t] keyword[in] identifier[self] . identifier[tables] . identifier[values] ()])
keyword[if] identifier[self] . identifier[_exc_info] :
keyword[import] identifier[traceback]
identifier[ret] = literal[string] %( identifier[self] . identifier[name] )
keyword[for] identifier[line] keyword[in] identifier[traceback] . identifier[format_exception] (* identifier[self] . identifier[_exc_info] ):
identifier[ret] += identifier[line]
identifier[ret] += literal[string] % identifier[cql]
keyword[return] identifier[ret]
keyword[if] identifier[self] . identifier[virtual] :
keyword[return] ( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[ks] = identifier[self] . identifier[name] , identifier[cql] = identifier[cql] )
keyword[return] identifier[cql]
|
def export_as_string(self):
"""
Returns a CQL query string that can be used to recreate the entire keyspace,
including user-defined types and tables.
"""
cql = '\n\n'.join([self.as_cql_query() + ';'] + self.user_type_strings() + [f.export_as_string() for f in self.functions.values()] + [a.export_as_string() for a in self.aggregates.values()] + [t.export_as_string() for t in self.tables.values()])
if self._exc_info:
import traceback
ret = '/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n' % self.name
for line in traceback.format_exception(*self._exc_info):
ret += line # depends on [control=['for'], data=['line']]
ret += '\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/' % cql
return ret # depends on [control=['if'], data=[]]
if self.virtual:
return '/*\nWarning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.\nStructure, for reference:*/\n{cql}\n'.format(ks=self.name, cql=cql) # depends on [control=['if'], data=[]]
return cql
|
def append(self, clause, is_atmost=False):
    """
    Add a single clause or a single AtMostK constraint to CNF+ formula.
    This method additionally updates the number of variables, i.e.
    variable ``self.nv``, used in the formula.
    If the clause is an AtMostK constraint, this should be set with the
    use of the additional default argument ``is_atmost``, which is set
    to ``False`` by default.
    :param clause: a new clause to add; for an AtMostK constraint this is
        a pair ``[literals, bound]``.
    :param is_atmost: if ``True``, the clause is AtMostK.
    :type clause: list(int)
    :type is_atmost: bool
    .. code-block:: python
        >>> from pysat.formula import CNFPlus
        >>> cnf = CNFPlus()
        >>> cnf.append([-3, 4])
        >>> cnf.append([[1, 2, 3], 1], is_atmost=True)
        >>> print cnf.clauses
        [[-3, 4]]
        >>> print cnf.atmosts
        [[[1, 2, 3], 1]]
    """
    if not is_atmost:
        # a plain clause: the literals are the clause itself
        self.nv = max([abs(l) for l in clause] + [self.nv])
        self.clauses.append(clause)
    else:
        # an AtMostK constraint: clause == [literals, bound]
        self.nv = max([abs(l) for l in clause[0]] + [self.nv])
        self.atmosts.append(clause)
|
def function[append, parameter[self, clause, is_atmost]]:
constant[
Add a single clause or a single AtMostK constraint to CNF+ formula.
This method additionally updates the number of variables, i.e.
variable ``self.nv``, used in the formula.
If the clause is an AtMostK constraint, this should be set with the
use of the additional default argument ``is_atmost``, which is set
to ``False`` by default.
:param clause: a new clause to add.
:param is_atmost: if ``True``, the clause is AtMostK.
:type clause: list(int)
:type is_atmost: bool
.. code-block:: python
>>> from pysat.formula import CNFPlus
>>> cnf = CNFPlus()
>>> cnf.append([-3, 4])
>>> cnf.append([[1, 2, 3], 1], is_atmost=True)
>>> print cnf.clauses
[[-3, 4]]
>>> print cnf.atmosts
[[1, 2, 3], 1]
]
if <ast.UnaryOp object at 0x7da1b1116a70> begin[:]
name[self].nv assign[=] call[name[max], parameter[binary_operation[<ast.ListComp object at 0x7da1b1117010> + list[[<ast.Attribute object at 0x7da1b1116ce0>]]]]]
call[name[self].clauses.append, parameter[name[clause]]]
|
keyword[def] identifier[append] ( identifier[self] , identifier[clause] , identifier[is_atmost] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[is_atmost] :
identifier[self] . identifier[nv] = identifier[max] ([ identifier[abs] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[clause] ]+[ identifier[self] . identifier[nv] ])
identifier[self] . identifier[clauses] . identifier[append] ( identifier[clause] )
keyword[else] :
identifier[self] . identifier[nv] = identifier[max] ([ identifier[abs] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[clause] [ literal[int] ]]+[ identifier[self] . identifier[nv] ])
identifier[self] . identifier[atmosts] . identifier[append] ( identifier[clause] )
|
def append(self, clause, is_atmost=False):
"""
Add a single clause or a single AtMostK constraint to CNF+ formula.
This method additionally updates the number of variables, i.e.
variable ``self.nv``, used in the formula.
If the clause is an AtMostK constraint, this should be set with the
use of the additional default argument ``is_atmost``, which is set
to ``False`` by default.
:param clause: a new clause to add.
:param is_atmost: if ``True``, the clause is AtMostK.
:type clause: list(int)
:type is_atmost: bool
.. code-block:: python
>>> from pysat.formula import CNFPlus
>>> cnf = CNFPlus()
>>> cnf.append([-3, 4])
>>> cnf.append([[1, 2, 3], 1], is_atmost=True)
>>> print cnf.clauses
[[-3, 4]]
>>> print cnf.atmosts
[[1, 2, 3], 1]
"""
if not is_atmost:
self.nv = max([abs(l) for l in clause] + [self.nv])
self.clauses.append(clause) # depends on [control=['if'], data=[]]
else:
self.nv = max([abs(l) for l in clause[0]] + [self.nv])
self.atmosts.append(clause)
|
def is_stopword(self, text):
    """
    Determine whether a single word is a stopword, or whether a short
    phrase is made entirely of stopwords, disregarding context.
    Use of this function should be avoided; it's better to give the text
    in context and let the process determine which words are the stopwords.
    """
    # Text counts as a stopword iff every analyzed record is a stopword;
    # this is vacuously true when the analysis yields no records.
    return all(self.is_stopword_record(record)
               for record in self.analyze(text))
|
def function[is_stopword, parameter[self, text]]:
constant[
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
]
variable[found_content_word] assign[=] constant[False]
for taget[name[record]] in starred[call[name[self].analyze, parameter[name[text]]]] begin[:]
if <ast.UnaryOp object at 0x7da207f99030> begin[:]
variable[found_content_word] assign[=] constant[True]
break
return[<ast.UnaryOp object at 0x7da1b26ae500>]
|
keyword[def] identifier[is_stopword] ( identifier[self] , identifier[text] ):
literal[string]
identifier[found_content_word] = keyword[False]
keyword[for] identifier[record] keyword[in] identifier[self] . identifier[analyze] ( identifier[text] ):
keyword[if] keyword[not] identifier[self] . identifier[is_stopword_record] ( identifier[record] ):
identifier[found_content_word] = keyword[True]
keyword[break]
keyword[return] keyword[not] identifier[found_content_word]
|
def is_stopword(self, text):
"""
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
"""
found_content_word = False
for record in self.analyze(text):
if not self.is_stopword_record(record):
found_content_word = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['record']]
return not found_content_word
|
def start_tty(self, conf, interactive):
    """Startup a tty"""
    try:
        harpoon = conf.harpoon
        api = harpoon.docker_context_maker().api
        container_id = conf.container_id
        # tty streams may be provided either directly or as zero-argument
        # factories; resolve each one before handing them to dockerpty.
        resolved = []
        for stream in (harpoon.tty_stdin, harpoon.tty_stdout, harpoon.tty_stderr):
            resolved.append(stream() if callable(stream) else stream)
        stdin, stdout, stderr = resolved
        dockerpty.start(api, container_id, interactive=interactive,
                        stdout=stdout, stderr=stderr, stdin=stdin)
    except KeyboardInterrupt:
        # Ctrl-C simply ends the tty session; not an error.
        pass
|
def function[start_tty, parameter[self, conf, interactive]]:
constant[Startup a tty]
<ast.Try object at 0x7da20c6c6890>
|
keyword[def] identifier[start_tty] ( identifier[self] , identifier[conf] , identifier[interactive] ):
literal[string]
keyword[try] :
identifier[api] = identifier[conf] . identifier[harpoon] . identifier[docker_context_maker] (). identifier[api]
identifier[container_id] = identifier[conf] . identifier[container_id]
identifier[stdin] = identifier[conf] . identifier[harpoon] . identifier[tty_stdin]
identifier[stdout] = identifier[conf] . identifier[harpoon] . identifier[tty_stdout]
identifier[stderr] = identifier[conf] . identifier[harpoon] . identifier[tty_stderr]
keyword[if] identifier[callable] ( identifier[stdin] ): identifier[stdin] = identifier[stdin] ()
keyword[if] identifier[callable] ( identifier[stdout] ): identifier[stdout] = identifier[stdout] ()
keyword[if] identifier[callable] ( identifier[stderr] ): identifier[stderr] = identifier[stderr] ()
identifier[dockerpty] . identifier[start] ( identifier[api] , identifier[container_id] , identifier[interactive] = identifier[interactive] , identifier[stdout] = identifier[stdout] , identifier[stderr] = identifier[stderr] , identifier[stdin] = identifier[stdin] )
keyword[except] identifier[KeyboardInterrupt] :
keyword[pass]
|
def start_tty(self, conf, interactive):
"""Startup a tty"""
try:
api = conf.harpoon.docker_context_maker().api
container_id = conf.container_id
stdin = conf.harpoon.tty_stdin
stdout = conf.harpoon.tty_stdout
stderr = conf.harpoon.tty_stderr
if callable(stdin):
stdin = stdin() # depends on [control=['if'], data=[]]
if callable(stdout):
stdout = stdout() # depends on [control=['if'], data=[]]
if callable(stderr):
stderr = stderr() # depends on [control=['if'], data=[]]
dockerpty.start(api, container_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
pass # depends on [control=['except'], data=[]]
|
def _asdict(self):
"""Return an OrderedDict of the fields."""
return OrderedDict((f.name, getattr(self, f.name))
for f in self._struct)
|
def function[_asdict, parameter[self]]:
constant[Return an OrderedDict of the fields.]
return[call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b1f20190>]]]
|
keyword[def] identifier[_asdict] ( identifier[self] ):
literal[string]
keyword[return] identifier[OrderedDict] (( identifier[f] . identifier[name] , identifier[getattr] ( identifier[self] , identifier[f] . identifier[name] ))
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[_struct] )
|
def _asdict(self):
"""Return an OrderedDict of the fields."""
return OrderedDict(((f.name, getattr(self, f.name)) for f in self._struct))
|
def make_inst():
    """make_inst: prepare data for the diet model"""
    # Each food maps to [price, {nutrient: amount per serving}].
    food_data = {
        "QPounder":  [1.84, {"Cal": 510, "Carbo": 34, "Protein": 28,
                             "VitA": 15, "VitC": 6, "Calc": 30, "Iron": 20}],
        "McLean":    [2.19, {"Cal": 370, "Carbo": 35, "Protein": 24,
                             "VitA": 15, "VitC": 10, "Calc": 20, "Iron": 20}],
        "Big Mac":   [1.84, {"Cal": 500, "Carbo": 42, "Protein": 25,
                             "VitA": 6, "VitC": 2, "Calc": 25, "Iron": 20}],
        "FFilet":    [1.44, {"Cal": 370, "Carbo": 38, "Protein": 14,
                             "VitA": 2, "VitC": 0, "Calc": 15, "Iron": 10}],
        "Chicken":   [2.29, {"Cal": 400, "Carbo": 42, "Protein": 31,
                             "VitA": 8, "VitC": 15, "Calc": 15, "Iron": 8}],
        "Fries":     [0.77, {"Cal": 220, "Carbo": 26, "Protein": 3,
                             "VitA": 0, "VitC": 15, "Calc": 0, "Iron": 2}],
        "McMuffin":  [1.29, {"Cal": 345, "Carbo": 27, "Protein": 15,
                             "VitA": 4, "VitC": 0, "Calc": 20, "Iron": 15}],
        "1% LFMilk": [0.60, {"Cal": 110, "Carbo": 12, "Protein": 9,
                             "VitA": 10, "VitC": 4, "Calc": 30, "Iron": 0}],
        "OrgJuice":  [0.72, {"Cal": 80, "Carbo": 20, "Protein": 1,
                             "VitA": 2, "VitC": 120, "Calc": 2, "Iron": 2}],
    }
    F, c, d = multidict(food_data)
    # Each nutrient maps to [min intake, max intake]; None means unbounded.
    nutrient_bounds = {
        "Cal":     [2000, None],
        "Carbo":   [350, 375],
        "Protein": [55, None],
        "VitA":    [100, None],
        "VitC":    [100, None],
        "Calc":    [100, None],
        "Iron":    [100, None],
    }
    N, a, b = multidict(nutrient_bounds)
    return F, N, a, b, c, d
|
def function[make_inst, parameter[]]:
constant[make_inst: prepare data for the diet model]
<ast.Tuple object at 0x7da18f00feb0> assign[=] call[name[multidict], parameter[dictionary[[<ast.Constant object at 0x7da18f00d000>, <ast.Constant object at 0x7da18f00f340>, <ast.Constant object at 0x7da18f00e1d0>, <ast.Constant object at 0x7da18f00e290>, <ast.Constant object at 0x7da18f00c460>, <ast.Constant object at 0x7da18f00ffd0>, <ast.Constant object at 0x7da18f00f640>, <ast.Constant object at 0x7da18f00dcc0>, <ast.Constant object at 0x7da18f00e7d0>], [<ast.List object at 0x7da18f00c670>, <ast.List object at 0x7da2054a4520>, <ast.List object at 0x7da18f00e890>, <ast.List object at 0x7da18f00f6d0>, <ast.List object at 0x7da18f00e680>, <ast.List object at 0x7da18f00fc10>, <ast.List object at 0x7da20c6c76d0>, <ast.List object at 0x7da20c6c5e10>, <ast.List object at 0x7da20c6c4d90>]]]]
<ast.Tuple object at 0x7da1b18e5fc0> assign[=] call[name[multidict], parameter[dictionary[[<ast.Constant object at 0x7da1b18e7af0>, <ast.Constant object at 0x7da1b18e6410>, <ast.Constant object at 0x7da1b18e6c50>, <ast.Constant object at 0x7da1b18e79d0>, <ast.Constant object at 0x7da1b18e7cd0>, <ast.Constant object at 0x7da1b18e6110>, <ast.Constant object at 0x7da1b18e5780>], [<ast.List object at 0x7da1b18e4c40>, <ast.List object at 0x7da1b18e4cd0>, <ast.List object at 0x7da1b18e7130>, <ast.List object at 0x7da1b18e45e0>, <ast.List object at 0x7da1b18e7010>, <ast.List object at 0x7da1b18e5690>, <ast.List object at 0x7da1b18e6890>]]]]
return[tuple[[<ast.Name object at 0x7da1b18e48b0>, <ast.Name object at 0x7da1b18e4d30>, <ast.Name object at 0x7da1b18e4100>, <ast.Name object at 0x7da1b18e4370>, <ast.Name object at 0x7da1b18e4490>, <ast.Name object at 0x7da1b18e7190>]]]
|
keyword[def] identifier[make_inst] ():
literal[string]
identifier[F] , identifier[c] , identifier[d] = identifier[multidict] ({
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
literal[string] :[ literal[int] ,{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }],
})
identifier[N] , identifier[a] , identifier[b] = identifier[multidict] ({
literal[string] :[ literal[int] , keyword[None] ],
literal[string] :[ literal[int] , literal[int] ],
literal[string] :[ literal[int] , keyword[None] ],
literal[string] :[ literal[int] , keyword[None] ],
literal[string] :[ literal[int] , keyword[None] ],
literal[string] :[ literal[int] , keyword[None] ],
literal[string] :[ literal[int] , keyword[None] ],
})
keyword[return] identifier[F] , identifier[N] , identifier[a] , identifier[b] , identifier[c] , identifier[d]
|
def make_inst():
"""make_inst: prepare data for the diet model""" # cost # composition
(F, c, d) = multidict({'QPounder': [1.84, {'Cal': 510, 'Carbo': 34, 'Protein': 28, 'VitA': 15, 'VitC': 6, 'Calc': 30, 'Iron': 20}], 'McLean': [2.19, {'Cal': 370, 'Carbo': 35, 'Protein': 24, 'VitA': 15, 'VitC': 10, 'Calc': 20, 'Iron': 20}], 'Big Mac': [1.84, {'Cal': 500, 'Carbo': 42, 'Protein': 25, 'VitA': 6, 'VitC': 2, 'Calc': 25, 'Iron': 20}], 'FFilet': [1.44, {'Cal': 370, 'Carbo': 38, 'Protein': 14, 'VitA': 2, 'VitC': 0, 'Calc': 15, 'Iron': 10}], 'Chicken': [2.29, {'Cal': 400, 'Carbo': 42, 'Protein': 31, 'VitA': 8, 'VitC': 15, 'Calc': 15, 'Iron': 8}], 'Fries': [0.77, {'Cal': 220, 'Carbo': 26, 'Protein': 3, 'VitA': 0, 'VitC': 15, 'Calc': 0, 'Iron': 2}], 'McMuffin': [1.29, {'Cal': 345, 'Carbo': 27, 'Protein': 15, 'VitA': 4, 'VitC': 0, 'Calc': 20, 'Iron': 15}], '1% LFMilk': [0.6, {'Cal': 110, 'Carbo': 12, 'Protein': 9, 'VitA': 10, 'VitC': 4, 'Calc': 30, 'Iron': 0}], 'OrgJuice': [0.72, {'Cal': 80, 'Carbo': 20, 'Protein': 1, 'VitA': 2, 'VitC': 120, 'Calc': 2, 'Iron': 2}]}) # min,max intake
(N, a, b) = multidict({'Cal': [2000, None], 'Carbo': [350, 375], 'Protein': [55, None], 'VitA': [100, None], 'VitC': [100, None], 'Calc': [100, None], 'Iron': [100, None]})
return (F, N, a, b, c, d)
|
def show_profit_attribution(round_trips):
    """
    Prints the share of total PnL contributed by each
    traded name.
    Parameters
    ----------
    round_trips : pd.DataFrame
        DataFrame with one row per round trip trade.
        - See full explanation in round_trips.extract_round_trips
    Returns
    -------
    None
        The attribution table is printed via utils.print_table.
    """
    total_pnl = round_trips['pnl'].sum()
    # Share of the total PnL contributed by each traded symbol.
    pnl_attribution = round_trips.groupby('symbol')['pnl'].sum() / total_pnl
    pnl_attribution.name = ''
    # Render asset objects as readable names for the printed table.
    pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)
    utils.print_table(
        pnl_attribution.sort_values(
            inplace=False,
            ascending=False,
        ),
        name='Profitability (PnL / PnL total) per name',
        float_format='{:.2%}'.format,
    )
|
def function[show_profit_attribution, parameter[round_trips]]:
constant[
Prints the share of total PnL contributed by each
traded name.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
]
variable[total_pnl] assign[=] call[call[name[round_trips]][constant[pnl]].sum, parameter[]]
variable[pnl_attribution] assign[=] binary_operation[call[call[call[name[round_trips].groupby, parameter[constant[symbol]]]][constant[pnl]].sum, parameter[]] / name[total_pnl]]
name[pnl_attribution].name assign[=] constant[]
name[pnl_attribution].index assign[=] call[name[pnl_attribution].index.map, parameter[name[utils].format_asset]]
call[name[utils].print_table, parameter[call[name[pnl_attribution].sort_values, parameter[]]]]
|
keyword[def] identifier[show_profit_attribution] ( identifier[round_trips] ):
literal[string]
identifier[total_pnl] = identifier[round_trips] [ literal[string] ]. identifier[sum] ()
identifier[pnl_attribution] = identifier[round_trips] . identifier[groupby] ( literal[string] )[ literal[string] ]. identifier[sum] ()/ identifier[total_pnl]
identifier[pnl_attribution] . identifier[name] = literal[string]
identifier[pnl_attribution] . identifier[index] = identifier[pnl_attribution] . identifier[index] . identifier[map] ( identifier[utils] . identifier[format_asset] )
identifier[utils] . identifier[print_table] (
identifier[pnl_attribution] . identifier[sort_values] (
identifier[inplace] = keyword[False] ,
identifier[ascending] = keyword[False] ,
),
identifier[name] = literal[string] ,
identifier[float_format] = literal[string] . identifier[format] ,
)
|
def show_profit_attribution(round_trips):
"""
Prints the share of total PnL contributed by each
traded name.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
total_pnl = round_trips['pnl'].sum()
pnl_attribution = round_trips.groupby('symbol')['pnl'].sum() / total_pnl
pnl_attribution.name = ''
pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)
utils.print_table(pnl_attribution.sort_values(inplace=False, ascending=False), name='Profitability (PnL / PnL total) per name', float_format='{:.2%}'.format)
|
def insertReadGroup(self, readGroup):
    """
    Inserts the specified readGroup into the DB.
    """
    # Serialize the nested protocol objects to JSON strings for storage.
    statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
    experimentJson = json.dumps(
        protocol.toJsonDict(readGroup.getExperiment()))
    try:
        # Any failure while extracting fields or inserting the row is
        # surfaced uniformly as a RepoManagerException.
        row = dict(
            id=readGroup.getId(),
            readgroupsetid=readGroup.getParentContainer().getId(),
            name=readGroup.getLocalId(),
            predictedinsertedsize=readGroup.getPredictedInsertSize(),
            samplename=readGroup.getSampleName(),
            description=readGroup.getDescription(),
            stats=statsJson,
            experiment=experimentJson,
            biosampleid=readGroup.getBiosampleId(),
            attributes=json.dumps(readGroup.getAttributes()))
        models.Readgroup.create(**row)
    except Exception as e:
        raise exceptions.RepoManagerException(e)
|
def function[insertReadGroup, parameter[self, readGroup]]:
constant[
Inserts the specified readGroup into the DB.
]
variable[statsJson] assign[=] call[name[json].dumps, parameter[call[name[protocol].toJsonDict, parameter[call[name[readGroup].getStats, parameter[]]]]]]
variable[experimentJson] assign[=] call[name[json].dumps, parameter[call[name[protocol].toJsonDict, parameter[call[name[readGroup].getExperiment, parameter[]]]]]]
<ast.Try object at 0x7da18f810310>
|
keyword[def] identifier[insertReadGroup] ( identifier[self] , identifier[readGroup] ):
literal[string]
identifier[statsJson] = identifier[json] . identifier[dumps] ( identifier[protocol] . identifier[toJsonDict] ( identifier[readGroup] . identifier[getStats] ()))
identifier[experimentJson] = identifier[json] . identifier[dumps] (
identifier[protocol] . identifier[toJsonDict] ( identifier[readGroup] . identifier[getExperiment] ()))
keyword[try] :
identifier[models] . identifier[Readgroup] . identifier[create] (
identifier[id] = identifier[readGroup] . identifier[getId] (),
identifier[readgroupsetid] = identifier[readGroup] . identifier[getParentContainer] (). identifier[getId] (),
identifier[name] = identifier[readGroup] . identifier[getLocalId] (),
identifier[predictedinsertedsize] = identifier[readGroup] . identifier[getPredictedInsertSize] (),
identifier[samplename] = identifier[readGroup] . identifier[getSampleName] (),
identifier[description] = identifier[readGroup] . identifier[getDescription] (),
identifier[stats] = identifier[statsJson] ,
identifier[experiment] = identifier[experimentJson] ,
identifier[biosampleid] = identifier[readGroup] . identifier[getBiosampleId] (),
identifier[attributes] = identifier[json] . identifier[dumps] ( identifier[readGroup] . identifier[getAttributes] ()))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[exceptions] . identifier[RepoManagerException] ( identifier[e] )
|
def insertReadGroup(self, readGroup):
"""
Inserts the specified readGroup into the DB.
"""
statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
experimentJson = json.dumps(protocol.toJsonDict(readGroup.getExperiment()))
try:
models.Readgroup.create(id=readGroup.getId(), readgroupsetid=readGroup.getParentContainer().getId(), name=readGroup.getLocalId(), predictedinsertedsize=readGroup.getPredictedInsertSize(), samplename=readGroup.getSampleName(), description=readGroup.getDescription(), stats=statsJson, experiment=experimentJson, biosampleid=readGroup.getBiosampleId(), attributes=json.dumps(readGroup.getAttributes())) # depends on [control=['try'], data=[]]
except Exception as e:
raise exceptions.RepoManagerException(e) # depends on [control=['except'], data=['e']]
|
def include_file(self, path, include_dirs=None):
    """
    Includes a file into the current model.
    @param path: Path to the file to be included.
    @type path: str
    @param include_dirs: Optional alternate include search path.
    @type include_dirs: list(str)
    """
    # NOTE: the default was a mutable list ([]); use None to avoid the
    # shared-mutable-default pitfall. Behavior is unchanged: any falsy
    # value falls back to self.include_dirs.
    if self.include_includes:
        if self.debug: print("------------------ Including a file: %s"%path)
        inc_dirs = include_dirs if include_dirs else self.include_dirs
        parser = LEMSFileParser(self, inc_dirs, self.include_includes)
        if os.access(path, os.F_OK):
            # Path is reachable as given; parse it unless already included.
            if not path in self.included_files:
                parser.parse(open(path).read())
                self.included_files.append(path)
                return
            else:
                if self.debug: print("Already included: %s"%path)
                return
        else:
            # Otherwise try each include directory in turn; first hit wins.
            for inc_dir in inc_dirs:
                new_path = (inc_dir + '/' + path)
                if os.access(new_path, os.F_OK):
                    if not new_path in self.included_files:
                        parser.parse(open(new_path).read())
                        self.included_files.append(new_path)
                        return
                    else:
                        if self.debug: print("Already included: %s"%path)
                        return
            # No candidate resolved: raise or report depending on settings.
            msg = 'Unable to open ' + path
            if self.fail_on_missing_includes:
                raise Exception(msg)
            elif self.debug:
                print(msg)
|
def function[include_file, parameter[self, path, include_dirs]]:
constant[
Includes a file into the current model.
@param path: Path to the file to be included.
@type path: str
@param include_dirs: Optional alternate include search path.
@type include_dirs: list(str)
]
if name[self].include_includes begin[:]
if name[self].debug begin[:]
call[name[print], parameter[binary_operation[constant[------------------ Including a file: %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
variable[inc_dirs] assign[=] <ast.IfExp object at 0x7da1b24c72e0>
variable[parser] assign[=] call[name[LEMSFileParser], parameter[name[self], name[inc_dirs], name[self].include_includes]]
if call[name[os].access, parameter[name[path], name[os].F_OK]] begin[:]
if <ast.UnaryOp object at 0x7da1b24c7550> begin[:]
call[name[parser].parse, parameter[call[call[name[open], parameter[name[path]]].read, parameter[]]]]
call[name[self].included_files.append, parameter[name[path]]]
return[None]
variable[msg] assign[=] binary_operation[constant[Unable to open ] + name[path]]
if name[self].fail_on_missing_includes begin[:]
<ast.Raise object at 0x7da1b24c5960>
|
keyword[def] identifier[include_file] ( identifier[self] , identifier[path] , identifier[include_dirs] =[]):
literal[string]
keyword[if] identifier[self] . identifier[include_includes] :
keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] % identifier[path] )
identifier[inc_dirs] = identifier[include_dirs] keyword[if] identifier[include_dirs] keyword[else] identifier[self] . identifier[include_dirs]
identifier[parser] = identifier[LEMSFileParser] ( identifier[self] , identifier[inc_dirs] , identifier[self] . identifier[include_includes] )
keyword[if] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[F_OK] ):
keyword[if] keyword[not] identifier[path] keyword[in] identifier[self] . identifier[included_files] :
identifier[parser] . identifier[parse] ( identifier[open] ( identifier[path] ). identifier[read] ())
identifier[self] . identifier[included_files] . identifier[append] ( identifier[path] )
keyword[return]
keyword[else] :
keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] % identifier[path] )
keyword[return]
keyword[else] :
keyword[for] identifier[inc_dir] keyword[in] identifier[inc_dirs] :
identifier[new_path] =( identifier[inc_dir] + literal[string] + identifier[path] )
keyword[if] identifier[os] . identifier[access] ( identifier[new_path] , identifier[os] . identifier[F_OK] ):
keyword[if] keyword[not] identifier[new_path] keyword[in] identifier[self] . identifier[included_files] :
identifier[parser] . identifier[parse] ( identifier[open] ( identifier[new_path] ). identifier[read] ())
identifier[self] . identifier[included_files] . identifier[append] ( identifier[new_path] )
keyword[return]
keyword[else] :
keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] % identifier[path] )
keyword[return]
identifier[msg] = literal[string] + identifier[path]
keyword[if] identifier[self] . identifier[fail_on_missing_includes] :
keyword[raise] identifier[Exception] ( identifier[msg] )
keyword[elif] identifier[self] . identifier[debug] :
identifier[print] ( identifier[msg] )
|
def include_file(self, path, include_dirs=[]):
"""
Includes a file into the current model.
@param path: Path to the file to be included.
@type path: str
@param include_dirs: Optional alternate include search path.
@type include_dirs: list(str)
"""
if self.include_includes:
if self.debug:
print('------------------ Including a file: %s' % path) # depends on [control=['if'], data=[]]
inc_dirs = include_dirs if include_dirs else self.include_dirs
parser = LEMSFileParser(self, inc_dirs, self.include_includes)
if os.access(path, os.F_OK):
if not path in self.included_files:
parser.parse(open(path).read())
self.included_files.append(path)
return # depends on [control=['if'], data=[]]
else:
if self.debug:
print('Already included: %s' % path) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
else:
for inc_dir in inc_dirs:
new_path = inc_dir + '/' + path
if os.access(new_path, os.F_OK):
if not new_path in self.included_files:
parser.parse(open(new_path).read())
self.included_files.append(new_path)
return # depends on [control=['if'], data=[]]
else:
if self.debug:
print('Already included: %s' % path) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['inc_dir']]
msg = 'Unable to open ' + path
if self.fail_on_missing_includes:
raise Exception(msg) # depends on [control=['if'], data=[]]
elif self.debug:
print(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def draw(self, data):
"""Display decoded characters at the current cursor position and
advances the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
"""
data = data.translate(
self.g1_charset if self.charset else self.g0_charset)
for char in data:
char_width = wcwidth(char)
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if self.cursor.x == self.columns:
if mo.DECAWM in self.mode:
self.dirty.add(self.cursor.y)
self.carriage_return()
self.linefeed()
elif char_width > 0:
self.cursor.x -= char_width
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if mo.IRM in self.mode and char_width > 0:
self.insert_characters(char_width)
line = self.buffer[self.cursor.y]
if char_width == 1:
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
elif char_width == 2:
# A two-cell character has a stub slot after it.
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
if self.cursor.x + 1 < self.columns:
line[self.cursor.x + 1] = self.cursor.attrs \
._replace(data="")
elif char_width == 0 and unicodedata.combining(char):
# A zero-cell character is combined with the previous
# character either on this or preceeding line.
if self.cursor.x:
last = line[self.cursor.x - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
line[self.cursor.x - 1] = last._replace(data=normalized)
elif self.cursor.y:
last = self.buffer[self.cursor.y - 1][self.columns - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
self.buffer[self.cursor.y - 1][self.columns - 1] = \
last._replace(data=normalized)
else:
break # Unprintable character or doesn't advance the cursor.
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
if char_width > 0:
self.cursor.x = min(self.cursor.x + char_width, self.columns)
self.dirty.add(self.cursor.y)
|
def function[draw, parameter[self, data]]:
constant[Display decoded characters at the current cursor position and
advances the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
]
variable[data] assign[=] call[name[data].translate, parameter[<ast.IfExp object at 0x7da1b06789a0>]]
for taget[name[char]] in starred[name[data]] begin[:]
variable[char_width] assign[=] call[name[wcwidth], parameter[name[char]]]
if compare[name[self].cursor.x equal[==] name[self].columns] begin[:]
if compare[name[mo].DECAWM in name[self].mode] begin[:]
call[name[self].dirty.add, parameter[name[self].cursor.y]]
call[name[self].carriage_return, parameter[]]
call[name[self].linefeed, parameter[]]
if <ast.BoolOp object at 0x7da1b0678040> begin[:]
call[name[self].insert_characters, parameter[name[char_width]]]
variable[line] assign[=] call[name[self].buffer][name[self].cursor.y]
if compare[name[char_width] equal[==] constant[1]] begin[:]
call[name[line]][name[self].cursor.x] assign[=] call[name[self].cursor.attrs._replace, parameter[]]
if compare[name[char_width] greater[>] constant[0]] begin[:]
name[self].cursor.x assign[=] call[name[min], parameter[binary_operation[name[self].cursor.x + name[char_width]], name[self].columns]]
call[name[self].dirty.add, parameter[name[self].cursor.y]]
|
keyword[def] identifier[draw] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[translate] (
identifier[self] . identifier[g1_charset] keyword[if] identifier[self] . identifier[charset] keyword[else] identifier[self] . identifier[g0_charset] )
keyword[for] identifier[char] keyword[in] identifier[data] :
identifier[char_width] = identifier[wcwidth] ( identifier[char] )
keyword[if] identifier[self] . identifier[cursor] . identifier[x] == identifier[self] . identifier[columns] :
keyword[if] identifier[mo] . identifier[DECAWM] keyword[in] identifier[self] . identifier[mode] :
identifier[self] . identifier[dirty] . identifier[add] ( identifier[self] . identifier[cursor] . identifier[y] )
identifier[self] . identifier[carriage_return] ()
identifier[self] . identifier[linefeed] ()
keyword[elif] identifier[char_width] > literal[int] :
identifier[self] . identifier[cursor] . identifier[x] -= identifier[char_width]
keyword[if] identifier[mo] . identifier[IRM] keyword[in] identifier[self] . identifier[mode] keyword[and] identifier[char_width] > literal[int] :
identifier[self] . identifier[insert_characters] ( identifier[char_width] )
identifier[line] = identifier[self] . identifier[buffer] [ identifier[self] . identifier[cursor] . identifier[y] ]
keyword[if] identifier[char_width] == literal[int] :
identifier[line] [ identifier[self] . identifier[cursor] . identifier[x] ]= identifier[self] . identifier[cursor] . identifier[attrs] . identifier[_replace] ( identifier[data] = identifier[char] )
keyword[elif] identifier[char_width] == literal[int] :
identifier[line] [ identifier[self] . identifier[cursor] . identifier[x] ]= identifier[self] . identifier[cursor] . identifier[attrs] . identifier[_replace] ( identifier[data] = identifier[char] )
keyword[if] identifier[self] . identifier[cursor] . identifier[x] + literal[int] < identifier[self] . identifier[columns] :
identifier[line] [ identifier[self] . identifier[cursor] . identifier[x] + literal[int] ]= identifier[self] . identifier[cursor] . identifier[attrs] . identifier[_replace] ( identifier[data] = literal[string] )
keyword[elif] identifier[char_width] == literal[int] keyword[and] identifier[unicodedata] . identifier[combining] ( identifier[char] ):
keyword[if] identifier[self] . identifier[cursor] . identifier[x] :
identifier[last] = identifier[line] [ identifier[self] . identifier[cursor] . identifier[x] - literal[int] ]
identifier[normalized] = identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[last] . identifier[data] + identifier[char] )
identifier[line] [ identifier[self] . identifier[cursor] . identifier[x] - literal[int] ]= identifier[last] . identifier[_replace] ( identifier[data] = identifier[normalized] )
keyword[elif] identifier[self] . identifier[cursor] . identifier[y] :
identifier[last] = identifier[self] . identifier[buffer] [ identifier[self] . identifier[cursor] . identifier[y] - literal[int] ][ identifier[self] . identifier[columns] - literal[int] ]
identifier[normalized] = identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[last] . identifier[data] + identifier[char] )
identifier[self] . identifier[buffer] [ identifier[self] . identifier[cursor] . identifier[y] - literal[int] ][ identifier[self] . identifier[columns] - literal[int] ]= identifier[last] . identifier[_replace] ( identifier[data] = identifier[normalized] )
keyword[else] :
keyword[break]
keyword[if] identifier[char_width] > literal[int] :
identifier[self] . identifier[cursor] . identifier[x] = identifier[min] ( identifier[self] . identifier[cursor] . identifier[x] + identifier[char_width] , identifier[self] . identifier[columns] )
identifier[self] . identifier[dirty] . identifier[add] ( identifier[self] . identifier[cursor] . identifier[y] )
|
def draw(self, data):
"""Display decoded characters at the current cursor position and
advances the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
"""
data = data.translate(self.g1_charset if self.charset else self.g0_charset)
for char in data:
char_width = wcwidth(char)
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if self.cursor.x == self.columns:
if mo.DECAWM in self.mode:
self.dirty.add(self.cursor.y)
self.carriage_return()
self.linefeed() # depends on [control=['if'], data=[]]
elif char_width > 0:
self.cursor.x -= char_width # depends on [control=['if'], data=['char_width']] # depends on [control=['if'], data=[]]
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if mo.IRM in self.mode and char_width > 0:
self.insert_characters(char_width) # depends on [control=['if'], data=[]]
line = self.buffer[self.cursor.y]
if char_width == 1:
line[self.cursor.x] = self.cursor.attrs._replace(data=char) # depends on [control=['if'], data=[]]
elif char_width == 2:
# A two-cell character has a stub slot after it.
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
if self.cursor.x + 1 < self.columns:
line[self.cursor.x + 1] = self.cursor.attrs._replace(data='') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif char_width == 0 and unicodedata.combining(char):
# A zero-cell character is combined with the previous
# character either on this or preceeding line.
if self.cursor.x:
last = line[self.cursor.x - 1]
normalized = unicodedata.normalize('NFC', last.data + char)
line[self.cursor.x - 1] = last._replace(data=normalized) # depends on [control=['if'], data=[]]
elif self.cursor.y:
last = self.buffer[self.cursor.y - 1][self.columns - 1]
normalized = unicodedata.normalize('NFC', last.data + char)
self.buffer[self.cursor.y - 1][self.columns - 1] = last._replace(data=normalized) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
break # Unprintable character or doesn't advance the cursor.
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
if char_width > 0:
self.cursor.x = min(self.cursor.x + char_width, self.columns) # depends on [control=['if'], data=['char_width']] # depends on [control=['for'], data=['char']]
self.dirty.add(self.cursor.y)
|
def _get_p_p_id_and_contract(self):
"""Get id of consumption profile."""
contracts = {}
try:
raw_res = yield from self._session.get(PROFILE_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get profile page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
# Search contracts
for node in soup.find_all('span', {"class": "contrat"}):
rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
if rematch is not None:
contracts[rematch.group(1).replace(" ", "")] = None
# search for links
for node in soup.find_all('a', {"class": "big iconLink"}):
for contract in contracts:
if contract in node.attrs.get('href'):
contracts[contract] = node.attrs.get('href')
# Looking for p_p_id
p_p_id = None
for node in soup.find_all('span'):
node_id = node.attrs.get('id', "")
if node_id.startswith("p_portraitConsommation_WAR"):
p_p_id = node_id[2:]
break
if p_p_id is None:
raise PyHydroQuebecError("Could not get p_p_id")
return p_p_id, contracts
|
def function[_get_p_p_id_and_contract, parameter[self]]:
constant[Get id of consumption profile.]
variable[contracts] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b0f05c90>
variable[content] assign[=] <ast.YieldFrom object at 0x7da1b0f05f30>
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[content], constant[html.parser]]]
for taget[name[node]] in starred[call[name[soup].find_all, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b0f058d0>], [<ast.Constant object at 0x7da1b0f062f0>]]]]] begin[:]
variable[rematch] assign[=] call[name[re].match, parameter[constant[C[a-z]* ([0-9]{4} [0-9]{5})], name[node].text]]
if compare[name[rematch] is_not constant[None]] begin[:]
call[name[contracts]][call[call[name[rematch].group, parameter[constant[1]]].replace, parameter[constant[ ], constant[]]]] assign[=] constant[None]
for taget[name[node]] in starred[call[name[soup].find_all, parameter[constant[a], dictionary[[<ast.Constant object at 0x7da1b0e2c760>], [<ast.Constant object at 0x7da1b0e2f070>]]]]] begin[:]
for taget[name[contract]] in starred[name[contracts]] begin[:]
if compare[name[contract] in call[name[node].attrs.get, parameter[constant[href]]]] begin[:]
call[name[contracts]][name[contract]] assign[=] call[name[node].attrs.get, parameter[constant[href]]]
variable[p_p_id] assign[=] constant[None]
for taget[name[node]] in starred[call[name[soup].find_all, parameter[constant[span]]]] begin[:]
variable[node_id] assign[=] call[name[node].attrs.get, parameter[constant[id], constant[]]]
if call[name[node_id].startswith, parameter[constant[p_portraitConsommation_WAR]]] begin[:]
variable[p_p_id] assign[=] call[name[node_id]][<ast.Slice object at 0x7da1b0e2ceb0>]
break
if compare[name[p_p_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0e2d270>
return[tuple[[<ast.Name object at 0x7da1b0e2c3d0>, <ast.Name object at 0x7da1b0e2e8f0>]]]
|
keyword[def] identifier[_get_p_p_id_and_contract] ( identifier[self] ):
literal[string]
identifier[contracts] ={}
keyword[try] :
identifier[raw_res] = keyword[yield] keyword[from] identifier[self] . identifier[_session] . identifier[get] ( identifier[PROFILE_URL] ,
identifier[timeout] = identifier[self] . identifier[_timeout] )
keyword[except] identifier[OSError] :
keyword[raise] identifier[PyHydroQuebecError] ( literal[string] )
identifier[content] = keyword[yield] keyword[from] identifier[raw_res] . identifier[text] ()
identifier[soup] = identifier[BeautifulSoup] ( identifier[content] , literal[string] )
keyword[for] identifier[node] keyword[in] identifier[soup] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }):
identifier[rematch] = identifier[re] . identifier[match] ( literal[string] , identifier[node] . identifier[text] )
keyword[if] identifier[rematch] keyword[is] keyword[not] keyword[None] :
identifier[contracts] [ identifier[rematch] . identifier[group] ( literal[int] ). identifier[replace] ( literal[string] , literal[string] )]= keyword[None]
keyword[for] identifier[node] keyword[in] identifier[soup] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] }):
keyword[for] identifier[contract] keyword[in] identifier[contracts] :
keyword[if] identifier[contract] keyword[in] identifier[node] . identifier[attrs] . identifier[get] ( literal[string] ):
identifier[contracts] [ identifier[contract] ]= identifier[node] . identifier[attrs] . identifier[get] ( literal[string] )
identifier[p_p_id] = keyword[None]
keyword[for] identifier[node] keyword[in] identifier[soup] . identifier[find_all] ( literal[string] ):
identifier[node_id] = identifier[node] . identifier[attrs] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[node_id] . identifier[startswith] ( literal[string] ):
identifier[p_p_id] = identifier[node_id] [ literal[int] :]
keyword[break]
keyword[if] identifier[p_p_id] keyword[is] keyword[None] :
keyword[raise] identifier[PyHydroQuebecError] ( literal[string] )
keyword[return] identifier[p_p_id] , identifier[contracts]
|
def _get_p_p_id_and_contract(self):
"""Get id of consumption profile."""
contracts = {}
try:
raw_res = (yield from self._session.get(PROFILE_URL, timeout=self._timeout)) # depends on [control=['try'], data=[]]
except OSError:
raise PyHydroQuebecError('Can not get profile page') # depends on [control=['except'], data=[]]
# Parse html
content = (yield from raw_res.text())
soup = BeautifulSoup(content, 'html.parser')
# Search contracts
for node in soup.find_all('span', {'class': 'contrat'}):
rematch = re.match('C[a-z]* ([0-9]{4} [0-9]{5})', node.text)
if rematch is not None:
contracts[rematch.group(1).replace(' ', '')] = None # depends on [control=['if'], data=['rematch']] # depends on [control=['for'], data=['node']]
# search for links
for node in soup.find_all('a', {'class': 'big iconLink'}):
for contract in contracts:
if contract in node.attrs.get('href'):
contracts[contract] = node.attrs.get('href') # depends on [control=['if'], data=['contract']] # depends on [control=['for'], data=['contract']] # depends on [control=['for'], data=['node']]
# Looking for p_p_id
p_p_id = None
for node in soup.find_all('span'):
node_id = node.attrs.get('id', '')
if node_id.startswith('p_portraitConsommation_WAR'):
p_p_id = node_id[2:]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
if p_p_id is None:
raise PyHydroQuebecError('Could not get p_p_id') # depends on [control=['if'], data=[]]
return (p_p_id, contracts)
|
def size_footing_for_capacity(sl, vertical_load, fos=1.0, length_to_width=1.0, verbose=0, **kwargs):
"""
Determine the size of a footing given an aspect ratio and a load
:param sl: Soil object
:param vertical_load: The applied load to the foundation
:param fos: The target factor of safety
:param length_to_width: The desired length to width ratio of the foundation
:param verbose: verbosity
:return: a Foundation object
"""
method = kwargs.get("method", 'vesics')
depth_to_width = kwargs.get("depth_to_width", 0)
depth = kwargs.get("depth", 0)
use_depth_to_width = 0
if not depth:
use_depth_to_width = 1
# Find approximate size
fd = models.FoundationRaft()
fd.width = .5 # start with B=1.0m
for i in range(50):
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width
capacity_method_selector(sl, fd, method)
q = fd.q_ult
bearing_capacity = q * fd.length * fd.width
fs_actual = bearing_capacity / vertical_load
if fs_actual < fos:
# Need to increase foundation sizes
fd.width += 0.5
else:
if verbose:
log("fs_actual: ", fs_actual)
log("fd.width: ", fd.width)
break
# at this stage the current size should be too big
width_array = []
fs_array = []
for j in range(11):
width_array.append(fd.width)
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width
capacity_method_selector(sl, fd, method)
q = fd.q_ult
capacity = q * fd.length * fd.width
fs_array.append(capacity / vertical_load)
fd.width = fd.width - 0.5 / 10
# search the array until FS satisfied:
if verbose:
log("reqFS: ", fos)
log("width array: \n", width_array)
log("FS array: \n", fs_array)
for fs in range(len(fs_array)):
if fs_array[fs] < fos:
fd.width = width_array[fs - 1]
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width
capacity_method_selector(sl, fd, method)
break
if fs == len(fs_array) - 1:
DesignError("No suitable foundation sizes could be determined!")
return fd
|
def function[size_footing_for_capacity, parameter[sl, vertical_load, fos, length_to_width, verbose]]:
constant[
Determine the size of a footing given an aspect ratio and a load
:param sl: Soil object
:param vertical_load: The applied load to the foundation
:param fos: The target factor of safety
:param length_to_width: The desired length to width ratio of the foundation
:param verbose: verbosity
:return: a Foundation object
]
variable[method] assign[=] call[name[kwargs].get, parameter[constant[method], constant[vesics]]]
variable[depth_to_width] assign[=] call[name[kwargs].get, parameter[constant[depth_to_width], constant[0]]]
variable[depth] assign[=] call[name[kwargs].get, parameter[constant[depth], constant[0]]]
variable[use_depth_to_width] assign[=] constant[0]
if <ast.UnaryOp object at 0x7da1b03503a0> begin[:]
variable[use_depth_to_width] assign[=] constant[1]
variable[fd] assign[=] call[name[models].FoundationRaft, parameter[]]
name[fd].width assign[=] constant[0.5]
for taget[name[i]] in starred[call[name[range], parameter[constant[50]]]] begin[:]
name[fd].length assign[=] binary_operation[name[length_to_width] * name[fd].width]
if name[use_depth_to_width] begin[:]
name[fd].depth assign[=] binary_operation[name[depth_to_width] * name[fd].width]
call[name[capacity_method_selector], parameter[name[sl], name[fd], name[method]]]
variable[q] assign[=] name[fd].q_ult
variable[bearing_capacity] assign[=] binary_operation[binary_operation[name[q] * name[fd].length] * name[fd].width]
variable[fs_actual] assign[=] binary_operation[name[bearing_capacity] / name[vertical_load]]
if compare[name[fs_actual] less[<] name[fos]] begin[:]
<ast.AugAssign object at 0x7da1b0388130>
variable[width_array] assign[=] list[[]]
variable[fs_array] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[constant[11]]]] begin[:]
call[name[width_array].append, parameter[name[fd].width]]
name[fd].length assign[=] binary_operation[name[length_to_width] * name[fd].width]
if name[use_depth_to_width] begin[:]
name[fd].depth assign[=] binary_operation[name[depth_to_width] * name[fd].width]
call[name[capacity_method_selector], parameter[name[sl], name[fd], name[method]]]
variable[q] assign[=] name[fd].q_ult
variable[capacity] assign[=] binary_operation[binary_operation[name[q] * name[fd].length] * name[fd].width]
call[name[fs_array].append, parameter[binary_operation[name[capacity] / name[vertical_load]]]]
name[fd].width assign[=] binary_operation[name[fd].width - binary_operation[constant[0.5] / constant[10]]]
if name[verbose] begin[:]
call[name[log], parameter[constant[reqFS: ], name[fos]]]
call[name[log], parameter[constant[width array:
], name[width_array]]]
call[name[log], parameter[constant[FS array:
], name[fs_array]]]
for taget[name[fs]] in starred[call[name[range], parameter[call[name[len], parameter[name[fs_array]]]]]] begin[:]
if compare[call[name[fs_array]][name[fs]] less[<] name[fos]] begin[:]
name[fd].width assign[=] call[name[width_array]][binary_operation[name[fs] - constant[1]]]
name[fd].length assign[=] binary_operation[name[length_to_width] * name[fd].width]
if name[use_depth_to_width] begin[:]
name[fd].depth assign[=] binary_operation[name[depth_to_width] * name[fd].width]
call[name[capacity_method_selector], parameter[name[sl], name[fd], name[method]]]
break
if compare[name[fs] equal[==] binary_operation[call[name[len], parameter[name[fs_array]]] - constant[1]]] begin[:]
call[name[DesignError], parameter[constant[No suitable foundation sizes could be determined!]]]
return[name[fd]]
|
keyword[def] identifier[size_footing_for_capacity] ( identifier[sl] , identifier[vertical_load] , identifier[fos] = literal[int] , identifier[length_to_width] = literal[int] , identifier[verbose] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[method] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[depth_to_width] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[depth] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[use_depth_to_width] = literal[int]
keyword[if] keyword[not] identifier[depth] :
identifier[use_depth_to_width] = literal[int]
identifier[fd] = identifier[models] . identifier[FoundationRaft] ()
identifier[fd] . identifier[width] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[fd] . identifier[length] = identifier[length_to_width] * identifier[fd] . identifier[width]
keyword[if] identifier[use_depth_to_width] :
identifier[fd] . identifier[depth] = identifier[depth_to_width] * identifier[fd] . identifier[width]
identifier[capacity_method_selector] ( identifier[sl] , identifier[fd] , identifier[method] )
identifier[q] = identifier[fd] . identifier[q_ult]
identifier[bearing_capacity] = identifier[q] * identifier[fd] . identifier[length] * identifier[fd] . identifier[width]
identifier[fs_actual] = identifier[bearing_capacity] / identifier[vertical_load]
keyword[if] identifier[fs_actual] < identifier[fos] :
identifier[fd] . identifier[width] += literal[int]
keyword[else] :
keyword[if] identifier[verbose] :
identifier[log] ( literal[string] , identifier[fs_actual] )
identifier[log] ( literal[string] , identifier[fd] . identifier[width] )
keyword[break]
identifier[width_array] =[]
identifier[fs_array] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] ):
identifier[width_array] . identifier[append] ( identifier[fd] . identifier[width] )
identifier[fd] . identifier[length] = identifier[length_to_width] * identifier[fd] . identifier[width]
keyword[if] identifier[use_depth_to_width] :
identifier[fd] . identifier[depth] = identifier[depth_to_width] * identifier[fd] . identifier[width]
identifier[capacity_method_selector] ( identifier[sl] , identifier[fd] , identifier[method] )
identifier[q] = identifier[fd] . identifier[q_ult]
identifier[capacity] = identifier[q] * identifier[fd] . identifier[length] * identifier[fd] . identifier[width]
identifier[fs_array] . identifier[append] ( identifier[capacity] / identifier[vertical_load] )
identifier[fd] . identifier[width] = identifier[fd] . identifier[width] - literal[int] / literal[int]
keyword[if] identifier[verbose] :
identifier[log] ( literal[string] , identifier[fos] )
identifier[log] ( literal[string] , identifier[width_array] )
identifier[log] ( literal[string] , identifier[fs_array] )
keyword[for] identifier[fs] keyword[in] identifier[range] ( identifier[len] ( identifier[fs_array] )):
keyword[if] identifier[fs_array] [ identifier[fs] ]< identifier[fos] :
identifier[fd] . identifier[width] = identifier[width_array] [ identifier[fs] - literal[int] ]
identifier[fd] . identifier[length] = identifier[length_to_width] * identifier[fd] . identifier[width]
keyword[if] identifier[use_depth_to_width] :
identifier[fd] . identifier[depth] = identifier[depth_to_width] * identifier[fd] . identifier[width]
identifier[capacity_method_selector] ( identifier[sl] , identifier[fd] , identifier[method] )
keyword[break]
keyword[if] identifier[fs] == identifier[len] ( identifier[fs_array] )- literal[int] :
identifier[DesignError] ( literal[string] )
keyword[return] identifier[fd]
|
def size_footing_for_capacity(sl, vertical_load, fos=1.0, length_to_width=1.0, verbose=0, **kwargs):
"""
Determine the size of a footing given an aspect ratio and a load
:param sl: Soil object
:param vertical_load: The applied load to the foundation
:param fos: The target factor of safety
:param length_to_width: The desired length to width ratio of the foundation
:param verbose: verbosity
:return: a Foundation object
"""
method = kwargs.get('method', 'vesics')
depth_to_width = kwargs.get('depth_to_width', 0)
depth = kwargs.get('depth', 0)
use_depth_to_width = 0
if not depth:
use_depth_to_width = 1 # depends on [control=['if'], data=[]]
# Find approximate size
fd = models.FoundationRaft()
fd.width = 0.5 # start with B=1.0m
for i in range(50):
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width # depends on [control=['if'], data=[]]
capacity_method_selector(sl, fd, method)
q = fd.q_ult
bearing_capacity = q * fd.length * fd.width
fs_actual = bearing_capacity / vertical_load
if fs_actual < fos:
# Need to increase foundation sizes
fd.width += 0.5 # depends on [control=['if'], data=[]]
else:
if verbose:
log('fs_actual: ', fs_actual)
log('fd.width: ', fd.width) # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=[]]
# at this stage the current size should be too big
width_array = []
fs_array = []
for j in range(11):
width_array.append(fd.width)
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width # depends on [control=['if'], data=[]]
capacity_method_selector(sl, fd, method)
q = fd.q_ult
capacity = q * fd.length * fd.width
fs_array.append(capacity / vertical_load)
fd.width = fd.width - 0.5 / 10 # depends on [control=['for'], data=[]]
# search the array until FS satisfied:
if verbose:
log('reqFS: ', fos)
log('width array: \n', width_array)
log('FS array: \n', fs_array) # depends on [control=['if'], data=[]]
for fs in range(len(fs_array)):
if fs_array[fs] < fos:
fd.width = width_array[fs - 1]
fd.length = length_to_width * fd.width
if use_depth_to_width:
fd.depth = depth_to_width * fd.width # depends on [control=['if'], data=[]]
capacity_method_selector(sl, fd, method)
break # depends on [control=['if'], data=[]]
if fs == len(fs_array) - 1:
DesignError('No suitable foundation sizes could be determined!') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fs']]
return fd
|
def seqs_from_fastacmd(acc_list, blast_db,is_protein=True):
"""Get dict of description:seq from fastacmd."""
fasta_cmd_res = fasta_cmd_get_seqs(acc_list, blast_db=blast_db, \
is_protein=is_protein)
recs = FastaCmdFinder(fasta_cmd_res['StdOut'])
result = {}
for rec in recs:
try:
result[rec[0][1:].strip()] = ''.join(map(strip, rec[1:]))
except IndexError: #maybe we didn't get a sequence?
pass
fasta_cmd_res.cleanUp()
return result
|
def function[seqs_from_fastacmd, parameter[acc_list, blast_db, is_protein]]:
constant[Get dict of description:seq from fastacmd.]
variable[fasta_cmd_res] assign[=] call[name[fasta_cmd_get_seqs], parameter[name[acc_list]]]
variable[recs] assign[=] call[name[FastaCmdFinder], parameter[call[name[fasta_cmd_res]][constant[StdOut]]]]
variable[result] assign[=] dictionary[[], []]
for taget[name[rec]] in starred[name[recs]] begin[:]
<ast.Try object at 0x7da1b0a6e020>
call[name[fasta_cmd_res].cleanUp, parameter[]]
return[name[result]]
|
keyword[def] identifier[seqs_from_fastacmd] ( identifier[acc_list] , identifier[blast_db] , identifier[is_protein] = keyword[True] ):
literal[string]
identifier[fasta_cmd_res] = identifier[fasta_cmd_get_seqs] ( identifier[acc_list] , identifier[blast_db] = identifier[blast_db] , identifier[is_protein] = identifier[is_protein] )
identifier[recs] = identifier[FastaCmdFinder] ( identifier[fasta_cmd_res] [ literal[string] ])
identifier[result] ={}
keyword[for] identifier[rec] keyword[in] identifier[recs] :
keyword[try] :
identifier[result] [ identifier[rec] [ literal[int] ][ literal[int] :]. identifier[strip] ()]= literal[string] . identifier[join] ( identifier[map] ( identifier[strip] , identifier[rec] [ literal[int] :]))
keyword[except] identifier[IndexError] :
keyword[pass]
identifier[fasta_cmd_res] . identifier[cleanUp] ()
keyword[return] identifier[result]
|
def seqs_from_fastacmd(acc_list, blast_db, is_protein=True):
"""Get dict of description:seq from fastacmd."""
fasta_cmd_res = fasta_cmd_get_seqs(acc_list, blast_db=blast_db, is_protein=is_protein)
recs = FastaCmdFinder(fasta_cmd_res['StdOut'])
result = {}
for rec in recs:
try:
result[rec[0][1:].strip()] = ''.join(map(strip, rec[1:])) # depends on [control=['try'], data=[]]
except IndexError: #maybe we didn't get a sequence?
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['rec']]
fasta_cmd_res.cleanUp()
return result
|
def _build_ds_from_instruction(instruction, ds_from_file_fn):
  """Map an instruction to a real datasets for one particular shard.

  Args:
    instruction: A `dict` of `tf.Tensor` containing the instruction to load
      the particular shard (filename, mask,...)
    ds_from_file_fn: `fct`, function which returns the dataset associated to
      the filename

  Returns:
    dataset: `tf.data.Dataset`, The shard loaded from the instruction
  """
  # Build the example dataset and the parallel boolean-mask dataset for
  # this shard.
  shard_examples = ds_from_file_fn(instruction["filepath"])
  shard_mask = _build_mask_ds(
      mask_offset=instruction["mask_offset"],
      mask=instruction["mask"],
  )
  # Pair every example with its mask value, drop examples whose mask is
  # False, then strip the mask so only the examples remain.
  paired = tf.data.Dataset.zip((shard_examples, shard_mask))
  kept = paired.filter(lambda ex, keep: keep)
  return kept.map(lambda ex, keep: ex)
|
def function[_build_ds_from_instruction, parameter[instruction, ds_from_file_fn]]:
constant[Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
]
variable[examples_ds] assign[=] call[name[ds_from_file_fn], parameter[call[name[instruction]][constant[filepath]]]]
variable[mask_ds] assign[=] call[name[_build_mask_ds], parameter[]]
variable[ds] assign[=] call[name[tf].data.Dataset.zip, parameter[tuple[[<ast.Name object at 0x7da1b2044b20>, <ast.Name object at 0x7da1b20470a0>]]]]
variable[ds] assign[=] call[name[ds].filter, parameter[<ast.Lambda object at 0x7da1b2044220>]]
variable[ds] assign[=] call[name[ds].map, parameter[<ast.Lambda object at 0x7da1b2046c20>]]
return[name[ds]]
|
keyword[def] identifier[_build_ds_from_instruction] ( identifier[instruction] , identifier[ds_from_file_fn] ):
literal[string]
identifier[examples_ds] = identifier[ds_from_file_fn] ( identifier[instruction] [ literal[string] ])
identifier[mask_ds] = identifier[_build_mask_ds] (
identifier[mask_offset] = identifier[instruction] [ literal[string] ],
identifier[mask] = identifier[instruction] [ literal[string] ],
)
identifier[ds] = identifier[tf] . identifier[data] . identifier[Dataset] . identifier[zip] (( identifier[examples_ds] , identifier[mask_ds] ))
identifier[ds] = identifier[ds] . identifier[filter] ( keyword[lambda] identifier[example] , identifier[mask] : identifier[mask] )
identifier[ds] = identifier[ds] . identifier[map] ( keyword[lambda] identifier[example] , identifier[mask] : identifier[example] )
keyword[return] identifier[ds]
|
def _build_ds_from_instruction(instruction, ds_from_file_fn):
"""Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
"""
# Create the example and mask ds for this particular shard
examples_ds = ds_from_file_fn(instruction['filepath'])
mask_ds = _build_mask_ds(mask_offset=instruction['mask_offset'], mask=instruction['mask'])
# Zip the mask and real examples
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
# Filter according to the mask (only keep True)
ds = ds.filter(lambda example, mask: mask)
# Only keep the examples
ds = ds.map(lambda example, mask: example)
return ds
|
def _countmatrix(lxs):
""" fill a matrix with pairwise data sharing """
## an empty matrix
share = np.zeros((lxs.shape[0], lxs.shape[0]))
## fill above
names = range(lxs.shape[0])
for row in lxs:
for samp1, samp2 in itertools.combinations(names, 2):
shared = lxs[samp1, lxs[samp2] > 0].sum()
share[samp1, samp2] = shared
## mirror below
##share[]
## fill diagonal with total sample coverage
for row in xrange(len(names)):
share[row, row] = lxs[row].sum()
return share
|
def function[_countmatrix, parameter[lxs]]:
constant[ fill a matrix with pairwise data sharing ]
variable[share] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b0035810>, <ast.Subscript object at 0x7da1b0037b80>]]]]
variable[names] assign[=] call[name[range], parameter[call[name[lxs].shape][constant[0]]]]
for taget[name[row]] in starred[name[lxs]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2044c14b0>, <ast.Name object at 0x7da2044c3b20>]]] in starred[call[name[itertools].combinations, parameter[name[names], constant[2]]]] begin[:]
variable[shared] assign[=] call[call[name[lxs]][tuple[[<ast.Name object at 0x7da2044c0730>, <ast.Compare object at 0x7da2044c3f40>]]].sum, parameter[]]
call[name[share]][tuple[[<ast.Name object at 0x7da2044c1ff0>, <ast.Name object at 0x7da2044c2470>]]] assign[=] name[shared]
for taget[name[row]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[names]]]]]] begin[:]
call[name[share]][tuple[[<ast.Name object at 0x7da2044c1ea0>, <ast.Name object at 0x7da2044c18a0>]]] assign[=] call[call[name[lxs]][name[row]].sum, parameter[]]
return[name[share]]
|
keyword[def] identifier[_countmatrix] ( identifier[lxs] ):
literal[string]
identifier[share] = identifier[np] . identifier[zeros] (( identifier[lxs] . identifier[shape] [ literal[int] ], identifier[lxs] . identifier[shape] [ literal[int] ]))
identifier[names] = identifier[range] ( identifier[lxs] . identifier[shape] [ literal[int] ])
keyword[for] identifier[row] keyword[in] identifier[lxs] :
keyword[for] identifier[samp1] , identifier[samp2] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[names] , literal[int] ):
identifier[shared] = identifier[lxs] [ identifier[samp1] , identifier[lxs] [ identifier[samp2] ]> literal[int] ]. identifier[sum] ()
identifier[share] [ identifier[samp1] , identifier[samp2] ]= identifier[shared]
keyword[for] identifier[row] keyword[in] identifier[xrange] ( identifier[len] ( identifier[names] )):
identifier[share] [ identifier[row] , identifier[row] ]= identifier[lxs] [ identifier[row] ]. identifier[sum] ()
keyword[return] identifier[share]
|
def _countmatrix(lxs):
""" fill a matrix with pairwise data sharing """
## an empty matrix
share = np.zeros((lxs.shape[0], lxs.shape[0]))
## fill above
names = range(lxs.shape[0])
for row in lxs:
for (samp1, samp2) in itertools.combinations(names, 2):
shared = lxs[samp1, lxs[samp2] > 0].sum()
share[samp1, samp2] = shared # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
## mirror below
##share[]
## fill diagonal with total sample coverage
for row in xrange(len(names)):
share[row, row] = lxs[row].sum() # depends on [control=['for'], data=['row']]
return share
|
def is_method(func):
    """Detects if the given callable is a method. In context of pytypes this
    function is more reliable than plain inspect.ismethod, e.g. it automatically
    bypasses wrappers from typechecked and override decorators.
    """
    actual = _actualfunc(func)
    arg_names = getargnames(getargspecs(actual))
    # No parameters at all: cannot be a method.
    if not arg_names:
        return False
    # First parameter is not 'self': defer to inspect's judgement.
    if arg_names[0] != 'self':
        return inspect.ismethod(func)
    if inspect.ismethod(func):
        return True
    # Python 3 has no unbound methods, so a leading 'self' is enough to
    # count it as a method.
    if sys.version_info.major >= 3:
        return True
    _warn_argname('is_method encountered non-method declaring self',
                  actual, False, False, None)
    return False
|
def function[is_method, parameter[func]]:
constant[Detects if the given callable is a method. In context of pytypes this
function is more reliable than plain inspect.ismethod, e.g. it automatically
bypasses wrappers from typechecked and override decorators.
]
variable[func0] assign[=] call[name[_actualfunc], parameter[name[func]]]
variable[argNames] assign[=] call[name[getargnames], parameter[call[name[getargspecs], parameter[name[func0]]]]]
if compare[call[name[len], parameter[name[argNames]]] greater[>] constant[0]] begin[:]
if compare[call[name[argNames]][constant[0]] equal[==] constant[self]] begin[:]
if call[name[inspect].ismethod, parameter[name[func]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_method] ( identifier[func] ):
literal[string]
identifier[func0] = identifier[_actualfunc] ( identifier[func] )
identifier[argNames] = identifier[getargnames] ( identifier[getargspecs] ( identifier[func0] ))
keyword[if] identifier[len] ( identifier[argNames] )> literal[int] :
keyword[if] identifier[argNames] [ literal[int] ]== literal[string] :
keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[func] ):
keyword[return] keyword[True]
keyword[elif] identifier[sys] . identifier[version_info] . identifier[major] >= literal[int] :
keyword[return] keyword[True]
keyword[else] :
identifier[_warn_argname] ( literal[string] ,
identifier[func0] , keyword[False] , keyword[False] , keyword[None] )
keyword[else] :
keyword[return] identifier[inspect] . identifier[ismethod] ( identifier[func] )
keyword[return] keyword[False]
|
def is_method(func):
"""Detects if the given callable is a method. In context of pytypes this
function is more reliable than plain inspect.ismethod, e.g. it automatically
bypasses wrappers from typechecked and override decorators.
"""
func0 = _actualfunc(func)
argNames = getargnames(getargspecs(func0))
if len(argNames) > 0:
if argNames[0] == 'self':
if inspect.ismethod(func):
return True # depends on [control=['if'], data=[]]
elif sys.version_info.major >= 3:
# In Python3 there are no unbound methods, so we count as method,
# if first arg is called 'self'
return True # depends on [control=['if'], data=[]]
else:
_warn_argname('is_method encountered non-method declaring self', func0, False, False, None) # depends on [control=['if'], data=[]]
else:
return inspect.ismethod(func) # depends on [control=['if'], data=[]]
return False
|
def start(self, channel):
    """Start running this virtual device including any necessary worker threads.

    Args:
        channel (IOTilePushChannel): the channel with a stream and trace
            routine for streaming and tracing data through a VirtualInterface

    Raises:
        InternalError: if the device was already started.
    """
    # Guard against double-start: starting twice would spawn duplicate
    # worker threads.
    if self._started:
        raise InternalError("The method start() was called twice on VirtualIOTileDevice.")

    self._push_channel = channel
    self.start_workers()
|
def function[start, parameter[self, channel]]:
constant[Start running this virtual device including any necessary worker threads.
Args:
channel (IOTilePushChannel): the channel with a stream and trace
routine for streaming and tracing data through a VirtualInterface
]
if name[self]._started begin[:]
<ast.Raise object at 0x7da20c6c7760>
name[self]._push_channel assign[=] name[channel]
call[name[self].start_workers, parameter[]]
|
keyword[def] identifier[start] ( identifier[self] , identifier[channel] ):
literal[string]
keyword[if] identifier[self] . identifier[_started] :
keyword[raise] identifier[InternalError] ( literal[string] )
identifier[self] . identifier[_push_channel] = identifier[channel]
identifier[self] . identifier[start_workers] ()
|
def start(self, channel):
"""Start running this virtual device including any necessary worker threads.
Args:
channel (IOTilePushChannel): the channel with a stream and trace
routine for streaming and tracing data through a VirtualInterface
"""
if self._started:
raise InternalError('The method start() was called twice on VirtualIOTileDevice.') # depends on [control=['if'], data=[]]
self._push_channel = channel
self.start_workers()
|
def fave_slices(self, user_id=None):
    """Favorite slices for a user"""
    # Default to the currently-logged-in user when no id was supplied.
    uid = user_id if user_id else g.user.id

    # Join slices to the fav-star records belonging to this user.
    join_condition = sqla.and_(
        models.FavStar.user_id == int(uid),
        models.FavStar.class_name == 'slice',
        models.Slice.id == models.FavStar.obj_id,
    )
    qry = (
        db.session.query(models.Slice, models.FavStar.dttm)
        .join(models.FavStar, join_condition)
        .order_by(models.FavStar.dttm.desc())
    )

    payload = []
    for row in qry.all():
        slc = row.Slice
        entry = {
            'id': slc.id,
            'title': slc.slice_name,
            'url': slc.slice_url,
            'dttm': row.dttm,
            'viz_type': slc.viz_type,
        }
        creator = slc.created_by
        if creator:
            entry['creator'] = str(creator)
            entry['creator_url'] = '/superset/profile/{}/'.format(
                creator.username)
        payload.append(entry)
    return json_success(
        json.dumps(payload, default=utils.json_int_dttm_ser))
|
def function[fave_slices, parameter[self, user_id]]:
constant[Favorite slices for a user]
if <ast.UnaryOp object at 0x7da1b2060e50> begin[:]
variable[user_id] assign[=] name[g].user.id
variable[qry] assign[=] call[call[call[name[db].session.query, parameter[name[models].Slice, name[models].FavStar.dttm]].join, parameter[name[models].FavStar, call[name[sqla].and_, parameter[compare[name[models].FavStar.user_id equal[==] call[name[int], parameter[name[user_id]]]], compare[name[models].FavStar.class_name equal[==] constant[slice]], compare[name[models].Slice.id equal[==] name[models].FavStar.obj_id]]]]].order_by, parameter[call[name[models].FavStar.dttm.desc, parameter[]]]]
variable[payload] assign[=] list[[]]
for taget[name[o]] in starred[call[name[qry].all, parameter[]]] begin[:]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e9a470>, <ast.Constant object at 0x7da1b1e991e0>, <ast.Constant object at 0x7da1b1e99390>, <ast.Constant object at 0x7da1b1e9aa40>, <ast.Constant object at 0x7da1b1e98520>], [<ast.Attribute object at 0x7da1b1e98070>, <ast.Attribute object at 0x7da1b1e9a410>, <ast.Attribute object at 0x7da1b1e99fc0>, <ast.Attribute object at 0x7da1b2031b70>, <ast.Attribute object at 0x7da1b20327d0>]]
if name[o].Slice.created_by begin[:]
variable[user] assign[=] name[o].Slice.created_by
call[name[d]][constant[creator]] assign[=] call[name[str], parameter[name[user]]]
call[name[d]][constant[creator_url]] assign[=] call[constant[/superset/profile/{}/].format, parameter[name[user].username]]
call[name[payload].append, parameter[name[d]]]
return[call[name[json_success], parameter[call[name[json].dumps, parameter[name[payload]]]]]]
|
keyword[def] identifier[fave_slices] ( identifier[self] , identifier[user_id] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[user_id] :
identifier[user_id] = identifier[g] . identifier[user] . identifier[id]
identifier[qry] =(
identifier[db] . identifier[session] . identifier[query] (
identifier[models] . identifier[Slice] ,
identifier[models] . identifier[FavStar] . identifier[dttm] ,
)
. identifier[join] (
identifier[models] . identifier[FavStar] ,
identifier[sqla] . identifier[and_] (
identifier[models] . identifier[FavStar] . identifier[user_id] == identifier[int] ( identifier[user_id] ),
identifier[models] . identifier[FavStar] . identifier[class_name] == literal[string] ,
identifier[models] . identifier[Slice] . identifier[id] == identifier[models] . identifier[FavStar] . identifier[obj_id] ,
),
)
. identifier[order_by] (
identifier[models] . identifier[FavStar] . identifier[dttm] . identifier[desc] (),
)
)
identifier[payload] =[]
keyword[for] identifier[o] keyword[in] identifier[qry] . identifier[all] ():
identifier[d] ={
literal[string] : identifier[o] . identifier[Slice] . identifier[id] ,
literal[string] : identifier[o] . identifier[Slice] . identifier[slice_name] ,
literal[string] : identifier[o] . identifier[Slice] . identifier[slice_url] ,
literal[string] : identifier[o] . identifier[dttm] ,
literal[string] : identifier[o] . identifier[Slice] . identifier[viz_type] ,
}
keyword[if] identifier[o] . identifier[Slice] . identifier[created_by] :
identifier[user] = identifier[o] . identifier[Slice] . identifier[created_by]
identifier[d] [ literal[string] ]= identifier[str] ( identifier[user] )
identifier[d] [ literal[string] ]= literal[string] . identifier[format] (
identifier[user] . identifier[username] )
identifier[payload] . identifier[append] ( identifier[d] )
keyword[return] identifier[json_success] (
identifier[json] . identifier[dumps] ( identifier[payload] , identifier[default] = identifier[utils] . identifier[json_int_dttm_ser] ))
|
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id # depends on [control=['if'], data=[]]
qry = db.session.query(models.Slice, models.FavStar.dttm).join(models.FavStar, sqla.and_(models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id)).order_by(models.FavStar.dttm.desc())
payload = []
for o in qry.all():
d = {'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'dttm': o.dttm, 'viz_type': o.Slice.viz_type}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(user.username) # depends on [control=['if'], data=[]]
payload.append(d) # depends on [control=['for'], data=['o']]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
|
def _get_route_args(self, namespace, route, tag=False):  # pylint: disable=unused-argument
    """Returns a list of name / value string pairs representing the arguments for
    a particular route."""
    data_type, _ = unwrap_nullable(route.arg_data_type)

    if is_struct_type(data_type):
        # One (name, type) pair per struct field; docs only for fields
        # that actually carry a doc string.
        arg_list = [
            (fmt_var(field.name),
             fmt_type(field.data_type, tag=tag, has_default=field.has_default))
            for field in data_type.all_fields
        ]
        doc_list = []
        for field in data_type.fields:
            if field.doc:
                doc_list.append((fmt_var(field.name),
                                 self.process_doc(field.doc, self._docf)))
        return arg_list, doc_list

    if is_union_type(data_type):
        # Unions contribute a single argument named after the union type.
        union_var = fmt_var(data_type.name)
        arg_list = [(union_var, fmt_type(route.arg_data_type, tag=tag))]
        if data_type.doc:
            doc = self.process_doc(data_type.doc, self._docf)
        else:
            doc = 'The {} union'.format(fmt_class(data_type.name))
        return arg_list, [(union_var, doc)]

    return [], []
|
def function[_get_route_args, parameter[self, namespace, route, tag]]:
constant[Returns a list of name / value string pairs representing the arguments for
a particular route.]
<ast.Tuple object at 0x7da18f58fb50> assign[=] call[name[unwrap_nullable], parameter[name[route].arg_data_type]]
if call[name[is_struct_type], parameter[name[data_type]]] begin[:]
variable[arg_list] assign[=] list[[]]
for taget[name[field]] in starred[name[data_type].all_fields] begin[:]
call[name[arg_list].append, parameter[tuple[[<ast.Call object at 0x7da18f58f430>, <ast.Call object at 0x7da18f58fc10>]]]]
variable[doc_list] assign[=] <ast.ListComp object at 0x7da20c7c8ca0>
return[tuple[[<ast.Name object at 0x7da18f58da20>, <ast.Name object at 0x7da18f58ca60>]]]
|
keyword[def] identifier[_get_route_args] ( identifier[self] , identifier[namespace] , identifier[route] , identifier[tag] = keyword[False] ):
literal[string]
identifier[data_type] , identifier[_] = identifier[unwrap_nullable] ( identifier[route] . identifier[arg_data_type] )
keyword[if] identifier[is_struct_type] ( identifier[data_type] ):
identifier[arg_list] =[]
keyword[for] identifier[field] keyword[in] identifier[data_type] . identifier[all_fields] :
identifier[arg_list] . identifier[append] (( identifier[fmt_var] ( identifier[field] . identifier[name] ), identifier[fmt_type] (
identifier[field] . identifier[data_type] , identifier[tag] = identifier[tag] , identifier[has_default] = identifier[field] . identifier[has_default] )))
identifier[doc_list] =[( identifier[fmt_var] ( identifier[f] . identifier[name] ), identifier[self] . identifier[process_doc] ( identifier[f] . identifier[doc] , identifier[self] . identifier[_docf] ))
keyword[for] identifier[f] keyword[in] identifier[data_type] . identifier[fields] keyword[if] identifier[f] . identifier[doc] ]
keyword[elif] identifier[is_union_type] ( identifier[data_type] ):
identifier[arg_list] =[( identifier[fmt_var] ( identifier[data_type] . identifier[name] ), identifier[fmt_type] (
identifier[route] . identifier[arg_data_type] , identifier[tag] = identifier[tag] ))]
identifier[doc_list] =[( identifier[fmt_var] ( identifier[data_type] . identifier[name] ),
identifier[self] . identifier[process_doc] ( identifier[data_type] . identifier[doc] ,
identifier[self] . identifier[_docf] ) keyword[if] identifier[data_type] . identifier[doc]
keyword[else] literal[string] . identifier[format] (
identifier[fmt_class] ( identifier[data_type]
. identifier[name] )))]
keyword[else] :
identifier[arg_list] =[]
identifier[doc_list] =[]
keyword[return] identifier[arg_list] , identifier[doc_list]
|
def _get_route_args(self, namespace, route, tag=False): # pylint: disable=unused-argument
'Returns a list of name / value string pairs representing the arguments for\n a particular route.'
(data_type, _) = unwrap_nullable(route.arg_data_type)
if is_struct_type(data_type):
arg_list = []
for field in data_type.all_fields:
arg_list.append((fmt_var(field.name), fmt_type(field.data_type, tag=tag, has_default=field.has_default))) # depends on [control=['for'], data=['field']]
doc_list = [(fmt_var(f.name), self.process_doc(f.doc, self._docf)) for f in data_type.fields if f.doc] # depends on [control=['if'], data=[]]
elif is_union_type(data_type):
arg_list = [(fmt_var(data_type.name), fmt_type(route.arg_data_type, tag=tag))]
doc_list = [(fmt_var(data_type.name), self.process_doc(data_type.doc, self._docf) if data_type.doc else 'The {} union'.format(fmt_class(data_type.name)))] # depends on [control=['if'], data=[]]
else:
arg_list = []
doc_list = []
return (arg_list, doc_list)
|
def convert(self, key, value):
    """Get the serialized value for a given key."""
    if key not in self._dtypes:
        # Unknown key: refresh the cached type table from the database,
        # then register a brand-new type entry if it is still missing.
        self.read_types()
        if key not in self._dtypes:
            type_name = utils.name(value)
            self._dtypes[key] = (type_name,
                                 utils.serializer(type_name),
                                 utils.deserializer(type_name))
            with self.db:
                self.db.execute(
                    "replace into value_types (key, value_type) values (?, ?)",
                    (key, type_name))
    # Entry layout: (name, serializer, deserializer).
    serialize = self._dtypes[key][1]
    return serialize(value)
|
def function[convert, parameter[self, key, value]]:
constant[Get the serialized value for a given key.]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]._dtypes] begin[:]
call[name[self].read_types, parameter[]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]._dtypes] begin[:]
variable[name] assign[=] call[name[utils].name, parameter[name[value]]]
variable[serializer] assign[=] call[name[utils].serializer, parameter[name[name]]]
variable[deserializer] assign[=] call[name[utils].deserializer, parameter[name[name]]]
call[name[self]._dtypes][name[key]] assign[=] tuple[[<ast.Name object at 0x7da1b243b280>, <ast.Name object at 0x7da1b243ae90>, <ast.Name object at 0x7da1b2438670>]]
with name[self].db begin[:]
call[name[self].db.execute, parameter[constant[replace into value_types (key, value_type) values (?, ?)], tuple[[<ast.Name object at 0x7da1b243afe0>, <ast.Name object at 0x7da1b243b010>]]]]
return[call[call[call[name[self]._dtypes][name[key]]][constant[1]], parameter[name[value]]]]
|
keyword[def] identifier[convert] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[_dtypes] :
identifier[self] . identifier[read_types] ()
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[_dtypes] :
identifier[name] = identifier[utils] . identifier[name] ( identifier[value] )
identifier[serializer] = identifier[utils] . identifier[serializer] ( identifier[name] )
identifier[deserializer] = identifier[utils] . identifier[deserializer] ( identifier[name] )
identifier[self] . identifier[_dtypes] [ identifier[key] ]=( identifier[name] , identifier[serializer] , identifier[deserializer] )
keyword[with] identifier[self] . identifier[db] :
identifier[self] . identifier[db] . identifier[execute] ( literal[string] ,( identifier[key] , identifier[name] ))
keyword[return] identifier[self] . identifier[_dtypes] [ identifier[key] ][ literal[int] ]( identifier[value] )
|
def convert(self, key, value):
"""Get the serialized value for a given key."""
if key not in self._dtypes:
self.read_types()
if key not in self._dtypes:
name = utils.name(value)
serializer = utils.serializer(name)
deserializer = utils.deserializer(name)
self._dtypes[key] = (name, serializer, deserializer)
with self.db:
self.db.execute('replace into value_types (key, value_type) values (?, ?)', (key, name)) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=['key']] # depends on [control=['if'], data=['key']]
return self._dtypes[key][1](value)
|
def _FormatServiceText(self, service):
"""Produces a human readable multi-line string representing the service.
Args:
service (WindowsService): service to format.
Returns:
str: human readable representation of a Windows Service.
"""
string_segments = [
service.name,
'\tImage Path = {0:s}'.format(service.image_path),
'\tService Type = {0:s}'.format(service.HumanReadableType()),
'\tStart Type = {0:s}'.format(service.HumanReadableStartType()),
'\tService Dll = {0:s}'.format(service.service_dll),
'\tObject Name = {0:s}'.format(service.object_name),
'\tSources:']
for source in service.sources:
string_segments.append('\t\t{0:s}:{1:s}'.format(source[0], source[1]))
return '\n'.join(string_segments)
|
def function[_FormatServiceText, parameter[self, service]]:
constant[Produces a human readable multi-line string representing the service.
Args:
service (WindowsService): service to format.
Returns:
str: human readable representation of a Windows Service.
]
variable[string_segments] assign[=] list[[<ast.Attribute object at 0x7da18bc72a70>, <ast.Call object at 0x7da18bc73790>, <ast.Call object at 0x7da18bc724a0>, <ast.Call object at 0x7da18bc71a80>, <ast.Call object at 0x7da18bc721a0>, <ast.Call object at 0x7da18bc72140>, <ast.Constant object at 0x7da18bc71150>]]
for taget[name[source]] in starred[name[service].sources] begin[:]
call[name[string_segments].append, parameter[call[constant[ {0:s}:{1:s}].format, parameter[call[name[source]][constant[0]], call[name[source]][constant[1]]]]]]
return[call[constant[
].join, parameter[name[string_segments]]]]
|
keyword[def] identifier[_FormatServiceText] ( identifier[self] , identifier[service] ):
literal[string]
identifier[string_segments] =[
identifier[service] . identifier[name] ,
literal[string] . identifier[format] ( identifier[service] . identifier[image_path] ),
literal[string] . identifier[format] ( identifier[service] . identifier[HumanReadableType] ()),
literal[string] . identifier[format] ( identifier[service] . identifier[HumanReadableStartType] ()),
literal[string] . identifier[format] ( identifier[service] . identifier[service_dll] ),
literal[string] . identifier[format] ( identifier[service] . identifier[object_name] ),
literal[string] ]
keyword[for] identifier[source] keyword[in] identifier[service] . identifier[sources] :
identifier[string_segments] . identifier[append] ( literal[string] . identifier[format] ( identifier[source] [ literal[int] ], identifier[source] [ literal[int] ]))
keyword[return] literal[string] . identifier[join] ( identifier[string_segments] )
|
def _FormatServiceText(self, service):
"""Produces a human readable multi-line string representing the service.
Args:
service (WindowsService): service to format.
Returns:
str: human readable representation of a Windows Service.
"""
string_segments = [service.name, '\tImage Path = {0:s}'.format(service.image_path), '\tService Type = {0:s}'.format(service.HumanReadableType()), '\tStart Type = {0:s}'.format(service.HumanReadableStartType()), '\tService Dll = {0:s}'.format(service.service_dll), '\tObject Name = {0:s}'.format(service.object_name), '\tSources:']
for source in service.sources:
string_segments.append('\t\t{0:s}:{1:s}'.format(source[0], source[1])) # depends on [control=['for'], data=['source']]
return '\n'.join(string_segments)
|
def relative_to_all(features, groups, bin_edges, weight_func,
                    use_orig_distr,
                    group_ids, num_groups,
                    return_networkx_graph, out_weights_path):
    """
    Computes the given function (aka weight or distance) between histogram from each of the groups to a "grand histogram" derived from all groups.

    Parameters
    ----------
    features : ndarray or str
        1d array of scalar values, either provided directly as a 1d numpy array,
        or as a path to a file containing these values
    groups : ndarray or str
        Membership array of same length as `features`, each value specifying which group that particular node belongs to.
        Input can be either provided directly as a 1d numpy array,or as a path to a file containing these values.
        For example, if you have cortical thickness values for 1000 vertices (`features` is ndarray of length 1000),
        belonging to 100 patches, the groups array (of length 1000) could have numbers 1 to 100 (number of unique values)
        specifying which element belongs to which cortical patch.
        Grouping with numerical values (contiguous from 1 to num_patches) is strongly recommended for simplicity,
        but this could also be a list of strings of length p, in which case a tuple is returned,
        identifying which weight belongs to which pair of patches.
    bin_edges : list or ndarray
        Array of bin edges within which to compute the histogram in.
    weight_func : callable
        Function to compute the edge weight between groups/nodes.
    use_orig_distr : bool, optional
        When using a user-defined callable, this flag
        1) allows skipping of pre-processing (trimming outliers) and histogram construction,
        2) enables the application of arbitrary callable (user-defined) on the original distributions coming from the two groups/ROIs/nodes directly.
        Example: ``diff_in_medians = lambda x, y: abs(np.median(x)-np.median(y))``
        This option is valid only when weight_method is a valid callable,
        which must take two inputs (possibly of different lengths) and return a single scalar.
    group_ids : list
        List of unique group ids to construct the nodes from (must all be present in the `groups` argument)
    num_groups : int
        Number of unique groups in the `group_ids`
    return_networkx_graph : bool, optional
        Specifies the need for a networkx graph populated with weights computed. Default: False.
    out_weights_path : str, optional
        Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format.
        Default: nothing saved unless instructed.

    Returns
    -------
    distance_vector : ndarray
        vector of distances between the grand histogram and the individual ROIs

    Raises
    ------
    ValueError
        If one or more of the arrays are empty.
    """
    # notice the use of all features without regard to group membership
    hist_whole = compute_histogram(features, bin_edges, use_orig_distr)

    # to identify the central node capturing distribution from all roi's
    whole_node = 'whole'

    if return_networkx_graph:
        graph = nx.Graph()
        graph.add_nodes_from(group_ids)
        graph.add_node(whole_node)
    else:
        edge_weights = np.full([num_groups, 1], np.nan)

    for src in range(num_groups):
        index_roi = groups == group_ids[src]
        hist_roi = compute_histogram(features[index_roi], bin_edges, use_orig_distr)
        edge_value = weight_func(hist_whole, hist_roi)
        if return_networkx_graph:
            graph.add_edge(group_ids[src], whole_node, weight=float(edge_value))
        else:
            edge_weights[src] = edge_value

    if return_networkx_graph:
        if out_weights_path is not None:
            # BUGFIX: networkx Graph objects have no write_graphml method;
            # GraphML export is a module-level function.
            nx.write_graphml(graph, out_weights_path)
        return graph
    else:
        if out_weights_path is not None:
            np.savetxt(out_weights_path, edge_weights, delimiter=',', fmt='%.9f')
        return edge_weights
|
def function[relative_to_all, parameter[features, groups, bin_edges, weight_func, use_orig_distr, group_ids, num_groups, return_networkx_graph, out_weights_path]]:
constant[
Computes the given function (aka weight or distance) between histogram from each of the groups to a "grand histogram" derived from all groups.
Parameters
----------
features : ndarray or str
1d array of scalar values, either provided directly as a 1d numpy array,
or as a path to a file containing these values
groups : ndarray or str
Membership array of same length as `features`, each value specifying which group that particular node belongs to.
Input can be either provided directly as a 1d numpy array,or as a path to a file containing these values.
For example, if you have cortical thickness values for 1000 vertices (`features` is ndarray of length 1000),
belonging to 100 patches, the groups array (of length 1000) could have numbers 1 to 100 (number of unique values)
specifying which element belongs to which cortical patch.
Grouping with numerical values (contiguous from 1 to num_patches) is strongly recommended for simplicity,
but this could also be a list of strings of length p, in which case a tuple is returned,
identifying which weight belongs to which pair of patches.
bin_edges : list or ndarray
Array of bin edges within which to compute the histogram in.
weight_func : callable
Function to compute the edge weight between groups/nodes.
use_orig_distr : bool, optional
When using a user-defined callable, this flag
1) allows skipping of pre-processing (trimming outliers) and histogram construction,
2) enables the application of arbitrary callable (user-defined) on the original distributions coming from the two groups/ROIs/nodes directly.
Example: ``diff_in_medians = lambda x, y: abs(np.median(x)-np.median(y))``
This option is valid only when weight_method is a valid callable,
which must take two inputs (possibly of different lengths) and return a single scalar.
group_ids : list
List of unique group ids to construct the nodes from (must all be present in the `groups` argument)
num_groups : int
Number of unique groups in the `group_ids`
return_networkx_graph : bool, optional
Specifies the need for a networkx graph populated with weights computed. Default: False.
out_weights_path : str, optional
Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format.
Default: nothing saved unless instructed.
Returns
-------
distance_vector : ndarray
vector of distances between the grand histogram and the individual ROIs
Raises
------
ValueError
If one or more of the arrays are empty.
]
variable[hist_whole] assign[=] call[name[compute_histogram], parameter[name[features], name[bin_edges], name[use_orig_distr]]]
variable[whole_node] assign[=] constant[whole]
if name[return_networkx_graph] begin[:]
variable[graph] assign[=] call[name[nx].Graph, parameter[]]
call[name[graph].add_nodes_from, parameter[name[group_ids]]]
call[name[graph].add_node, parameter[name[whole_node]]]
for taget[name[src]] in starred[call[name[range], parameter[name[num_groups]]]] begin[:]
variable[index_roi] assign[=] compare[name[groups] equal[==] call[name[group_ids]][name[src]]]
variable[hist_roi] assign[=] call[name[compute_histogram], parameter[call[name[features]][name[index_roi]], name[bin_edges], name[use_orig_distr]]]
variable[edge_value] assign[=] call[name[weight_func], parameter[name[hist_whole], name[hist_roi]]]
if name[return_networkx_graph] begin[:]
call[name[graph].add_edge, parameter[call[name[group_ids]][name[src]], name[whole_node]]]
if name[return_networkx_graph] begin[:]
if compare[name[out_weights_path] is_not constant[None]] begin[:]
call[name[graph].write_graphml, parameter[name[out_weights_path]]]
return[name[graph]]
|
keyword[def] identifier[relative_to_all] ( identifier[features] , identifier[groups] , identifier[bin_edges] , identifier[weight_func] ,
identifier[use_orig_distr] ,
identifier[group_ids] , identifier[num_groups] ,
identifier[return_networkx_graph] , identifier[out_weights_path] ):
literal[string]
identifier[hist_whole] = identifier[compute_histogram] ( identifier[features] , identifier[bin_edges] , identifier[use_orig_distr] )
identifier[whole_node] = literal[string]
keyword[if] identifier[return_networkx_graph] :
identifier[graph] = identifier[nx] . identifier[Graph] ()
identifier[graph] . identifier[add_nodes_from] ( identifier[group_ids] )
identifier[graph] . identifier[add_node] ( identifier[whole_node] )
keyword[else] :
identifier[edge_weights] = identifier[np] . identifier[full] ([ identifier[num_groups] , literal[int] ], identifier[np] . identifier[nan] )
keyword[for] identifier[src] keyword[in] identifier[range] ( identifier[num_groups] ):
identifier[index_roi] = identifier[groups] == identifier[group_ids] [ identifier[src] ]
identifier[hist_roi] = identifier[compute_histogram] ( identifier[features] [ identifier[index_roi] ], identifier[bin_edges] , identifier[use_orig_distr] )
identifier[edge_value] = identifier[weight_func] ( identifier[hist_whole] , identifier[hist_roi] )
keyword[if] identifier[return_networkx_graph] :
identifier[graph] . identifier[add_edge] ( identifier[group_ids] [ identifier[src] ], identifier[whole_node] , identifier[weight] = identifier[float] ( identifier[edge_value] ))
keyword[else] :
identifier[edge_weights] [ identifier[src] ]= identifier[edge_value]
keyword[if] identifier[return_networkx_graph] :
keyword[if] identifier[out_weights_path] keyword[is] keyword[not] keyword[None] :
identifier[graph] . identifier[write_graphml] ( identifier[out_weights_path] )
keyword[return] identifier[graph]
keyword[else] :
keyword[if] identifier[out_weights_path] keyword[is] keyword[not] keyword[None] :
identifier[np] . identifier[savetxt] ( identifier[out_weights_path] , identifier[edge_weights] , identifier[delimiter] = literal[string] , identifier[fmt] = literal[string] )
keyword[return] identifier[edge_weights]
|
def relative_to_all(features, groups, bin_edges, weight_func, use_orig_distr, group_ids, num_groups, return_networkx_graph, out_weights_path):
"""
Computes the given function (aka weight or distance) between histogram from each of the groups to a "grand histogram" derived from all groups.
Parameters
----------
features : ndarray or str
1d array of scalar values, either provided directly as a 1d numpy array,
or as a path to a file containing these values
groups : ndarray or str
Membership array of same length as `features`, each value specifying which group that particular node belongs to.
Input can be either provided directly as a 1d numpy array,or as a path to a file containing these values.
For example, if you have cortical thickness values for 1000 vertices (`features` is ndarray of length 1000),
belonging to 100 patches, the groups array (of length 1000) could have numbers 1 to 100 (number of unique values)
specifying which element belongs to which cortical patch.
Grouping with numerical values (contiguous from 1 to num_patches) is strongly recommended for simplicity,
but this could also be a list of strings of length p, in which case a tuple is returned,
identifying which weight belongs to which pair of patches.
bin_edges : list or ndarray
Array of bin edges within which to compute the histogram in.
weight_func : callable
Function to compute the edge weight between groups/nodes.
use_orig_distr : bool, optional
When using a user-defined callable, this flag
1) allows skipping of pre-processing (trimming outliers) and histogram construction,
2) enables the application of arbitrary callable (user-defined) on the original distributions coming from the two groups/ROIs/nodes directly.
Example: ``diff_in_medians = lambda x, y: abs(np.median(x)-np.median(y))``
This option is valid only when weight_method is a valid callable,
which must take two inputs (possibly of different lengths) and return a single scalar.
group_ids : list
List of unique group ids to construct the nodes from (must all be present in the `groups` argument)
num_groups : int
Number of unique groups in the `group_ids`
return_networkx_graph : bool, optional
Specifies the need for a networkx graph populated with weights computed. Default: False.
out_weights_path : str, optional
Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format.
Default: nothing saved unless instructed.
Returns
-------
distance_vector : ndarray
vector of distances between the grand histogram and the individual ROIs
Raises
------
ValueError
If one or more of the arrays are empty.
"""
# notice the use of all features without regard to group membership
hist_whole = compute_histogram(features, bin_edges, use_orig_distr)
# to identify the central node capturing distribution from all roi's
whole_node = 'whole'
if return_networkx_graph:
graph = nx.Graph()
graph.add_nodes_from(group_ids)
graph.add_node(whole_node) # depends on [control=['if'], data=[]]
else:
edge_weights = np.full([num_groups, 1], np.nan)
for src in range(num_groups):
index_roi = groups == group_ids[src]
hist_roi = compute_histogram(features[index_roi], bin_edges, use_orig_distr)
edge_value = weight_func(hist_whole, hist_roi)
if return_networkx_graph:
graph.add_edge(group_ids[src], whole_node, weight=float(edge_value)) # depends on [control=['if'], data=[]]
else:
edge_weights[src] = edge_value # depends on [control=['for'], data=['src']]
if return_networkx_graph:
if out_weights_path is not None:
graph.write_graphml(out_weights_path) # depends on [control=['if'], data=['out_weights_path']]
return graph # depends on [control=['if'], data=[]]
else:
if out_weights_path is not None:
np.savetxt(out_weights_path, edge_weights, delimiter=',', fmt='%.9f') # depends on [control=['if'], data=['out_weights_path']]
return edge_weights
|
def complete(self, text: str) -> Iterable[str]:
    """Return an iterable of possible completions for the given text in
    this namespace.

    Qualified input ("prefix/suffix") is completed against aliases and
    imports of the prefix; unqualified input is completed against
    aliases, imports, interned vars, and refers.
    """
    assert not text.startswith(":")
    if "/" not in text:
        # Unqualified name: search every candidate source in this namespace.
        return itertools.chain(
            self.__complete_alias(text),
            self.__complete_imports_and_aliases(text),
            self.__complete_interns(text),
            self.__complete_refers(text),
        )
    # Qualified name: split once on the first "/" and complete the
    # remainder inside the named alias or imported module.
    prefix, suffix = text.split("/", maxsplit=1)
    return itertools.chain(
        self.__complete_alias(prefix, name_in_ns=suffix),
        self.__complete_imports_and_aliases(prefix, name_in_module=suffix),
    )
|
def function[complete, parameter[self, text]]:
constant[Return an iterable of possible completions for the given text in
this namespace.]
assert[<ast.UnaryOp object at 0x7da1b033d7e0>]
if compare[constant[/] in name[text]] begin[:]
<ast.Tuple object at 0x7da1b033d570> assign[=] call[name[text].split, parameter[constant[/]]]
variable[results] assign[=] call[name[itertools].chain, parameter[call[name[self].__complete_alias, parameter[name[prefix]]], call[name[self].__complete_imports_and_aliases, parameter[name[prefix]]]]]
return[name[results]]
|
keyword[def] identifier[complete] ( identifier[self] , identifier[text] : identifier[str] )-> identifier[Iterable] [ identifier[str] ]:
literal[string]
keyword[assert] keyword[not] identifier[text] . identifier[startswith] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[text] :
identifier[prefix] , identifier[suffix] = identifier[text] . identifier[split] ( literal[string] , identifier[maxsplit] = literal[int] )
identifier[results] = identifier[itertools] . identifier[chain] (
identifier[self] . identifier[__complete_alias] ( identifier[prefix] , identifier[name_in_ns] = identifier[suffix] ),
identifier[self] . identifier[__complete_imports_and_aliases] ( identifier[prefix] , identifier[name_in_module] = identifier[suffix] ),
)
keyword[else] :
identifier[results] = identifier[itertools] . identifier[chain] (
identifier[self] . identifier[__complete_alias] ( identifier[text] ),
identifier[self] . identifier[__complete_imports_and_aliases] ( identifier[text] ),
identifier[self] . identifier[__complete_interns] ( identifier[text] ),
identifier[self] . identifier[__complete_refers] ( identifier[text] ),
)
keyword[return] identifier[results]
|
def complete(self, text: str) -> Iterable[str]:
"""Return an iterable of possible completions for the given text in
this namespace."""
assert not text.startswith(':')
if '/' in text:
(prefix, suffix) = text.split('/', maxsplit=1)
results = itertools.chain(self.__complete_alias(prefix, name_in_ns=suffix), self.__complete_imports_and_aliases(prefix, name_in_module=suffix)) # depends on [control=['if'], data=['text']]
else:
results = itertools.chain(self.__complete_alias(text), self.__complete_imports_and_aliases(text), self.__complete_interns(text), self.__complete_refers(text))
return results
|
def rule_from_pattern(pattern, base_path=None, source=None):
    """
    Take a .gitignore match pattern, such as "*.py[cod]" or "**/*.bak",
    and return an IgnoreRule suitable for matching against files and
    directories. Patterns which do not match files, such as comments
    and blank lines, will return None.
    Because git allows for nested .gitignore files, a base_path value
    is required for correct behavior. The base path should be absolute.

    Parameters
    ----------
    pattern : str
        One line from a .gitignore file.
    base_path : str, optional
        Absolute directory the rule is anchored to.
    source : optional
        Opaque provenance information stored on the rule.

    Returns
    -------
    IgnoreRule or None
        None for comments, blank lines, and patterns that can never match.

    Raises
    ------
    ValueError
        If base_path is given but is not an absolute path.
    """
    if base_path and base_path != abspath(base_path):
        raise ValueError('base_path must be absolute')
    # Store the exact pattern for our repr and string functions
    orig_pattern = pattern
    # Early returns follow
    # Discard comments and separators
    if pattern.strip() == '' or pattern[0] == '#':
        return
    # Discard anything with more than two consecutive asterisks
    if pattern.find('***') > -1:
        return
    # Strip leading bang before examining double asterisks
    if pattern[0] == '!':
        negation = True
        pattern = pattern[1:]
    else:
        negation = False
    # A lone "!" leaves nothing to match; previously this fell through
    # and crashed on pattern[-1] below.
    if not pattern:
        return
    # Discard anything with invalid double-asterisks -- they can appear
    # at the start or the end, or be surrounded by slashes
    for m in re.finditer(r'\*\*', pattern):
        start_index = m.start()
        if (start_index != 0 and start_index != len(pattern) - 2 and
                (pattern[start_index - 1] != '/' or
                 pattern[start_index + 2] != '/')):
            return
    # Special-casing '/', which doesn't match any files or directories
    if pattern.rstrip() == '/':
        return
    directory_only = pattern[-1] == '/'
    # A slash is a sign that we're tied to the base_path of our rule
    # set.
    anchored = '/' in pattern[:-1]
    # Use startswith/endswith instead of bare indexing so short patterns
    # such as "/*" or "//" cannot raise IndexError.
    if pattern.startswith('/'):
        pattern = pattern[1:]
    if pattern.startswith('**'):
        pattern = pattern[2:]
        anchored = False
    if pattern.startswith('/'):
        pattern = pattern[1:]
    if pattern.endswith('/'):
        pattern = pattern[:-1]
    if not pattern:
        # Pattern reduced to nothing (e.g. "//"): matches no files.
        return
    regex = fnmatch_pathname_to_regex(
        pattern
    )
    if anchored:
        regex = ''.join(['^', regex])
    return IgnoreRule(
        pattern=orig_pattern,
        regex=regex,
        negation=negation,
        directory_only=directory_only,
        anchored=anchored,
        base_path=Path(base_path) if base_path else None,
        source=source
    )
|
def function[rule_from_pattern, parameter[pattern, base_path, source]]:
constant[
Take a .gitignore match pattern, such as "*.py[cod]" or "**/*.bak",
and return an IgnoreRule suitable for matching against files and
directories. Patterns which do not match files, such as comments
and blank lines, will return None.
Because git allows for nested .gitignore files, a base_path value
is required for correct behavior. The base path should be absolute.
]
if <ast.BoolOp object at 0x7da1b11b4250> begin[:]
<ast.Raise object at 0x7da20c6e43d0>
variable[orig_pattern] assign[=] name[pattern]
if <ast.BoolOp object at 0x7da20c6e53c0> begin[:]
return[None]
if compare[call[name[pattern].find, parameter[constant[***]]] greater[>] <ast.UnaryOp object at 0x7da20c6e72e0>] begin[:]
return[None]
if compare[call[name[pattern]][constant[0]] equal[==] constant[!]] begin[:]
variable[negation] assign[=] constant[True]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da20c6e66e0>]
for taget[name[m]] in starred[call[name[re].finditer, parameter[constant[\*\*], name[pattern]]]] begin[:]
variable[start_index] assign[=] call[name[m].start, parameter[]]
if <ast.BoolOp object at 0x7da20c6e5ab0> begin[:]
return[None]
if compare[call[name[pattern].rstrip, parameter[]] equal[==] constant[/]] begin[:]
return[None]
variable[directory_only] assign[=] compare[call[name[pattern]][<ast.UnaryOp object at 0x7da1b2344760>] equal[==] constant[/]]
variable[anchored] assign[=] compare[constant[/] in call[name[pattern]][<ast.Slice object at 0x7da1b2347430>]]
if compare[call[name[pattern]][constant[0]] equal[==] constant[/]] begin[:]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da20c6e7b20>]
if <ast.BoolOp object at 0x7da20c6e52d0> begin[:]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da20c6e5d20>]
variable[anchored] assign[=] constant[False]
if compare[call[name[pattern]][constant[0]] equal[==] constant[/]] begin[:]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da20c6e77c0>]
if compare[call[name[pattern]][<ast.UnaryOp object at 0x7da20c6e55d0>] equal[==] constant[/]] begin[:]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da20c6e45e0>]
variable[regex] assign[=] call[name[fnmatch_pathname_to_regex], parameter[name[pattern]]]
if name[anchored] begin[:]
variable[regex] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da1b127a3e0>, <ast.Name object at 0x7da1b127ad40>]]]]
return[call[name[IgnoreRule], parameter[]]]
|
keyword[def] identifier[rule_from_pattern] ( identifier[pattern] , identifier[base_path] = keyword[None] , identifier[source] = keyword[None] ):
literal[string]
keyword[if] identifier[base_path] keyword[and] identifier[base_path] != identifier[abspath] ( identifier[base_path] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[orig_pattern] = identifier[pattern]
keyword[if] identifier[pattern] . identifier[strip] ()== literal[string] keyword[or] identifier[pattern] [ literal[int] ]== literal[string] :
keyword[return]
keyword[if] identifier[pattern] . identifier[find] ( literal[string] )>- literal[int] :
keyword[return]
keyword[if] identifier[pattern] [ literal[int] ]== literal[string] :
identifier[negation] = keyword[True]
identifier[pattern] = identifier[pattern] [ literal[int] :]
keyword[else] :
identifier[negation] = keyword[False]
keyword[for] identifier[m] keyword[in] identifier[re] . identifier[finditer] ( literal[string] , identifier[pattern] ):
identifier[start_index] = identifier[m] . identifier[start] ()
keyword[if] ( identifier[start_index] != literal[int] keyword[and] identifier[start_index] != identifier[len] ( identifier[pattern] )- literal[int] keyword[and]
( identifier[pattern] [ identifier[start_index] - literal[int] ]!= literal[string] keyword[or]
identifier[pattern] [ identifier[start_index] + literal[int] ]!= literal[string] )):
keyword[return]
keyword[if] identifier[pattern] . identifier[rstrip] ()== literal[string] :
keyword[return]
identifier[directory_only] = identifier[pattern] [- literal[int] ]== literal[string]
identifier[anchored] = literal[string] keyword[in] identifier[pattern] [:- literal[int] ]
keyword[if] identifier[pattern] [ literal[int] ]== literal[string] :
identifier[pattern] = identifier[pattern] [ literal[int] :]
keyword[if] identifier[pattern] [ literal[int] ]== literal[string] keyword[and] identifier[pattern] [ literal[int] ]== literal[string] :
identifier[pattern] = identifier[pattern] [ literal[int] :]
identifier[anchored] = keyword[False]
keyword[if] identifier[pattern] [ literal[int] ]== literal[string] :
identifier[pattern] = identifier[pattern] [ literal[int] :]
keyword[if] identifier[pattern] [- literal[int] ]== literal[string] :
identifier[pattern] = identifier[pattern] [:- literal[int] ]
identifier[regex] = identifier[fnmatch_pathname_to_regex] (
identifier[pattern]
)
keyword[if] identifier[anchored] :
identifier[regex] = literal[string] . identifier[join] ([ literal[string] , identifier[regex] ])
keyword[return] identifier[IgnoreRule] (
identifier[pattern] = identifier[orig_pattern] ,
identifier[regex] = identifier[regex] ,
identifier[negation] = identifier[negation] ,
identifier[directory_only] = identifier[directory_only] ,
identifier[anchored] = identifier[anchored] ,
identifier[base_path] = identifier[Path] ( identifier[base_path] ) keyword[if] identifier[base_path] keyword[else] keyword[None] ,
identifier[source] = identifier[source]
)
|
def rule_from_pattern(pattern, base_path=None, source=None):
"""
Take a .gitignore match pattern, such as "*.py[cod]" or "**/*.bak",
and return an IgnoreRule suitable for matching against files and
directories. Patterns which do not match files, such as comments
and blank lines, will return None.
Because git allows for nested .gitignore files, a base_path value
is required for correct behavior. The base path should be absolute.
"""
if base_path and base_path != abspath(base_path):
raise ValueError('base_path must be absolute') # depends on [control=['if'], data=[]] # Store the exact pattern for our repr and string functions
orig_pattern = pattern # Early returns follow
# Discard comments and seperators
if pattern.strip() == '' or pattern[0] == '#':
return # depends on [control=['if'], data=[]] # Discard anything with more than two consecutive asterisks
if pattern.find('***') > -1:
return # depends on [control=['if'], data=[]] # Strip leading bang before examining double asterisks
if pattern[0] == '!':
negation = True
pattern = pattern[1:] # depends on [control=['if'], data=[]]
else:
negation = False # Discard anything with invalid double-asterisks -- they can appear
# at the start or the end, or be surrounded by slashes
for m in re.finditer('\\*\\*', pattern):
start_index = m.start()
if start_index != 0 and start_index != len(pattern) - 2 and (pattern[start_index - 1] != '/' or pattern[start_index + 2] != '/'):
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']] # Special-casing '/', which doesn't match any files or directories
if pattern.rstrip() == '/':
return # depends on [control=['if'], data=[]]
directory_only = pattern[-1] == '/' # A slash is a sign that we're tied to the base_path of our rule
# set.
anchored = '/' in pattern[:-1]
if pattern[0] == '/':
pattern = pattern[1:] # depends on [control=['if'], data=[]]
if pattern[0] == '*' and pattern[1] == '*':
pattern = pattern[2:]
anchored = False # depends on [control=['if'], data=[]]
if pattern[0] == '/':
pattern = pattern[1:] # depends on [control=['if'], data=[]]
if pattern[-1] == '/':
pattern = pattern[:-1] # depends on [control=['if'], data=[]]
regex = fnmatch_pathname_to_regex(pattern)
if anchored:
regex = ''.join(['^', regex]) # depends on [control=['if'], data=[]]
return IgnoreRule(pattern=orig_pattern, regex=regex, negation=negation, directory_only=directory_only, anchored=anchored, base_path=Path(base_path) if base_path else None, source=source)
|
def MI_references(self, env, objectName, resultClassName, role,
                  propertyList):
    # pylint: disable=invalid-name
    """Return instances of an association class.

    Implements the WBEM operation References in terms of the
    references method. A derived class will not normally override
    this method.
    """
    logger = env.get_logger()
    logger.log_debug(
        'CIMProvider2 MI_references called. resultClass: %s'
        % resultClassName)
    if not resultClassName:
        raise pywbem.CIMError(
            pywbem.CIM_ERR_FAILED,
            "Empty resultClassName passed to References")
    # Normalize the requested property list to lower case, if given.
    plist = ([p.lower() for p in propertyList]
             if propertyList is not None else None)
    model = pywbem.CIMInstance(classname=resultClassName,
                               property_list=plist)
    model.path = pywbem.CIMInstanceName(classname=resultClassName,
                                        namespace=objectName.namespace)
    if role and role in model.properties:
        model[role] = objectName
    gen = self.references(env=env,
                          object_name=objectName,
                          model=model,
                          result_class_name='',
                          role=role,
                          result_role=None,
                          keys_only=False)
    if gen is None:
        logger.log_debug('references() returned None instead of '
                         'generator object')
        return
    for instance in gen:
        for prop in instance.properties.values():
            value = prop.value
            # Fill in the namespace on reference values that lack one.
            if hasattr(value, 'namespace') and value.namespace is None:
                value.namespace = objectName.namespace
        yield instance
    logger.log_debug('CIMProvider2 MI_references returning')
|
def function[MI_references, parameter[self, env, objectName, resultClassName, role, propertyList]]:
constant[Return instances of an association class.
Implements the WBEM operation References in terms
of the references method. A derived class will not normally
override this method.
]
variable[logger] assign[=] call[name[env].get_logger, parameter[]]
call[name[logger].log_debug, parameter[binary_operation[constant[CIMProvider2 MI_references called. resultClass: %s] <ast.Mod object at 0x7da2590d6920> name[resultClassName]]]]
if <ast.UnaryOp object at 0x7da18f09ea10> begin[:]
<ast.Raise object at 0x7da18f09e6e0>
variable[plist] assign[=] constant[None]
if compare[name[propertyList] is_not constant[None]] begin[:]
variable[plist] assign[=] <ast.ListComp object at 0x7da18f09d6c0>
variable[model] assign[=] call[name[pywbem].CIMInstance, parameter[]]
name[model].path assign[=] call[name[pywbem].CIMInstanceName, parameter[]]
if name[role] begin[:]
if compare[name[role] in name[model].properties] begin[:]
call[name[model]][name[role]] assign[=] name[objectName]
variable[gen] assign[=] call[name[self].references, parameter[]]
if compare[name[gen] is constant[None]] begin[:]
call[name[logger].log_debug, parameter[constant[references() returned None instead of generator object]]]
return[None]
for taget[name[inst]] in starred[name[gen]] begin[:]
for taget[name[prop]] in starred[call[name[inst].properties.values, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da204962a10> begin[:]
name[prop].value.namespace assign[=] name[objectName].namespace
<ast.Yield object at 0x7da204962ce0>
call[name[logger].log_debug, parameter[constant[CIMProvider2 MI_references returning]]]
|
keyword[def] identifier[MI_references] ( identifier[self] ,
identifier[env] ,
identifier[objectName] ,
identifier[resultClassName] ,
identifier[role] ,
identifier[propertyList] ):
literal[string]
identifier[logger] = identifier[env] . identifier[get_logger] ()
identifier[logger] . identifier[log_debug] ( literal[string] literal[string] %( identifier[resultClassName] ))
keyword[if] keyword[not] identifier[resultClassName] :
keyword[raise] identifier[pywbem] . identifier[CIMError] (
identifier[pywbem] . identifier[CIM_ERR_FAILED] ,
literal[string] )
identifier[plist] = keyword[None]
keyword[if] identifier[propertyList] keyword[is] keyword[not] keyword[None] :
identifier[plist] =[ identifier[s] . identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[propertyList] ]
identifier[model] = identifier[pywbem] . identifier[CIMInstance] ( identifier[classname] = identifier[resultClassName] ,
identifier[property_list] = identifier[plist] )
identifier[model] . identifier[path] = identifier[pywbem] . identifier[CIMInstanceName] ( identifier[classname] = identifier[resultClassName] ,
identifier[namespace] = identifier[objectName] . identifier[namespace] )
keyword[if] identifier[role] :
keyword[if] identifier[role] keyword[in] identifier[model] . identifier[properties] :
identifier[model] [ identifier[role] ]= identifier[objectName]
identifier[gen] = identifier[self] . identifier[references] ( identifier[env] = identifier[env] ,
identifier[object_name] = identifier[objectName] ,
identifier[model] = identifier[model] ,
identifier[result_class_name] = literal[string] ,
identifier[role] = identifier[role] ,
identifier[result_role] = keyword[None] ,
identifier[keys_only] = keyword[False] )
keyword[if] identifier[gen] keyword[is] keyword[None] :
identifier[logger] . identifier[log_debug] ( literal[string] literal[string] )
keyword[return]
keyword[for] identifier[inst] keyword[in] identifier[gen] :
keyword[for] identifier[prop] keyword[in] identifier[inst] . identifier[properties] . identifier[values] ():
keyword[if] identifier[hasattr] ( identifier[prop] . identifier[value] , literal[string] ) keyword[and] identifier[prop] . identifier[value] . identifier[namespace] keyword[is] keyword[None] :
identifier[prop] . identifier[value] . identifier[namespace] = identifier[objectName] . identifier[namespace]
keyword[yield] identifier[inst]
identifier[logger] . identifier[log_debug] ( literal[string] )
|
def MI_references(self, env, objectName, resultClassName, role, propertyList):
# pylint: disable=invalid-name
'Return instances of an association class.\n\n Implements the WBEM operation References in terms\n of the references method. A derived class will not normally\n override this method.\n\n '
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_references called. resultClass: %s' % resultClassName)
if not resultClassName:
raise pywbem.CIMError(pywbem.CIM_ERR_FAILED, 'Empty resultClassName passed to References') # depends on [control=['if'], data=[]]
plist = None
if propertyList is not None:
plist = [s.lower() for s in propertyList] # depends on [control=['if'], data=['propertyList']]
model = pywbem.CIMInstance(classname=resultClassName, property_list=plist)
model.path = pywbem.CIMInstanceName(classname=resultClassName, namespace=objectName.namespace)
if role:
if role in model.properties:
model[role] = objectName # depends on [control=['if'], data=['role']] # depends on [control=['if'], data=[]]
gen = self.references(env=env, object_name=objectName, model=model, result_class_name='', role=role, result_role=None, keys_only=False)
if gen is None:
logger.log_debug('references() returned None instead of generator object')
return # depends on [control=['if'], data=[]]
for inst in gen:
for prop in inst.properties.values():
if hasattr(prop.value, 'namespace') and prop.value.namespace is None:
prop.value.namespace = objectName.namespace # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prop']]
yield inst # depends on [control=['for'], data=['inst']]
logger.log_debug('CIMProvider2 MI_references returning')
|
def calc_std(c0, c1=None):
    """Return the element-wise standard deviation of the data.

    If ``c1`` is given, the two classes are balanced by replicating the
    smaller one (by row) before stacking, so each class contributes
    roughly equally to the pooled standard deviation.

    Parameters
    ----------
    c0 : array_like
        Samples of the first class (rows are samples).
    c1 : array_like, optional
        Samples of the second class. If omitted or empty, the standard
        deviation of ``c0`` alone is returned.

    Returns
    -------
    numpy.ndarray or numpy scalar
        Standard deviation along axis 0.
    """
    # NOTE: the default used to be a mutable ``c1=[]``, which is shared
    # across calls; a None sentinel avoids that pitfall and stays
    # backward compatible with callers passing an explicit empty list.
    if c1 is None:
        c1 = []
    if len(c1) == 0:
        return numpy.std(c0, 0)
    # Replication factors: the smaller class is repeated ceil(ratio)
    # times so the stacked data is approximately class-balanced.
    prop = float(len(c0)) / float(len(c1))
    if prop < 1:
        p0 = int(math.ceil(1 / prop))
        p1 = 1
    else:
        p0 = 1
        p1 = int(math.ceil(prop))
    return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
|
def function[calc_std, parameter[c0, c1]]:
constant[ Calculates the variance of the data.]
if compare[name[c1] equal[==] list[[]]] begin[:]
return[call[name[numpy].std, parameter[name[c0], constant[0]]]]
variable[prop] assign[=] binary_operation[call[name[float], parameter[call[name[len], parameter[name[c0]]]]] / call[name[float], parameter[call[name[len], parameter[name[c1]]]]]]
if compare[name[prop] less[<] constant[1]] begin[:]
variable[p0] assign[=] call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[constant[1] / name[prop]]]]]]
variable[p1] assign[=] constant[1]
return[call[name[numpy].std, parameter[call[name[numpy].vstack, parameter[binary_operation[binary_operation[name[p0] * list[[<ast.Name object at 0x7da1b1993b80>]]] + binary_operation[name[p1] * list[[<ast.Name object at 0x7da1b19939d0>]]]]]], constant[0]]]]
|
keyword[def] identifier[calc_std] ( identifier[c0] , identifier[c1] =[]):
literal[string]
keyword[if] identifier[c1] ==[]:
keyword[return] identifier[numpy] . identifier[std] ( identifier[c0] , literal[int] )
identifier[prop] = identifier[float] ( identifier[len] ( identifier[c0] ))/ identifier[float] ( identifier[len] ( identifier[c1] ))
keyword[if] identifier[prop] < literal[int] :
identifier[p0] = identifier[int] ( identifier[math] . identifier[ceil] ( literal[int] / identifier[prop] ))
identifier[p1] = literal[int]
keyword[else] :
identifier[p0] = literal[int]
identifier[p1] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[prop] ))
keyword[return] identifier[numpy] . identifier[std] ( identifier[numpy] . identifier[vstack] ( identifier[p0] *[ identifier[c0] ]+ identifier[p1] *[ identifier[c1] ]), literal[int] )
|
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0) # depends on [control=['if'], data=[]]
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1 # depends on [control=['if'], data=['prop']]
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
|
def pst(self):
    """ get the pyemu.Pst attribute
    Returns
    -------
    pst : pyemu.Pst
    Note
    ----
    returns a reference.
    If LinearAnalysis.__pst is None, the pst attribute is
    dynamically loaded before returning.
    """
    # Nothing cached and nothing to load from -> hard error.
    if self.__pst is None and self.pst_arg is None:
        raise Exception("linear_analysis.pst: can't access self.pst:"
                        "no pest control argument passed")
    # Return the cached object when present; otherwise load it lazily.
    if self.__pst:
        return self.__pst
    self.__load_pst()
    return self.__pst
|
def function[pst, parameter[self]]:
constant[ get the pyemu.Pst attribute
Returns
-------
pst : pyemu.Pst
Note
----
returns a references
If LinearAnalysis.__pst is None, then the pst attribute is
dynamically loaded before returning
]
if <ast.BoolOp object at 0x7da1b1d39570> begin[:]
<ast.Raise object at 0x7da1b1d39690>
|
keyword[def] identifier[pst] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__pst] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[pst_arg] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] )
keyword[elif] identifier[self] . identifier[__pst] :
keyword[return] identifier[self] . identifier[__pst]
keyword[else] :
identifier[self] . identifier[__load_pst] ()
keyword[return] identifier[self] . identifier[__pst]
|
def pst(self):
""" get the pyemu.Pst attribute
Returns
-------
pst : pyemu.Pst
Note
----
returns a references
If LinearAnalysis.__pst is None, then the pst attribute is
dynamically loaded before returning
"""
if self.__pst is None and self.pst_arg is None:
raise Exception("linear_analysis.pst: can't access self.pst:" + 'no pest control argument passed') # depends on [control=['if'], data=[]]
elif self.__pst:
return self.__pst # depends on [control=['if'], data=[]]
else:
self.__load_pst()
return self.__pst
|
def _construct_instance(self, constructor, full_name, *args, **kwargs):
""" Creates a new node. Checks if the new node needs to know the trajectory.
:param constructor: The constructor to use
:param full_name: Full name of node
:param args: Arguments passed to constructor
:param kwargs: Keyword arguments passed to the constructor
:return:
"""
if getattr(constructor, 'KNOWS_TRAJECTORY', False):
return constructor(full_name, self, *args, **kwargs)
else:
return constructor(full_name, *args, **kwargs)
|
def function[_construct_instance, parameter[self, constructor, full_name]]:
constant[ Creates a new node. Checks if the new node needs to know the trajectory.
:param constructor: The constructor to use
:param full_name: Full name of node
:param args: Arguments passed to constructor
:param kwargs: Keyword arguments passed to the constructor
:return:
]
if call[name[getattr], parameter[name[constructor], constant[KNOWS_TRAJECTORY], constant[False]]] begin[:]
return[call[name[constructor], parameter[name[full_name], name[self], <ast.Starred object at 0x7da18f7216f0>]]]
|
keyword[def] identifier[_construct_instance] ( identifier[self] , identifier[constructor] , identifier[full_name] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[getattr] ( identifier[constructor] , literal[string] , keyword[False] ):
keyword[return] identifier[constructor] ( identifier[full_name] , identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[constructor] ( identifier[full_name] ,* identifier[args] ,** identifier[kwargs] )
|
def _construct_instance(self, constructor, full_name, *args, **kwargs):
""" Creates a new node. Checks if the new node needs to know the trajectory.
:param constructor: The constructor to use
:param full_name: Full name of node
:param args: Arguments passed to constructor
:param kwargs: Keyword arguments passed to the constructor
:return:
"""
if getattr(constructor, 'KNOWS_TRAJECTORY', False):
return constructor(full_name, self, *args, **kwargs) # depends on [control=['if'], data=[]]
else:
return constructor(full_name, *args, **kwargs)
|
def readlines(self, sizehint=-1):
    """
    readlines([size]) -> list of strings, each a line from the file.

    Repeatedly invoke readline() and collect the lines so read.  The
    optional size argument, if given, is an approximate bound on the
    total number of bytes in the lines returned.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    collected = []
    remaining = sizehint
    while True:
        # The (decremented) hint is forwarded to readline on each pass.
        chunk = self.readline(remaining)
        if not chunk:
            return collected
        collected.append(chunk)
        if remaining >= 0:
            remaining -= len(chunk)
            if remaining <= 0:
                return collected
|
def function[readlines, parameter[self, sizehint]]:
constant[
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
]
if name[self].closed begin[:]
<ast.Raise object at 0x7da18f7216f0>
variable[lines] assign[=] list[[]]
while constant[True] begin[:]
variable[line] assign[=] call[name[self].readline, parameter[name[sizehint]]]
if <ast.UnaryOp object at 0x7da18f00d8d0> begin[:]
break
call[name[lines].append, parameter[name[line]]]
if compare[name[sizehint] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18f00ffa0>
if compare[name[sizehint] less_or_equal[<=] constant[0]] begin[:]
break
return[name[lines]]
|
keyword[def] identifier[readlines] ( identifier[self] , identifier[sizehint] =- literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[lines] =[]
keyword[while] keyword[True] :
identifier[line] = identifier[self] . identifier[readline] ( identifier[sizehint] )
keyword[if] keyword[not] identifier[line] :
keyword[break]
identifier[lines] . identifier[append] ( identifier[line] )
keyword[if] identifier[sizehint] >= literal[int] :
identifier[sizehint] -= identifier[len] ( identifier[line] )
keyword[if] identifier[sizehint] <= literal[int] :
keyword[break]
keyword[return] identifier[lines]
|
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file') # depends on [control=['if'], data=[]]
lines = []
while True:
line = self.readline(sizehint)
if not line:
break # depends on [control=['if'], data=[]]
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['sizehint']] # depends on [control=['while'], data=[]]
return lines
|
def download(self, spec, tmpdir):
    """Locate and/or download `spec` to `tmpdir`, returning a local path

    `spec` may be a ``Requirement`` object, or a string containing a URL,
    an existing local filename, or a project/version requirement spec
    (i.e. the string form of a ``Requirement`` object). If it is the URL
    of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
    that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
    automatically created alongside the downloaded file.

    If `spec` is a ``Requirement`` object or a string containing a
    project/version requirement spec, this method returns the location of
    a matching distribution (possibly after downloading it to `tmpdir`).
    If `spec` is a locally existing file or directory name, it is simply
    returned unchanged. If `spec` is a URL, it is downloaded to a subpath
    of `tmpdir`, and the local filename is returned. Various errors may be
    raised if a problem occurs during downloading.
    """
    if not isinstance(spec, Requirement):
        scheme = URL_SCHEME(spec)
        if scheme:
            # URL case: fetch into tmpdir; synthesize a setup.py for
            # bare .py downloads.
            local_path = self._download_url(scheme.group(1), spec, tmpdir)
            base, fragment = egg_info_for_url(spec)
            if base.endswith('.py'):
                local_path = self.gen_setup(local_path, fragment, tmpdir)
            return local_path
        if os.path.exists(spec):
            # Existing file or directory: hand it back untouched.
            return spec
        # Otherwise treat the string as a requirement spec.
        spec = parse_requirement_arg(spec)
    located = self.fetch_distribution(spec, tmpdir)
    return getattr(located, 'location', None)
|
def function[download, parameter[self, spec, tmpdir]]:
constant[Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
]
if <ast.UnaryOp object at 0x7da1b1b01a80> begin[:]
variable[scheme] assign[=] call[name[URL_SCHEME], parameter[name[spec]]]
if name[scheme] begin[:]
variable[found] assign[=] call[name[self]._download_url, parameter[call[name[scheme].group, parameter[constant[1]]], name[spec], name[tmpdir]]]
<ast.Tuple object at 0x7da1b1cd7fa0> assign[=] call[name[egg_info_for_url], parameter[name[spec]]]
if call[name[base].endswith, parameter[constant[.py]]] begin[:]
variable[found] assign[=] call[name[self].gen_setup, parameter[name[found], name[fragment], name[tmpdir]]]
return[name[found]]
return[call[name[getattr], parameter[call[name[self].fetch_distribution, parameter[name[spec], name[tmpdir]]], constant[location], constant[None]]]]
|
keyword[def] identifier[download] ( identifier[self] , identifier[spec] , identifier[tmpdir] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[spec] , identifier[Requirement] ):
identifier[scheme] = identifier[URL_SCHEME] ( identifier[spec] )
keyword[if] identifier[scheme] :
identifier[found] = identifier[self] . identifier[_download_url] ( identifier[scheme] . identifier[group] ( literal[int] ), identifier[spec] , identifier[tmpdir] )
identifier[base] , identifier[fragment] = identifier[egg_info_for_url] ( identifier[spec] )
keyword[if] identifier[base] . identifier[endswith] ( literal[string] ):
identifier[found] = identifier[self] . identifier[gen_setup] ( identifier[found] , identifier[fragment] , identifier[tmpdir] )
keyword[return] identifier[found]
keyword[elif] identifier[os] . identifier[path] . identifier[exists] ( identifier[spec] ):
keyword[return] identifier[spec]
keyword[else] :
identifier[spec] = identifier[parse_requirement_arg] ( identifier[spec] )
keyword[return] identifier[getattr] ( identifier[self] . identifier[fetch_distribution] ( identifier[spec] , identifier[tmpdir] ), literal[string] , keyword[None] )
|
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec, Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
(base, fragment) = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir) # depends on [control=['if'], data=[]]
return found # depends on [control=['if'], data=[]]
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec # depends on [control=['if'], data=[]]
else:
spec = parse_requirement_arg(spec) # depends on [control=['if'], data=[]]
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
|
def parse_exclusions(exclusions):
    """Read exclusion definitions from the file named by ``exclusions``.

    Each usable line yields a dict with keys ``'pos'`` (an ``(x, y)`` float
    tuple, or a single ``"hh:mm:ss dd:mm:ss"`` string for sexagesimal sky
    positions), ``'distance'`` (float) and ``'units'`` (``'pixels'`` or
    ``'sky'``).

    Returns
    -------
    list of dict, or None when the file cannot be found.
    """
    fname = fileutil.osfn(exclusions)
    if os.path.exists(fname):
        with open(fname) as f:
            flines = f.readlines()
    else:
        print('No valid exclusions file "', fname, '" could be found!')
        print('Skipping application of exclusions files to source catalogs.')
        return None

    # Parse out lines which can be interpreted as positions and distances
    exclusion_list = []
    units = None
    for line in flines:
        # BUGFIX: guard against empty lines before indexing line[0]
        # (the old code raised IndexError on a zero-length line).
        if not line.strip():
            continue
        if line[0] == '#' or 'global' in line[:6]:
            continue
        # Only interpret the part of the line prior to the comment
        # if a comment has been attached to the line
        if '#' in line:
            line = line.split('#')[0].rstrip()
        if units is None:
            # First real line may declare the coordinate system
            # (DS9 region convention); default to pixel coordinates.
            units = 'pixels'
            if line[:3] in ['fk4', 'fk5', 'sky']:
                units = 'sky'
            if line[:5] in ['image', 'physi', 'pixel']:
                units = 'pixels'
                continue
        if 'circle(' in line:
            # DS9 region syntax: circle(x, y, radius)
            nline = line.replace('circle(', '')
            nline = nline.replace(')', '')
            nline = nline.replace('"', '')
            vals = nline.split(',')
            if ':' in vals[0]:
                # Sexagesimal sky position: keep as a single string
                posval = vals[0] + ' ' + vals[1]
            else:
                posval = (float(vals[0]), float(vals[1]))
        else:
            # Free-format line: "x,y,radius" or "x y radius"
            split_tok = ',' if ',' in line else ' '
            vals = line.split(split_tok)
            if len(vals) != 3:
                continue
            if ':' in vals[0]:
                posval = vals[0] + ' ' + vals[1]
            else:
                posval = (float(vals[0]), float(vals[1]))
        exclusion_list.append(
            {'pos': posval, 'distance': float(vals[2]), 'units': units}
        )
    return exclusion_list
|
def function[parse_exclusions, parameter[exclusions]]:
constant[ Read in exclusion definitions from file named by 'exclusions'
and return a list of positions and distances
]
variable[fname] assign[=] call[name[fileutil].osfn, parameter[name[exclusions]]]
if call[name[os].path.exists, parameter[name[fname]]] begin[:]
with call[name[open], parameter[name[fname]]] begin[:]
variable[flines] assign[=] call[name[f].readlines, parameter[]]
variable[exclusion_list] assign[=] list[[]]
variable[units] assign[=] constant[None]
for taget[name[line]] in starred[name[flines]] begin[:]
if <ast.BoolOp object at 0x7da1b1a23640> begin[:]
continue
if compare[constant[#] in name[line]] begin[:]
variable[line] assign[=] call[call[call[name[line].split, parameter[constant[#]]]][constant[0]].rstrip, parameter[]]
if compare[name[units] is constant[None]] begin[:]
variable[units] assign[=] constant[pixels]
if compare[call[name[line]][<ast.Slice object at 0x7da1b1a22da0>] in list[[<ast.Constant object at 0x7da1b1a22d10>, <ast.Constant object at 0x7da1b1a22ce0>, <ast.Constant object at 0x7da1b1a22cb0>]]] begin[:]
variable[units] assign[=] constant[sky]
if compare[call[name[line]][<ast.Slice object at 0x7da1b1a23b80>] in list[[<ast.Constant object at 0x7da1b1a23af0>, <ast.Constant object at 0x7da1b1a23ac0>, <ast.Constant object at 0x7da1b1a23a90>]]] begin[:]
variable[units] assign[=] constant[pixels]
continue
if compare[constant[circle(] in name[line]] begin[:]
variable[nline] assign[=] call[name[line].replace, parameter[constant[circle(], constant[]]]
variable[nline] assign[=] call[name[nline].replace, parameter[constant[)], constant[]]]
variable[nline] assign[=] call[name[nline].replace, parameter[constant["], constant[]]]
variable[vals] assign[=] call[name[nline].split, parameter[constant[,]]]
if compare[constant[:] in call[name[vals]][constant[0]]] begin[:]
variable[posval] assign[=] binary_operation[binary_operation[call[name[vals]][constant[0]] + constant[ ]] + call[name[vals]][constant[1]]]
call[name[exclusion_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1b64dc0>, <ast.Constant object at 0x7da1b1b67640>, <ast.Constant object at 0x7da1b1b64c70>], [<ast.Name object at 0x7da1b1b66aa0>, <ast.Call object at 0x7da1b1b65990>, <ast.Name object at 0x7da1b1b65480>]]]]
return[name[exclusion_list]]
|
keyword[def] identifier[parse_exclusions] ( identifier[exclusions] ):
literal[string]
identifier[fname] = identifier[fileutil] . identifier[osfn] ( identifier[exclusions] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[fname] ):
keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[f] :
identifier[flines] = identifier[f] . identifier[readlines] ()
keyword[else] :
identifier[print] ( literal[string] , identifier[fname] , literal[string] )
identifier[print] ( literal[string] )
keyword[return] keyword[None]
identifier[exclusion_list] =[]
identifier[units] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[flines] :
keyword[if] identifier[line] [ literal[int] ]== literal[string] keyword[or] literal[string] keyword[in] identifier[line] [: literal[int] ]:
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[line] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[rstrip] ()
keyword[if] identifier[units] keyword[is] keyword[None] :
identifier[units] = literal[string]
keyword[if] identifier[line] [: literal[int] ] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[units] = literal[string]
keyword[if] identifier[line] [: literal[int] ] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[units] = literal[string]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[nline] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
identifier[nline] = identifier[nline] . identifier[replace] ( literal[string] , literal[string] )
identifier[nline] = identifier[nline] . identifier[replace] ( literal[string] , literal[string] )
identifier[vals] = identifier[nline] . identifier[split] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[vals] [ literal[int] ]:
identifier[posval] = identifier[vals] [ literal[int] ]+ literal[string] + identifier[vals] [ literal[int] ]
keyword[else] :
identifier[posval] =( identifier[float] ( identifier[vals] [ literal[int] ]), identifier[float] ( identifier[vals] [ literal[int] ]))
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[split_tok] = literal[string]
keyword[else] :
identifier[split_tok] = literal[string]
identifier[vals] = identifier[line] . identifier[split] ( identifier[split_tok] )
keyword[if] identifier[len] ( identifier[vals] )== literal[int] :
keyword[if] literal[string] keyword[in] identifier[vals] [ literal[int] ]:
identifier[posval] = identifier[vals] [ literal[int] ]+ literal[string] + identifier[vals] [ literal[int] ]
keyword[else] :
identifier[posval] =( identifier[float] ( identifier[vals] [ literal[int] ]), identifier[float] ( identifier[vals] [ literal[int] ]))
keyword[else] :
keyword[continue]
identifier[exclusion_list] . identifier[append] (
{ literal[string] : identifier[posval] , literal[string] : identifier[float] ( identifier[vals] [ literal[int] ]), literal[string] : identifier[units] }
)
keyword[return] identifier[exclusion_list]
|
def parse_exclusions(exclusions):
""" Read in exclusion definitions from file named by 'exclusions'
and return a list of positions and distances
"""
fname = fileutil.osfn(exclusions)
if os.path.exists(fname):
with open(fname) as f:
flines = f.readlines() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
print('No valid exclusions file "', fname, '" could be found!')
print('Skipping application of exclusions files to source catalogs.')
return None
# Parse out lines which can be interpreted as positions and distances
exclusion_list = []
units = None
for line in flines:
if line[0] == '#' or 'global' in line[:6]:
continue # depends on [control=['if'], data=[]]
# Only interpret the part of the line prior to the comment
# if a comment has been attached to the line
if '#' in line:
line = line.split('#')[0].rstrip() # depends on [control=['if'], data=['line']]
if units is None:
units = 'pixels'
if line[:3] in ['fk4', 'fk5', 'sky']:
units = 'sky' # depends on [control=['if'], data=[]]
if line[:5] in ['image', 'physi', 'pixel']:
units = 'pixels' # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['units']]
if 'circle(' in line:
nline = line.replace('circle(', '')
nline = nline.replace(')', '')
nline = nline.replace('"', '')
vals = nline.split(',')
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1] # depends on [control=['if'], data=[]]
else:
posval = (float(vals[0]), float(vals[1])) # depends on [control=['if'], data=['line']]
else:
# Try to interpret unformatted line
if ',' in line:
split_tok = ',' # depends on [control=['if'], data=[]]
else:
split_tok = ' '
vals = line.split(split_tok)
if len(vals) == 3:
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1] # depends on [control=['if'], data=[]]
else:
posval = (float(vals[0]), float(vals[1])) # depends on [control=['if'], data=[]]
else:
continue
exclusion_list.append({'pos': posval, 'distance': float(vals[2]), 'units': units}) # depends on [control=['for'], data=['line']]
return exclusion_list
|
def deactivate(self, address):
    """ Deactivate an address from the connection pool,
    if present, remove from the routing table and also closing
    all idle connections to that address.
    """
    log_debug("[#0000] C: <ROUTING> Deactivating address %r", address)
    # `discard` (unlike `remove`) is a no-op when the address is absent,
    # so removal is safe even if it already happened.
    table = self.routing_table
    for role_set in (table.routers, table.readers, table.writers):
        role_set.discard(address)
    log_debug("[#0000] C: <ROUTING> table=%r", table)
    super(RoutingConnectionPool, self).deactivate(address)
|
def function[deactivate, parameter[self, address]]:
constant[ Deactivate an address from the connection pool,
if present, remove from the routing table and also closing
all idle connections to that address.
]
call[name[log_debug], parameter[constant[[#0000] C: <ROUTING> Deactivating address %r], name[address]]]
call[name[self].routing_table.routers.discard, parameter[name[address]]]
call[name[self].routing_table.readers.discard, parameter[name[address]]]
call[name[self].routing_table.writers.discard, parameter[name[address]]]
call[name[log_debug], parameter[constant[[#0000] C: <ROUTING> table=%r], name[self].routing_table]]
call[call[name[super], parameter[name[RoutingConnectionPool], name[self]]].deactivate, parameter[name[address]]]
|
keyword[def] identifier[deactivate] ( identifier[self] , identifier[address] ):
literal[string]
identifier[log_debug] ( literal[string] , identifier[address] )
identifier[self] . identifier[routing_table] . identifier[routers] . identifier[discard] ( identifier[address] )
identifier[self] . identifier[routing_table] . identifier[readers] . identifier[discard] ( identifier[address] )
identifier[self] . identifier[routing_table] . identifier[writers] . identifier[discard] ( identifier[address] )
identifier[log_debug] ( literal[string] , identifier[self] . identifier[routing_table] )
identifier[super] ( identifier[RoutingConnectionPool] , identifier[self] ). identifier[deactivate] ( identifier[address] )
|
def deactivate(self, address):
""" Deactivate an address from the connection pool,
if present, remove from the routing table and also closing
all idle connections to that address.
"""
log_debug('[#0000] C: <ROUTING> Deactivating address %r', address)
# We use `discard` instead of `remove` here since the former
# will not fail if the address has already been removed.
self.routing_table.routers.discard(address)
self.routing_table.readers.discard(address)
self.routing_table.writers.discard(address)
log_debug('[#0000] C: <ROUTING> table=%r', self.routing_table)
super(RoutingConnectionPool, self).deactivate(address)
|
def sync_scheduler(self):
    """Download the server's scheduler.info file and merge it with the
    local copy so that local ``last_run`` timestamps are preserved.

    The merge is "smart": every scheduler entry missing from the server
    copy is deleted locally, and for entries present in both copies only
    the ``frequency`` field is taken from the server; brand-new server
    entries are adopted wholesale.

    Raises
    ------
    Exception
        If the download fails or the server response is not valid JSON.
    """
    # get the server scheduler.info file
    url = "%s/%s/%s" % (self.config['server']['server_url'],
                        "experiments", "scheduler.info")
    try:
        req = requests.get(url, proxies=self.config['proxy']['proxy'],
                           auth=self.auth,
                           verify=self.verify)
        req.raise_for_status()
    except Exception as exp:
        logging.exception("Error trying to download scheduler.info: %s" % exp)
        raise exp
    try:
        server_sched = json.loads(req.content)
    except Exception as exp:
        logging.exception("Error parsing server scheduler: %s" % exp)
        raise exp
    sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
                                  'scheduler.info')
    # No local copy yet: write out the server's version verbatim.
    if not os.path.exists(sched_filename):
        with open(sched_filename, 'w') as file_p:
            json.dump(server_sched, file_p, indent=2,
                      separators=(',', ': '))
        return
    try:
        with open(sched_filename, 'r') as file_p:
            client_sched = json.load(file_p)
    except Exception as exp:
        client_sched = {}
        logging.exception("Error loading scheduler file: %s" % exp)
        logging.info("Making an empty scheduler")
    # Delete scheduled tasks the server no longer knows about.
    # BUGFIX: iterate over a snapshot of the keys -- in Python 3,
    # dict.keys() is a live view, and deleting entries while iterating
    # it raises RuntimeError.
    for exp in list(client_sched.keys()):
        if exp not in server_sched:
            del client_sched[exp]
    # Update frequencies for surviving entries; adopt new ones wholesale.
    for exp in server_sched:
        if exp in client_sched:
            client_sched[exp]['frequency'] = server_sched[exp]['frequency']
        else:
            client_sched[exp] = server_sched[exp]
    # write out the results
    with open(sched_filename, 'w') as file_p:
        json.dump(client_sched, file_p, indent=2,
                  separators=(',', ': '))
|
def function[sync_scheduler, parameter[self]]:
constant[Download the scheduler.info file and perform a smart comparison
with what we currently have so that we don't overwrite the
last_run timestamp
To do a smart comparison, we go over each entry in the
server's scheduler file. If a scheduler entry is not present
in the server copy, we delete it in the client copy and if the
scheduler entry is present in the server copy, then we
overwrite the frequency count in the client copy
]
variable[url] assign[=] binary_operation[constant[%s/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b264a950>, <ast.Constant object at 0x7da1b26496c0>, <ast.Constant object at 0x7da1b2649630>]]]
<ast.Try object at 0x7da1b264a9e0>
<ast.Try object at 0x7da1b26487f0>
variable[sched_filename] assign[=] call[name[os].path.join, parameter[call[call[name[self].config][constant[dirs]]][constant[experiments_dir]], constant[scheduler.info]]]
if <ast.UnaryOp object at 0x7da1b264a890> begin[:]
with call[name[open], parameter[name[sched_filename], constant[w]]] begin[:]
call[name[json].dump, parameter[name[server_sched], name[file_p]]]
return[None]
variable[client_sched] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b264abf0>
variable[client_exp_keys] assign[=] call[name[client_sched].keys, parameter[]]
for taget[name[exp]] in starred[name[client_exp_keys]] begin[:]
if compare[name[exp] <ast.NotIn object at 0x7da2590d7190> name[server_sched]] begin[:]
<ast.Delete object at 0x7da1b264a500>
for taget[name[exp]] in starred[name[server_sched]] begin[:]
if compare[name[exp] in name[client_sched]] begin[:]
call[call[name[client_sched]][name[exp]]][constant[frequency]] assign[=] call[call[name[server_sched]][name[exp]]][constant[frequency]]
with call[name[open], parameter[name[sched_filename], constant[w]]] begin[:]
call[name[json].dump, parameter[name[client_sched], name[file_p]]]
|
keyword[def] identifier[sync_scheduler] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] %( identifier[self] . identifier[config] [ literal[string] ][ literal[string] ],
literal[string] , literal[string] )
keyword[try] :
identifier[req] = identifier[requests] . identifier[get] ( identifier[url] , identifier[proxies] = identifier[self] . identifier[config] [ literal[string] ][ literal[string] ],
identifier[auth] = identifier[self] . identifier[auth] ,
identifier[verify] = identifier[self] . identifier[verify] )
identifier[req] . identifier[raise_for_status] ()
keyword[except] identifier[Exception] keyword[as] identifier[exp] :
identifier[logging] . identifier[exception] ( literal[string] % identifier[exp] )
keyword[raise] identifier[exp]
keyword[try] :
identifier[server_sched] = identifier[json] . identifier[loads] ( identifier[req] . identifier[content] )
keyword[except] identifier[Exception] keyword[as] identifier[exp] :
identifier[logging] . identifier[exception] ( literal[string] % identifier[exp] )
keyword[raise] identifier[exp]
identifier[sched_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[config] [ literal[string] ][ literal[string] ],
literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[sched_filename] ):
keyword[with] identifier[open] ( identifier[sched_filename] , literal[string] ) keyword[as] identifier[file_p] :
identifier[json] . identifier[dump] ( identifier[server_sched] , identifier[file_p] , identifier[indent] = literal[int] ,
identifier[separators] =( literal[string] , literal[string] ))
keyword[return]
identifier[client_sched] ={}
keyword[try] :
keyword[with] identifier[open] ( identifier[sched_filename] , literal[string] ) keyword[as] identifier[file_p] :
identifier[client_sched] = identifier[json] . identifier[load] ( identifier[file_p] )
keyword[except] identifier[Exception] keyword[as] identifier[exp] :
identifier[client_sched] ={}
identifier[logging] . identifier[exception] ( literal[string] % identifier[exp] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[client_exp_keys] = identifier[client_sched] . identifier[keys] ()
keyword[for] identifier[exp] keyword[in] identifier[client_exp_keys] :
keyword[if] identifier[exp] keyword[not] keyword[in] identifier[server_sched] :
keyword[del] identifier[client_sched] [ identifier[exp] ]
keyword[for] identifier[exp] keyword[in] identifier[server_sched] :
keyword[if] identifier[exp] keyword[in] identifier[client_sched] :
identifier[client_sched] [ identifier[exp] ][ literal[string] ]= identifier[server_sched] [ identifier[exp] ][ literal[string] ]
keyword[else] :
identifier[client_sched] [ identifier[exp] ]= identifier[server_sched] [ identifier[exp] ]
keyword[with] identifier[open] ( identifier[sched_filename] , literal[string] ) keyword[as] identifier[file_p] :
identifier[json] . identifier[dump] ( identifier[client_sched] , identifier[file_p] , identifier[indent] = literal[int] ,
identifier[separators] =( literal[string] , literal[string] ))
|
def sync_scheduler(self):
    """Download the server's scheduler.info file and smart-merge it
    with the local copy so the local ``last_run`` timestamps survive.

    Merge rules, applied per experiment entry:

    * entries absent from the server copy are deleted locally,
    * entries present on both sides get the server's ``frequency``
      (all other local fields, e.g. ``last_run``, are preserved),
    * entries new on the server are copied in verbatim.

    Raises:
        Exception: re-raised when downloading or JSON-parsing the
            server copy fails (a corrupt *local* file is tolerated
            and replaced instead).
    """
    # Fetch the authoritative scheduler.info from the server.
    url = '%s/%s/%s' % (self.config['server']['server_url'],
                        'experiments', 'scheduler.info')
    try:
        req = requests.get(url, proxies=self.config['proxy']['proxy'],
                           auth=self.auth, verify=self.verify)
        req.raise_for_status()
    except Exception as exp:
        logging.exception('Error trying to download scheduler.info: %s', exp)
        # Bare raise keeps the original traceback for the caller.
        raise
    try:
        server_sched = json.loads(req.content)
    except Exception as exp:
        logging.exception('Error parsing server scheduler: %s', exp)
        raise

    sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
                                  'scheduler.info')
    # First sync: no local file yet, so the server copy is taken as-is.
    if not os.path.exists(sched_filename):
        with open(sched_filename, 'w') as file_p:
            json.dump(server_sched, file_p, indent=2,
                      separators=(',', ': '))
        return

    try:
        with open(sched_filename, 'r') as file_p:
            client_sched = json.load(file_p)
    except Exception as exp:
        # A corrupt/unreadable local file is not fatal -- start fresh.
        client_sched = {}
        logging.exception('Error loading scheduler file: %s', exp)
        logging.info('Making an empty scheduler')

    # Drop experiments the server no longer schedules.  Iterate over a
    # snapshot of the keys: deleting from the dict while iterating its
    # live .keys() view raises RuntimeError on Python 3.
    for exp in list(client_sched.keys()):
        if exp not in server_sched:
            del client_sched[exp]
    # Pull frequencies from the server, preserving any other local
    # per-experiment state (such as last_run).
    for exp in server_sched:
        if exp in client_sched:
            client_sched[exp]['frequency'] = server_sched[exp]['frequency']
        else:
            client_sched[exp] = server_sched[exp]

    # Write the merged result back out.
    with open(sched_filename, 'w') as file_p:
        json.dump(client_sched, file_p, indent=2, separators=(',', ': '))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.