code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def get_cover_image(self, output_file_path=None, scope='profile/public'):
    """
    Retrieve the Mxit user's cover image.

    No user authentication required.

    :param output_file_path: optional path; when given, the raw image data
        is written to this file and nothing is returned.
    :param scope: OAuth scope used to obtain the user token.
    :return: the raw image data when *output_file_path* is not supplied.
    """
    data = _get(
        token=self.oauth.get_user_token(scope),
        uri='/user/cover'
    )
    if output_file_path:
        # The cover image is binary data: open in binary mode so the bytes
        # are not corrupted by newline translation or text encoding.
        with open(output_file_path, 'wb') as f:
            f.write(data)
    else:
        return data
|
def function[get_cover_image, parameter[self, output_file_path, scope]]:
constant[
Retrieve the Mxit user's cover image
No user authentication required
]
variable[data] assign[=] call[name[_get], parameter[]]
if name[output_file_path] begin[:]
with call[name[open], parameter[name[output_file_path], constant[w]]] begin[:]
call[name[f].write, parameter[name[data]]]
|
keyword[def] identifier[get_cover_image] ( identifier[self] , identifier[output_file_path] = keyword[None] , identifier[scope] = literal[string] ):
literal[string]
identifier[data] = identifier[_get] (
identifier[token] = identifier[self] . identifier[oauth] . identifier[get_user_token] ( identifier[scope] ),
identifier[uri] = literal[string]
)
keyword[if] identifier[output_file_path] :
keyword[with] identifier[open] ( identifier[output_file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[data] )
keyword[else] :
keyword[return] identifier[data]
|
def get_cover_image(self, output_file_path=None, scope='profile/public'):
"""
Retrieve the Mxit user's cover image
No user authentication required
"""
data = _get(token=self.oauth.get_user_token(scope), uri='/user/cover')
if output_file_path:
with open(output_file_path, 'w') as f:
f.write(data) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
return data
|
def JSONList(*args, **kwargs):
    """Stores a list as JSON on database, with mutability support.

    If kwargs has a param `unique_sorted` (which evaluated to True),
    list values are made unique and sorted.
    """
    # Pop the flag (with a default) before forwarding kwargs to the column
    # type; this replaces the original try/except KeyError dance and keeps
    # `unique_sorted` out of the type constructor's kwargs in all cases.
    if kwargs.pop("unique_sorted", False):
        type_ = JSONUniqueListType
    else:
        type_ = JSON
    return MutationList.as_mutable(type_(*args, **kwargs))
|
def function[JSONList, parameter[]]:
constant[Stores a list as JSON on database, with mutability support.
If kwargs has a param `unique_sorted` (which evaluated to True),
list values are made unique and sorted.
]
variable[type_] assign[=] name[JSON]
<ast.Try object at 0x7da20c6c44c0>
return[call[name[MutationList].as_mutable, parameter[call[name[type_], parameter[<ast.Starred object at 0x7da20c6c53f0>]]]]]
|
keyword[def] identifier[JSONList] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[type_] = identifier[JSON]
keyword[try] :
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] ):
identifier[type_] = identifier[JSONUniqueListType]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] identifier[MutationList] . identifier[as_mutable] ( identifier[type_] (* identifier[args] ,** identifier[kwargs] ))
|
def JSONList(*args, **kwargs):
"""Stores a list as JSON on database, with mutability support.
If kwargs has a param `unique_sorted` (which evaluated to True),
list values are made unique and sorted.
"""
type_ = JSON
try:
if kwargs.pop('unique_sorted'):
type_ = JSONUniqueListType # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
return MutationList.as_mutable(type_(*args, **kwargs))
|
def rtype_to_model(rtype):
    """ Return a model class object given a string resource type
    :param rtype:
        string resource type
    :return:
        model class object
    :raise:
        ValueError
    """
    # Compare case-insensitively against each registered model's RTYPE.
    wanted = rtype.lower()
    for candidate in goldman.config.MODELS:
        if candidate.RTYPE.lower() == wanted:
            return candidate
    raise ValueError('%s resource type not registered' % rtype)
|
def function[rtype_to_model, parameter[rtype]]:
constant[ Return a model class object given a string resource type
:param rtype:
string resource type
:return:
model class object
:raise:
ValueError
]
variable[models] assign[=] name[goldman].config.MODELS
for taget[name[model]] in starred[name[models]] begin[:]
if compare[call[name[rtype].lower, parameter[]] equal[==] call[name[model].RTYPE.lower, parameter[]]] begin[:]
return[name[model]]
<ast.Raise object at 0x7da204344220>
|
keyword[def] identifier[rtype_to_model] ( identifier[rtype] ):
literal[string]
identifier[models] = identifier[goldman] . identifier[config] . identifier[MODELS]
keyword[for] identifier[model] keyword[in] identifier[models] :
keyword[if] identifier[rtype] . identifier[lower] ()== identifier[model] . identifier[RTYPE] . identifier[lower] ():
keyword[return] identifier[model]
keyword[raise] identifier[ValueError] ( literal[string] % identifier[rtype] )
|
def rtype_to_model(rtype):
""" Return a model class object given a string resource type
:param rtype:
string resource type
:return:
model class object
:raise:
ValueError
"""
models = goldman.config.MODELS
for model in models:
if rtype.lower() == model.RTYPE.lower():
return model # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['model']]
raise ValueError('%s resource type not registered' % rtype)
|
def request_port_forward(self, address, port, handler=None):
    """
    Ask the server to open ``address:port`` for listening and forward every
    TCP connection arriving there back across this SSH session.

    When *handler* is supplied it is invoked (from a different thread) for
    each forwarded connection as::

        handler(
            channel,
            (origin_addr, origin_port),
            (server_addr, server_port),
        )

    where ``server_addr`` and ``server_port`` are the address and port the
    server was listening on. Without a handler, new forwarded connections
    are placed on the accept queue, to be picked up via `accept`.

    :param str address: the address to bind when forwarding
    :param int port:
        the port to forward, or 0 to ask the server to allocate any port
    :param callable handler:
        optional handler for incoming forwarded connections, of the form
        ``func(Channel, (str, int), (str, int))``.
    :return: the port number (`int`) allocated by the server
    :raises:
        `.SSHException` -- if the server refused the TCP forward request
    """
    if not self.active:
        raise SSHException("SSH session not active")
    port = int(port)
    response = self.global_request(
        "tcpip-forward", (address, port), wait=True
    )
    if response is None:
        raise SSHException("TCP forwarding request denied")
    if port == 0:
        # Server chose the port; read it back from the reply.
        port = response.get_int()
    if handler is None:
        # Default: just enqueue the channel for pickup via accept().
        def _enqueue(channel, origin, destination):
            self._queue_incoming_channel(channel)
        handler = _enqueue
    self._tcp_handler = handler
    return port
|
def function[request_port_forward, parameter[self, address, port, handler]]:
constant[
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(
channel,
(origin_addr, origin_port),
(server_addr, server_port),
)
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises:
`.SSHException` -- if the server refused the TCP forward request
]
if <ast.UnaryOp object at 0x7da1b2117220> begin[:]
<ast.Raise object at 0x7da1b2116c80>
variable[port] assign[=] call[name[int], parameter[name[port]]]
variable[response] assign[=] call[name[self].global_request, parameter[constant[tcpip-forward], tuple[[<ast.Name object at 0x7da1b2117e20>, <ast.Name object at 0x7da1b2117970>]]]]
if compare[name[response] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2115780>
if compare[name[port] equal[==] constant[0]] begin[:]
variable[port] assign[=] call[name[response].get_int, parameter[]]
if compare[name[handler] is constant[None]] begin[:]
def function[default_handler, parameter[channel, src_addr, dest_addr_port]]:
call[name[self]._queue_incoming_channel, parameter[name[channel]]]
variable[handler] assign[=] name[default_handler]
name[self]._tcp_handler assign[=] name[handler]
return[name[port]]
|
keyword[def] identifier[request_port_forward] ( identifier[self] , identifier[address] , identifier[port] , identifier[handler] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[active] :
keyword[raise] identifier[SSHException] ( literal[string] )
identifier[port] = identifier[int] ( identifier[port] )
identifier[response] = identifier[self] . identifier[global_request] (
literal[string] ,( identifier[address] , identifier[port] ), identifier[wait] = keyword[True]
)
keyword[if] identifier[response] keyword[is] keyword[None] :
keyword[raise] identifier[SSHException] ( literal[string] )
keyword[if] identifier[port] == literal[int] :
identifier[port] = identifier[response] . identifier[get_int] ()
keyword[if] identifier[handler] keyword[is] keyword[None] :
keyword[def] identifier[default_handler] ( identifier[channel] , identifier[src_addr] , identifier[dest_addr_port] ):
identifier[self] . identifier[_queue_incoming_channel] ( identifier[channel] )
identifier[handler] = identifier[default_handler]
identifier[self] . identifier[_tcp_handler] = identifier[handler]
keyword[return] identifier[port]
|
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(
channel,
(origin_addr, origin_port),
(server_addr, server_port),
)
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises:
`.SSHException` -- if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active') # depends on [control=['if'], data=[]]
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied') # depends on [control=['if'], data=[]]
if port == 0:
port = response.get_int() # depends on [control=['if'], data=['port']]
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
# src_addr, src_port = src_addr_port
# dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler # depends on [control=['if'], data=['handler']]
self._tcp_handler = handler
return port
|
def parse_mode(mode, default_bitdepth=None):
    """Parse PIL-style mode and return tuple (grayscale, alpha, bitdeph)"""
    # Special single-character modes are resolved immediately.
    _SPECIAL = {
        '1': (True, False, 1),    # logical
        'I': (True, False, 16),   # integer
    }
    if mode in _SPECIAL:
        return _SPECIAL[mode]
    if mode == 'P':
        # Palette modes are not supported.
        raise Error('Unknown colour mode:' + mode)
    # Colour base: RGB or greyscale (L).
    if mode.startswith('RGB'):
        grayscale = False
        mode = mode[3:]
    elif mode.startswith('L'):
        grayscale = True
        mode = mode[1:]
    else:
        raise Error('Unknown colour mode:' + mode)
    # Optional alpha channel marker.
    alpha = mode.startswith('A')
    if alpha:
        mode = mode[1:]
    # Optional ';<bits>' suffix overrides the default bitdepth.
    bitdepth = default_bitdepth
    if mode.startswith(';'):
        mode = mode[1:]
    if mode:
        try:
            bitdepth = int(mode)
        except (TypeError, ValueError):
            raise Error('Unsupported bitdepth mode:' + mode)
    return (grayscale, alpha, bitdepth)
|
def function[parse_mode, parameter[mode, default_bitdepth]]:
constant[Parse PIL-style mode and return tuple (grayscale, alpha, bitdeph)]
if compare[name[mode] equal[==] constant[P]] begin[:]
<ast.Raise object at 0x7da1b26ae290>
if call[name[mode].startswith, parameter[constant[L]]] begin[:]
variable[grayscale] assign[=] constant[True]
variable[mode] assign[=] call[name[mode]][<ast.Slice object at 0x7da1b26ac250>]
if call[name[mode].startswith, parameter[constant[A]]] begin[:]
variable[alpha] assign[=] constant[True]
variable[mode] assign[=] call[name[mode]][<ast.Slice object at 0x7da18f58fb50>]
variable[bitdepth] assign[=] name[default_bitdepth]
if call[name[mode].startswith, parameter[constant[;]]] begin[:]
variable[mode] assign[=] call[name[mode]][<ast.Slice object at 0x7da18f58f6a0>]
if name[mode] begin[:]
<ast.Try object at 0x7da18f58c700>
return[tuple[[<ast.Name object at 0x7da18f58cca0>, <ast.Name object at 0x7da18f58ed10>, <ast.Name object at 0x7da18f58f160>]]]
|
keyword[def] identifier[parse_mode] ( identifier[mode] , identifier[default_bitdepth] = keyword[None] ):
literal[string]
keyword[if] identifier[mode] == literal[string] :
keyword[raise] identifier[Error] ( literal[string] + identifier[mode] )
keyword[elif] identifier[mode] == literal[string] :
keyword[return] ( keyword[True] , keyword[False] , literal[int] )
keyword[elif] identifier[mode] == literal[string] :
keyword[return] ( keyword[True] , keyword[False] , literal[int] )
keyword[if] identifier[mode] . identifier[startswith] ( literal[string] ):
identifier[grayscale] = keyword[True]
identifier[mode] = identifier[mode] [ literal[int] :]
keyword[elif] identifier[mode] . identifier[startswith] ( literal[string] ):
identifier[grayscale] = keyword[False]
identifier[mode] = identifier[mode] [ literal[int] :]
keyword[else] :
keyword[raise] identifier[Error] ( literal[string] + identifier[mode] )
keyword[if] identifier[mode] . identifier[startswith] ( literal[string] ):
identifier[alpha] = keyword[True]
identifier[mode] = identifier[mode] [ literal[int] :]
keyword[else] :
identifier[alpha] = keyword[False]
identifier[bitdepth] = identifier[default_bitdepth]
keyword[if] identifier[mode] . identifier[startswith] ( literal[string] ):
identifier[mode] = identifier[mode] [ literal[int] :]
keyword[if] identifier[mode] :
keyword[try] :
identifier[bitdepth] = identifier[int] ( identifier[mode] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[Error] ( literal[string] + identifier[mode] )
keyword[return] ( identifier[grayscale] , identifier[alpha] , identifier[bitdepth] )
|
def parse_mode(mode, default_bitdepth=None):
"""Parse PIL-style mode and return tuple (grayscale, alpha, bitdeph)"""
# few special cases
if mode == 'P':
# Don't know what is pallette
raise Error('Unknown colour mode:' + mode) # depends on [control=['if'], data=['mode']]
elif mode == '1':
# Logical
return (True, False, 1) # depends on [control=['if'], data=[]]
elif mode == 'I':
# Integer
return (True, False, 16) # depends on [control=['if'], data=[]]
# here we go
if mode.startswith('L'):
grayscale = True
mode = mode[1:] # depends on [control=['if'], data=[]]
elif mode.startswith('RGB'):
grayscale = False
mode = mode[3:] # depends on [control=['if'], data=[]]
else:
raise Error('Unknown colour mode:' + mode)
if mode.startswith('A'):
alpha = True
mode = mode[1:] # depends on [control=['if'], data=[]]
else:
alpha = False
bitdepth = default_bitdepth
if mode.startswith(';'):
mode = mode[1:] # depends on [control=['if'], data=[]]
if mode:
try:
bitdepth = int(mode) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise Error('Unsupported bitdepth mode:' + mode) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return (grayscale, alpha, bitdepth)
|
def _makeResult(self):
    """Build and return a Result that doesn't print dots.

    Nose's ResultProxy will wrap it, and other plugins can still print
    stuff---but without smashing into our progress bar, care of
    ProgressivePlugin's stderr/out wrapping.
    """
    result = ProgressiveResult(
        self._cwd,
        self._totalTests,
        self.stream,
        config=self.config,
    )
    return result
|
def function[_makeResult, parameter[self]]:
constant[Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
]
return[call[name[ProgressiveResult], parameter[name[self]._cwd, name[self]._totalTests, name[self].stream]]]
|
keyword[def] identifier[_makeResult] ( identifier[self] ):
literal[string]
keyword[return] identifier[ProgressiveResult] ( identifier[self] . identifier[_cwd] ,
identifier[self] . identifier[_totalTests] ,
identifier[self] . identifier[stream] ,
identifier[config] = identifier[self] . identifier[config] )
|
def _makeResult(self):
"""Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
"""
return ProgressiveResult(self._cwd, self._totalTests, self.stream, config=self.config)
|
def _defaultDepsVoc(self):
    """Vocabulary of all departments
    """
    # Build (uid, title) pairs for every assigned department.
    items = [
        (api.get_uid(dept), api.get_title(dept))
        for dept in self.getDepartments()
    ]
    return api.to_display_list(items, sort_by="value", allow_empty=True)
|
def function[_defaultDepsVoc, parameter[self]]:
constant[Vocabulary of all departments
]
variable[deps] assign[=] call[name[self].getDepartments, parameter[]]
variable[items] assign[=] list[[]]
for taget[name[d]] in starred[name[deps]] begin[:]
call[name[items].append, parameter[tuple[[<ast.Call object at 0x7da2047e85b0>, <ast.Call object at 0x7da2047e9150>]]]]
return[call[name[api].to_display_list, parameter[name[items]]]]
|
keyword[def] identifier[_defaultDepsVoc] ( identifier[self] ):
literal[string]
identifier[deps] = identifier[self] . identifier[getDepartments] ()
identifier[items] =[]
keyword[for] identifier[d] keyword[in] identifier[deps] :
identifier[items] . identifier[append] (( identifier[api] . identifier[get_uid] ( identifier[d] ), identifier[api] . identifier[get_title] ( identifier[d] )))
keyword[return] identifier[api] . identifier[to_display_list] ( identifier[items] , identifier[sort_by] = literal[string] , identifier[allow_empty] = keyword[True] )
|
def _defaultDepsVoc(self):
"""Vocabulary of all departments
"""
# Getting the assigned departments
deps = self.getDepartments()
items = []
for d in deps:
items.append((api.get_uid(d), api.get_title(d))) # depends on [control=['for'], data=['d']]
return api.to_display_list(items, sort_by='value', allow_empty=True)
|
def get_work_item_next_states_on_checkin_action(self, ids, action=None):
    """GetWorkItemNextStatesOnCheckinAction.
    [Preview API] Returns the next state on the given work item IDs.
    :param [int] ids: list of work item ids
    :param str action: possible actions. Currently only supports checkin
    :rtype: [WorkItemNextStateOnTransition]
    """
    query_parameters = {}
    if ids is not None:
        # The API takes the ids as a single comma-separated string.
        joined_ids = ",".join(str(work_item_id) for work_item_id in ids)
        query_parameters['ids'] = self._serialize.query('ids', joined_ids, 'str')
    if action is not None:
        query_parameters['action'] = self._serialize.query('action', action, 'str')
    response = self._send(
        http_method='GET',
        location_id='afae844b-e2f6-44c2-8053-17b3bb936a40',
        version='5.1-preview.1',
        query_parameters=query_parameters,
    )
    return self._deserialize('[WorkItemNextStateOnTransition]', self._unwrap_collection(response))
|
def function[get_work_item_next_states_on_checkin_action, parameter[self, ids, action]]:
constant[GetWorkItemNextStatesOnCheckinAction.
[Preview API] Returns the next state on the given work item IDs.
:param [int] ids: list of work item ids
:param str action: possible actions. Currently only supports checkin
:rtype: [WorkItemNextStateOnTransition]
]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[ids] is_not constant[None]] begin[:]
variable[ids] assign[=] call[constant[,].join, parameter[call[name[map], parameter[name[str], name[ids]]]]]
call[name[query_parameters]][constant[ids]] assign[=] call[name[self]._serialize.query, parameter[constant[ids], name[ids], constant[str]]]
if compare[name[action] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[action]] assign[=] call[name[self]._serialize.query, parameter[constant[action], name[action], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[WorkItemNextStateOnTransition]], call[name[self]._unwrap_collection, parameter[name[response]]]]]]
|
keyword[def] identifier[get_work_item_next_states_on_checkin_action] ( identifier[self] , identifier[ids] , identifier[action] = keyword[None] ):
literal[string]
identifier[query_parameters] ={}
keyword[if] identifier[ids] keyword[is] keyword[not] keyword[None] :
identifier[ids] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[ids] ))
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[ids] , literal[string] )
keyword[if] identifier[action] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[action] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] ))
|
def get_work_item_next_states_on_checkin_action(self, ids, action=None):
"""GetWorkItemNextStatesOnCheckinAction.
[Preview API] Returns the next state on the given work item IDs.
:param [int] ids: list of work item ids
:param str action: possible actions. Currently only supports checkin
:rtype: [WorkItemNextStateOnTransition]
"""
query_parameters = {}
if ids is not None:
ids = ','.join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str') # depends on [control=['if'], data=['ids']]
if action is not None:
query_parameters['action'] = self._serialize.query('action', action, 'str') # depends on [control=['if'], data=['action']]
response = self._send(http_method='GET', location_id='afae844b-e2f6-44c2-8053-17b3bb936a40', version='5.1-preview.1', query_parameters=query_parameters)
return self._deserialize('[WorkItemNextStateOnTransition]', self._unwrap_collection(response))
|
def to_JSON(self):
    """Dumps object fields into a JSON formatted string
    :returns: the JSON string
    """
    # Sub-objects serialize themselves; parse their JSON so the result is
    # one nested document rather than embedded JSON strings.
    payload = {
        "reception_time": self._reception_time,
        "Location": json.loads(self._location.to_JSON()),
        "Weather": json.loads(self._weather.to_JSON()),
    }
    return json.dumps(payload)
|
def function[to_JSON, parameter[self]]:
constant[Dumps object fields into a JSON formatted string
:returns: the JSON string
]
return[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da2044c3490>, <ast.Constant object at 0x7da2044c1ed0>, <ast.Constant object at 0x7da2044c0040>], [<ast.Attribute object at 0x7da2044c0ee0>, <ast.Call object at 0x7da2044c2ef0>, <ast.Call object at 0x7da2044c1150>]]]]]
|
keyword[def] identifier[to_JSON] ( identifier[self] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ({ literal[string] : identifier[self] . identifier[_reception_time] ,
literal[string] : identifier[json] . identifier[loads] ( identifier[self] . identifier[_location] . identifier[to_JSON] ()),
literal[string] : identifier[json] . identifier[loads] ( identifier[self] . identifier[_weather] . identifier[to_JSON] ())
})
|
def to_JSON(self):
"""Dumps object fields into a JSON formatted string
:returns: the JSON string
"""
return json.dumps({'reception_time': self._reception_time, 'Location': json.loads(self._location.to_JSON()), 'Weather': json.loads(self._weather.to_JSON())})
|
def _compute_magnitude_squared_term(self, P, M, Q, W, mag):
"""
Compute magnitude squared term, equation 5, p. 909.
"""
return P * (mag - M) + Q * (mag - M) ** 2 + W
|
def function[_compute_magnitude_squared_term, parameter[self, P, M, Q, W, mag]]:
constant[
Compute magnitude squared term, equation 5, p. 909.
]
return[binary_operation[binary_operation[binary_operation[name[P] * binary_operation[name[mag] - name[M]]] + binary_operation[name[Q] * binary_operation[binary_operation[name[mag] - name[M]] ** constant[2]]]] + name[W]]]
|
keyword[def] identifier[_compute_magnitude_squared_term] ( identifier[self] , identifier[P] , identifier[M] , identifier[Q] , identifier[W] , identifier[mag] ):
literal[string]
keyword[return] identifier[P] *( identifier[mag] - identifier[M] )+ identifier[Q] *( identifier[mag] - identifier[M] )** literal[int] + identifier[W]
|
def _compute_magnitude_squared_term(self, P, M, Q, W, mag):
"""
Compute magnitude squared term, equation 5, p. 909.
"""
return P * (mag - M) + Q * (mag - M) ** 2 + W
|
def mkres(self):
    """
    Create a directory tree for the resized assets
    """
    for density in DENSITY_TYPES:
        # ldpi and xxxhdpi are optional; honour the corresponding flags.
        if (density == 'ldpi' and not self.ldpi) or \
                (density == 'xxxhdpi' and not self.xxxhdpi):
            continue
        target = os.path.join(self.out, 'res/drawable-%s' % density)
        try:
            os.makedirs(target, 0o755)
        except OSError:
            # Directory may already exist; ignore.
            pass
|
def function[mkres, parameter[self]]:
constant[
Create a directory tree for the resized assets
]
for taget[name[d]] in starred[name[DENSITY_TYPES]] begin[:]
if <ast.BoolOp object at 0x7da18dc05600> begin[:]
continue
if <ast.BoolOp object at 0x7da18dc06b90> begin[:]
continue
<ast.Try object at 0x7da20c7c8370>
|
keyword[def] identifier[mkres] ( identifier[self] ):
literal[string]
keyword[for] identifier[d] keyword[in] identifier[DENSITY_TYPES] :
keyword[if] identifier[d] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[ldpi] :
keyword[continue]
keyword[if] identifier[d] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[xxxhdpi] :
keyword[continue]
keyword[try] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[out] , literal[string] % identifier[d] )
identifier[os] . identifier[makedirs] ( identifier[path] , literal[int] )
keyword[except] identifier[OSError] :
keyword[pass]
|
def mkres(self):
"""
Create a directory tree for the resized assets
"""
for d in DENSITY_TYPES:
if d == 'ldpi' and (not self.ldpi):
continue # skip ldpi # depends on [control=['if'], data=[]]
if d == 'xxxhdpi' and (not self.xxxhdpi):
continue # skip xxxhdpi # depends on [control=['if'], data=[]]
try:
path = os.path.join(self.out, 'res/drawable-%s' % d)
os.makedirs(path, 493) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['d']]
|
def build_software_cache(sw_dir=None):
    """
    Builds up the software cache directory at *sw_dir* by simply copying all required python
    modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
    """
    # Start from an empty cache directory.
    sw_dir = get_sw_dir(sw_dir)
    remove_software_cache(sw_dir)
    os.makedirs(sw_dir)
    # Reload so each dependency reports its actual on-disk location.
    reload_dependencies(force=True)
    for mod in deps:
        mod_dir = os.path.dirname(mod.__file__)
        base, _ext = os.path.splitext(os.path.basename(mod.__file__))
        if base == "__init__":
            # A package: copy the whole directory tree under its package name.
            pkg_name = os.path.basename(mod_dir)
            shutil.copytree(mod_dir, os.path.join(sw_dir, pkg_name))
        else:
            # A single-file module: copy just that .py file.
            shutil.copy2(os.path.join(mod_dir, base + ".py"), sw_dir)
|
def function[build_software_cache, parameter[sw_dir]]:
constant[
Builds up the software cache directory at *sw_dir* by simply copying all required python
modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
]
variable[sw_dir] assign[=] call[name[get_sw_dir], parameter[name[sw_dir]]]
call[name[remove_software_cache], parameter[name[sw_dir]]]
call[name[os].makedirs, parameter[name[sw_dir]]]
call[name[reload_dependencies], parameter[]]
for taget[name[mod]] in starred[name[deps]] begin[:]
variable[path] assign[=] call[name[os].path.dirname, parameter[name[mod].__file__]]
<ast.Tuple object at 0x7da1b05ed120> assign[=] call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[mod].__file__]]]]
if compare[name[name] equal[==] constant[__init__]] begin[:]
variable[name] assign[=] call[name[os].path.basename, parameter[name[path]]]
call[name[shutil].copytree, parameter[name[path], call[name[os].path.join, parameter[name[sw_dir], name[name]]]]]
|
keyword[def] identifier[build_software_cache] ( identifier[sw_dir] = keyword[None] ):
literal[string]
identifier[sw_dir] = identifier[get_sw_dir] ( identifier[sw_dir] )
identifier[remove_software_cache] ( identifier[sw_dir] )
identifier[os] . identifier[makedirs] ( identifier[sw_dir] )
identifier[reload_dependencies] ( identifier[force] = keyword[True] )
keyword[for] identifier[mod] keyword[in] identifier[deps] :
identifier[path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[mod] . identifier[__file__] )
identifier[name] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[mod] . identifier[__file__] ))
keyword[if] identifier[name] == literal[string] :
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
identifier[shutil] . identifier[copytree] ( identifier[path] , identifier[os] . identifier[path] . identifier[join] ( identifier[sw_dir] , identifier[name] ))
keyword[else] :
identifier[shutil] . identifier[copy2] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[name] + literal[string] ), identifier[sw_dir] )
|
def build_software_cache(sw_dir=None):
"""
Builds up the software cache directory at *sw_dir* by simply copying all required python
modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
"""
# ensure the cache is empty
sw_dir = get_sw_dir(sw_dir)
remove_software_cache(sw_dir)
os.makedirs(sw_dir)
# reload dependencies to find the proper module paths
reload_dependencies(force=True)
for mod in deps:
path = os.path.dirname(mod.__file__)
(name, ext) = os.path.splitext(os.path.basename(mod.__file__))
# single file or module?
if name == '__init__':
# copy the entire module
name = os.path.basename(path)
shutil.copytree(path, os.path.join(sw_dir, name)) # depends on [control=['if'], data=['name']]
else:
shutil.copy2(os.path.join(path, name + '.py'), sw_dir) # depends on [control=['for'], data=['mod']]
|
def create_vocabulary(
        vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True,
        _DIGIT_RE=re.compile(br"\d"), _START_VOCAB=None
):
    r"""Create vocabulary file (if it does not exist yet) from data file.

    Data file is assumed to contain one sentence per line. Each sentence is
    tokenized and digits are normalized (if normalize_digits is set).
    Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
    We write it to vocabulary_path in a one-token-per-line format, so that later
    token in the first line gets id=0, second line gets id=1, and so on.

    Parameters
    -----------
    vocabulary_path : str
        Path where the vocabulary will be created.
    data_path : str
        Data file that will be used to create vocabulary.
    max_vocabulary_size : int
        Limit on the size of the created vocabulary.
    tokenizer : function
        A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
    normalize_digits : boolean
        If true, all digits are replaced by `0`.
    _DIGIT_RE : regular expression function
        Default is ``re.compile(br"\d")``.
    _START_VOCAB : list of str
        The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.

    References
    ----------
    - Code from ``/tensorflow/models/rnn/translation/data_utils.py``
    """
    if _START_VOCAB is None:
        # Built here rather than as a default argument so the mutable list is
        # never shared between calls.
        _START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"]

    if gfile.Exists(vocabulary_path):
        # Vocabulary already built on a previous run; nothing to do.
        tl.logging.info("Vocabulary %s from data %s exists" % (vocabulary_path, data_path))
        return

    tl.logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
    vocab = {}
    with gfile.GFile(data_path, mode="rb") as f:
        for counter, line in enumerate(f, start=1):
            if counter % 100000 == 0:
                tl.logging.info("  processing line %d" % counter)
            tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
            for w in tokens:
                word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
                vocab[word] = vocab.get(word, 0) + 1
        # Special tokens first, then every observed token by descending count.
        vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
        if len(vocab_list) > max_vocabulary_size:
            vocab_list = vocab_list[:max_vocabulary_size]
        with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
            for w in vocab_list:
                vocab_file.write(w + b"\n")
|
def function[create_vocabulary, parameter[vocabulary_path, data_path, max_vocabulary_size, tokenizer, normalize_digits, _DIGIT_RE, _START_VOCAB]]:
constant[Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Parameters
-----------
vocabulary_path : str
Path where the vocabulary will be created.
data_path : str
Data file that will be used to create vocabulary.
max_vocabulary_size : int
Limit on the size of the created vocabulary.
tokenizer : function
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
_DIGIT_RE : regular expression function
Default is ``re.compile(br"\d")``.
_START_VOCAB : list of str
The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
]
if compare[name[_START_VOCAB] is constant[None]] begin[:]
variable[_START_VOCAB] assign[=] list[[<ast.Constant object at 0x7da18bc71f00>, <ast.Constant object at 0x7da18bc73af0>, <ast.Constant object at 0x7da18bc704c0>, <ast.Constant object at 0x7da18bc72d10>]]
if <ast.UnaryOp object at 0x7da18bc70d00> begin[:]
call[name[tl].logging.info, parameter[binary_operation[constant[Creating vocabulary %s from data %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc723b0>, <ast.Name object at 0x7da18bc73fd0>]]]]]
variable[vocab] assign[=] dictionary[[], []]
with call[name[gfile].GFile, parameter[name[data_path]]] begin[:]
variable[counter] assign[=] constant[0]
for taget[name[line]] in starred[name[f]] begin[:]
<ast.AugAssign object at 0x7da18bc720e0>
if compare[binary_operation[name[counter] <ast.Mod object at 0x7da2590d6920> constant[100000]] equal[==] constant[0]] begin[:]
call[name[tl].logging.info, parameter[binary_operation[constant[ processing line %d] <ast.Mod object at 0x7da2590d6920> name[counter]]]]
variable[tokens] assign[=] <ast.IfExp object at 0x7da18bc71600>
for taget[name[w]] in starred[name[tokens]] begin[:]
variable[word] assign[=] <ast.IfExp object at 0x7da18bc70df0>
if compare[name[word] in name[vocab]] begin[:]
<ast.AugAssign object at 0x7da18bc719c0>
variable[vocab_list] assign[=] binary_operation[name[_START_VOCAB] + call[name[sorted], parameter[name[vocab]]]]
if compare[call[name[len], parameter[name[vocab_list]]] greater[>] name[max_vocabulary_size]] begin[:]
variable[vocab_list] assign[=] call[name[vocab_list]][<ast.Slice object at 0x7da18bc71c00>]
with call[name[gfile].GFile, parameter[name[vocabulary_path]]] begin[:]
for taget[name[w]] in starred[name[vocab_list]] begin[:]
call[name[vocab_file].write, parameter[binary_operation[name[w] + constant[b'\n']]]]
|
keyword[def] identifier[create_vocabulary] (
identifier[vocabulary_path] , identifier[data_path] , identifier[max_vocabulary_size] , identifier[tokenizer] = keyword[None] , identifier[normalize_digits] = keyword[True] ,
identifier[_DIGIT_RE] = identifier[re] . identifier[compile] ( literal[string] ), identifier[_START_VOCAB] = keyword[None]
):
literal[string]
keyword[if] identifier[_START_VOCAB] keyword[is] keyword[None] :
identifier[_START_VOCAB] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] keyword[not] identifier[gfile] . identifier[Exists] ( identifier[vocabulary_path] ):
identifier[tl] . identifier[logging] . identifier[info] ( literal[string] %( identifier[vocabulary_path] , identifier[data_path] ))
identifier[vocab] ={}
keyword[with] identifier[gfile] . identifier[GFile] ( identifier[data_path] , identifier[mode] = literal[string] ) keyword[as] identifier[f] :
identifier[counter] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[counter] += literal[int]
keyword[if] identifier[counter] % literal[int] == literal[int] :
identifier[tl] . identifier[logging] . identifier[info] ( literal[string] % identifier[counter] )
identifier[tokens] = identifier[tokenizer] ( identifier[line] ) keyword[if] identifier[tokenizer] keyword[else] identifier[basic_tokenizer] ( identifier[line] )
keyword[for] identifier[w] keyword[in] identifier[tokens] :
identifier[word] = identifier[re] . identifier[sub] ( identifier[_DIGIT_RE] , literal[string] , identifier[w] ) keyword[if] identifier[normalize_digits] keyword[else] identifier[w]
keyword[if] identifier[word] keyword[in] identifier[vocab] :
identifier[vocab] [ identifier[word] ]+= literal[int]
keyword[else] :
identifier[vocab] [ identifier[word] ]= literal[int]
identifier[vocab_list] = identifier[_START_VOCAB] + identifier[sorted] ( identifier[vocab] , identifier[key] = identifier[vocab] . identifier[get] , identifier[reverse] = keyword[True] )
keyword[if] identifier[len] ( identifier[vocab_list] )> identifier[max_vocabulary_size] :
identifier[vocab_list] = identifier[vocab_list] [: identifier[max_vocabulary_size] ]
keyword[with] identifier[gfile] . identifier[GFile] ( identifier[vocabulary_path] , identifier[mode] = literal[string] ) keyword[as] identifier[vocab_file] :
keyword[for] identifier[w] keyword[in] identifier[vocab_list] :
identifier[vocab_file] . identifier[write] ( identifier[w] + literal[string] )
keyword[else] :
identifier[tl] . identifier[logging] . identifier[info] ( literal[string] %( identifier[vocabulary_path] , identifier[data_path] ))
|
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True, _DIGIT_RE=re.compile(b'\\d'), _START_VOCAB=None):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Parameters
-----------
vocabulary_path : str
Path where the vocabulary will be created.
data_path : str
Data file that will be used to create vocabulary.
max_vocabulary_size : int
Limit on the size of the created vocabulary.
tokenizer : function
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
_DIGIT_RE : regular expression function
Default is ``re.compile(br"\\d")``.
_START_VOCAB : list of str
The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
if _START_VOCAB is None:
_START_VOCAB = [b'_PAD', b'_GO', b'_EOS', b'_UNK'] # depends on [control=['if'], data=['_START_VOCAB']]
if not gfile.Exists(vocabulary_path):
tl.logging.info('Creating vocabulary %s from data %s' % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode='rb') as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
tl.logging.info(' processing line %d' % counter) # depends on [control=['if'], data=[]]
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = re.sub(_DIGIT_RE, b'0', w) if normalize_digits else w
if word in vocab:
vocab[word] += 1 # depends on [control=['if'], data=['word', 'vocab']]
else:
vocab[word] = 1 # depends on [control=['for'], data=['w']] # depends on [control=['for'], data=['line']]
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size] # depends on [control=['if'], data=['max_vocabulary_size']]
with gfile.GFile(vocabulary_path, mode='wb') as vocab_file:
for w in vocab_list:
vocab_file.write(w + b'\n') # depends on [control=['for'], data=['w']] # depends on [control=['with'], data=['vocab_file']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
tl.logging.info('Vocabulary %s from data %s exists' % (vocabulary_path, data_path))
|
def _set_dscp_ttl_mode(self, v, load=False):
  """
  Setter method for dscp_ttl_mode, mapped from YANG variable /interface/tunnel/dscp_ttl_mode (enumeration)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dscp_ttl_mode is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dscp_ttl_mode() directly.
  """
  # If v is already a typed YANG value, unwrap it to its underlying base type
  # so it can be re-validated and re-wrapped below.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Validate and wrap the value in a restricted enumeration type; only
    # 'uniform' (value 1) and 'pipe' (value 2) are legal for dscp-ttl-mode.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pipe': {'value': 2}, u'uniform': {'value': 1}},), is_leaf=True, yang_name="dscp-ttl-mode", rest_name="dscp-ttl-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel dscp ttl mode', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='enumeration', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as a structured error describing the expected YANG type.
    # NOTE(review): the original exception context is discarded here.
    raise ValueError({
      'error-string': """dscp_ttl_mode must be of a type compatible with enumeration""",
      'defined-type': "brocade-gre-vxlan:enumeration",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pipe': {'value': 2}, u'uniform': {'value': 1}},), is_leaf=True, yang_name="dscp-ttl-mode", rest_name="dscp-ttl-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel dscp ttl mode', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='enumeration', is_config=True)""",
    })
  # Store the validated value on the private (name-mangled) attribute.
  self.__dscp_ttl_mode = t
  # Notify the containing object, if it tracks leaf changes.
  if hasattr(self, '_set'):
    self._set()
|
def function[_set_dscp_ttl_mode, parameter[self, v, load]]:
constant[
Setter method for dscp_ttl_mode, mapped from YANG variable /interface/tunnel/dscp_ttl_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_ttl_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_ttl_mode() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da1b2607dc0>
name[self].__dscp_ttl_mode assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_dscp_ttl_mode] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__dscp_ttl_mode] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_dscp_ttl_mode(self, v, load=False):
"""
Setter method for dscp_ttl_mode, mapped from YANG variable /interface/tunnel/dscp_ttl_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_ttl_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_ttl_mode() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'pipe': {'value': 2}, u'uniform': {'value': 1}}), is_leaf=True, yang_name='dscp-ttl-mode', rest_name='dscp-ttl-mode', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel dscp ttl mode', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='enumeration', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'dscp_ttl_mode must be of a type compatible with enumeration', 'defined-type': 'brocade-gre-vxlan:enumeration', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'pipe\': {\'value\': 2}, u\'uniform\': {\'value\': 1}},), is_leaf=True, yang_name="dscp-ttl-mode", rest_name="dscp-ttl-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'info\': u\'Tunnel dscp ttl mode\', u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-gre-vxlan\', defining_module=\'brocade-gre-vxlan\', yang_type=\'enumeration\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__dscp_ttl_mode = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def is_blocked(self, ip):
    """Determine if an IP address should be considered blocked.

    An address is allowed (not blocked) when it appears verbatim in
    ``self.allowed_admin_ips`` or falls inside any network listed in
    ``self.allowed_admin_ip_ranges``; every other address is blocked.

    Parameters
    ----------
    ip : str
        Textual IPv4 or IPv6 address to test.

    Returns
    -------
    bool
        True if the address is blocked, False if it is allowed.
    """
    if ip in self.allowed_admin_ips:
        # Exact-match allow list short-circuits everything else.
        return False
    if self.allowed_admin_ip_ranges:
        # Parse once instead of re-parsing on every loop iteration; guarded by
        # the truthiness check so an unparsable `ip` with no configured ranges
        # still returns True (as the original did) instead of raising.
        addr = ipaddress.ip_address(ip)
        for allowed_range in self.allowed_admin_ip_ranges:
            if addr in ipaddress.ip_network(allowed_range):
                # Found a containing network; no need to scan the rest.
                return False
    return True
|
def function[is_blocked, parameter[self, ip]]:
constant[Determine if an IP address should be considered blocked.]
variable[blocked] assign[=] constant[True]
if compare[name[ip] in name[self].allowed_admin_ips] begin[:]
variable[blocked] assign[=] constant[False]
for taget[name[allowed_range]] in starred[name[self].allowed_admin_ip_ranges] begin[:]
if compare[call[name[ipaddress].ip_address, parameter[name[ip]]] in call[name[ipaddress].ip_network, parameter[name[allowed_range]]]] begin[:]
variable[blocked] assign[=] constant[False]
return[name[blocked]]
|
keyword[def] identifier[is_blocked] ( identifier[self] , identifier[ip] ):
literal[string]
identifier[blocked] = keyword[True]
keyword[if] identifier[ip] keyword[in] identifier[self] . identifier[allowed_admin_ips] :
identifier[blocked] = keyword[False]
keyword[for] identifier[allowed_range] keyword[in] identifier[self] . identifier[allowed_admin_ip_ranges] :
keyword[if] identifier[ipaddress] . identifier[ip_address] ( identifier[ip] ) keyword[in] identifier[ipaddress] . identifier[ip_network] ( identifier[allowed_range] ):
identifier[blocked] = keyword[False]
keyword[return] identifier[blocked]
|
def is_blocked(self, ip):
"""Determine if an IP address should be considered blocked."""
blocked = True
if ip in self.allowed_admin_ips:
blocked = False # depends on [control=['if'], data=[]]
for allowed_range in self.allowed_admin_ip_ranges:
if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
blocked = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['allowed_range']]
return blocked
|
def nodes(self):
    """Set of all currently connected servers.

    .. warning:: When connected to a replica set the value of :attr:`nodes`
      can change over time as :class:`MongoClient`'s view of the replica
      set changes. :attr:`nodes` can also be an empty set when
      :class:`MongoClient` is first instantiated and hasn't yet connected
      to any servers, or a network partition causes it to lose connection
      to all servers.
    """
    known_servers = self._topology.description.known_servers
    return frozenset({server.address for server in known_servers})
|
def function[nodes, parameter[self]]:
constant[Set of all currently connected servers.
.. warning:: When connected to a replica set the value of :attr:`nodes`
can change over time as :class:`MongoClient`'s view of the replica
set changes. :attr:`nodes` can also be an empty set when
:class:`MongoClient` is first instantiated and hasn't yet connected
to any servers, or a network partition causes it to lose connection
to all servers.
]
variable[description] assign[=] name[self]._topology.description
return[call[name[frozenset], parameter[<ast.GeneratorExp object at 0x7da18c4cc340>]]]
|
keyword[def] identifier[nodes] ( identifier[self] ):
literal[string]
identifier[description] = identifier[self] . identifier[_topology] . identifier[description]
keyword[return] identifier[frozenset] ( identifier[s] . identifier[address] keyword[for] identifier[s] keyword[in] identifier[description] . identifier[known_servers] )
|
def nodes(self):
"""Set of all currently connected servers.
.. warning:: When connected to a replica set the value of :attr:`nodes`
can change over time as :class:`MongoClient`'s view of the replica
set changes. :attr:`nodes` can also be an empty set when
:class:`MongoClient` is first instantiated and hasn't yet connected
to any servers, or a network partition causes it to lose connection
to all servers.
"""
description = self._topology.description
return frozenset((s.address for s in description.known_servers))
|
def geost_1d(*args,**kwargs) : #(lon,lat,nu): OR (dst,nu)
    """
    ;+
    ;
    ;   GEOST_1D : Compute geostrophic speeds from a sea level dataset <br />
    ;
    ; Reference : Powell, B. S., et R. R. Leben (2004), An Optimal Filter for <br />
    ;   Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry, <br />
    ;   Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.
    ;
    ; @param lon {in}{optional}{type:NUMERIC} longitude in degrees
    ; @param lat {in}{optional}{type:NUMERIC} latitude in degrees
    ; @param dst {in}{optional}{type:NUMERIC} along-track distance.
    ; @param z {in}{required}{type:NUMERIC} sea level surface. Can either be absolute<br />
    ;          values (SSH) or relative values (SLA). This MUST be given in METERS.
    ; @keyword strict {in}{optional}{type:BOOLEAN} If True, compute gradient at mid-distance.
    ; @keyword pl04 {in}{optional}{type:BOOLEAN} If True, use the Powell & Leben 2004 method.
    ;
    ; @returns Geostrophic velocity component, positive eastward
    ;
    ;
    ;
    ; @author Renaud DUSSURGET, LEGOS/CTOH
    ; @history Created Sep. 2009 from genweights.m (Brian Powell (c) 2004, <br />
    ;   University of Colorado, Boulder)<br />
    ;   Modified May 2010 to be compliant with 20Hz datasets (p & n can vary).<br />
    ;     Warining may also be issued for data with holes within the width of the<br />
    ;     window.<br />
    ;   Modified June 2010 to include filtering window width in KM instead of nb. of<br />
    ;     points (Equivalent btw. 1Hz and 20Hz data).<br />
    ;
    ; @uses CALCUL_DISTANCE, EXIST, GENWEIGTHS, SETINTERSECTION, SETUNION, <br />
    ;   OPTIMAL_SLOPE, GRAVITY, CORIOLIS, TRACK_ORIENT
    ;
    ; @example dummy1=geost_1D(lon,lat,sla,pl04=True,p=11,q=11) :<br />
    ;   Return along-track velocity anomalies using a 11km by 11km Powell & Leben 2004 filter window <br />
    ;          dummy2=geost_1D(dst,sla,strict=True) :<br />
    ;   Return along-track velocity anomalies computed at mid-distance <br />
    ;
    ;-
    """
    # Positional arguments are either (lon, lat, dst, nu) [4 args] or
    # (lon, lat, nu) [3 args]; in the latter case the along-track distance is
    # derived from lon/lat (km) and converted to meters.
    lon = args[0]
    lat = args[1]
    dst = args[2] if len(args) == 4 else calcul_distance(lat,lon) * 1e3 #distance in meters
    nu = args [3] if len(args) == 4 else args[2]
    # A 1D sea-level input is treated as a single time slice: reshape to
    # (npoints, 1) so the per-column loops below work uniformly.
    isVector = len(np.shape(nu)) == 1
    #Reshape nu if vector
    if isVector : nu=np.reshape(nu,(len(nu),1))
    nt = np.shape(nu)[1] if not isVector else 1
    sh = nu.shape
    nufilt=np.ma.array(np.empty(sh),mask=True,dtype=nu.dtype)
    # Keyword options are popped so any remaining kwargs pass through to
    # powell_leben_filter_km unchanged.
    pl04 = kwargs.pop('pl04',False)
    filter = kwargs.pop('filter', None)
    strict = kwargs.pop('strict',False)
    verbose = kwargs.pop('verbose',False)
    # Optional along-track loess smoothing; 'filter' is a window width in km
    # (converted to meters to match dst).
    if filter is not None :
        for t in np.arange(nt) :
            nufilt[:,t] =loess(nu[:,t],dst,filter*1e3)
        nu=nufilt
    # Powell & Leben (2004) branch: the optimal filter returns velocities
    # directly, one column (time slice) at a time.
    if pl04 :
        ug = np.ma.array(np.empty(sh),mask=True,dtype=nu.dtype)
        for t in np.arange(nt) :
            ug[:,t] = powell_leben_filter_km(lon,lat,nu[:,t],verbose=verbose,**kwargs)
        if isVector : ug=ug.flatten()
        return ug
    #If strict option is set to True, compute gradients at mid-distance between points
    if strict :
        lon = (lon[1:] - lon[:-1])/2. + lon[0:-1]
        lat = (lat[1:] - lat[:-1])/2. + lat[0:-1]
    #Compute gravitational & coriolis forces
    # In strict mode the output grid has one fewer point (mid-distances).
    if strict : sh = (sh[0]-1,sh[1])
    g = np.repeat(gravity(lat),nt).reshape(sh)
    f = np.repeat(coriolis(lat),nt).reshape(sh)
    #Compute SSH 1st derivative
    # dh = deriv(dst,nu) #(deriv is very bad...)
    dh = np.ma.array(np.empty(sh),mask=True,dtype=nu.dtype)
    for t in np.arange(nt) :
        dh[:,t] = (nu[1:,t] - nu[:-1,t])/(dst[1:] - dst[:-1]) if strict else deriv(dst,nu[:,t])
    #Compute geostrophy
    # Geostrophic balance: u = -(g/f) * d(eta)/ds, positive eastward.
    ug = - (g*dh) / (f)
    #Inverse sign of ug for descending tracks as Coriolis is oriented to the right
    #northward
    if (not track_orient(lon,lat)) : #descending tracks
        ug *=-1
    if isVector : ug=ug.flatten()
    # strict mode also returns the mid-point lon/lat grid the velocities live on.
    return (lon,lat,ug) if strict else ug
|
def function[geost_1d, parameter[]]:
constant[
;+
;
; GEOST_1D : Compute geostrophic speeds from a sea level dataset <br />
;
; Reference : Powell, B. S., et R. R. Leben (2004), An Optimal Filter for <br />
; Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry, <br />
; Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.
;
; @param lon {in}{optional}{type:NUMERIC} longitude in degrees
; @param lat {in}{optional}{type:NUMERIC} latitude in degrees
; @param dst {in}{optional}{type:NUMERIC} along-track distance.
; @param z {in}{required}{type:NUMERIC} sea level surface. Can either be absolute<br />
; values (SSH) or relative values (SLA). This MUST be given in METERS.
; @keyword strict {in}{optional}{type:BOOLEAN} If True, compute gradient at mid-distance.
; @keyword pl04 {in}{optional}{type:BOOLEAN} If True, use the Powell & Leben 2004 method.
;
; @returns Geostrophic velocity component, positive eastward
;
;
;
; @author Renaud DUSSURGET, LEGOS/CTOH
; @history Created Sep. 2009 from genweights.m (Brian Powell (c) 2004, <br />
; University of Colorado, Boulder)<br />
; Modified May 2010 to be compliant with 20Hz datasets (p & n can vary).<br />
; Warining may also be issued for data with holes within the width of the<br />
; window.<br />
; Modified June 2010 to include filtering window width in KM instead of nb. of<br />
; points (Equivalent btw. 1Hz and 20Hz data).<br />
;
; @uses CALCUL_DISTANCE, EXIST, GENWEIGTHS, SETINTERSECTION, SETUNION, <br />
; OPTIMAL_SLOPE, GRAVITY, CORIOLIS, TRACK_ORIENT
;
; @example dummy1=geost_1D(lon,lat,sla,pl04=True,p=11,q=11) :<br />
; Return along-track velocity anomalies using a 11km by 11km Powell & Leben 2004 filter window <br />
; dummy2=geost_1D(dst,sla,strict=True) :<br />
; Return along-track velocity anomalies computed at mid-distance <br />
;
;-
]
variable[lon] assign[=] call[name[args]][constant[0]]
variable[lat] assign[=] call[name[args]][constant[1]]
variable[dst] assign[=] <ast.IfExp object at 0x7da1b08a0b50>
variable[nu] assign[=] <ast.IfExp object at 0x7da1b08a0e80>
variable[isVector] assign[=] compare[call[name[len], parameter[call[name[np].shape, parameter[name[nu]]]]] equal[==] constant[1]]
if name[isVector] begin[:]
variable[nu] assign[=] call[name[np].reshape, parameter[name[nu], tuple[[<ast.Call object at 0x7da1b08a1450>, <ast.Constant object at 0x7da1b08a14e0>]]]]
variable[nt] assign[=] <ast.IfExp object at 0x7da1b08a1570>
variable[sh] assign[=] name[nu].shape
variable[nufilt] assign[=] call[name[np].ma.array, parameter[call[name[np].empty, parameter[name[sh]]]]]
variable[pl04] assign[=] call[name[kwargs].pop, parameter[constant[pl04], constant[False]]]
variable[filter] assign[=] call[name[kwargs].pop, parameter[constant[filter], constant[None]]]
variable[strict] assign[=] call[name[kwargs].pop, parameter[constant[strict], constant[False]]]
variable[verbose] assign[=] call[name[kwargs].pop, parameter[constant[verbose], constant[False]]]
if compare[name[filter] is_not constant[None]] begin[:]
for taget[name[t]] in starred[call[name[np].arange, parameter[name[nt]]]] begin[:]
call[name[nufilt]][tuple[[<ast.Slice object at 0x7da1b08a22c0>, <ast.Name object at 0x7da1b08a22f0>]]] assign[=] call[name[loess], parameter[call[name[nu]][tuple[[<ast.Slice object at 0x7da1b08a2410>, <ast.Name object at 0x7da1b08a2440>]]], name[dst], binary_operation[name[filter] * constant[1000.0]]]]
variable[nu] assign[=] name[nufilt]
if name[pl04] begin[:]
variable[ug] assign[=] call[name[np].ma.array, parameter[call[name[np].empty, parameter[name[sh]]]]]
for taget[name[t]] in starred[call[name[np].arange, parameter[name[nt]]]] begin[:]
call[name[ug]][tuple[[<ast.Slice object at 0x7da1b08a2ad0>, <ast.Name object at 0x7da1b08a2b00>]]] assign[=] call[name[powell_leben_filter_km], parameter[name[lon], name[lat], call[name[nu]][tuple[[<ast.Slice object at 0x7da1b08a2c80>, <ast.Name object at 0x7da1b08a2cb0>]]]]]
if name[isVector] begin[:]
variable[ug] assign[=] call[name[ug].flatten, parameter[]]
return[name[ug]]
if name[strict] begin[:]
variable[lon] assign[=] binary_operation[binary_operation[binary_operation[call[name[lon]][<ast.Slice object at 0x7da1b08a3130>] - call[name[lon]][<ast.Slice object at 0x7da1b08a31f0>]] / constant[2.0]] + call[name[lon]][<ast.Slice object at 0x7da1b08a3310>]]
variable[lat] assign[=] binary_operation[binary_operation[binary_operation[call[name[lat]][<ast.Slice object at 0x7da1b08a07f0>] - call[name[lat]][<ast.Slice object at 0x7da1b08a0730>]] / constant[2.0]] + call[name[lat]][<ast.Slice object at 0x7da1b08a0610>]]
if name[strict] begin[:]
variable[sh] assign[=] tuple[[<ast.BinOp object at 0x7da1b087a4d0>, <ast.Subscript object at 0x7da1b087b790>]]
variable[g] assign[=] call[call[name[np].repeat, parameter[call[name[gravity], parameter[name[lat]]], name[nt]]].reshape, parameter[name[sh]]]
variable[f] assign[=] call[call[name[np].repeat, parameter[call[name[coriolis], parameter[name[lat]]], name[nt]]].reshape, parameter[name[sh]]]
variable[dh] assign[=] call[name[np].ma.array, parameter[call[name[np].empty, parameter[name[sh]]]]]
for taget[name[t]] in starred[call[name[np].arange, parameter[name[nt]]]] begin[:]
call[name[dh]][tuple[[<ast.Slice object at 0x7da1b087b6d0>, <ast.Name object at 0x7da1b0879f00>]]] assign[=] <ast.IfExp object at 0x7da1b08786a0>
variable[ug] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b087bcd0> / name[f]]
if <ast.UnaryOp object at 0x7da1b087b1c0> begin[:]
<ast.AugAssign object at 0x7da1b0878760>
if name[isVector] begin[:]
variable[ug] assign[=] call[name[ug].flatten, parameter[]]
return[<ast.IfExp object at 0x7da1b0878730>]
|
keyword[def] identifier[geost_1d] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[lon] = identifier[args] [ literal[int] ]
identifier[lat] = identifier[args] [ literal[int] ]
identifier[dst] = identifier[args] [ literal[int] ] keyword[if] identifier[len] ( identifier[args] )== literal[int] keyword[else] identifier[calcul_distance] ( identifier[lat] , identifier[lon] )* literal[int]
identifier[nu] = identifier[args] [ literal[int] ] keyword[if] identifier[len] ( identifier[args] )== literal[int] keyword[else] identifier[args] [ literal[int] ]
identifier[isVector] = identifier[len] ( identifier[np] . identifier[shape] ( identifier[nu] ))== literal[int]
keyword[if] identifier[isVector] : identifier[nu] = identifier[np] . identifier[reshape] ( identifier[nu] ,( identifier[len] ( identifier[nu] ), literal[int] ))
identifier[nt] = identifier[np] . identifier[shape] ( identifier[nu] )[ literal[int] ] keyword[if] keyword[not] identifier[isVector] keyword[else] literal[int]
identifier[sh] = identifier[nu] . identifier[shape]
identifier[nufilt] = identifier[np] . identifier[ma] . identifier[array] ( identifier[np] . identifier[empty] ( identifier[sh] ), identifier[mask] = keyword[True] , identifier[dtype] = identifier[nu] . identifier[dtype] )
identifier[pl04] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[filter] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[strict] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[verbose] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] identifier[filter] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[t] keyword[in] identifier[np] . identifier[arange] ( identifier[nt] ):
identifier[nufilt] [:, identifier[t] ]= identifier[loess] ( identifier[nu] [:, identifier[t] ], identifier[dst] , identifier[filter] * literal[int] )
identifier[nu] = identifier[nufilt]
keyword[if] identifier[pl04] :
identifier[ug] = identifier[np] . identifier[ma] . identifier[array] ( identifier[np] . identifier[empty] ( identifier[sh] ), identifier[mask] = keyword[True] , identifier[dtype] = identifier[nu] . identifier[dtype] )
keyword[for] identifier[t] keyword[in] identifier[np] . identifier[arange] ( identifier[nt] ):
identifier[ug] [:, identifier[t] ]= identifier[powell_leben_filter_km] ( identifier[lon] , identifier[lat] , identifier[nu] [:, identifier[t] ], identifier[verbose] = identifier[verbose] ,** identifier[kwargs] )
keyword[if] identifier[isVector] : identifier[ug] = identifier[ug] . identifier[flatten] ()
keyword[return] identifier[ug]
keyword[if] identifier[strict] :
identifier[lon] =( identifier[lon] [ literal[int] :]- identifier[lon] [:- literal[int] ])/ literal[int] + identifier[lon] [ literal[int] :- literal[int] ]
identifier[lat] =( identifier[lat] [ literal[int] :]- identifier[lat] [:- literal[int] ])/ literal[int] + identifier[lat] [ literal[int] :- literal[int] ]
keyword[if] identifier[strict] : identifier[sh] =( identifier[sh] [ literal[int] ]- literal[int] , identifier[sh] [ literal[int] ])
identifier[g] = identifier[np] . identifier[repeat] ( identifier[gravity] ( identifier[lat] ), identifier[nt] ). identifier[reshape] ( identifier[sh] )
identifier[f] = identifier[np] . identifier[repeat] ( identifier[coriolis] ( identifier[lat] ), identifier[nt] ). identifier[reshape] ( identifier[sh] )
identifier[dh] = identifier[np] . identifier[ma] . identifier[array] ( identifier[np] . identifier[empty] ( identifier[sh] ), identifier[mask] = keyword[True] , identifier[dtype] = identifier[nu] . identifier[dtype] )
keyword[for] identifier[t] keyword[in] identifier[np] . identifier[arange] ( identifier[nt] ):
identifier[dh] [:, identifier[t] ]=( identifier[nu] [ literal[int] :, identifier[t] ]- identifier[nu] [:- literal[int] , identifier[t] ])/( identifier[dst] [ literal[int] :]- identifier[dst] [:- literal[int] ]) keyword[if] identifier[strict] keyword[else] identifier[deriv] ( identifier[dst] , identifier[nu] [:, identifier[t] ])
identifier[ug] =-( identifier[g] * identifier[dh] )/( identifier[f] )
keyword[if] ( keyword[not] identifier[track_orient] ( identifier[lon] , identifier[lat] )):
identifier[ug] *=- literal[int]
keyword[if] identifier[isVector] : identifier[ug] = identifier[ug] . identifier[flatten] ()
keyword[return] ( identifier[lon] , identifier[lat] , identifier[ug] ) keyword[if] identifier[strict] keyword[else] identifier[ug]
|
def geost_1d(*args, **kwargs): #(lon,lat,nu): OR (dst,nu)
'\n ;+\n ;\n ; GEOST_1D : Compute geostrophic speeds from a sea level dataset <br />\n ;\n ; Reference : Powell, B. S., et R. R. Leben (2004), An Optimal Filter for <br />\n ; Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry, <br />\n ; Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.\n ;\n ; @param lon {in}{optional}{type:NUMERIC} longitude in degrees\n ; @param lat {in}{optional}{type:NUMERIC} latitude in degrees\n ; @param dst {in}{optional}{type:NUMERIC} along-track distance.\n ; @param z {in}{required}{type:NUMERIC} sea level surface. Can either be absolute<br />\n ; values (SSH) or relative values (SLA). This MUST be given in METERS.\n ; @keyword strict {in}{optional}{type:BOOLEAN} If True, compute gradient at mid-distance.\n ; @keyword pl04 {in}{optional}{type:BOOLEAN} If True, use the Powell & Leben 2004 method.\n ; \n ; @returns Geostrophic velocity component, positive eastward\n ;\n ;\n ;\n ; @author Renaud DUSSURGET, LEGOS/CTOH\n ; @history Created Sep. 2009 from genweights.m (Brian Powell (c) 2004, <br />\n ; University of Colorado, Boulder)<br />\n ; Modified May 2010 to be compliant with 20Hz datasets (p & n can vary).<br />\n ; Warining may also be issued for data with holes within the width of the<br />\n ; window.<br />\n ; Modified June 2010 to include filtering window width in KM instead of nb. of<br />\n ; points (Equivalent btw. 1Hz and 20Hz data).<br />\n ;\n ; @uses CALCUL_DISTANCE, EXIST, GENWEIGTHS, SETINTERSECTION, SETUNION, <br />\n ; OPTIMAL_SLOPE, GRAVITY, CORIOLIS, TRACK_ORIENT\n ;\n ; @example dummy1=geost_1D(lon,lat,sla,pl04=True,p=11,q=11) :<br />\n ; Return along-track velocity anomalies using a 11km by 11km Powell & Leben 2004 filter window <br />\n ; dummy2=geost_1D(dst,sla,strict=True) :<br />\n ; Return along-track velocity anomalies computed at mid-distance <br />\n ;\n ;-\n '
lon = args[0]
lat = args[1]
dst = args[2] if len(args) == 4 else calcul_distance(lat, lon) * 1000.0 #distance in meters
nu = args[3] if len(args) == 4 else args[2]
isVector = len(np.shape(nu)) == 1 #Reshape nu if vector
if isVector:
nu = np.reshape(nu, (len(nu), 1)) # depends on [control=['if'], data=[]]
nt = np.shape(nu)[1] if not isVector else 1
sh = nu.shape
nufilt = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
pl04 = kwargs.pop('pl04', False)
filter = kwargs.pop('filter', None)
strict = kwargs.pop('strict', False)
verbose = kwargs.pop('verbose', False)
if filter is not None:
for t in np.arange(nt):
nufilt[:, t] = loess(nu[:, t], dst, filter * 1000.0) # depends on [control=['for'], data=['t']]
nu = nufilt # depends on [control=['if'], data=['filter']]
if pl04:
ug = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
for t in np.arange(nt):
ug[:, t] = powell_leben_filter_km(lon, lat, nu[:, t], verbose=verbose, **kwargs) # depends on [control=['for'], data=['t']]
if isVector:
ug = ug.flatten() # depends on [control=['if'], data=[]]
return ug # depends on [control=['if'], data=[]] #If strict option is set to True, compute gradients at mid-distance between points
if strict:
lon = (lon[1:] - lon[:-1]) / 2.0 + lon[0:-1]
lat = (lat[1:] - lat[:-1]) / 2.0 + lat[0:-1] # depends on [control=['if'], data=[]] #Compute gravitational & coriolis forces
if strict:
sh = (sh[0] - 1, sh[1]) # depends on [control=['if'], data=[]]
g = np.repeat(gravity(lat), nt).reshape(sh)
f = np.repeat(coriolis(lat), nt).reshape(sh) #Compute SSH 1st derivative
# dh = deriv(dst,nu) #(deriv is very bad...)
dh = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
for t in np.arange(nt):
dh[:, t] = (nu[1:, t] - nu[:-1, t]) / (dst[1:] - dst[:-1]) if strict else deriv(dst, nu[:, t]) # depends on [control=['for'], data=['t']] #Compute geostrophy
# print f
# print g
# print dh
ug = -(g * dh) / f #Inverse sign of ug for descending tracks as Coriolis is oriented to the right
#northward
if not track_orient(lon, lat): #descending tracks
ug *= -1 # depends on [control=['if'], data=[]]
if isVector:
ug = ug.flatten() # depends on [control=['if'], data=[]]
return (lon, lat, ug) if strict else ug
|
def sizefromlen(limit, *properties):
'''
Factory to generate a function which get size from specified field with limits.
Often used in nstruct "size" parameter.
To retrieve size without limit, simply use lambda expression: lambda x: x.header.length
:param limit: the maximum size limit, if the acquired value if larger then the limit, BadLenError is raised
to protect against serious result like memory overflow or dead loop.
:param properties: the name of the specified fields. Specify more than one string to form a property path,
like: sizefromlen(256, 'header', 'length') -> s.header.length
:returns: a function which takes a NamedStruct as parameter, and returns the length value from specified
property path.
'''
def func(namedstruct):
v = namedstruct._target
for p in properties:
v = getattr(v, p)
if v > limit:
raise BadLenError('Struct length exceeds limit ' + str(limit))
return v
return func
|
def function[sizefromlen, parameter[limit]]:
constant[
Factory to generate a function which get size from specified field with limits.
Often used in nstruct "size" parameter.
To retrieve size without limit, simply use lambda expression: lambda x: x.header.length
:param limit: the maximum size limit, if the acquired value if larger then the limit, BadLenError is raised
to protect against serious result like memory overflow or dead loop.
:param properties: the name of the specified fields. Specify more than one string to form a property path,
like: sizefromlen(256, 'header', 'length') -> s.header.length
:returns: a function which takes a NamedStruct as parameter, and returns the length value from specified
property path.
]
def function[func, parameter[namedstruct]]:
variable[v] assign[=] name[namedstruct]._target
for taget[name[p]] in starred[name[properties]] begin[:]
variable[v] assign[=] call[name[getattr], parameter[name[v], name[p]]]
if compare[name[v] greater[>] name[limit]] begin[:]
<ast.Raise object at 0x7da1b055ece0>
return[name[v]]
return[name[func]]
|
keyword[def] identifier[sizefromlen] ( identifier[limit] ,* identifier[properties] ):
literal[string]
keyword[def] identifier[func] ( identifier[namedstruct] ):
identifier[v] = identifier[namedstruct] . identifier[_target]
keyword[for] identifier[p] keyword[in] identifier[properties] :
identifier[v] = identifier[getattr] ( identifier[v] , identifier[p] )
keyword[if] identifier[v] > identifier[limit] :
keyword[raise] identifier[BadLenError] ( literal[string] + identifier[str] ( identifier[limit] ))
keyword[return] identifier[v]
keyword[return] identifier[func]
|
def sizefromlen(limit, *properties):
"""
Factory to generate a function which get size from specified field with limits.
Often used in nstruct "size" parameter.
To retrieve size without limit, simply use lambda expression: lambda x: x.header.length
:param limit: the maximum size limit, if the acquired value if larger then the limit, BadLenError is raised
to protect against serious result like memory overflow or dead loop.
:param properties: the name of the specified fields. Specify more than one string to form a property path,
like: sizefromlen(256, 'header', 'length') -> s.header.length
:returns: a function which takes a NamedStruct as parameter, and returns the length value from specified
property path.
"""
def func(namedstruct):
v = namedstruct._target
for p in properties:
v = getattr(v, p) # depends on [control=['for'], data=['p']]
if v > limit:
raise BadLenError('Struct length exceeds limit ' + str(limit)) # depends on [control=['if'], data=['limit']]
return v
return func
|
def _run_task_internal(self, task):
''' run a particular module step in a playbook '''
hosts = self._list_available_hosts()
self.inventory.restrict_to(hosts)
runner = cirruscluster.ext.ansible.runner.Runner(
pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
module_args=task.module_args, forks=self.forks,
remote_pass=self.remote_pass, module_path=self.module_path,
timeout=self.timeout, remote_user=task.play.remote_user,
remote_port=task.play.remote_port, module_vars=task.module_vars,
private_key_file=self.private_key_file,
private_key=self.private_key,
setup_cache=self.SETUP_CACHE, basedir=task.play.basedir,
conditional=task.only_if, callbacks=self.runner_callbacks,
sudo=task.sudo, sudo_user=task.sudo_user,
transport=task.transport, sudo_pass=task.sudo_pass, is_playbook=True
)
if task.async_seconds == 0:
results = runner.run()
else:
results, poller = runner.run_async(task.async_seconds)
self.stats.compute(results)
if task.async_poll_interval > 0:
# if not polling, playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
contacted = results.get('contacted',{})
dark = results.get('dark', {})
self.inventory.lift_restriction()
if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
return None
return results
|
def function[_run_task_internal, parameter[self, task]]:
constant[ run a particular module step in a playbook ]
variable[hosts] assign[=] call[name[self]._list_available_hosts, parameter[]]
call[name[self].inventory.restrict_to, parameter[name[hosts]]]
variable[runner] assign[=] call[name[cirruscluster].ext.ansible.runner.Runner, parameter[]]
if compare[name[task].async_seconds equal[==] constant[0]] begin[:]
variable[results] assign[=] call[name[runner].run, parameter[]]
variable[contacted] assign[=] call[name[results].get, parameter[constant[contacted], dictionary[[], []]]]
variable[dark] assign[=] call[name[results].get, parameter[constant[dark], dictionary[[], []]]]
call[name[self].inventory.lift_restriction, parameter[]]
if <ast.BoolOp object at 0x7da1b135c640> begin[:]
return[constant[None]]
return[name[results]]
|
keyword[def] identifier[_run_task_internal] ( identifier[self] , identifier[task] ):
literal[string]
identifier[hosts] = identifier[self] . identifier[_list_available_hosts] ()
identifier[self] . identifier[inventory] . identifier[restrict_to] ( identifier[hosts] )
identifier[runner] = identifier[cirruscluster] . identifier[ext] . identifier[ansible] . identifier[runner] . identifier[Runner] (
identifier[pattern] = identifier[task] . identifier[play] . identifier[hosts] , identifier[inventory] = identifier[self] . identifier[inventory] , identifier[module_name] = identifier[task] . identifier[module_name] ,
identifier[module_args] = identifier[task] . identifier[module_args] , identifier[forks] = identifier[self] . identifier[forks] ,
identifier[remote_pass] = identifier[self] . identifier[remote_pass] , identifier[module_path] = identifier[self] . identifier[module_path] ,
identifier[timeout] = identifier[self] . identifier[timeout] , identifier[remote_user] = identifier[task] . identifier[play] . identifier[remote_user] ,
identifier[remote_port] = identifier[task] . identifier[play] . identifier[remote_port] , identifier[module_vars] = identifier[task] . identifier[module_vars] ,
identifier[private_key_file] = identifier[self] . identifier[private_key_file] ,
identifier[private_key] = identifier[self] . identifier[private_key] ,
identifier[setup_cache] = identifier[self] . identifier[SETUP_CACHE] , identifier[basedir] = identifier[task] . identifier[play] . identifier[basedir] ,
identifier[conditional] = identifier[task] . identifier[only_if] , identifier[callbacks] = identifier[self] . identifier[runner_callbacks] ,
identifier[sudo] = identifier[task] . identifier[sudo] , identifier[sudo_user] = identifier[task] . identifier[sudo_user] ,
identifier[transport] = identifier[task] . identifier[transport] , identifier[sudo_pass] = identifier[task] . identifier[sudo_pass] , identifier[is_playbook] = keyword[True]
)
keyword[if] identifier[task] . identifier[async_seconds] == literal[int] :
identifier[results] = identifier[runner] . identifier[run] ()
keyword[else] :
identifier[results] , identifier[poller] = identifier[runner] . identifier[run_async] ( identifier[task] . identifier[async_seconds] )
identifier[self] . identifier[stats] . identifier[compute] ( identifier[results] )
keyword[if] identifier[task] . identifier[async_poll_interval] > literal[int] :
identifier[results] = identifier[self] . identifier[_async_poll] ( identifier[poller] , identifier[task] . identifier[async_seconds] , identifier[task] . identifier[async_poll_interval] )
identifier[contacted] = identifier[results] . identifier[get] ( literal[string] ,{})
identifier[dark] = identifier[results] . identifier[get] ( literal[string] ,{})
identifier[self] . identifier[inventory] . identifier[lift_restriction] ()
keyword[if] identifier[len] ( identifier[contacted] . identifier[keys] ())== literal[int] keyword[and] identifier[len] ( identifier[dark] . identifier[keys] ())== literal[int] :
keyword[return] keyword[None]
keyword[return] identifier[results]
|
def _run_task_internal(self, task):
""" run a particular module step in a playbook """
hosts = self._list_available_hosts()
self.inventory.restrict_to(hosts)
runner = cirruscluster.ext.ansible.runner.Runner(pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name, module_args=task.module_args, forks=self.forks, remote_pass=self.remote_pass, module_path=self.module_path, timeout=self.timeout, remote_user=task.play.remote_user, remote_port=task.play.remote_port, module_vars=task.module_vars, private_key_file=self.private_key_file, private_key=self.private_key, setup_cache=self.SETUP_CACHE, basedir=task.play.basedir, conditional=task.only_if, callbacks=self.runner_callbacks, sudo=task.sudo, sudo_user=task.sudo_user, transport=task.transport, sudo_pass=task.sudo_pass, is_playbook=True)
if task.async_seconds == 0:
results = runner.run() # depends on [control=['if'], data=[]]
else:
(results, poller) = runner.run_async(task.async_seconds)
self.stats.compute(results)
if task.async_poll_interval > 0:
# if not polling, playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval) # depends on [control=['if'], data=[]]
contacted = results.get('contacted', {})
dark = results.get('dark', {})
self.inventory.lift_restriction()
if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
return None # depends on [control=['if'], data=[]]
return results
|
def _mod_defpriv_opts(object_type, defprivileges):
'''
Format options
'''
object_type = object_type.lower()
defprivileges = '' if defprivileges is None else defprivileges
_defprivs = re.split(r'\s?,\s?', defprivileges.upper())
return object_type, defprivileges, _defprivs
|
def function[_mod_defpriv_opts, parameter[object_type, defprivileges]]:
constant[
Format options
]
variable[object_type] assign[=] call[name[object_type].lower, parameter[]]
variable[defprivileges] assign[=] <ast.IfExp object at 0x7da20c7c8bb0>
variable[_defprivs] assign[=] call[name[re].split, parameter[constant[\s?,\s?], call[name[defprivileges].upper, parameter[]]]]
return[tuple[[<ast.Name object at 0x7da18dc06bf0>, <ast.Name object at 0x7da18dc06d40>, <ast.Name object at 0x7da18dc05270>]]]
|
keyword[def] identifier[_mod_defpriv_opts] ( identifier[object_type] , identifier[defprivileges] ):
literal[string]
identifier[object_type] = identifier[object_type] . identifier[lower] ()
identifier[defprivileges] = literal[string] keyword[if] identifier[defprivileges] keyword[is] keyword[None] keyword[else] identifier[defprivileges]
identifier[_defprivs] = identifier[re] . identifier[split] ( literal[string] , identifier[defprivileges] . identifier[upper] ())
keyword[return] identifier[object_type] , identifier[defprivileges] , identifier[_defprivs]
|
def _mod_defpriv_opts(object_type, defprivileges):
"""
Format options
"""
object_type = object_type.lower()
defprivileges = '' if defprivileges is None else defprivileges
_defprivs = re.split('\\s?,\\s?', defprivileges.upper())
return (object_type, defprivileges, _defprivs)
|
def read_datasource(jboss_config, name, profile=None):
'''
Read datasource properties in the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
Profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.read_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.read_datasource, name=%s", name)
return __read_datasource(jboss_config, name, profile)
|
def function[read_datasource, parameter[jboss_config, name, profile]]:
constant[
Read datasource properties in the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
Profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.read_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
]
call[name[log].debug, parameter[constant[======================== MODULE FUNCTION: jboss7.read_datasource, name=%s], name[name]]]
return[call[name[__read_datasource], parameter[name[jboss_config], name[name], name[profile]]]]
|
keyword[def] identifier[read_datasource] ( identifier[jboss_config] , identifier[name] , identifier[profile] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] identifier[__read_datasource] ( identifier[jboss_config] , identifier[name] , identifier[profile] )
|
def read_datasource(jboss_config, name, profile=None):
"""
Read datasource properties in the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
Profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.read_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
log.debug('======================== MODULE FUNCTION: jboss7.read_datasource, name=%s', name)
return __read_datasource(jboss_config, name, profile)
|
def fmt_iso(timestamp):
""" Format a UNIX timestamp to an ISO datetime string.
"""
try:
return fmt.iso_datetime(timestamp)
except (ValueError, TypeError):
return "N/A".rjust(len(fmt.iso_datetime(0)))
|
def function[fmt_iso, parameter[timestamp]]:
constant[ Format a UNIX timestamp to an ISO datetime string.
]
<ast.Try object at 0x7da2044c04c0>
|
keyword[def] identifier[fmt_iso] ( identifier[timestamp] ):
literal[string]
keyword[try] :
keyword[return] identifier[fmt] . identifier[iso_datetime] ( identifier[timestamp] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[return] literal[string] . identifier[rjust] ( identifier[len] ( identifier[fmt] . identifier[iso_datetime] ( literal[int] )))
|
def fmt_iso(timestamp):
""" Format a UNIX timestamp to an ISO datetime string.
"""
try:
return fmt.iso_datetime(timestamp) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
return 'N/A'.rjust(len(fmt.iso_datetime(0))) # depends on [control=['except'], data=[]]
|
def ionic_strength(mis, zis):
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)])
|
def function[ionic_strength, parameter[mis, zis]]:
constant[Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
]
return[binary_operation[constant[0.5] * call[name[sum], parameter[<ast.ListComp object at 0x7da1b2345600>]]]]
|
keyword[def] identifier[ionic_strength] ( identifier[mis] , identifier[zis] ):
literal[string]
keyword[return] literal[int] * identifier[sum] ([ identifier[mi] * identifier[zi] * identifier[zi] keyword[for] identifier[mi] , identifier[zi] keyword[in] identifier[zip] ( identifier[mis] , identifier[zis] )])
|
def ionic_strength(mis, zis):
"""Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \\frac{1}{2} \\sum M_i z_i^2
I = \\frac{1}{2} \\sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
"""
return 0.5 * sum([mi * zi * zi for (mi, zi) in zip(mis, zis)])
|
def sort_labeled_intervals(intervals, labels=None):
'''Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
'''
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted
else:
return intervals_sorted, [labels[_] for _ in idx]
|
def function[sort_labeled_intervals, parameter[intervals, labels]]:
constant[Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
]
variable[idx] assign[=] call[name[np].argsort, parameter[call[name[intervals]][tuple[[<ast.Slice object at 0x7da18bc71ab0>, <ast.Constant object at 0x7da18bc731f0>]]]]]
variable[intervals_sorted] assign[=] call[name[intervals]][name[idx]]
if compare[name[labels] is constant[None]] begin[:]
return[name[intervals_sorted]]
|
keyword[def] identifier[sort_labeled_intervals] ( identifier[intervals] , identifier[labels] = keyword[None] ):
literal[string]
identifier[idx] = identifier[np] . identifier[argsort] ( identifier[intervals] [:, literal[int] ])
identifier[intervals_sorted] = identifier[intervals] [ identifier[idx] ]
keyword[if] identifier[labels] keyword[is] keyword[None] :
keyword[return] identifier[intervals_sorted]
keyword[else] :
keyword[return] identifier[intervals_sorted] ,[ identifier[labels] [ identifier[_] ] keyword[for] identifier[_] keyword[in] identifier[idx] ]
|
def sort_labeled_intervals(intervals, labels=None):
"""Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
"""
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted # depends on [control=['if'], data=[]]
else:
return (intervals_sorted, [labels[_] for _ in idx])
|
def read(self, symbol, as_of=None, raw=False, **kwargs):
# TODO: shall we block from_version from getting into super.read?
"""Read data for the named symbol. Returns a BitemporalItem object with
a data and metdata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `datetime.datetime`
Return the data as it was as_of the point in time.
raw : `bool`
If True, will return the full bitemporal dataframe (i.e. all versions of the data). This also means as_of is
ignored.
Returns
-------
BitemporalItem namedtuple which contains a .data and .metadata element
"""
item = self._store.read(symbol, **kwargs)
last_updated = max(item.data.index.get_level_values(self.observe_column))
if raw:
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=item.data,
metadata=item.metadata,
last_updated=last_updated)
else:
index_names = list(item.data.index.names)
index_names.remove(self.observe_column)
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(),
data=groupby_asof(item.data, as_of=as_of, dt_col=index_names,
asof_col=self.observe_column),
metadata=item.metadata, last_updated=last_updated)
|
def function[read, parameter[self, symbol, as_of, raw]]:
constant[Read data for the named symbol. Returns a BitemporalItem object with
a data and metdata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `datetime.datetime`
Return the data as it was as_of the point in time.
raw : `bool`
If True, will return the full bitemporal dataframe (i.e. all versions of the data). This also means as_of is
ignored.
Returns
-------
BitemporalItem namedtuple which contains a .data and .metadata element
]
variable[item] assign[=] call[name[self]._store.read, parameter[name[symbol]]]
variable[last_updated] assign[=] call[name[max], parameter[call[name[item].data.index.get_level_values, parameter[name[self].observe_column]]]]
if name[raw] begin[:]
return[call[name[BitemporalItem], parameter[]]]
|
keyword[def] identifier[read] ( identifier[self] , identifier[symbol] , identifier[as_of] = keyword[None] , identifier[raw] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[item] = identifier[self] . identifier[_store] . identifier[read] ( identifier[symbol] ,** identifier[kwargs] )
identifier[last_updated] = identifier[max] ( identifier[item] . identifier[data] . identifier[index] . identifier[get_level_values] ( identifier[self] . identifier[observe_column] ))
keyword[if] identifier[raw] :
keyword[return] identifier[BitemporalItem] ( identifier[symbol] = identifier[symbol] , identifier[library] = identifier[self] . identifier[_store] . identifier[_arctic_lib] . identifier[get_name] (), identifier[data] = identifier[item] . identifier[data] ,
identifier[metadata] = identifier[item] . identifier[metadata] ,
identifier[last_updated] = identifier[last_updated] )
keyword[else] :
identifier[index_names] = identifier[list] ( identifier[item] . identifier[data] . identifier[index] . identifier[names] )
identifier[index_names] . identifier[remove] ( identifier[self] . identifier[observe_column] )
keyword[return] identifier[BitemporalItem] ( identifier[symbol] = identifier[symbol] , identifier[library] = identifier[self] . identifier[_store] . identifier[_arctic_lib] . identifier[get_name] (),
identifier[data] = identifier[groupby_asof] ( identifier[item] . identifier[data] , identifier[as_of] = identifier[as_of] , identifier[dt_col] = identifier[index_names] ,
identifier[asof_col] = identifier[self] . identifier[observe_column] ),
identifier[metadata] = identifier[item] . identifier[metadata] , identifier[last_updated] = identifier[last_updated] )
|
def read(self, symbol, as_of=None, raw=False, **kwargs):
# TODO: shall we block from_version from getting into super.read?
'Read data for the named symbol. Returns a BitemporalItem object with\n a data and metdata element (as passed into write).\n\n Parameters\n ----------\n symbol : `str`\n symbol name for the item\n as_of : `datetime.datetime`\n Return the data as it was as_of the point in time.\n raw : `bool`\n If True, will return the full bitemporal dataframe (i.e. all versions of the data). This also means as_of is\n ignored.\n\n Returns\n -------\n BitemporalItem namedtuple which contains a .data and .metadata element\n '
item = self._store.read(symbol, **kwargs)
last_updated = max(item.data.index.get_level_values(self.observe_column))
if raw:
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=item.data, metadata=item.metadata, last_updated=last_updated) # depends on [control=['if'], data=[]]
else:
index_names = list(item.data.index.names)
index_names.remove(self.observe_column)
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=groupby_asof(item.data, as_of=as_of, dt_col=index_names, asof_col=self.observe_column), metadata=item.metadata, last_updated=last_updated)
|
def remove_dirs(self, directory):
"""Delete a directory recursively.
:param directory: $PATH to directory.
:type directory: ``str``
"""
LOG.info('Removing directory [ %s ]', directory)
local_files = self._drectory_local_files(directory=directory)
for file_name in local_files:
try:
os.remove(file_name['local_object'])
except OSError as exp:
LOG.error(str(exp))
# Build a list of all local directories
directories = sorted(
[i for i, _, _ in os.walk(directory)],
reverse=True
)
# Remove directories
for directory_path in directories:
try:
os.removedirs(directory_path)
except OSError as exp:
if exp.errno != 2:
LOG.error(str(exp))
pass
|
def function[remove_dirs, parameter[self, directory]]:
constant[Delete a directory recursively.
:param directory: $PATH to directory.
:type directory: ``str``
]
call[name[LOG].info, parameter[constant[Removing directory [ %s ]], name[directory]]]
variable[local_files] assign[=] call[name[self]._drectory_local_files, parameter[]]
for taget[name[file_name]] in starred[name[local_files]] begin[:]
<ast.Try object at 0x7da1b2847070>
variable[directories] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b2847d90>]]
for taget[name[directory_path]] in starred[name[directories]] begin[:]
<ast.Try object at 0x7da1b2844580>
|
keyword[def] identifier[remove_dirs] ( identifier[self] , identifier[directory] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] , identifier[directory] )
identifier[local_files] = identifier[self] . identifier[_drectory_local_files] ( identifier[directory] = identifier[directory] )
keyword[for] identifier[file_name] keyword[in] identifier[local_files] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[file_name] [ literal[string] ])
keyword[except] identifier[OSError] keyword[as] identifier[exp] :
identifier[LOG] . identifier[error] ( identifier[str] ( identifier[exp] ))
identifier[directories] = identifier[sorted] (
[ identifier[i] keyword[for] identifier[i] , identifier[_] , identifier[_] keyword[in] identifier[os] . identifier[walk] ( identifier[directory] )],
identifier[reverse] = keyword[True]
)
keyword[for] identifier[directory_path] keyword[in] identifier[directories] :
keyword[try] :
identifier[os] . identifier[removedirs] ( identifier[directory_path] )
keyword[except] identifier[OSError] keyword[as] identifier[exp] :
keyword[if] identifier[exp] . identifier[errno] != literal[int] :
identifier[LOG] . identifier[error] ( identifier[str] ( identifier[exp] ))
keyword[pass]
|
def remove_dirs(self, directory):
"""Delete a directory recursively.
:param directory: $PATH to directory.
:type directory: ``str``
"""
LOG.info('Removing directory [ %s ]', directory)
local_files = self._drectory_local_files(directory=directory)
for file_name in local_files:
try:
os.remove(file_name['local_object']) # depends on [control=['try'], data=[]]
except OSError as exp:
LOG.error(str(exp)) # depends on [control=['except'], data=['exp']] # depends on [control=['for'], data=['file_name']]
# Build a list of all local directories
directories = sorted([i for (i, _, _) in os.walk(directory)], reverse=True)
# Remove directories
for directory_path in directories:
try:
os.removedirs(directory_path) # depends on [control=['try'], data=[]]
except OSError as exp:
if exp.errno != 2:
LOG.error(str(exp)) # depends on [control=['if'], data=[]]
pass # depends on [control=['except'], data=['exp']] # depends on [control=['for'], data=['directory_path']]
|
def get_rules():
    """Returns all enabled rules.
    :rtype: [Rule]
    """
    # Collect every *.py rule file, per import path, in sorted order.
    paths = []
    for import_path in get_rules_import_paths():
        paths.extend(sorted(import_path.glob('*.py')))
    # Load and order the rules by their declared priority.
    return sorted(get_loaded_rules(paths), key=lambda rule: rule.priority)
|
def function[get_rules, parameter[]]:
constant[Returns all enabled rules.
:rtype: [Rule]
]
variable[paths] assign[=] <ast.ListComp object at 0x7da20cabe590>
return[call[name[sorted], parameter[call[name[get_loaded_rules], parameter[name[paths]]]]]]
|
keyword[def] identifier[get_rules] ():
literal[string]
identifier[paths] =[ identifier[rule_path] keyword[for] identifier[path] keyword[in] identifier[get_rules_import_paths] ()
keyword[for] identifier[rule_path] keyword[in] identifier[sorted] ( identifier[path] . identifier[glob] ( literal[string] ))]
keyword[return] identifier[sorted] ( identifier[get_loaded_rules] ( identifier[paths] ),
identifier[key] = keyword[lambda] identifier[rule] : identifier[rule] . identifier[priority] )
|
def get_rules():
"""Returns all enabled rules.
:rtype: [Rule]
"""
paths = [rule_path for path in get_rules_import_paths() for rule_path in sorted(path.glob('*.py'))]
return sorted(get_loaded_rules(paths), key=lambda rule: rule.priority)
|
def read_flags_from_files(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.
    Args:
      argv: [str], a list of strings, usually sys.argv[1:], which may contain
          one or more flagfile directives of the form --flagfile="./filename".
          Note that the name of the program (sys.argv[0]) should be omitted.
      force_gnu: bool, if False, --flagfile parsing obeys the
          FLAGS.is_gnu_getopt() value. If True, ignore the value and always
          follow gnu_getopt semantics.
    Returns:
      A new list which has the original list combined with what we read
      from any flagfile(s).
    Raises:
      IllegalFlagValueError: Raised when --flagfile is provided with no
          argument.
    This function is called by FLAGS(argv).
    It scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list in exactly the
    place where the --flagfile arg is found.
    Note that your application's flags are still defined the usual way
    using absl.flags DEFINE_flag() type functions.
    Notes (assuming we're getting a commandline of some sort as our input):
    --> For duplicate flags, the last one we hit should "win".
    --> Since flags that appear later win, a flagfile's settings can be "weak"
        if the --flagfile comes at the beginning of the argument sequence,
        and it can be "strong" if the --flagfile comes at the end.
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be expanded in exactly the spot where it is found.
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    # Consume argv one token at a time; expanded flagfile contents are
    # appended to new_argv in place of the directive itself.
    rest_of_args = argv
    new_argv = []
    while rest_of_args:
        current_arg = rest_of_args[0]
        rest_of_args = rest_of_args[1:]
        if self._is_flag_file_directive(current_arg):
            # This handles the case of -(-)flagfile foo. In this case the
            # next arg really is part of this one.
            if current_arg == '--flagfile' or current_arg == '-flagfile':
                if not rest_of_args:
                    raise _exceptions.IllegalFlagValueError(
                        '--flagfile with no argument')
                flag_filename = os.path.expanduser(rest_of_args[0])
                rest_of_args = rest_of_args[1:]
            else:
                # This handles the case of (-)-flagfile=foo.
                flag_filename = self._extract_filename(current_arg)
            # Splice the flagfile's lines in exactly at the directive's spot;
            # nested --flagfile directives inside those lines are NOT expanded
            # here (see _get_flag_file_lines for nesting behavior).
            new_argv.extend(self._get_flag_file_lines(flag_filename))
        else:
            new_argv.append(current_arg)
            # Stop parsing after '--', like getopt and gnu_getopt.
            if current_arg == '--':
                break
            # Stop parsing after a non-flag, like getopt.
            if not current_arg.startswith('-'):
                if not force_gnu and not self.__dict__['__use_gnu_getopt']:
                    break
            else:
                if ('=' not in current_arg and
                    rest_of_args and not rest_of_args[0].startswith('-')):
                    # If this is an occurrence of a legitimate --x y, skip the value
                    # so that it won't be mistaken for a standalone arg.
                    fl = self._flags()
                    name = current_arg.lstrip('-')
                    if name in fl and not fl[name].boolean:
                        current_arg = rest_of_args[0]
                        rest_of_args = rest_of_args[1:]
                        new_argv.append(current_arg)
    # Anything left over after an early `break` is passed through untouched.
    if rest_of_args:
        new_argv.extend(rest_of_args)
    return new_argv
|
def function[read_flags_from_files, parameter[self, argv, force_gnu]]:
constant[Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
]
variable[rest_of_args] assign[=] name[argv]
variable[new_argv] assign[=] list[[]]
while name[rest_of_args] begin[:]
variable[current_arg] assign[=] call[name[rest_of_args]][constant[0]]
variable[rest_of_args] assign[=] call[name[rest_of_args]][<ast.Slice object at 0x7da1b18a0f70>]
if call[name[self]._is_flag_file_directive, parameter[name[current_arg]]] begin[:]
if <ast.BoolOp object at 0x7da1b18a32e0> begin[:]
if <ast.UnaryOp object at 0x7da1b18a0910> begin[:]
<ast.Raise object at 0x7da1b18a35e0>
variable[flag_filename] assign[=] call[name[os].path.expanduser, parameter[call[name[rest_of_args]][constant[0]]]]
variable[rest_of_args] assign[=] call[name[rest_of_args]][<ast.Slice object at 0x7da1b18a28f0>]
call[name[new_argv].extend, parameter[call[name[self]._get_flag_file_lines, parameter[name[flag_filename]]]]]
if name[rest_of_args] begin[:]
call[name[new_argv].extend, parameter[name[rest_of_args]]]
return[name[new_argv]]
|
keyword[def] identifier[read_flags_from_files] ( identifier[self] , identifier[argv] , identifier[force_gnu] = keyword[True] ):
literal[string]
identifier[rest_of_args] = identifier[argv]
identifier[new_argv] =[]
keyword[while] identifier[rest_of_args] :
identifier[current_arg] = identifier[rest_of_args] [ literal[int] ]
identifier[rest_of_args] = identifier[rest_of_args] [ literal[int] :]
keyword[if] identifier[self] . identifier[_is_flag_file_directive] ( identifier[current_arg] ):
keyword[if] identifier[current_arg] == literal[string] keyword[or] identifier[current_arg] == literal[string] :
keyword[if] keyword[not] identifier[rest_of_args] :
keyword[raise] identifier[_exceptions] . identifier[IllegalFlagValueError] (
literal[string] )
identifier[flag_filename] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[rest_of_args] [ literal[int] ])
identifier[rest_of_args] = identifier[rest_of_args] [ literal[int] :]
keyword[else] :
identifier[flag_filename] = identifier[self] . identifier[_extract_filename] ( identifier[current_arg] )
identifier[new_argv] . identifier[extend] ( identifier[self] . identifier[_get_flag_file_lines] ( identifier[flag_filename] ))
keyword[else] :
identifier[new_argv] . identifier[append] ( identifier[current_arg] )
keyword[if] identifier[current_arg] == literal[string] :
keyword[break]
keyword[if] keyword[not] identifier[current_arg] . identifier[startswith] ( literal[string] ):
keyword[if] keyword[not] identifier[force_gnu] keyword[and] keyword[not] identifier[self] . identifier[__dict__] [ literal[string] ]:
keyword[break]
keyword[else] :
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[current_arg] keyword[and]
identifier[rest_of_args] keyword[and] keyword[not] identifier[rest_of_args] [ literal[int] ]. identifier[startswith] ( literal[string] )):
identifier[fl] = identifier[self] . identifier[_flags] ()
identifier[name] = identifier[current_arg] . identifier[lstrip] ( literal[string] )
keyword[if] identifier[name] keyword[in] identifier[fl] keyword[and] keyword[not] identifier[fl] [ identifier[name] ]. identifier[boolean] :
identifier[current_arg] = identifier[rest_of_args] [ literal[int] ]
identifier[rest_of_args] = identifier[rest_of_args] [ literal[int] :]
identifier[new_argv] . identifier[append] ( identifier[current_arg] )
keyword[if] identifier[rest_of_args] :
identifier[new_argv] . identifier[extend] ( identifier[rest_of_args] )
keyword[return] identifier[new_argv]
|
def read_flags_from_files(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self._is_flag_file_directive(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise _exceptions.IllegalFlagValueError('--flagfile with no argument') # depends on [control=['if'], data=[]]
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:] # depends on [control=['if'], data=[]]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self._extract_filename(current_arg)
new_argv.extend(self._get_flag_file_lines(flag_filename)) # depends on [control=['if'], data=[]]
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break # depends on [control=['if'], data=[]]
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and (not self.__dict__['__use_gnu_getopt']):
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif '=' not in current_arg and rest_of_args and (not rest_of_args[0].startswith('-')):
# If this is an occurrence of a legitimate --x y, skip the value
# so that it won't be mistaken for a standalone arg.
fl = self._flags()
name = current_arg.lstrip('-')
if name in fl and (not fl[name].boolean):
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
new_argv.append(current_arg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if rest_of_args:
new_argv.extend(rest_of_args) # depends on [control=['if'], data=[]]
return new_argv
|
def intersectingIntervalIterator(self, start, end):
    """
    Get an iterator which will iterate over those objects in the tree which
    intersect the given interval - sorted in order of start index
    :param start: find intervals in the tree that intersect an interval with
                  with this start index (inclusive)
    :param end: find intervals in the tree that intersect an interval with
                with this end index (exclusive)
    :return: an iterator that will yield intersected intervals
    """
    # Collect the hits, order them by their start index, then yield lazily.
    hits = self.intersectingInterval(start, end)
    for hit in sorted(hits, key=lambda interval: interval.start):
        yield hit
|
def function[intersectingIntervalIterator, parameter[self, start, end]]:
constant[
Get an iterator which will iterate over those objects in the tree which
intersect the given interval - sorted in order of start index
:param start: find intervals in the tree that intersect an interval with
with this start index (inclusive)
:param end: find intervals in the tree that intersect an interval with
with this end index (exclusive)
:return: an iterator that will yield intersected intervals
]
variable[items] assign[=] call[name[self].intersectingInterval, parameter[name[start], name[end]]]
call[name[items].sort, parameter[]]
for taget[name[item]] in starred[name[items]] begin[:]
<ast.Yield object at 0x7da2054a5840>
|
keyword[def] identifier[intersectingIntervalIterator] ( identifier[self] , identifier[start] , identifier[end] ):
literal[string]
identifier[items] = identifier[self] . identifier[intersectingInterval] ( identifier[start] , identifier[end] )
identifier[items] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[start] )
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[yield] identifier[item]
|
def intersectingIntervalIterator(self, start, end):
"""
Get an iterator which will iterate over those objects in the tree which
intersect the given interval - sorted in order of start index
:param start: find intervals in the tree that intersect an interval with
with this start index (inclusive)
:param end: find intervals in the tree that intersect an interval with
with this end index (exclusive)
:return: an iterator that will yield intersected intervals
"""
items = self.intersectingInterval(start, end)
items.sort(key=lambda x: x.start)
for item in items:
yield item # depends on [control=['for'], data=['item']]
|
def get_pay_giftcard(self, rule_id):
    """Query a rule for coupons delivered after payment.

    For details see
    https://mp.weixin.qq.com/wiki?id=mp1466494654_K9rNz

    :param rule_id: ID of the pay-to-join-membership rule
    :return: the post-payment coupon delivery rule
    :rtype: dict
    """
    payload = {'rule_id': rule_id}
    # The API wraps the rule in a 'rule_info' envelope; unwrap it here.
    return self._post(
        'card/paygiftcard/getbyid',
        data=payload,
        result_processor=lambda resp: resp['rule_info'],
    )
|
def function[get_pay_giftcard, parameter[self, rule_id]]:
constant[
查询支付后投放卡券的规则
详情请参见
https://mp.weixin.qq.com/wiki?id=mp1466494654_K9rNz
:param rule_id: 支付即会员的规则 ID
:return: 支付后投放卡券的规则
:rtype: dict
]
return[call[name[self]._post, parameter[constant[card/paygiftcard/getbyid]]]]
|
keyword[def] identifier[get_pay_giftcard] ( identifier[self] , identifier[rule_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_post] (
literal[string] ,
identifier[data] ={
literal[string] : identifier[rule_id] ,
},
identifier[result_processor] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ],
)
|
def get_pay_giftcard(self, rule_id):
"""
查询支付后投放卡券的规则
详情请参见
https://mp.weixin.qq.com/wiki?id=mp1466494654_K9rNz
:param rule_id: 支付即会员的规则 ID
:return: 支付后投放卡券的规则
:rtype: dict
"""
return self._post('card/paygiftcard/getbyid', data={'rule_id': rule_id}, result_processor=lambda x: x['rule_info'])
|
def buffer_to_audio(buffer: bytes) -> np.ndarray:
    """Convert a raw mono audio byte string to a numpy array of floats.

    The bytes are interpreted as little-endian signed 16-bit PCM samples
    and scaled into the range [-1.0, 1.0).

    :param buffer: raw audio bytes (length must be a multiple of 2)
    :return: float32 array of normalized samples
    """
    # np.fromstring is deprecated (and removed for binary input in modern
    # numpy); np.frombuffer performs the same reinterpretation of the bytes.
    samples = np.frombuffer(buffer, dtype='<i2')
    return samples.astype(np.float32, order='C') / 32768.0
|
def function[buffer_to_audio, parameter[buffer]]:
constant[Convert a raw mono audio byte string to numpy array of floats]
return[binary_operation[call[call[name[np].fromstring, parameter[name[buffer]]].astype, parameter[name[np].float32]] / constant[32768.0]]]
|
keyword[def] identifier[buffer_to_audio] ( identifier[buffer] : identifier[bytes] )-> identifier[np] . identifier[ndarray] :
literal[string]
keyword[return] identifier[np] . identifier[fromstring] ( identifier[buffer] , identifier[dtype] = literal[string] ). identifier[astype] ( identifier[np] . identifier[float32] , identifier[order] = literal[string] )/ literal[int]
|
def buffer_to_audio(buffer: bytes) -> np.ndarray:
"""Convert a raw mono audio byte string to numpy array of floats"""
return np.fromstring(buffer, dtype='<i2').astype(np.float32, order='C') / 32768.0
|
def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
                           truth_level=1, unknown_thresh=-1):
    """Predict the category of each data column using a signature matrix.

    Each column (cell) of ``df_data_ini`` is compared against every column
    (cell-type signature) of ``df_sig_ini`` over their shared rows; the most
    similar signature becomes the predicted category, which is appended to
    the column tuple of the returned dataframe.

    :param df_data_ini: dataframe of data to categorize (rows x cells)
    :param df_sig_ini: dataframe of signatures (rows x cell types)
    :param dist_type: distance metric passed to pairwise_distances
    :param predict_level: prefix used for the predicted-category label
    :param truth_level: position in the column tuple holding the true label
    :param unknown_thresh: cells whose best similarity is below this get
        labeled 'Unknown' (requires a category of the same name)
    :return: (df_cat with predictions appended to columns,
              cell-type x cell similarity dataframe,
              y_info dict with 'true' and 'pred' label lists)
    """
    # Restrict both frames to the rows they have in common.
    keep_rows = df_sig_ini.index.tolist()
    data_rows = df_data_ini.index.tolist()
    common_rows = list(set(data_rows).intersection(keep_rows))
    # .ix was deprecated and removed from pandas; .loc is the label-based
    # equivalent for selecting these rows.
    df_data = deepcopy(df_data_ini.loc[common_rows])
    df_sig = deepcopy(df_sig_ini.loc[common_rows])
    # Similarity = 1 - distance between every (signature, cell) pair.
    cell_types = df_sig.columns.tolist()
    barcodes = df_data.columns.tolist()
    sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
    df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
    # Most similar signature per cell.
    df_sim_top = df_sim.idxmax(axis=1)
    # Maximum similarity of a cell to any cell-type definition.
    max_sim = df_sim.max(axis=1)
    unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
    # Assign unknown cells (need category of same name).
    df_sim_top[unknown_cells] = 'Unknown'
    # Series.get_values() was removed from pandas; to_numpy() replaces it.
    top_list = df_sim_top.to_numpy()
    top_list = [predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
    # Append the predicted category to each column of the input data.
    df_cat = deepcopy(df_data)
    cols = df_cat.columns.tolist()
    # Tuple columns indicate the true category is already encoded in them.
    has_truth = type(cols[0]) is tuple
    if has_truth:
        new_cols = [tuple(list(a) + [b]) for a, b in zip(cols, top_list)]
    else:
        new_cols = [tuple([a] + [b]) for a, b in zip(cols, top_list)]
    df_cat.columns = new_cols
    # Keep track of true and predicted labels (empty when no truth exists).
    y_info = {'true': [], 'pred': []}
    if has_truth:
        y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
        y_info['pred'] = [x.split(': ')[1] for x in top_list]
    return df_cat, df_sim.transpose(), y_info
|
def function[predict_cats_from_sigs, parameter[self, df_data_ini, df_sig_ini, dist_type, predict_level, truth_level, unknown_thresh]]:
constant[ Predict category using signature ]
variable[keep_rows] assign[=] call[name[df_sig_ini].index.tolist, parameter[]]
variable[data_rows] assign[=] call[name[df_data_ini].index.tolist, parameter[]]
variable[common_rows] assign[=] call[name[list], parameter[call[call[name[set], parameter[name[data_rows]]].intersection, parameter[name[keep_rows]]]]]
variable[df_data] assign[=] call[name[deepcopy], parameter[call[name[df_data_ini].ix][name[common_rows]]]]
variable[df_sig] assign[=] call[name[deepcopy], parameter[call[name[df_sig_ini].ix][name[common_rows]]]]
variable[cell_types] assign[=] call[name[df_sig].columns.tolist, parameter[]]
variable[barcodes] assign[=] call[name[df_data].columns.tolist, parameter[]]
variable[sim_mat] assign[=] binary_operation[constant[1] - call[name[pairwise_distances], parameter[call[name[df_sig].transpose, parameter[]], call[name[df_data].transpose, parameter[]]]]]
variable[df_sim] assign[=] call[call[name[pd].DataFrame, parameter[]].transpose, parameter[]]
variable[df_sim_top] assign[=] call[name[df_sim].idxmax, parameter[]]
variable[max_sim] assign[=] call[name[df_sim].max, parameter[]]
variable[unknown_cells] assign[=] call[call[name[max_sim]][compare[name[max_sim] less[<] name[unknown_thresh]]].index.tolist, parameter[]]
call[name[df_sim_top]][name[unknown_cells]] assign[=] constant[Unknown]
variable[top_list] assign[=] call[name[df_sim_top].get_values, parameter[]]
variable[top_list] assign[=] <ast.ListComp object at 0x7da1b0537460>
variable[df_cat] assign[=] call[name[deepcopy], parameter[name[df_data]]]
variable[cols] assign[=] call[name[df_cat].columns.tolist, parameter[]]
variable[new_cols] assign[=] list[[]]
variable[has_truth] assign[=] constant[False]
if compare[call[name[type], parameter[call[name[cols]][constant[0]]]] is name[tuple]] begin[:]
variable[has_truth] assign[=] constant[True]
if name[has_truth] begin[:]
variable[new_cols] assign[=] <ast.ListComp object at 0x7da1b0536a10>
name[df_cat].columns assign[=] name[new_cols]
variable[y_info] assign[=] dictionary[[], []]
call[name[y_info]][constant[true]] assign[=] list[[]]
call[name[y_info]][constant[pred]] assign[=] list[[]]
if name[has_truth] begin[:]
call[name[y_info]][constant[true]] assign[=] <ast.ListComp object at 0x7da1b0535f00>
call[name[y_info]][constant[pred]] assign[=] <ast.ListComp object at 0x7da1b05393c0>
return[tuple[[<ast.Name object at 0x7da1b0538340>, <ast.Call object at 0x7da1b0539000>, <ast.Name object at 0x7da1b0538dc0>]]]
|
keyword[def] identifier[predict_cats_from_sigs] ( identifier[self] , identifier[df_data_ini] , identifier[df_sig_ini] , identifier[dist_type] = literal[string] , identifier[predict_level] = literal[string] ,
identifier[truth_level] = literal[int] , identifier[unknown_thresh] =- literal[int] ):
literal[string]
identifier[keep_rows] = identifier[df_sig_ini] . identifier[index] . identifier[tolist] ()
identifier[data_rows] = identifier[df_data_ini] . identifier[index] . identifier[tolist] ()
identifier[common_rows] = identifier[list] ( identifier[set] ( identifier[data_rows] ). identifier[intersection] ( identifier[keep_rows] ))
identifier[df_data] = identifier[deepcopy] ( identifier[df_data_ini] . identifier[ix] [ identifier[common_rows] ])
identifier[df_sig] = identifier[deepcopy] ( identifier[df_sig_ini] . identifier[ix] [ identifier[common_rows] ])
identifier[cell_types] = identifier[df_sig] . identifier[columns] . identifier[tolist] ()
identifier[barcodes] = identifier[df_data] . identifier[columns] . identifier[tolist] ()
identifier[sim_mat] = literal[int] - identifier[pairwise_distances] ( identifier[df_sig] . identifier[transpose] (), identifier[df_data] . identifier[transpose] (), identifier[metric] = identifier[dist_type] )
identifier[df_sim] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[sim_mat] , identifier[index] = identifier[cell_types] , identifier[columns] = identifier[barcodes] ). identifier[transpose] ()
identifier[df_sim_top] = identifier[df_sim] . identifier[idxmax] ( identifier[axis] = literal[int] )
identifier[max_sim] = identifier[df_sim] . identifier[max] ( identifier[axis] = literal[int] )
identifier[unknown_cells] = identifier[max_sim] [ identifier[max_sim] < identifier[unknown_thresh] ]. identifier[index] . identifier[tolist] ()
identifier[df_sim_top] [ identifier[unknown_cells] ]= literal[string]
identifier[top_list] = identifier[df_sim_top] . identifier[get_values] ()
identifier[top_list] =[ identifier[predict_level] + literal[string] + identifier[x] [ literal[int] ] keyword[if] identifier[type] ( identifier[x] ) keyword[is] identifier[tuple] keyword[else] identifier[predict_level] + literal[string] + identifier[x] keyword[for] identifier[x] keyword[in] identifier[top_list] ]
identifier[df_cat] = identifier[deepcopy] ( identifier[df_data] )
identifier[cols] = identifier[df_cat] . identifier[columns] . identifier[tolist] ()
identifier[new_cols] =[]
identifier[has_truth] = keyword[False]
keyword[if] identifier[type] ( identifier[cols] [ literal[int] ]) keyword[is] identifier[tuple] :
identifier[has_truth] = keyword[True]
keyword[if] identifier[has_truth] :
identifier[new_cols] =[ identifier[tuple] ( identifier[list] ( identifier[a] )+[ identifier[b] ]) keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[cols] , identifier[top_list] )]
keyword[else] :
identifier[new_cols] =[ identifier[tuple] ([ identifier[a] ]+[ identifier[b] ]) keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[cols] , identifier[top_list] )]
identifier[df_cat] . identifier[columns] = identifier[new_cols]
identifier[y_info] ={}
identifier[y_info] [ literal[string] ]=[]
identifier[y_info] [ literal[string] ]=[]
keyword[if] identifier[has_truth] :
identifier[y_info] [ literal[string] ]=[ identifier[x] [ identifier[truth_level] ]. identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[cols] ]
identifier[y_info] [ literal[string] ]=[ identifier[x] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[top_list] ]
keyword[return] identifier[df_cat] , identifier[df_sim] . identifier[transpose] (), identifier[y_info]
|
def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category', truth_level=1, unknown_thresh=-1):
""" Predict category using signature """
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.ix[common_rows])
df_sig = deepcopy(df_sig_ini.ix[common_rows])
# calculate sim_mat of df_data and df_sig
cell_types = df_sig.columns.tolist()
barcodes = df_data.columns.tolist()
sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
# get the top column value (most similar signature)
df_sim_top = df_sim.idxmax(axis=1)
# get the maximum similarity of a cell to a cell type definition
max_sim = df_sim.max(axis=1)
unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
# assign unknown cells (need category of same name)
df_sim_top[unknown_cells] = 'Unknown'
# add predicted category name to top list
top_list = df_sim_top.get_values()
top_list = [predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
# add cell type category to input data
df_cat = deepcopy(df_data)
cols = df_cat.columns.tolist()
new_cols = []
# check whether the columns have the true category available
has_truth = False
if type(cols[0]) is tuple:
has_truth = True # depends on [control=['if'], data=[]]
if has_truth:
new_cols = [tuple(list(a) + [b]) for (a, b) in zip(cols, top_list)] # depends on [control=['if'], data=[]]
else:
new_cols = [tuple([a] + [b]) for (a, b) in zip(cols, top_list)]
# transfer new categories
df_cat.columns = new_cols
# keep track of true and predicted labels
y_info = {}
y_info['true'] = []
y_info['pred'] = []
if has_truth:
y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
y_info['pred'] = [x.split(': ')[1] for x in top_list] # depends on [control=['if'], data=[]]
return (df_cat, df_sim.transpose(), y_info)
|
def rels(self):
    """Return a LIST of all the metadata relations.

    Concatenates the REL entry of every item in ``self.metadata``.
    """
    r = []
    for i in self.metadata:
        r = r + i[REL]
    # BUG FIX: the accumulated list was previously discarded (`return []`),
    # so this method always returned an empty list despite its docstring.
    return r
|
def function[rels, parameter[self]]:
constant[Returns a LIST of all the metadata relations]
variable[r] assign[=] list[[]]
for taget[name[i]] in starred[name[self].metadata] begin[:]
variable[r] assign[=] binary_operation[name[r] + call[name[i]][name[REL]]]
return[list[[]]]
|
keyword[def] identifier[rels] ( identifier[self] ):
literal[string]
identifier[r] =[]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[metadata] :
identifier[r] = identifier[r] + identifier[i] [ identifier[REL] ]
keyword[return] []
|
def rels(self):
"""Returns a LIST of all the metadata relations"""
r = []
for i in self.metadata:
r = r + i[REL] # depends on [control=['for'], data=['i']]
return []
|
def view_fullreport(token, dstore):
    """
    Display an .rst report about the computation
    """
    # Imported inside the function to avoid circular imports.
    from openquake.calculators.reportwriter import ReportWriter
    writer = ReportWriter(dstore)
    return writer.make_report()
|
def function[view_fullreport, parameter[token, dstore]]:
constant[
Display an .rst report about the computation
]
from relative_module[openquake.calculators.reportwriter] import module[ReportWriter]
return[call[call[name[ReportWriter], parameter[name[dstore]]].make_report, parameter[]]]
|
keyword[def] identifier[view_fullreport] ( identifier[token] , identifier[dstore] ):
literal[string]
keyword[from] identifier[openquake] . identifier[calculators] . identifier[reportwriter] keyword[import] identifier[ReportWriter]
keyword[return] identifier[ReportWriter] ( identifier[dstore] ). identifier[make_report] ()
|
def view_fullreport(token, dstore):
"""
Display an .rst report about the computation
"""
# avoid circular imports
from openquake.calculators.reportwriter import ReportWriter
return ReportWriter(dstore).make_report()
|
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confimation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
def function[delete, parameter[self, key_id]]:
constant[ Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confimation and NO undo.
]
variable[url] assign[=] call[name[self].bitbucket.url, parameter[constant[DELETE_SSH_KEY]]]
return[call[name[self].bitbucket.dispatch, parameter[constant[DELETE], name[url]]]]
|
keyword[def] identifier[delete] ( identifier[self] , identifier[key_id] = keyword[None] ):
literal[string]
identifier[url] = identifier[self] . identifier[bitbucket] . identifier[url] ( literal[string] , identifier[key_id] = identifier[key_id] )
keyword[return] identifier[self] . identifier[bitbucket] . identifier[dispatch] ( literal[string] , identifier[url] , identifier[auth] = identifier[self] . identifier[bitbucket] . identifier[auth] )
|
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confimation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
def parse_color(color):
    """Parse any CSS color definition.

    Accepts the hex forms ``#rgb``, ``#rgba``, ``#rrggbb``, ``#rrggbbaa``
    and the functional forms ``rgb(r, g, b)`` / ``rgba(r, g, b, a)``.

    Returns a ``(r, g, b, a, type)`` tuple: ``r``, ``g``, ``b`` are ints
    in 0-255; ``a`` is the alpha (a 0-1 float for hex and ``rgba(...)``
    input, the int ``1`` for ``rgb(...)``); ``type`` names the detected
    form (e.g. ``'#rgb'``).  All five are ``None`` when the string
    matches no known form.
    """
    # 'color_type' instead of 'type' — avoid shadowing the builtin.
    r = g = b = a = color_type = None
    if color.startswith('#'):
        color = color[1:]
        if len(color) == 3:
            color_type = '#rgb'
            color = color + 'f'  # add shorthand alpha; the 4-char path expands it
        if len(color) == 4:
            color_type = color_type or '#rgba'
            # expand shorthand digits: 'abcf' -> 'aabbccff'
            color = ''.join([c * 2 for c in color])
        if len(color) == 6:
            color_type = color_type or '#rrggbb'
            color = color + 'ff'  # fully opaque alpha
        assert len(color) == 8
        color_type = color_type or '#rrggbbaa'
        # split into hex byte pairs and parse each channel
        r, g, b, a = [
            int(''.join(c), 16) for c in zip(color[::2], color[1::2])
        ]
        a /= 255  # normalize alpha from 0-255 to 0-1
    elif color.startswith('rgb('):
        color_type = 'rgb'
        color = color[4:-1]
        r, g, b, a = [int(c) for c in color.split(',')] + [1]
    elif color.startswith('rgba('):
        color_type = 'rgba'
        color = color[5:-1]
        r, g, b, a = [int(c) for c in color.split(',')[:-1]
                      ] + [float(color.split(',')[-1])]
    return r, g, b, a, color_type
|
def function[parse_color, parameter[color]]:
constant[Take any css color definition and give back a tuple containing the
r, g, b, a values along with a type which can be: #rgb, #rgba, #rrggbb,
#rrggbbaa, rgb, rgba
]
variable[r] assign[=] constant[None]
if call[name[color].startswith, parameter[constant[#]]] begin[:]
variable[color] assign[=] call[name[color]][<ast.Slice object at 0x7da20c993ac0>]
if compare[call[name[len], parameter[name[color]]] equal[==] constant[3]] begin[:]
variable[type] assign[=] constant[#rgb]
variable[color] assign[=] binary_operation[name[color] + constant[f]]
if compare[call[name[len], parameter[name[color]]] equal[==] constant[4]] begin[:]
variable[type] assign[=] <ast.BoolOp object at 0x7da20c990cd0>
variable[color] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da20c992a10>]]
if compare[call[name[len], parameter[name[color]]] equal[==] constant[6]] begin[:]
variable[type] assign[=] <ast.BoolOp object at 0x7da20c990310>
variable[color] assign[=] binary_operation[name[color] + constant[ff]]
assert[compare[call[name[len], parameter[name[color]]] equal[==] constant[8]]]
variable[type] assign[=] <ast.BoolOp object at 0x7da20c992140>
<ast.Tuple object at 0x7da20c9900d0> assign[=] <ast.ListComp object at 0x7da20c993640>
<ast.AugAssign object at 0x7da20c9906d0>
return[tuple[[<ast.Name object at 0x7da18dc983d0>, <ast.Name object at 0x7da18dc98640>, <ast.Name object at 0x7da18dc98190>, <ast.Name object at 0x7da18dc997e0>, <ast.Name object at 0x7da18dc9a680>]]]
|
keyword[def] identifier[parse_color] ( identifier[color] ):
literal[string]
identifier[r] = identifier[g] = identifier[b] = identifier[a] = identifier[type] = keyword[None]
keyword[if] identifier[color] . identifier[startswith] ( literal[string] ):
identifier[color] = identifier[color] [ literal[int] :]
keyword[if] identifier[len] ( identifier[color] )== literal[int] :
identifier[type] = literal[string]
identifier[color] = identifier[color] + literal[string]
keyword[if] identifier[len] ( identifier[color] )== literal[int] :
identifier[type] = identifier[type] keyword[or] literal[string]
identifier[color] = literal[string] . identifier[join] ([ identifier[c] * literal[int] keyword[for] identifier[c] keyword[in] identifier[color] ])
keyword[if] identifier[len] ( identifier[color] )== literal[int] :
identifier[type] = identifier[type] keyword[or] literal[string]
identifier[color] = identifier[color] + literal[string]
keyword[assert] identifier[len] ( identifier[color] )== literal[int]
identifier[type] = identifier[type] keyword[or] literal[string]
identifier[r] , identifier[g] , identifier[b] , identifier[a] =[
identifier[int] ( literal[string] . identifier[join] ( identifier[c] ), literal[int] ) keyword[for] identifier[c] keyword[in] identifier[zip] ( identifier[color] [:: literal[int] ], identifier[color] [ literal[int] :: literal[int] ])
]
identifier[a] /= literal[int]
keyword[elif] identifier[color] . identifier[startswith] ( literal[string] ):
identifier[type] = literal[string]
identifier[color] = identifier[color] [ literal[int] :- literal[int] ]
identifier[r] , identifier[g] , identifier[b] , identifier[a] =[ identifier[int] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[color] . identifier[split] ( literal[string] )]+[ literal[int] ]
keyword[elif] identifier[color] . identifier[startswith] ( literal[string] ):
identifier[type] = literal[string]
identifier[color] = identifier[color] [ literal[int] :- literal[int] ]
identifier[r] , identifier[g] , identifier[b] , identifier[a] =[ identifier[int] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[color] . identifier[split] ( literal[string] )[:- literal[int] ]
]+[ identifier[float] ( identifier[color] . identifier[split] ( literal[string] )[- literal[int] ])]
keyword[return] identifier[r] , identifier[g] , identifier[b] , identifier[a] , identifier[type]
|
def parse_color(color):
"""Take any css color definition and give back a tuple containing the
r, g, b, a values along with a type which can be: #rgb, #rgba, #rrggbb,
#rrggbbaa, rgb, rgba
"""
r = g = b = a = type = None
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
type = '#rgb'
color = color + 'f' # depends on [control=['if'], data=[]]
if len(color) == 4:
type = type or '#rgba'
color = ''.join([c * 2 for c in color]) # depends on [control=['if'], data=[]]
if len(color) == 6:
type = type or '#rrggbb'
color = color + 'ff' # depends on [control=['if'], data=[]]
assert len(color) == 8
type = type or '#rrggbbaa'
(r, g, b, a) = [int(''.join(c), 16) for c in zip(color[::2], color[1::2])]
a /= 255 # depends on [control=['if'], data=[]]
elif color.startswith('rgb('):
type = 'rgb'
color = color[4:-1]
(r, g, b, a) = [int(c) for c in color.split(',')] + [1] # depends on [control=['if'], data=[]]
elif color.startswith('rgba('):
type = 'rgba'
color = color[5:-1]
(r, g, b, a) = [int(c) for c in color.split(',')[:-1]] + [float(color.split(',')[-1])] # depends on [control=['if'], data=[]]
return (r, g, b, a, type)
|
def Dumper(obj, indent=0, increase=4, encoding='utf-8'):
    """Recursively pretty-print a dict/list/tuple/scalar data structure.

    dicts print one ``key`` (or ``key=value``) line per entry, nesting
    iterable values by ``increase`` extra spaces; tuples print their head
    then recurse on the remainder one level deeper; lists recurse at the
    same level.  Prints to stdout and returns None.
    """
    ##############################################################################
    def p(given):
        """Encode str values with *encoding*; pass anything else through.

        NOTE(review): under Python 3 this returns a bytes object whose
        repr (``b'...'``) is what gets printed — kept as-is to preserve
        the existing output format.
        """
        if isinstance(given, str):
            return given.encode(encoding)
        return given
    ##############################################################################
    try:
        if isinstance(obj, dict):
            for k, v in obj.items():
                # NOTE: str values also have __iter__, so they recurse and
                # print on their own line rather than as 'k=v'.
                if hasattr(v, "__iter__"):
                    print("%s%s" % (" " * indent, p(k)))
                    Dumper(v, indent=indent + increase, increase=increase)
                else:
                    print("%s%s=%s" % (" " * indent, p(k), p(v)))
        elif isinstance(obj, list):
            for item in obj:
                # same indent: a list itself prints nothing, only its items
                Dumper(item, indent=indent, increase=increase)
        elif isinstance(obj, tuple):
            print("%s%s" % (" " * indent, p(obj[0])))
            rest = list(obj)[1:]  # renamed from 'next' — don't shadow the builtin
            # a single remaining element is dumped as a scalar, not a 1-tuple
            rest = rest[0] if len(rest) == 1 else tuple(rest)
            Dumper(rest, indent=indent + increase, increase=increase)
        elif isinstance(obj, str):
            print("%s%s" % (" " * indent, p(obj)))
        elif obj is not None:  # idiomatic null check (was 'obj != None')
            print("%s%s" % (" " * indent, p(obj)))
    except Exception:
        # best-effort fallback: show the raw object instead of crashing the dump
        print(type(obj), obj)
|
def function[Dumper, parameter[obj, indent, increase, encoding]]:
constant[appropriately view a given dict/list/tuple/object data structure]
def function[p, parameter[given]]:
constant[ensure proper decoding from unicode, if necessary]
if call[name[isinstance], parameter[name[given], name[str]]] begin[:]
return[call[name[given].encode, parameter[name[encoding]]]]
<ast.Try object at 0x7da18bcc96c0>
|
keyword[def] identifier[Dumper] ( identifier[obj] , identifier[indent] = literal[int] , identifier[increase] = literal[int] , identifier[encoding] = literal[string] ):
literal[string]
keyword[def] identifier[p] ( identifier[given] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[given] , identifier[str] ): keyword[return] identifier[given] . identifier[encode] ( identifier[encoding] )
keyword[else] : keyword[return] identifier[given]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[items] ():
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[print] ( literal[string] %( literal[string] *( identifier[indent] ), identifier[p] ( identifier[k] )))
identifier[Dumper] ( identifier[v] , identifier[indent] = identifier[indent] + identifier[increase] , identifier[increase] = identifier[increase] )
keyword[else] : identifier[print] ( literal[string] %( literal[string] *( identifier[indent] ), identifier[p] ( identifier[k] ), identifier[p] ( identifier[v] )))
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[list] ):
keyword[for] identifier[o] keyword[in] identifier[obj] :
identifier[Dumper] ( identifier[o] , identifier[indent] = identifier[indent] , identifier[increase] = identifier[increase] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[tuple] ):
identifier[print] ( literal[string] %( literal[string] *( identifier[indent] ), identifier[p] ( identifier[obj] [ literal[int] ])))
identifier[next] = identifier[list] ( identifier[obj] )[ literal[int] :]
keyword[if] identifier[len] ( identifier[next] )== literal[int] : identifier[next] = identifier[next] [ literal[int] ]
keyword[else] : identifier[next] = identifier[tuple] ( identifier[next] )
identifier[Dumper] ( identifier[next] , identifier[indent] = identifier[indent] + identifier[increase] , identifier[increase] = identifier[increase] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[str] ):
identifier[print] ( literal[string] %( literal[string] *( identifier[indent] ), identifier[p] ( identifier[obj] )))
keyword[elif] identifier[obj] != keyword[None] :
identifier[print] ( literal[string] %( literal[string] *( identifier[indent] ), identifier[p] ( identifier[obj] )))
keyword[except] identifier[Exception] :
identifier[print] ( identifier[type] ( identifier[obj] ), identifier[obj] )
|
def Dumper(obj, indent=0, increase=4, encoding='utf-8'):
"""appropriately view a given dict/list/tuple/object data structure"""
##############################################################################
def p(given):
"""ensure proper decoding from unicode, if necessary"""
if isinstance(given, str):
return given.encode(encoding) # depends on [control=['if'], data=[]]
else:
return given
##############################################################################
try:
if isinstance(obj, dict):
for (k, v) in obj.items():
if hasattr(v, '__iter__'):
print('%s%s' % (' ' * indent, p(k)))
Dumper(v, indent=indent + increase, increase=increase) # depends on [control=['if'], data=[]]
else:
print('%s%s=%s' % (' ' * indent, p(k), p(v))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, list):
for o in obj:
Dumper(o, indent=indent, increase=increase) # didn't print anything this go-round # depends on [control=['for'], data=['o']] # depends on [control=['if'], data=[]]
elif isinstance(obj, tuple):
print('%s%s' % (' ' * indent, p(obj[0])))
next = list(obj)[1:]
if len(next) == 1:
next = next[0] # depends on [control=['if'], data=[]]
else:
next = tuple(next)
Dumper(next, indent=indent + increase, increase=increase) # depends on [control=['if'], data=[]]
elif isinstance(obj, str):
print('%s%s' % (' ' * indent, p(obj))) # universally convert back to str for printing # depends on [control=['if'], data=[]]
elif obj != None:
print('%s%s' % (' ' * indent, p(obj))) # depends on [control=['if'], data=['obj']] # depends on [control=['try'], data=[]]
except Exception:
print(type(obj), obj) # depends on [control=['except'], data=[]]
|
    def contains_ignoring_case(self, *items):
        """Asserts that val is string and contains the given item or items.

        Comparison is case-insensitive (via ``.lower()``).  ``self.val``
        may be a string — each item must be a substring of it — or an
        iterable of strings — each item must equal one of its elements.
        Raises ValueError when called with no items and TypeError when an
        item (or, in the iterable case, a value element) is not a string;
        mismatches are reported through ``self._err``.  Returns ``self``
        so assertions can be chained.
        """
        if len(items) == 0:
            raise ValueError('one or more args must be given')
        if isinstance(self.val, str_types):
            if len(items) == 1:
                # single item: fail fast with a message naming just that item
                if not isinstance(items[0], str_types):
                    raise TypeError('given arg must be a string')
                if items[0].lower() not in self.val.lower():
                    self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0]))
            else:
                # multiple items: collect every miss so the error lists them all
                missing = []
                for i in items:
                    if not isinstance(i, str_types):
                        raise TypeError('given args must all be strings')
                    if i.lower() not in self.val.lower():
                        missing.append(i)
                if missing:
                    self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
        elif isinstance(self.val, Iterable):
            missing = []
            for i in items:
                if not isinstance(i, str_types):
                    raise TypeError('given args must all be strings')
                # linear scan: an item matches if it equals any element, ignoring case
                found = False
                for v in self.val:
                    if not isinstance(v, str_types):
                        raise TypeError('val items must all be strings')
                    if i.lower() == v.lower():
                        found = True
                        break
                if not found:
                    missing.append(i)
            if missing:
                self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
        else:
            raise TypeError('val is not a string or iterable')
        return self
|
def function[contains_ignoring_case, parameter[self]]:
constant[Asserts that val is string and contains the given item or items.]
if compare[call[name[len], parameter[name[items]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f09f190>
if call[name[isinstance], parameter[name[self].val, name[str_types]]] begin[:]
if compare[call[name[len], parameter[name[items]]] equal[==] constant[1]] begin[:]
if <ast.UnaryOp object at 0x7da18f09d270> begin[:]
<ast.Raise object at 0x7da1b01e7e20>
if compare[call[call[name[items]][constant[0]].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> call[name[self].val.lower, parameter[]]] begin[:]
call[name[self]._err, parameter[binary_operation[constant[Expected <%s> to case-insensitive contain item <%s>, but did not.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b016e7a0>, <ast.Subscript object at 0x7da1b016de40>]]]]]
return[name[self]]
|
keyword[def] identifier[contains_ignoring_case] ( identifier[self] ,* identifier[items] ):
literal[string]
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[self] . identifier[val] , identifier[str_types] ):
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[items] [ literal[int] ], identifier[str_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[items] [ literal[int] ]. identifier[lower] () keyword[not] keyword[in] identifier[self] . identifier[val] . identifier[lower] ():
identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[items] [ literal[int] ]))
keyword[else] :
identifier[missing] =[]
keyword[for] identifier[i] keyword[in] identifier[items] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[i] , identifier[str_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[i] . identifier[lower] () keyword[not] keyword[in] identifier[self] . identifier[val] . identifier[lower] ():
identifier[missing] . identifier[append] ( identifier[i] )
keyword[if] identifier[missing] :
identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[self] . identifier[_fmt_items] ( identifier[items] ), identifier[self] . identifier[_fmt_items] ( identifier[missing] )))
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[val] , identifier[Iterable] ):
identifier[missing] =[]
keyword[for] identifier[i] keyword[in] identifier[items] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[i] , identifier[str_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[found] = keyword[False]
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[val] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[str_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[i] . identifier[lower] ()== identifier[v] . identifier[lower] ():
identifier[found] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[found] :
identifier[missing] . identifier[append] ( identifier[i] )
keyword[if] identifier[missing] :
identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[self] . identifier[_fmt_items] ( identifier[items] ), identifier[self] . identifier[_fmt_items] ( identifier[missing] )))
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[self]
|
def contains_ignoring_case(self, *items):
"""Asserts that val is string and contains the given item or items."""
if len(items) == 0:
raise ValueError('one or more args must be given') # depends on [control=['if'], data=[]]
if isinstance(self.val, str_types):
if len(items) == 1:
if not isinstance(items[0], str_types):
raise TypeError('given arg must be a string') # depends on [control=['if'], data=[]]
if items[0].lower() not in self.val.lower():
self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings') # depends on [control=['if'], data=[]]
if i.lower() not in self.val.lower():
missing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if missing:
self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(self.val, Iterable):
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings') # depends on [control=['if'], data=[]]
found = False
for v in self.val:
if not isinstance(v, str_types):
raise TypeError('val items must all be strings') # depends on [control=['if'], data=[]]
if i.lower() == v.lower():
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
if not found:
missing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if missing:
self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise TypeError('val is not a string or iterable')
return self
|
def get_notebook_image_info(project: 'Project', job: Any) -> Tuple[str, str]:
    """Return the image name and image tag for a job.

    The name comes from the job's image info; the tag is always the
    latest-image tag.
    """
    job_image = get_job_image_info(project, job)
    return job_image[0], LATEST_IMAGE_TAG
|
def function[get_notebook_image_info, parameter[project, job]]:
constant[Return the image name and image tag for a job]
<ast.Tuple object at 0x7da20c6c7520> assign[=] call[name[get_job_image_info], parameter[name[project], name[job]]]
return[tuple[[<ast.Name object at 0x7da20c6c6680>, <ast.Name object at 0x7da20c6c46a0>]]]
|
keyword[def] identifier[get_notebook_image_info] ( identifier[project] : literal[string] , identifier[job] : identifier[Any] )-> identifier[Tuple] [ identifier[str] , identifier[str] ]:
literal[string]
identifier[image_name] , identifier[_] = identifier[get_job_image_info] ( identifier[project] , identifier[job] )
keyword[return] identifier[image_name] , identifier[LATEST_IMAGE_TAG]
|
def get_notebook_image_info(project: 'Project', job: Any) -> Tuple[str, str]:
"""Return the image name and image tag for a job"""
(image_name, _) = get_job_image_info(project, job)
return (image_name, LATEST_IMAGE_TAG)
|
def check_digest_auth(user, passwd):
    """Check user authentication using HTTP Digest auth.

    Reads the ``Authorization`` header from the current (Flask-style)
    ``request``, recomputes the digest response hash for this request,
    and compares it with the client-supplied one.

    Returns True on a match, None when the header is missing or cannot
    be parsed, and False otherwise.
    """
    if request.headers.get('Authorization'):
        credentials = parse_authorization_header(request.headers.get('Authorization'))
        if not credentials:
            return
        # rebuild the URI exactly as the client signed it: path plus query string
        request_uri = request.script_root + request.path
        if request.query_string:
            # NOTE(review): in Flask on Python 3 ``query_string`` is bytes,
            # so ``'?' + request.query_string`` would raise TypeError —
            # confirm the framework/version this runs under.
            request_uri += '?' + request.query_string
        response_hash = response(credentials, passwd, dict(uri=request_uri,
                                                           body=request.data,
                                                           method=request.method))
        if credentials.get('response') == response_hash:
            return True
    return False
|
def function[check_digest_auth, parameter[user, passwd]]:
constant[Check user authentication using HTTP Digest auth]
if call[name[request].headers.get, parameter[constant[Authorization]]] begin[:]
variable[credentials] assign[=] call[name[parse_authorization_header], parameter[call[name[request].headers.get, parameter[constant[Authorization]]]]]
if <ast.UnaryOp object at 0x7da1b21d9120> begin[:]
return[None]
variable[request_uri] assign[=] binary_operation[name[request].script_root + name[request].path]
if name[request].query_string begin[:]
<ast.AugAssign object at 0x7da1b21d9690>
variable[response_hash] assign[=] call[name[response], parameter[name[credentials], name[passwd], call[name[dict], parameter[]]]]
if compare[call[name[credentials].get, parameter[constant[response]]] equal[==] name[response_hash]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[check_digest_auth] ( identifier[user] , identifier[passwd] ):
literal[string]
keyword[if] identifier[request] . identifier[headers] . identifier[get] ( literal[string] ):
identifier[credentials] = identifier[parse_authorization_header] ( identifier[request] . identifier[headers] . identifier[get] ( literal[string] ))
keyword[if] keyword[not] identifier[credentials] :
keyword[return]
identifier[request_uri] = identifier[request] . identifier[script_root] + identifier[request] . identifier[path]
keyword[if] identifier[request] . identifier[query_string] :
identifier[request_uri] += literal[string] + identifier[request] . identifier[query_string]
identifier[response_hash] = identifier[response] ( identifier[credentials] , identifier[passwd] , identifier[dict] ( identifier[uri] = identifier[request_uri] ,
identifier[body] = identifier[request] . identifier[data] ,
identifier[method] = identifier[request] . identifier[method] ))
keyword[if] identifier[credentials] . identifier[get] ( literal[string] )== identifier[response_hash] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def check_digest_auth(user, passwd):
"""Check user authentication using HTTP Digest auth"""
if request.headers.get('Authorization'):
credentials = parse_authorization_header(request.headers.get('Authorization'))
if not credentials:
return # depends on [control=['if'], data=[]]
request_uri = request.script_root + request.path
if request.query_string:
request_uri += '?' + request.query_string # depends on [control=['if'], data=[]]
response_hash = response(credentials, passwd, dict(uri=request_uri, body=request.data, method=request.method))
if credentials.get('response') == response_hash:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False
|
    def activities(self):
        """Fetch the user's recent activity feed.

        Pages through the profile's activity endpoint 20 items at a time
        and yields the entries lazily.

        :return: recent activities, as a generator; see :class:`.Activity`
        :rtype: Activity.Iterable
        """
        from .activity import Activity
        if self.url is None:
            # anonymous / unresolved user: nothing to yield
            return
        gotten_feed_num = 20
        start = '0'
        api_url = self.url + 'activities'
        # the endpoint returns at most 20 items; fewer means the feed is exhausted
        while gotten_feed_num == 20:
            data = {'_xsrf': self.xsrf, 'start': start}
            res = self._session.post(api_url, data=data)
            # msg[0] is the item count, msg[1] the rendered HTML fragment
            gotten_feed_num = res.json()['msg'][0]
            soup = BeautifulSoup(res.json()['msg'][1])
            acts = soup.find_all(
                'div', class_='zm-profile-section-item zm-item clearfix')
            # the last item's 'data-time' is the cursor for the next page
            start = acts[-1]['data-time'] if len(acts) > 0 else 0
            for act in acts:
                # --- ignore Round Table temporarily ---
                if act.attrs['data-type-detail'] == "member_follow_roundtable":
                    continue
                # --- --- --- --- -- --- --- --- --- ---
                yield Activity(act, self._session, self)
|
def function[activities, parameter[self]]:
constant[获取用户的最近动态.
:return: 最近动态,返回生成器,具体说明见 :class:`.Activity`
:rtype: Activity.Iterable
]
from relative_module[activity] import module[Activity]
if compare[name[self].url is constant[None]] begin[:]
return[None]
variable[gotten_feed_num] assign[=] constant[20]
variable[start] assign[=] constant[0]
variable[api_url] assign[=] binary_operation[name[self].url + constant[activities]]
while compare[name[gotten_feed_num] equal[==] constant[20]] begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da204344940>, <ast.Constant object at 0x7da204347190>], [<ast.Attribute object at 0x7da204345870>, <ast.Name object at 0x7da204344340>]]
variable[res] assign[=] call[name[self]._session.post, parameter[name[api_url]]]
variable[gotten_feed_num] assign[=] call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[0]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[1]]]]
variable[acts] assign[=] call[name[soup].find_all, parameter[constant[div]]]
variable[start] assign[=] <ast.IfExp object at 0x7da204346d10>
for taget[name[act]] in starred[name[acts]] begin[:]
if compare[call[name[act].attrs][constant[data-type-detail]] equal[==] constant[member_follow_roundtable]] begin[:]
continue
<ast.Yield object at 0x7da204345750>
|
keyword[def] identifier[activities] ( identifier[self] ):
literal[string]
keyword[from] . identifier[activity] keyword[import] identifier[Activity]
keyword[if] identifier[self] . identifier[url] keyword[is] keyword[None] :
keyword[return]
identifier[gotten_feed_num] = literal[int]
identifier[start] = literal[string]
identifier[api_url] = identifier[self] . identifier[url] + literal[string]
keyword[while] identifier[gotten_feed_num] == literal[int] :
identifier[data] ={ literal[string] : identifier[self] . identifier[xsrf] , literal[string] : identifier[start] }
identifier[res] = identifier[self] . identifier[_session] . identifier[post] ( identifier[api_url] , identifier[data] = identifier[data] )
identifier[gotten_feed_num] = identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ]
identifier[soup] = identifier[BeautifulSoup] ( identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ])
identifier[acts] = identifier[soup] . identifier[find_all] (
literal[string] , identifier[class_] = literal[string] )
identifier[start] = identifier[acts] [- literal[int] ][ literal[string] ] keyword[if] identifier[len] ( identifier[acts] )> literal[int] keyword[else] literal[int]
keyword[for] identifier[act] keyword[in] identifier[acts] :
keyword[if] identifier[act] . identifier[attrs] [ literal[string] ]== literal[string] :
keyword[continue]
keyword[yield] identifier[Activity] ( identifier[act] , identifier[self] . identifier[_session] , identifier[self] )
|
def activities(self):
"""获取用户的最近动态.
:return: 最近动态,返回生成器,具体说明见 :class:`.Activity`
:rtype: Activity.Iterable
"""
from .activity import Activity
if self.url is None:
return # depends on [control=['if'], data=[]]
gotten_feed_num = 20
start = '0'
api_url = self.url + 'activities'
while gotten_feed_num == 20:
data = {'_xsrf': self.xsrf, 'start': start}
res = self._session.post(api_url, data=data)
gotten_feed_num = res.json()['msg'][0]
soup = BeautifulSoup(res.json()['msg'][1])
acts = soup.find_all('div', class_='zm-profile-section-item zm-item clearfix')
start = acts[-1]['data-time'] if len(acts) > 0 else 0
for act in acts:
# --- ignore Round Table temporarily ---
if act.attrs['data-type-detail'] == 'member_follow_roundtable':
continue # depends on [control=['if'], data=[]]
# --- --- --- --- -- --- --- --- --- ---
yield Activity(act, self._session, self) # depends on [control=['for'], data=['act']] # depends on [control=['while'], data=['gotten_feed_num']]
|
def _mount_devicemapper(self, identifier):
    """
    Devicemapper mount backend.

    Resolves ``identifier`` to a temporary container, activates its thin
    device if it is not already known to device-mapper, locates the block
    device node (via ``/dev/mapper`` or, failing that, ``/dev/block``) and
    mounts it read-only at ``self.mountpoint``.

    :param identifier: image/container identifier accepted by
        ``_identifier_as_cid``.
    :returns: the temporary container id (needed later for unmounting).
    :raises MountError: if the device cannot be located or mounted.
    """
    import time  # local import; only needed for the settle wait below

    info = self.client.info()
    # cid is the container_id of the temp container
    cid = self._identifier_as_cid(identifier)
    cinfo = self.client.inspect_container(cid)
    dm_pool = info['DriverStatus'][0][1]
    try:
        dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
        dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
        dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
    except (KeyError, TypeError):
        # Narrowed from a bare ``except:`` — only a missing/absent
        # GraphDriver section should trigger the legacy fallback.
        # TODO: deprecated when GraphDriver patch makes it upstream
        dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
        dm_dev_name = dm_pool.replace('pool', cid)
    # grab list of devices
    dmsetupLs = dmsetupWrap.getDmsetupLs()
    if dmsetupLs == -1:
        raise MountError('Error: dmsetup returned non zero error ')
    # Ensure the device exists; if not, activate the thin device now.
    if dm_dev_name not in dmsetupLs:
        Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
                                    dm_pool)
    # Check that the device shows up in /dev/mapper; if not, fall back to
    # its major:minor entry under /dev/block.
    mapperDir = os.path.join('/dev/mapper', dm_dev_name)
    if os.path.exists(mapperDir):
        dm_dev_path = mapperDir
    else:
        # get a fresh listing now that the device has been created
        dmsetupLs = dmsetupWrap.getDmsetupLs()
        majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
        blockDir = os.path.join('/dev/block', majorMinor)
        # FIXME: possibly a VirtualBox quirk, but occasionally the block
        # device node is not created by the time we check below, so wait
        # briefly to let the node appear.
        time.sleep(0.1)
        if os.path.exists(blockDir):
            dm_dev_path = blockDir
        else:
            raise MountError('Error: Block device found in dmsetup ls '
                             'but not in /dev/mapper/ or /dev/block')
    options = ['ro', 'nosuid', 'nodev']
    # XFS should get nouuid
    fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
    if fstype.upper() == 'XFS' and 'nouuid' not in options:
        # The outer condition already guarantees 'nouuid' is absent;
        # the redundant inner re-check has been removed.
        options.append('nouuid')
    # NOTE(review): ``options`` is computed but never passed to
    # ``Mount.mount_path`` — verify whether mount_path should receive it.
    try:
        Mount.mount_path(dm_dev_path, self.mountpoint)
    except MountError as de:
        # Roll back: drop the temp container and deactivate the device.
        self._cleanup_container(cinfo)
        Mount.remove_thin_device(dm_dev_name)
        raise de
    # return the temp container ID so we can unmount later
    return cid
|
def function[_mount_devicemapper, parameter[self, identifier]]:
constant[
Devicemapper mount backend.
]
variable[info] assign[=] call[name[self].client.info, parameter[]]
variable[cid] assign[=] call[name[self]._identifier_as_cid, parameter[name[identifier]]]
variable[cinfo] assign[=] call[name[self].client.inspect_container, parameter[name[cid]]]
<ast.Tuple object at 0x7da18c4cc370> assign[=] tuple[[<ast.Constant object at 0x7da18c4cef80>, <ast.Constant object at 0x7da18c4cdc30>, <ast.Constant object at 0x7da18c4cefb0>]]
variable[dm_pool] assign[=] call[call[call[name[info]][constant[DriverStatus]]][constant[0]]][constant[1]]
<ast.Try object at 0x7da18c4cffd0>
variable[dmsetupLs] assign[=] call[name[dmsetupWrap].getDmsetupLs, parameter[]]
if compare[name[dmsetupLs] equal[==] <ast.UnaryOp object at 0x7da18c4cdcf0>] begin[:]
<ast.Raise object at 0x7da18c4cf070>
if compare[name[dm_dev_name] <ast.NotIn object at 0x7da2590d7190> name[dmsetupLs]] begin[:]
call[name[Mount]._activate_thin_device, parameter[name[dm_dev_name], name[dm_dev_id], name[dm_dev_size], name[dm_pool]]]
variable[mapperDir] assign[=] call[name[os].path.join, parameter[constant[/dev/mapper], name[dm_dev_name]]]
if call[name[os].path.exists, parameter[name[mapperDir]]] begin[:]
variable[dm_dev_path] assign[=] name[mapperDir]
variable[options] assign[=] list[[<ast.Constant object at 0x7da20cabdd80>, <ast.Constant object at 0x7da20cabe560>, <ast.Constant object at 0x7da20cabdb70>]]
variable[fstype] assign[=] call[call[name[Mount]._get_fs, parameter[name[dm_dev_path]]].decode, parameter[call[name[sys].getdefaultencoding, parameter[]]]]
if <ast.BoolOp object at 0x7da20cabe2c0> begin[:]
if compare[constant[nouuid] <ast.NotIn object at 0x7da2590d7190> name[options]] begin[:]
call[name[options].append, parameter[constant[nouuid]]]
<ast.Try object at 0x7da20cabdb10>
return[name[cid]]
|
keyword[def] identifier[_mount_devicemapper] ( identifier[self] , identifier[identifier] ):
literal[string]
identifier[info] = identifier[self] . identifier[client] . identifier[info] ()
identifier[cid] = identifier[self] . identifier[_identifier_as_cid] ( identifier[identifier] )
identifier[cinfo] = identifier[self] . identifier[client] . identifier[inspect_container] ( identifier[cid] )
identifier[dm_dev_name] , identifier[dm_dev_id] , identifier[dm_dev_size] = literal[string] , literal[string] , literal[string]
identifier[dm_pool] = identifier[info] [ literal[string] ][ literal[int] ][ literal[int] ]
keyword[try] :
identifier[dm_dev_name] = identifier[cinfo] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[dm_dev_id] = identifier[cinfo] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[dm_dev_size] = identifier[cinfo] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[except] :
identifier[dm_dev_id] , identifier[dm_dev_size] = identifier[DockerMount] . identifier[_no_gd_api_dm] ( identifier[cid] )
identifier[dm_dev_name] = identifier[dm_pool] . identifier[replace] ( literal[string] , identifier[cid] )
identifier[dmsetupLs] = identifier[dmsetupWrap] . identifier[getDmsetupLs] ()
keyword[if] identifier[dmsetupLs] ==- literal[int] :
keyword[raise] identifier[MountError] ( literal[string] )
keyword[if] identifier[dm_dev_name] keyword[not] keyword[in] identifier[dmsetupLs] :
identifier[Mount] . identifier[_activate_thin_device] ( identifier[dm_dev_name] , identifier[dm_dev_id] , identifier[dm_dev_size] ,
identifier[dm_pool] )
identifier[mapperDir] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[dm_dev_name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[mapperDir] ):
identifier[dm_dev_path] = identifier[mapperDir]
keyword[else] :
identifier[dmsetupLs] = identifier[dmsetupWrap] . identifier[getDmsetupLs] ()
identifier[majorMinor] = identifier[dmsetupWrap] . identifier[getMajorMinor] ( identifier[dm_dev_name] , identifier[dmsetupLs] )
identifier[blockDir] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[majorMinor] )
keyword[import] identifier[time]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[blockDir] ):
identifier[dm_dev_path] = identifier[blockDir]
keyword[else] :
keyword[raise] identifier[MountError] ( literal[string]
literal[string] )
identifier[options] =[ literal[string] , literal[string] , literal[string] ]
identifier[fstype] = identifier[Mount] . identifier[_get_fs] ( identifier[dm_dev_path] ). identifier[decode] ( identifier[sys] . identifier[getdefaultencoding] ())
keyword[if] identifier[fstype] . identifier[upper] ()== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[options] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[options] :
identifier[options] . identifier[append] ( literal[string] )
keyword[try] :
identifier[Mount] . identifier[mount_path] ( identifier[dm_dev_path] , identifier[self] . identifier[mountpoint] )
keyword[except] identifier[MountError] keyword[as] identifier[de] :
identifier[self] . identifier[_cleanup_container] ( identifier[cinfo] )
identifier[Mount] . identifier[remove_thin_device] ( identifier[dm_dev_name] )
keyword[raise] identifier[de]
keyword[return] identifier[cid]
|
def _mount_devicemapper(self, identifier):
"""
Devicemapper mount backend.
"""
info = self.client.info()
# cid is the contaienr_id of the temp container
cid = self._identifier_as_cid(identifier)
cinfo = self.client.inspect_container(cid)
(dm_dev_name, dm_dev_id, dm_dev_size) = ('', '', '')
dm_pool = info['DriverStatus'][0][1]
try:
dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize'] # depends on [control=['try'], data=[]]
except:
# TODO: deprecated when GraphDriver patch makes it upstream
(dm_dev_id, dm_dev_size) = DockerMount._no_gd_api_dm(cid)
dm_dev_name = dm_pool.replace('pool', cid) # depends on [control=['except'], data=[]]
# grab list of devces
dmsetupLs = dmsetupWrap.getDmsetupLs()
if dmsetupLs == -1:
raise MountError('Error: dmsetup returned non zero error ') # depends on [control=['if'], data=[]]
# ENSURE device exists!
if dm_dev_name not in dmsetupLs:
# IF device doesn't exist yet we create it!
Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size, dm_pool) # depends on [control=['if'], data=['dm_dev_name']]
# check that device is shown in /dev/mapper, if not we can use the
# major minor numbers in /dev/block
mapperDir = os.path.join('/dev/mapper', dm_dev_name)
if os.path.exists(mapperDir):
dm_dev_path = mapperDir # depends on [control=['if'], data=[]]
else:
# get new dmsetupLs after device has been created!
dmsetupLs = dmsetupWrap.getDmsetupLs()
# test if device exists in dmsetupls, if so, get its majorminor found in /dev/block
majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
blockDir = os.path.join('/dev/block', majorMinor)
# FIXME, coudl be due to Virtual box, but occasionally the block device
# will not be created by the time we check it exists below, so we
# can wait a half a second to let it be created up
import time
time.sleep(0.1)
if os.path.exists(blockDir):
dm_dev_path = blockDir # depends on [control=['if'], data=[]]
else:
raise MountError('Error: Block device found in dmsetup ls but not in /dev/mapper/ or /dev/block')
options = ['ro', 'nosuid', 'nodev']
# XFS should get nouuid
fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
if fstype.upper() == 'XFS' and 'nouuid' not in options:
if 'nouuid' not in options:
options.append('nouuid') # depends on [control=['if'], data=['options']] # depends on [control=['if'], data=[]]
try:
Mount.mount_path(dm_dev_path, self.mountpoint) # depends on [control=['try'], data=[]]
except MountError as de:
self._cleanup_container(cinfo)
Mount.remove_thin_device(dm_dev_name)
raise de # depends on [control=['except'], data=['de']]
# return the temp container ID so we can unmount later
return cid
|
def _read_dat(x):
    """read 24bit binary data and convert them to numpy.
    Parameters
    ----------
    x : bytes
        bytes (length should be divisible by 3)
    Returns
    -------
    numpy vector
        vector with the signed 24bit values
    Notes
    -----
    It's pretty slow but it's pretty a PITA to read 24bit as far as I can tell.
    """
    n_smp = len(x) // DATA_PRECISION
    dat = zeros(n_smp)
    # walk the byte string one sample (DATA_PRECISION bytes) at a time
    for out_idx, start in enumerate(
            range(0, n_smp * DATA_PRECISION, DATA_PRECISION)):
        chunk = x[start:start + DATA_PRECISION]
        dat[out_idx] = int.from_bytes(chunk, byteorder='little', signed=True)
    return dat
|
def function[_read_dat, parameter[x]]:
constant[read 24bit binary data and convert them to numpy.
Parameters
----------
x : bytes
bytes (length should be divisible by 3)
Returns
-------
numpy vector
vector with the signed 24bit values
Notes
-----
It's pretty slow but it's pretty a PITA to read 24bit as far as I can tell.
]
variable[n_smp] assign[=] call[name[int], parameter[binary_operation[call[name[len], parameter[name[x]]] / name[DATA_PRECISION]]]]
variable[dat] assign[=] call[name[zeros], parameter[name[n_smp]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_smp]]]] begin[:]
variable[i0] assign[=] binary_operation[name[i] * name[DATA_PRECISION]]
variable[i1] assign[=] binary_operation[name[i0] + name[DATA_PRECISION]]
call[name[dat]][name[i]] assign[=] call[name[int].from_bytes, parameter[call[name[x]][<ast.Slice object at 0x7da18dc9b5b0>]]]
return[name[dat]]
|
keyword[def] identifier[_read_dat] ( identifier[x] ):
literal[string]
identifier[n_smp] = identifier[int] ( identifier[len] ( identifier[x] )/ identifier[DATA_PRECISION] )
identifier[dat] = identifier[zeros] ( identifier[n_smp] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_smp] ):
identifier[i0] = identifier[i] * identifier[DATA_PRECISION]
identifier[i1] = identifier[i0] + identifier[DATA_PRECISION]
identifier[dat] [ identifier[i] ]= identifier[int] . identifier[from_bytes] ( identifier[x] [ identifier[i0] : identifier[i1] ], identifier[byteorder] = literal[string] , identifier[signed] = keyword[True] )
keyword[return] identifier[dat]
|
def _read_dat(x):
"""read 24bit binary data and convert them to numpy.
Parameters
----------
x : bytes
bytes (length should be divisible by 3)
Returns
-------
numpy vector
vector with the signed 24bit values
Notes
-----
It's pretty slow but it's pretty a PITA to read 24bit as far as I can tell.
"""
n_smp = int(len(x) / DATA_PRECISION)
dat = zeros(n_smp)
for i in range(n_smp):
i0 = i * DATA_PRECISION
i1 = i0 + DATA_PRECISION
dat[i] = int.from_bytes(x[i0:i1], byteorder='little', signed=True) # depends on [control=['for'], data=['i']]
return dat
|
def _escape_char(c):
    """Single char escape. Return the char, escaped if not already legal"""
    if isinstance(c, int):
        c = _unichr(c)
    # legal characters pass through untouched; everything else is escaped
    if c in LEGAL_CHARS:
        return c
    return ESCAPE_FMT % ord(c)
|
def function[_escape_char, parameter[c]]:
constant[Single char escape. Return the char, escaped if not already legal]
if call[name[isinstance], parameter[name[c], name[int]]] begin[:]
variable[c] assign[=] call[name[_unichr], parameter[name[c]]]
return[<ast.IfExp object at 0x7da1b10e4310>]
|
keyword[def] identifier[_escape_char] ( identifier[c] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[c] , identifier[int] ):
identifier[c] = identifier[_unichr] ( identifier[c] )
keyword[return] identifier[c] keyword[if] identifier[c] keyword[in] identifier[LEGAL_CHARS] keyword[else] identifier[ESCAPE_FMT] % identifier[ord] ( identifier[c] )
|
def _escape_char(c):
"""Single char escape. Return the char, escaped if not already legal"""
if isinstance(c, int):
c = _unichr(c) # depends on [control=['if'], data=[]]
return c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c)
|
def sheetpack(fn, sheet=0, header=True, startcell=None, stopcell=None,
              usecols=None):
    """Return a ChannelPack instance loaded with data from the spread
    sheet file fn, (xls, xlsx).
    fn: str
        The file to read from.
    sheet: int or str
        If int, it is the index for the sheet 0-based. Else the sheet
        name.
    header: bool or str
        True if the defined data range includes a header with field
        names. Else False - the whole range is data. If a string, it is
        a spread sheet style notation of the startcell for the header
        ("F9"). The "width" of this record is the same as for the data.
    startcell: str or None
        If given, a spread sheet style notation of the cell where reading
        start, ("F9").
    stopcell: str or None
        A spread sheet style notation of the cell where data end,
        ("F9").
    usecols: str or seqence of ints
        The columns to use, 0-based. 0 is the spread sheet column
        "A". Can be given as a string also - 'C:E, H' for columns C, D,
        E and H.
    Might not be a favorite, but the header row can be offset from the
    data range. The meaning of usecols is then applied on both the data
    range and the header row. However, usecols is always specified with
    regards to the data range.
    """
    # field names discovered during loading are collected here
    names = []
    pack = ChannelPack(pullxl.sheet_asdict)
    pack.load(fn, sheet=sheet, header=header, startcell=startcell,
              stopcell=stopcell, usecols=usecols, chnames_out=names)
    # an empty name list means "no header" -> leave channel names unset
    pack.set_channel_names(names if names else None)
    return pack
|
def function[sheetpack, parameter[fn, sheet, header, startcell, stopcell, usecols]]:
constant[Return a ChannelPack instance loaded with data from the spread
sheet file fn, (xls, xlsx).
fn: str
The file to read from.
sheet: int or str
If int, it is the index for the sheet 0-based. Else the sheet
name.
header: bool or str
True if the defined data range includes a header with field
names. Else False - the whole range is data. If a string, it is
a spread sheet style notation of the startcell for the header
("F9"). The "width" of this record is the same as for the data.
startcell: str or None
If given, a spread sheet style notation of the cell where reading
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9").
usecols: str or seqence of ints
The columns to use, 0-based. 0 is the spread sheet column
"A". Can be given as a string also - 'C:E, H' for columns C, D,
E and H.
Might not be a favorite, but the header row can be offset from the
data range. The meaning of usecols is then applied on both the data
range and the header row. However, usecols is always specified with
regards to the data range.
]
variable[cp] assign[=] call[name[ChannelPack], parameter[name[pullxl].sheet_asdict]]
variable[chnames] assign[=] list[[]]
call[name[cp].load, parameter[name[fn]]]
call[name[cp].set_channel_names, parameter[<ast.BoolOp object at 0x7da2041d88e0>]]
return[name[cp]]
|
keyword[def] identifier[sheetpack] ( identifier[fn] , identifier[sheet] = literal[int] , identifier[header] = keyword[True] , identifier[startcell] = keyword[None] , identifier[stopcell] = keyword[None] ,
identifier[usecols] = keyword[None] ):
literal[string]
identifier[cp] = identifier[ChannelPack] ( identifier[pullxl] . identifier[sheet_asdict] )
identifier[chnames] =[]
identifier[cp] . identifier[load] ( identifier[fn] , identifier[sheet] = identifier[sheet] , identifier[header] = identifier[header] , identifier[startcell] = identifier[startcell] ,
identifier[stopcell] = identifier[stopcell] , identifier[usecols] = identifier[usecols] , identifier[chnames_out] = identifier[chnames] )
identifier[cp] . identifier[set_channel_names] ( identifier[chnames] keyword[or] keyword[None] )
keyword[return] identifier[cp]
|
def sheetpack(fn, sheet=0, header=True, startcell=None, stopcell=None, usecols=None):
"""Return a ChannelPack instance loaded with data from the spread
sheet file fn, (xls, xlsx).
fn: str
The file to read from.
sheet: int or str
If int, it is the index for the sheet 0-based. Else the sheet
name.
header: bool or str
True if the defined data range includes a header with field
names. Else False - the whole range is data. If a string, it is
a spread sheet style notation of the startcell for the header
("F9"). The "width" of this record is the same as for the data.
startcell: str or None
If given, a spread sheet style notation of the cell where reading
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9").
usecols: str or seqence of ints
The columns to use, 0-based. 0 is the spread sheet column
"A". Can be given as a string also - 'C:E, H' for columns C, D,
E and H.
Might not be a favorite, but the header row can be offset from the
data range. The meaning of usecols is then applied on both the data
range and the header row. However, usecols is always specified with
regards to the data range.
"""
cp = ChannelPack(pullxl.sheet_asdict)
chnames = []
cp.load(fn, sheet=sheet, header=header, startcell=startcell, stopcell=stopcell, usecols=usecols, chnames_out=chnames)
cp.set_channel_names(chnames or None)
return cp
|
def errata_applicability(self, synchronous=True, **kwargs):
    """Force regenerate errata applicability
    :param synchronous: What should happen if the server returns an HTTP
        202 (accepted) status code? Wait for the task to complete if
        ``True``. Immediately return the server's response otherwise.
    :param kwargs: Arguments to pass to requests.
    :returns: The server's response, with all content decoded.
    :raises: ``requests.exceptions.HTTPError`` If the server responds with
        an HTTP 4XX or 5XX message.
    """
    # work on a copy so the caller's kwargs dict is never mutated
    request_kwargs = dict(kwargs)
    request_kwargs.update(self._server_config.get_client_kwargs())
    response = client.put(self.path('errata/applicability'), **request_kwargs)
    return _handle_response(response, self._server_config, synchronous)
|
def function[errata_applicability, parameter[self, synchronous]]:
constant[Force regenerate errata applicability
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all content decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
]
variable[kwargs] assign[=] call[name[kwargs].copy, parameter[]]
call[name[kwargs].update, parameter[call[name[self]._server_config.get_client_kwargs, parameter[]]]]
variable[response] assign[=] call[name[client].put, parameter[call[name[self].path, parameter[constant[errata/applicability]]]]]
return[call[name[_handle_response], parameter[name[response], name[self]._server_config, name[synchronous]]]]
|
keyword[def] identifier[errata_applicability] ( identifier[self] , identifier[synchronous] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[kwargs] . identifier[copy] ()
identifier[kwargs] . identifier[update] ( identifier[self] . identifier[_server_config] . identifier[get_client_kwargs] ())
identifier[response] = identifier[client] . identifier[put] ( identifier[self] . identifier[path] ( literal[string] ),** identifier[kwargs] )
keyword[return] identifier[_handle_response] ( identifier[response] , identifier[self] . identifier[_server_config] , identifier[synchronous] )
|
def errata_applicability(self, synchronous=True, **kwargs):
"""Force regenerate errata applicability
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all content decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('errata/applicability'), **kwargs)
return _handle_response(response, self._server_config, synchronous)
|
def compute_etag(self) -> Optional[str]:
    """Computes the etag header to be used for this request.

    By default this is a SHA-1 hash over everything written so far,
    rendered as a quoted hex digest. Subclasses may override to supply a
    custom etag, or return None to disable tornado's default etag support.
    """
    digest = hashlib.sha1()
    for chunk in self._write_buffer:
        digest.update(chunk)
    return f'"{digest.hexdigest()}"'
|
def function[compute_etag, parameter[self]]:
constant[Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
]
variable[hasher] assign[=] call[name[hashlib].sha1, parameter[]]
for taget[name[part]] in starred[name[self]._write_buffer] begin[:]
call[name[hasher].update, parameter[name[part]]]
return[binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> call[name[hasher].hexdigest, parameter[]]]]
|
keyword[def] identifier[compute_etag] ( identifier[self] )-> identifier[Optional] [ identifier[str] ]:
literal[string]
identifier[hasher] = identifier[hashlib] . identifier[sha1] ()
keyword[for] identifier[part] keyword[in] identifier[self] . identifier[_write_buffer] :
identifier[hasher] . identifier[update] ( identifier[part] )
keyword[return] literal[string] % identifier[hasher] . identifier[hexdigest] ()
|
def compute_etag(self) -> Optional[str]:
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part) # depends on [control=['for'], data=['part']]
return '"%s"' % hasher.hexdigest()
|
def quit(self, daemononly=False):
    '''
    Send quit event to quit the main loop
    '''
    # ignore repeated quit requests
    if self.quitting:
        return
    self.quitting = True
    self.queue.append(
        SystemControlEvent(SystemControlEvent.QUIT, daemononly=daemononly),
        True)
|
def function[quit, parameter[self, daemononly]]:
constant[
Send quit event to quit the main loop
]
if <ast.UnaryOp object at 0x7da207f008b0> begin[:]
name[self].quitting assign[=] constant[True]
call[name[self].queue.append, parameter[call[name[SystemControlEvent], parameter[name[SystemControlEvent].QUIT]], constant[True]]]
|
keyword[def] identifier[quit] ( identifier[self] , identifier[daemononly] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[quitting] :
identifier[self] . identifier[quitting] = keyword[True]
identifier[self] . identifier[queue] . identifier[append] ( identifier[SystemControlEvent] ( identifier[SystemControlEvent] . identifier[QUIT] , identifier[daemononly] = identifier[daemononly] ), keyword[True] )
|
def quit(self, daemononly=False):
"""
Send quit event to quit the main loop
"""
if not self.quitting:
self.quitting = True
self.queue.append(SystemControlEvent(SystemControlEvent.QUIT, daemononly=daemononly), True) # depends on [control=['if'], data=[]]
|
def answer_options(self):
    """
    :returns: A list of :class:`~.AnswerOption` instances representing the
              available answers to this question.
    """
    elements = self._answer_option_xpb.apply_(self._question_element)
    options = []
    for element in elements:
        options.append(AnswerOption(element))
    return options
|
def function[answer_options, parameter[self]]:
constant[
:returns: A list of :class:`~.AnswerOption` instances representing the
available answers to this question.
]
return[<ast.ListComp object at 0x7da1b261f3a0>]
|
keyword[def] identifier[answer_options] ( identifier[self] ):
literal[string]
keyword[return] [
identifier[AnswerOption] ( identifier[element] )
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[_answer_option_xpb] . identifier[apply_] (
identifier[self] . identifier[_question_element]
)
]
|
def answer_options(self):
"""
:returns: A list of :class:`~.AnswerOption` instances representing the
available answers to this question.
"""
return [AnswerOption(element) for element in self._answer_option_xpb.apply_(self._question_element)]
|
def _calculate(self):
self.logpriors = np.zeros_like(self.rad)
for i in range(self.N-1):
o = np.arange(i+1, self.N)
dist = ((self.zscale*(self.pos[i] - self.pos[o]))**2).sum(axis=-1)
dist0 = (self.rad[i] + self.rad[o])**2
update = self.prior_func(dist - dist0)
self.logpriors[i] += np.sum(update)
self.logpriors[o] += update
"""
# This is equivalent
for i in range(self.N-1):
for j in range(i+1, self.N):
d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)
r = (self.rad[i] + self.rad[j])**2
cost = self.prior_func(d - r)
self.logpriors[i] += cost
self.logpriors[j] += cost
"""
|
def function[_calculate, parameter[self]]:
name[self].logpriors assign[=] call[name[np].zeros_like, parameter[name[self].rad]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[self].N - constant[1]]]]] begin[:]
variable[o] assign[=] call[name[np].arange, parameter[binary_operation[name[i] + constant[1]], name[self].N]]
variable[dist] assign[=] call[binary_operation[binary_operation[name[self].zscale * binary_operation[call[name[self].pos][name[i]] - call[name[self].pos][name[o]]]] ** constant[2]].sum, parameter[]]
variable[dist0] assign[=] binary_operation[binary_operation[call[name[self].rad][name[i]] + call[name[self].rad][name[o]]] ** constant[2]]
variable[update] assign[=] call[name[self].prior_func, parameter[binary_operation[name[dist] - name[dist0]]]]
<ast.AugAssign object at 0x7da18f09f7f0>
<ast.AugAssign object at 0x7da18f812230>
constant[
# This is equivalent
for i in range(self.N-1):
for j in range(i+1, self.N):
d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)
r = (self.rad[i] + self.rad[j])**2
cost = self.prior_func(d - r)
self.logpriors[i] += cost
self.logpriors[j] += cost
]
|
keyword[def] identifier[_calculate] ( identifier[self] ):
identifier[self] . identifier[logpriors] = identifier[np] . identifier[zeros_like] ( identifier[self] . identifier[rad] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[N] - literal[int] ):
identifier[o] = identifier[np] . identifier[arange] ( identifier[i] + literal[int] , identifier[self] . identifier[N] )
identifier[dist] =(( identifier[self] . identifier[zscale] *( identifier[self] . identifier[pos] [ identifier[i] ]- identifier[self] . identifier[pos] [ identifier[o] ]))** literal[int] ). identifier[sum] ( identifier[axis] =- literal[int] )
identifier[dist0] =( identifier[self] . identifier[rad] [ identifier[i] ]+ identifier[self] . identifier[rad] [ identifier[o] ])** literal[int]
identifier[update] = identifier[self] . identifier[prior_func] ( identifier[dist] - identifier[dist0] )
identifier[self] . identifier[logpriors] [ identifier[i] ]+= identifier[np] . identifier[sum] ( identifier[update] )
identifier[self] . identifier[logpriors] [ identifier[o] ]+= identifier[update]
literal[string]
|
def _calculate(self):
self.logpriors = np.zeros_like(self.rad)
for i in range(self.N - 1):
o = np.arange(i + 1, self.N)
dist = ((self.zscale * (self.pos[i] - self.pos[o])) ** 2).sum(axis=-1)
dist0 = (self.rad[i] + self.rad[o]) ** 2
update = self.prior_func(dist - dist0)
self.logpriors[i] += np.sum(update)
self.logpriors[o] += update # depends on [control=['for'], data=['i']]
'\n # This is equivalent\n for i in range(self.N-1):\n for j in range(i+1, self.N):\n d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)\n r = (self.rad[i] + self.rad[j])**2\n\n cost = self.prior_func(d - r)\n self.logpriors[i] += cost\n self.logpriors[j] += cost\n '
|
def ffti(wave, npoints=None, indep_min=None, indep_max=None):
    r"""
    Return the imaginary part of the Fast Fourier Transform of a waveform.
    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`
    :param npoints: Number of points to use in the transform. If **npoints**
                    is less than the size of the independent variable vector
                    the waveform is truncated; if **npoints** is greater than
                    the size of the independent variable vector, the waveform
                    is zero-padded
    :type npoints: positive integer
    :param indep_min: Independent vector start point of computation
    :type indep_min: integer or float
    :param indep_max: Independent vector stop point of computation
    :type indep_max: integer or float
    :rtype: :py:class:`peng.eng.Waveform`
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.ffti
    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)
     * RuntimeError (Argument \`indep_min\` is not valid)
     * RuntimeError (Argument \`npoints\` is not valid)
     * RuntimeError (Argument \`wave\` is not valid)
     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
       arguments)
     * RuntimeError (Non-uniform sampling)
    .. [[[end]]]
    """
    # compute the full transform first, then keep only its imaginary part
    spectrum = fft(wave, npoints, indep_min, indep_max)
    return imag(spectrum)
|
def function[ffti, parameter[wave, npoints, indep_min, indep_max]]:
constant[
Return the imaginary part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ffti
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
]
return[call[name[imag], parameter[call[name[fft], parameter[name[wave], name[npoints], name[indep_min], name[indep_max]]]]]]
|
keyword[def] identifier[ffti] ( identifier[wave] , identifier[npoints] = keyword[None] , identifier[indep_min] = keyword[None] , identifier[indep_max] = keyword[None] ):
literal[string]
keyword[return] identifier[imag] ( identifier[fft] ( identifier[wave] , identifier[npoints] , identifier[indep_min] , identifier[indep_max] ))
|
def ffti(wave, npoints=None, indep_min=None, indep_max=None):
"""
Return the imaginary part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ffti
:raises:
* RuntimeError (Argument \\`indep_max\\` is not valid)
* RuntimeError (Argument \\`indep_min\\` is not valid)
* RuntimeError (Argument \\`npoints\\` is not valid)
* RuntimeError (Argument \\`wave\\` is not valid)
* RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return imag(fft(wave, npoints, indep_min, indep_max))
|
def predict(self, X):
    """
    Predictions with the model for all the MCMC samples. Returns posterior means and standard deviations at X. Note that this is different in GPy where the variances are given.
    """
    # Promote a single input point to a 2-D row vector.
    if X.ndim == 1:
        X = X[None, :]
    # Remember the current hyperparameters so they can be restored afterwards.
    saved_params = self.model.param_array.copy()
    means, stds = [], []
    for sample in self.hmc_samples:
        # Load this HMC sample into the model's (possibly fixed) parameters.
        if self.model._fixes_ is None:
            self.model[:] = sample
        else:
            self.model[self.model._fixes_] = sample
        self.model._trigger_params_changed()
        mean, var = self.model.predict(X)
        means.append(mean)
        # Clip tiny/negative variances before taking the square root.
        stds.append(np.sqrt(np.clip(var, 1e-10, np.inf)))
    # Restore the original hyperparameters.
    self.model.param_array[:] = saved_params
    self.model._trigger_params_changed()
    return means, stds
|
def function[predict, parameter[self, X]]:
constant[
Predictions with the model for all the MCMC samples. Returns posterior means and standard deviations at X. Note that this is different in GPy where the variances are given.
]
if compare[name[X].ndim equal[==] constant[1]] begin[:]
variable[X] assign[=] call[name[X]][tuple[[<ast.Constant object at 0x7da18c4ccf10>, <ast.Slice object at 0x7da18c4cc340>]]]
variable[ps] assign[=] call[name[self].model.param_array.copy, parameter[]]
variable[means] assign[=] list[[]]
variable[stds] assign[=] list[[]]
for taget[name[s]] in starred[name[self].hmc_samples] begin[:]
if compare[name[self].model._fixes_ is constant[None]] begin[:]
call[name[self].model][<ast.Slice object at 0x7da18c4cf940>] assign[=] name[s]
call[name[self].model._trigger_params_changed, parameter[]]
<ast.Tuple object at 0x7da18c4cec50> assign[=] call[name[self].model.predict, parameter[name[X]]]
call[name[means].append, parameter[name[m]]]
call[name[stds].append, parameter[call[name[np].sqrt, parameter[call[name[np].clip, parameter[name[v], constant[1e-10], name[np].inf]]]]]]
call[name[self].model.param_array][<ast.Slice object at 0x7da18c4ce2c0>] assign[=] name[ps]
call[name[self].model._trigger_params_changed, parameter[]]
return[tuple[[<ast.Name object at 0x7da18c4cc3a0>, <ast.Name object at 0x7da18c4ce5c0>]]]
|
keyword[def] identifier[predict] ( identifier[self] , identifier[X] ):
literal[string]
keyword[if] identifier[X] . identifier[ndim] == literal[int] : identifier[X] = identifier[X] [ keyword[None] ,:]
identifier[ps] = identifier[self] . identifier[model] . identifier[param_array] . identifier[copy] ()
identifier[means] =[]
identifier[stds] =[]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[hmc_samples] :
keyword[if] identifier[self] . identifier[model] . identifier[_fixes_] keyword[is] keyword[None] :
identifier[self] . identifier[model] [:]= identifier[s]
keyword[else] :
identifier[self] . identifier[model] [ identifier[self] . identifier[model] . identifier[_fixes_] ]= identifier[s]
identifier[self] . identifier[model] . identifier[_trigger_params_changed] ()
identifier[m] , identifier[v] = identifier[self] . identifier[model] . identifier[predict] ( identifier[X] )
identifier[means] . identifier[append] ( identifier[m] )
identifier[stds] . identifier[append] ( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[clip] ( identifier[v] , literal[int] , identifier[np] . identifier[inf] )))
identifier[self] . identifier[model] . identifier[param_array] [:]= identifier[ps]
identifier[self] . identifier[model] . identifier[_trigger_params_changed] ()
keyword[return] identifier[means] , identifier[stds]
|
def predict(self, X):
    """
    Predictions with the model for all the MCMC samples. Returns posterior means and standard deviations at X. Note that this is different in GPy where the variances are given.
    """
    if X.ndim == 1:
        X = X[None, :] # depends on [control=['if'], data=[]]
    # Save the current hyperparameter vector so it can be restored afterwards.
    ps = self.model.param_array.copy()
    means = []
    stds = []
    for s in self.hmc_samples:
        # Load this HMC sample into the model's (possibly fixed) parameters.
        if self.model._fixes_ is None:
            self.model[:] = s # depends on [control=['if'], data=[]]
        else:
            self.model[self.model._fixes_] = s
        self.model._trigger_params_changed()
        (m, v) = self.model.predict(X)
        means.append(m)
        # Clip tiny/negative variances before taking the square root.
        stds.append(np.sqrt(np.clip(v, 1e-10, np.inf))) # depends on [control=['for'], data=['s']]
    # Restore the original hyperparameters.
    self.model.param_array[:] = ps
    self.model._trigger_params_changed()
    return (means, stds)
|
def parse(self, file, outfile=None):
    """Parse a line-oriented entity file into a list of entity dict objects

    Note the returned list is of dict objects. TODO: These will
    later be specified using marshmallow and it should be possible
    to generate objects

    Arguments
    ---------
    file : file or string
        The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file
    outfile : file
        Optional output file in which processed lines are written. This a file or `file-like-object`

    Return
    ------
    list
        Entities generated from the file
    """
    file = self._ensure_file(file)
    ents = []
    skipped = []
    n_lines = 0
    try:
        for line in file:
            n_lines += 1
            # Header/comment lines start with '!': copy them through verbatim.
            if line.startswith("!"):
                if outfile is not None:
                    outfile.write(line)
                continue
            line = line.strip("\n")
            if line == "":
                logging.warning("EMPTY LINE")
                continue
            parsed_line, new_ents = self.parse_line(line)
            if self._skipping_line(new_ents):  # Skip if there were no ents
                logging.warning("SKIPPING: {}".format(line))
                skipped.append(line)
            else:
                ents += new_ents
                if outfile is not None:
                    outfile.write(parsed_line + "\n")
    finally:
        # Always release the input handle, even when parsing a line raises
        # (the original code leaked the handle on any mid-parse exception).
        file.close()
    self.report.skipped += len(skipped)
    self.report.n_lines += n_lines
    #self.report.n_associations += len(ents)
    logging.info("Parsed {} ents from {} lines. Skipped: {}".
                 format(len(ents),
                        n_lines,
                        len(skipped)))
    return ents
|
def function[parse, parameter[self, file, outfile]]:
constant[Parse a line-oriented entity file into a list of entity dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Entities generated from the file
]
variable[file] assign[=] call[name[self]._ensure_file, parameter[name[file]]]
variable[ents] assign[=] list[[]]
variable[skipped] assign[=] list[[]]
variable[n_lines] assign[=] constant[0]
for taget[name[line]] in starred[name[file]] begin[:]
<ast.AugAssign object at 0x7da1b0779750>
if call[name[line].startswith, parameter[constant[!]]] begin[:]
if compare[name[outfile] is_not constant[None]] begin[:]
call[name[outfile].write, parameter[name[line]]]
continue
variable[line] assign[=] call[name[line].strip, parameter[constant[
]]]
if compare[name[line] equal[==] constant[]] begin[:]
call[name[logging].warning, parameter[constant[EMPTY LINE]]]
continue
<ast.Tuple object at 0x7da20e9562c0> assign[=] call[name[self].parse_line, parameter[name[line]]]
if call[name[self]._skipping_line, parameter[name[new_ents]]] begin[:]
call[name[logging].warning, parameter[call[constant[SKIPPING: {}].format, parameter[name[line]]]]]
call[name[skipped].append, parameter[name[line]]]
<ast.AugAssign object at 0x7da1b08836d0>
<ast.AugAssign object at 0x7da1b0882bf0>
call[name[logging].info, parameter[call[constant[Parsed {} ents from {} lines. Skipped: {}].format, parameter[call[name[len], parameter[name[ents]]], name[n_lines], call[name[len], parameter[name[skipped]]]]]]]
call[name[file].close, parameter[]]
return[name[ents]]
|
keyword[def] identifier[parse] ( identifier[self] , identifier[file] , identifier[outfile] = keyword[None] ):
literal[string]
identifier[file] = identifier[self] . identifier[_ensure_file] ( identifier[file] )
identifier[ents] =[]
identifier[skipped] =[]
identifier[n_lines] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[file] :
identifier[n_lines] += literal[int]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[outfile] keyword[is] keyword[not] keyword[None] :
identifier[outfile] . identifier[write] ( identifier[line] )
keyword[continue]
identifier[line] = identifier[line] . identifier[strip] ( literal[string] )
keyword[if] identifier[line] == literal[string] :
identifier[logging] . identifier[warning] ( literal[string] )
keyword[continue]
identifier[parsed_line] , identifier[new_ents] = identifier[self] . identifier[parse_line] ( identifier[line] )
keyword[if] identifier[self] . identifier[_skipping_line] ( identifier[new_ents] ):
identifier[logging] . identifier[warning] ( literal[string] . identifier[format] ( identifier[line] ))
identifier[skipped] . identifier[append] ( identifier[line] )
keyword[else] :
identifier[ents] += identifier[new_ents]
keyword[if] identifier[outfile] keyword[is] keyword[not] keyword[None] :
identifier[outfile] . identifier[write] ( identifier[parsed_line] + literal[string] )
identifier[self] . identifier[report] . identifier[skipped] += identifier[len] ( identifier[skipped] )
identifier[self] . identifier[report] . identifier[n_lines] += identifier[n_lines]
identifier[logging] . identifier[info] ( literal[string] .
identifier[format] ( identifier[len] ( identifier[ents] ),
identifier[n_lines] ,
identifier[len] ( identifier[skipped] )))
identifier[file] . identifier[close] ()
keyword[return] identifier[ents]
|
def parse(self, file, outfile=None):
    """Parse a line-oriented entity file into a list of entity dict objects
    Note the returned list is of dict objects. TODO: These will
    later be specified using marshmallow and it should be possible
    to generate objects
    Arguments
    ---------
    file : file or string
        The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file
    outfile : file
        Optional output file in which processed lines are written. This a file or `file-like-object`
    Return
    ------
    list
        Entities generated from the file
    """
    # NOTE(review): the input handle is only closed on the success path; if
    # parse_line raises, the file is leaked — consider try/finally.
    file = self._ensure_file(file)
    ents = []
    skipped = []
    n_lines = 0
    for line in file:
        n_lines += 1
        # Header/comment lines start with '!': copy them through verbatim.
        if line.startswith('!'):
            if outfile is not None:
                outfile.write(line) # depends on [control=['if'], data=['outfile']]
            continue # depends on [control=['if'], data=[]]
        line = line.strip('\n')
        if line == '':
            logging.warning('EMPTY LINE')
            continue # depends on [control=['if'], data=[]]
        (parsed_line, new_ents) = self.parse_line(line)
        if self._skipping_line(new_ents): # Skip if there were no ents
            logging.warning('SKIPPING: {}'.format(line))
            skipped.append(line) # depends on [control=['if'], data=[]]
        else:
            ents += new_ents
            if outfile is not None:
                outfile.write(parsed_line + '\n') # depends on [control=['if'], data=['outfile']] # depends on [control=['for'], data=['line']]
    self.report.skipped += len(skipped)
    self.report.n_lines += n_lines
    #self.report.n_associations += len(ents)
    logging.info('Parsed {} ents from {} lines. Skipped: {}'.format(len(ents), n_lines, len(skipped)))
    file.close()
    return ents
|
def name_scope(name=None):
    """
    Decorator factory that runs the wrapped function inside a TensorFlow
    name scope. The scope is named after the `name` argument; when `name`
    is None the decorated function's own name is used instead.
    ```
    >>> @name_scope()
    >>> def foo(...):
    >>>    # now runs inside scope "foo"

    >>> @name_scope('bar')
    >>> def baz(...):
    >>>    # now runs inside scope "bar", not "baz"
    ```
    """
    def decorator(method):
        @functools.wraps(method)
        def wrapped(*args, **kwargs):
            # Fall back to the function's own name when none was supplied.
            if name is None:
                scope = method.__name__
            else:
                scope = name
            with tf.name_scope(scope):
                return method(*args, **kwargs)
        return wrapped
    return decorator
|
def function[name_scope, parameter[name]]:
constant[
This decorator wraps a function so that it runs inside a TensorFlow
name scope. The name is given by the `name` option; if this is None,
then the name of the function will be used.
```
>>> @name_scope()
>>> def foo(...):
>>> # now runs inside scope "foo"
>>> @name_scope('bar')
>>> def baz(...):
>>> # now runs inside scope "bar", not "baz"
```
]
def function[name_scope_wrapper_decorator, parameter[method]]:
def function[name_scope_wrapper, parameter[]]:
variable[scope_name] assign[=] <ast.IfExp object at 0x7da1b2125300>
with call[name[tf].name_scope, parameter[name[scope_name]]] begin[:]
return[call[name[method], parameter[<ast.Starred object at 0x7da1b2126320>]]]
return[name[name_scope_wrapper]]
return[name[name_scope_wrapper_decorator]]
|
keyword[def] identifier[name_scope] ( identifier[name] = keyword[None] ):
literal[string]
keyword[def] identifier[name_scope_wrapper_decorator] ( identifier[method] ):
@ identifier[functools] . identifier[wraps] ( identifier[method] )
keyword[def] identifier[name_scope_wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[scope_name] = identifier[name] keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] keyword[else] identifier[method] . identifier[__name__]
keyword[with] identifier[tf] . identifier[name_scope] ( identifier[scope_name] ):
keyword[return] identifier[method] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[name_scope_wrapper]
keyword[return] identifier[name_scope_wrapper_decorator]
|
def name_scope(name=None):
    """
    This decorator wraps a function so that it runs inside a TensorFlow
    name scope. The name is given by the `name` option; if this is None,
    then the name of the function will be used.
    ```
    >>> @name_scope()
    >>> def foo(...):
    >>>    # now runs inside scope "foo"
    >>> @name_scope('bar')
    >>> def baz(...):
    >>>    # now runs inside scope "bar", not "baz"
    ```
    """
    def name_scope_wrapper_decorator(method):
        @functools.wraps(method)
        def name_scope_wrapper(*args, **kwargs):
            # Fall back to the decorated function's own name when no explicit
            # scope name was supplied to the decorator factory.
            scope_name = name if name is not None else method.__name__
            with tf.name_scope(scope_name):
                return method(*args, **kwargs) # depends on [control=['with'], data=[]]
        return name_scope_wrapper
    return name_scope_wrapper_decorator
|
def get_historical_standings(date):
    """Return the historical standings file for specified date.

    Parameters
    ----------
    date : datetime.date or datetime.datetime
        Date for which to fetch the standings.

    Returns
    -------
    The open HTTP response for the standings resource.

    Raises
    ------
    ValueError
        If no standings could be found for that date.
    """
    url = STANDINGS_HISTORICAL_URL.format(date.year,
                                          date.strftime('%Y/%m/%d'))
    try:
        return urlopen(url)
    except HTTPError:
        # Bug fix: the original built this exception without raising it,
        # so failures silently returned None.
        raise ValueError('Could not find standings for that date.')
|
def function[get_historical_standings, parameter[date]]:
constant[Return the historical standings file for specified date.]
<ast.Try object at 0x7da1b1a66290>
|
keyword[def] identifier[get_historical_standings] ( identifier[date] ):
literal[string]
keyword[try] :
identifier[url] = identifier[STANDINGS_HISTORICAL_URL] . identifier[format] ( identifier[date] . identifier[year] ,
identifier[date] . identifier[strftime] ( literal[string] ))
keyword[return] identifier[urlopen] ( identifier[url] )
keyword[except] identifier[HTTPError] :
identifier[ValueError] ( literal[string] )
|
def get_historical_standings(date):
    """Return the historical standings file for specified date."""
    try:
        url = STANDINGS_HISTORICAL_URL.format(date.year, date.strftime('%Y/%m/%d'))
        return urlopen(url) # depends on [control=['try'], data=[]]
    except HTTPError:
        # NOTE(review): this ValueError is instantiated but never raised, so
        # the function silently returns None on HTTP errors — likely a bug.
        ValueError('Could not find standings for that date.') # depends on [control=['except'], data=[]]
|
def _unescape_token(token):
    r"""Replaces escaped characters in the token with their unescaped versions.

    Applies inverse transformations as _escape_token():
      1. Replace "\u" with "_", and "\\" with "\".
      2. Replace "\###;" with the unicode character the ### refers to.

    Args:
      token: escaped string

    Returns:
      unescaped string
    """

    def replacement(m):
        r"""Return the unescaped text for a single regex match.

        The regex pattern r"\\u|\\\\|\\([0-9]+);" matches one of '\u', '\\'
        or '\###;'. For the first two forms the parenthesized group is absent
        and the fixed replacements '_' and '\' are returned. For '\###;' the
        captured digits are converted to the corresponding unicode character;
        values that are not valid code points map to the undefined marker.

        Args:
          m: match object

        Returns:
          String to replace matched object with.
        """
        digits = m.group(1)
        if digits is None:
            # No captured group: the match is either '\u' or '\\'.
            return u"_" if m.group(0) == u"\\u" else u"\\"
        try:
            return six.unichr(int(digits))
        except (ValueError, OverflowError):
            return _UNDEFINED_UNICODE

    # Substitute every escaped fragment of the token in a single pass.
    return _UNESCAPE_REGEX.sub(replacement, token)
|
def function[_unescape_token, parameter[token]]:
constant[Replaces escaped characters in the token with their unescaped versions.
Applies inverse transformations as _escape_token():
1. Replace "\u" with "_", and "\\" with "\".
2. Replace "\###;" with the unicode character the ### refers to.
Args:
token: escaped string
Returns:
unescaped string
]
def function[match, parameter[m]]:
constant[Returns replacement string for matched object.
Matched objects contain one of the strings that matches the regex pattern:
r"\\u|\\\\|\\([0-9]+);"
The strings can be '\u', '\\', or '\###;' (### is any digit number).
m.group(0) refers to the entire matched string ('\u', '\\', or '\###;').
m.group(1) refers to the first parenthesized subgroup ('###').
m.group(0) exists for all match objects, while m.group(1) exists only for
the string '\###;'.
This function looks to see if m.group(1) exists. If it doesn't, then the
matched string must be '\u' or '\\' . In this case, the corresponding
replacement ('_' and '\') are returned. Note that in python, a single
backslash is written as '\\', and double backslash as '\\\\'.
If m.goup(1) exists, then use the integer in m.group(1) to return a
unicode character.
Args:
m: match object
Returns:
String to replace matched object with.
]
if compare[call[name[m].group, parameter[constant[1]]] is constant[None]] begin[:]
return[<ast.IfExp object at 0x7da2054a7fa0>]
<ast.Try object at 0x7da2054a5e40>
return[call[name[_UNESCAPE_REGEX].sub, parameter[name[match], name[token]]]]
|
keyword[def] identifier[_unescape_token] ( identifier[token] ):
literal[string]
keyword[def] identifier[match] ( identifier[m] ):
literal[string]
keyword[if] identifier[m] . identifier[group] ( literal[int] ) keyword[is] keyword[None] :
keyword[return] literal[string] keyword[if] identifier[m] . identifier[group] ( literal[int] )== literal[string] keyword[else] literal[string]
keyword[try] :
keyword[return] identifier[six] . identifier[unichr] ( identifier[int] ( identifier[m] . identifier[group] ( literal[int] )))
keyword[except] ( identifier[ValueError] , identifier[OverflowError] ) keyword[as] identifier[_] :
keyword[return] identifier[_UNDEFINED_UNICODE]
keyword[return] identifier[_UNESCAPE_REGEX] . identifier[sub] ( identifier[match] , identifier[token] )
|
def _unescape_token(token):
    """Replaces escaped characters in the token with their unescaped versions.
    Applies inverse transformations as _escape_token():
    1. Replace "\\u" with "_", and "\\\\" with "\\".
    2. Replace "\\###;" with the unicode character the ### refers to.
    Args:
    token: escaped string
    Returns:
    unescaped string
    """
    def match(m):
        """Returns replacement string for matched object.
        Matched objects contain one of the strings that matches the regex pattern:
        r"\\\\u|\\\\\\\\|\\\\([0-9]+);"
        The strings can be '\\u', '\\\\', or '\\###;' (### is any digit number).
        m.group(0) refers to the entire matched string ('\\u', '\\\\', or '\\###;').
        m.group(1) refers to the first parenthesized subgroup ('###').
        m.group(0) exists for all match objects, while m.group(1) exists only for
        the string '\\###;'.
        This function looks to see if m.group(1) exists. If it doesn't, then the
        matched string must be '\\u' or '\\\\' . In this case, the corresponding
        replacement ('_' and '\\') are returned. Note that in python, a single
        backslash is written as '\\\\', and double backslash as '\\\\\\\\'.
        If m.goup(1) exists, then use the integer in m.group(1) to return a
        unicode character.
        Args:
        m: match object
        Returns:
        String to replace matched object with.
        """
        # Check if the matched strings are '\u' or '\\'.
        if m.group(1) is None:
            return u'_' if m.group(0) == u'\\u' else u'\\' # depends on [control=['if'], data=[]]
        # If m.group(1) exists, try and return unicode character.
        try:
            return six.unichr(int(m.group(1))) # depends on [control=['try'], data=[]]
        except (ValueError, OverflowError) as _:
            # Digits outside the valid unicode range fall back to a marker.
            return _UNDEFINED_UNICODE # depends on [control=['except'], data=[]]
    # Use match function to replace escaped substrings in the token.
    return _UNESCAPE_REGEX.sub(match, token)
|
def blob_size(self, digest):
    """
    Return the size of a blob in the registry given the hash of its content.

    :param digest: Hash of the blob's content (prefixed by ``sha256:``).
    :type digest: str

    :rtype: int
    :returns: Size of the blob in bytes.
    """
    # Issue a HEAD request so only the headers are transferred.
    r = self._request('head', 'blobs/' + digest)
    # ``int`` auto-promotes to arbitrary precision on Python 2 and is the
    # only integer type on Python 3; the previous ``long`` builtin does not
    # exist on Python 3. (Also fixed the docstring, which described a
    # boolean "exists" result instead of the returned size.)
    return int(r.headers['content-length'])
|
def function[blob_size, parameter[self, digest]]:
constant[
Return the size of a blob in the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:rtype: long
:returns: Whether the blob exists.
]
variable[r] assign[=] call[name[self]._request, parameter[constant[head], binary_operation[constant[blobs/] + name[digest]]]]
return[call[name[long], parameter[call[name[r].headers][constant[content-length]]]]]
|
keyword[def] identifier[blob_size] ( identifier[self] , identifier[digest] ):
literal[string]
identifier[r] = identifier[self] . identifier[_request] ( literal[string] , literal[string] + identifier[digest] )
keyword[return] identifier[long] ( identifier[r] . identifier[headers] [ literal[string] ])
|
def blob_size(self, digest):
    """
    Return the size of a blob in the registry given the hash of its content.
    :param digest: Hash of the blob's content (prefixed by ``sha256:``).
    :type digest: str
    :rtype: long
    :returns: Size of the blob in bytes, parsed from the Content-Length header.
    """
    # HEAD request: only headers are transferred, not the blob itself.
    r = self._request('head', 'blobs/' + digest)
    # NOTE(review): ``long`` exists only on Python 2; this line fails with a
    # NameError on Python 3.
    return long(r.headers['content-length'])
|
def parse_plotProfile(self):
    """Find and parse plotProfile output.

    Populates ``self.deeptools_plotProfile`` with one entry per sample and,
    when any data was found, adds a line-graph report section showing the
    read distribution profile around annotated genes.

    Returns the number of samples found.
    """
    self.deeptools_plotProfile = dict()
    for f in self.find_log_files('deeptools/plotProfile', filehandles=False):
        parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotProfile:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotProfile[k] = v

        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotProfile')

    if len(self.deeptools_plotProfile) > 0:
        # Hoist the repeated label lookups (labels come from the last parsed
        # file, as in the original code).
        tss = converted_bin_labels[bin_labels.index('TSS')]
        tes = converted_bin_labels[bin_labels.index('TES')]
        named_labels = list(filter(None, bin_labels))
        config = {
            'id': 'read_distribution_profile',
            'title': 'deeptools: Read Distribution Profile after Annotation',
            'ylab': 'Occurrence',
            'xlab': None,
            'smooth_points': 100,
            'xPlotBands': [
                {'from': tes, 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
                {'from': tss, 'to': tes, 'color': '#ffffe2'},
                {'from': converted_bin_labels[0], 'to': tss, 'color': '#e5fce0'},
            ],
            'xPlotLines': [
                {'width': 1, 'value': tes, 'dashStyle': 'Dash', 'color': '#000000'},
                {'width': 1, 'value': tss, 'dashStyle': 'Dash', 'color': '#000000'},
            ],
        }
        self.add_section (
            name = 'Read Distribution Profile after Annotation',
            anchor = 'read_distribution_profile_plot',
            description="Accumulated view of the distribution of sequence reads related to the closest annotated gene. All annotated genes have been normalized to the same size. Green: {} upstream of gene to {}; Yellow: {} to {}; Pink: {} to {} downstream of gene".format(named_labels[0], named_labels[1], named_labels[1], named_labels[2], named_labels[2], named_labels[3]),
            plot=linegraph.plot(self.deeptools_plotProfile, config)
        )

    # Bug fix: previously returned len(self.deeptools_bamPEFragmentSizeDistribution),
    # i.e. a different parser's sample count (copy-paste error).
    return len(self.deeptools_plotProfile)
|
def function[parse_plotProfile, parameter[self]]:
constant[Find plotProfile output]
name[self].deeptools_plotProfile assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[deeptools/plotProfile]]]] begin[:]
<ast.Tuple object at 0x7da204565540> assign[=] call[name[self].parsePlotProfileData, parameter[name[f]]]
for taget[tuple[[<ast.Name object at 0x7da204564610>, <ast.Name object at 0x7da204567d60>]]] in starred[call[name[parsed_data].items, parameter[]]] begin[:]
if compare[name[k] in name[self].deeptools_plotProfile] begin[:]
call[name[log].warning, parameter[call[constant[Replacing duplicate sample {}.].format, parameter[name[k]]]]]
call[name[self].deeptools_plotProfile][name[k]] assign[=] name[v]
if compare[call[name[len], parameter[name[parsed_data]]] greater[>] constant[0]] begin[:]
call[name[self].add_data_source, parameter[name[f]]]
if compare[call[name[len], parameter[name[self].deeptools_plotProfile]] greater[>] constant[0]] begin[:]
variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5090>, <ast.Constant object at 0x7da18ede5cf0>, <ast.Constant object at 0x7da18ede70a0>, <ast.Constant object at 0x7da18ede5990>, <ast.Constant object at 0x7da18ede5060>, <ast.Constant object at 0x7da18ede7f10>, <ast.Constant object at 0x7da18ede79a0>], [<ast.Constant object at 0x7da18ede6230>, <ast.Constant object at 0x7da1b2345870>, <ast.Constant object at 0x7da1b2344340>, <ast.Constant object at 0x7da1b2344520>, <ast.Constant object at 0x7da18f8137c0>, <ast.List object at 0x7da18f810850>, <ast.List object at 0x7da18f8130d0>]]
call[name[self].add_section, parameter[]]
return[call[name[len], parameter[name[self].deeptools_bamPEFragmentSizeDistribution]]]
|
keyword[def] identifier[parse_plotProfile] ( identifier[self] ):
literal[string]
identifier[self] . identifier[deeptools_plotProfile] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] , identifier[filehandles] = keyword[False] ):
identifier[parsed_data] , identifier[bin_labels] , identifier[converted_bin_labels] = identifier[self] . identifier[parsePlotProfileData] ( identifier[f] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[parsed_data] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[self] . identifier[deeptools_plotProfile] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[k] ))
identifier[self] . identifier[deeptools_plotProfile] [ identifier[k] ]= identifier[v]
keyword[if] identifier[len] ( identifier[parsed_data] )> literal[int] :
identifier[self] . identifier[add_data_source] ( identifier[f] , identifier[section] = literal[string] )
keyword[if] identifier[len] ( identifier[self] . identifier[deeptools_plotProfile] )> literal[int] :
identifier[config] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : literal[int] ,
literal[string] :[
{ literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : identifier[converted_bin_labels] [- literal[int] ], literal[string] : literal[string] },
{ literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : literal[string] },
{ literal[string] : identifier[converted_bin_labels] [ literal[int] ], literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : literal[string] },
],
literal[string] :[
{ literal[string] : literal[int] , literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[int] , literal[string] : identifier[converted_bin_labels] [ identifier[bin_labels] . identifier[index] ( literal[string] )], literal[string] : literal[string] , literal[string] : literal[string] },
],
}
identifier[self] . identifier[add_section] (
identifier[name] = literal[string] ,
identifier[anchor] = literal[string] ,
identifier[description] = literal[string] . identifier[format] ( identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ], identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ], identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ], identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ], identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ], identifier[list] ( identifier[filter] ( keyword[None] , identifier[bin_labels] ))[ literal[int] ]),
identifier[plot] = identifier[linegraph] . identifier[plot] ( identifier[self] . identifier[deeptools_plotProfile] , identifier[config] )
)
keyword[return] identifier[len] ( identifier[self] . identifier[deeptools_bamPEFragmentSizeDistribution] )
|
def parse_plotProfile(self):
    """Find plotProfile output"""
    self.deeptools_plotProfile = dict()
    for f in self.find_log_files('deeptools/plotProfile', filehandles=False):
        (parsed_data, bin_labels, converted_bin_labels) = self.parsePlotProfileData(f)
        for (k, v) in parsed_data.items():
            if k in self.deeptools_plotProfile:
                log.warning('Replacing duplicate sample {}.'.format(k)) # depends on [control=['if'], data=['k']]
            self.deeptools_plotProfile[k] = v # depends on [control=['for'], data=[]]
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotProfile') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
    # Only build the report section when at least one sample was parsed; the
    # bin labels used below come from the last parsed file.
    if len(self.deeptools_plotProfile) > 0:
        config = {'id': 'read_distribution_profile', 'title': 'deeptools: Read Distribution Profile after Annotation', 'ylab': 'Occurrence', 'xlab': None, 'smooth_points': 100, 'xPlotBands': [{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'}, {'from': converted_bin_labels[bin_labels.index('TSS')], 'to': converted_bin_labels[bin_labels.index('TES')], 'color': '#ffffe2'}, {'from': converted_bin_labels[0], 'to': converted_bin_labels[bin_labels.index('TSS')], 'color': '#e5fce0'}], 'xPlotLines': [{'width': 1, 'value': converted_bin_labels[bin_labels.index('TES')], 'dashStyle': 'Dash', 'color': '#000000'}, {'width': 1, 'value': converted_bin_labels[bin_labels.index('TSS')], 'dashStyle': 'Dash', 'color': '#000000'}]}
        self.add_section(name='Read Distribution Profile after Annotation', anchor='read_distribution_profile_plot', description='Accumulated view of the distribution of sequence reads related to the closest annotated gene. All annotated genes have been normalized to the same size. Green: {} upstream of gene to {}; Yellow: {} to {}; Pink: {} to {} downstream of gene'.format(list(filter(None, bin_labels))[0], list(filter(None, bin_labels))[1], list(filter(None, bin_labels))[1], list(filter(None, bin_labels))[2], list(filter(None, bin_labels))[2], list(filter(None, bin_labels))[3]), plot=linegraph.plot(self.deeptools_plotProfile, config)) # depends on [control=['if'], data=[]]
    # NOTE(review): this returns the size of deeptools_bamPEFragmentSizeDistribution
    # (a different parser's results), not deeptools_plotProfile — looks like a
    # copy-paste slip; confirm the intended return value.
    return len(self.deeptools_bamPEFragmentSizeDistribution)
|
def update_sandbox_product(
    self,
    product_id,
    surge_multiplier=None,
    drivers_available=None,
):
    """Update sandbox product availability.

    Params
        product_id (str)
            Unique identifier representing a specific product for a
            given location.
        surge_multiplier (float)
            Optional surge multiplier to manipulate pricing of product.
        drivers_available (bool)
            Optional boolean to manipulate availability of product.

    Returns
        (Response)
            The Response with successful status_code
            if product status was updated.
    """
    # Build the product-specific sandbox endpoint and the PUT payload.
    endpoint = 'v1.2/sandbox/products/{}'.format(product_id)
    payload = {
        'surge_multiplier': surge_multiplier,
        'drivers_available': drivers_available,
    }
    return self._api_call('PUT', endpoint, args=payload)
|
def function[update_sandbox_product, parameter[self, product_id, surge_multiplier, drivers_available]]:
constant[Update sandbox product availability.
Params
product_id (str)
Unique identifier representing a specific product for a
given location.
surge_multiplier (float)
Optional surge multiplier to manipulate pricing of product.
drivers_available (bool)
Optional boolean to manipulate availability of product.
Returns
(Response)
The Response with successful status_code
if product status was updated.
]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b12b55a0>, <ast.Constant object at 0x7da1b12b4040>], [<ast.Name object at 0x7da1b12b5540>, <ast.Name object at 0x7da1b12b4460>]]
variable[endpoint] assign[=] call[constant[v1.2/sandbox/products/{}].format, parameter[name[product_id]]]
return[call[name[self]._api_call, parameter[constant[PUT], name[endpoint]]]]
|
keyword[def] identifier[update_sandbox_product] (
identifier[self] ,
identifier[product_id] ,
identifier[surge_multiplier] = keyword[None] ,
identifier[drivers_available] = keyword[None] ,
):
literal[string]
identifier[args] ={
literal[string] : identifier[surge_multiplier] ,
literal[string] : identifier[drivers_available] ,
}
identifier[endpoint] = literal[string] . identifier[format] ( identifier[product_id] )
keyword[return] identifier[self] . identifier[_api_call] ( literal[string] , identifier[endpoint] , identifier[args] = identifier[args] )
|
def update_sandbox_product(self, product_id, surge_multiplier=None, drivers_available=None):
"""Update sandbox product availability.
Params
product_id (str)
Unique identifier representing a specific product for a
given location.
surge_multiplier (float)
Optional surge multiplier to manipulate pricing of product.
drivers_available (bool)
Optional boolean to manipulate availability of product.
Returns
(Response)
The Response with successful status_code
if product status was updated.
"""
args = {'surge_multiplier': surge_multiplier, 'drivers_available': drivers_available}
endpoint = 'v1.2/sandbox/products/{}'.format(product_id)
return self._api_call('PUT', endpoint, args=args)
|
async def StorageAttachmentLife(self, ids):
    '''
    ids : typing.Sequence[~StorageAttachmentId]
    Returns -> typing.Sequence[~LifeResult]
    '''
    # Assemble the RPC envelope for the Uniter facade (version 5) and
    # forward the caller-supplied ids unchanged.
    request_params = {'ids': ids}
    payload = {
        'type': 'Uniter',
        'request': 'StorageAttachmentLife',
        'version': 5,
        'params': request_params,
    }
    return await self.rpc(payload)
|
<ast.AsyncFunctionDef object at 0x7da18dc05b70>
|
keyword[async] keyword[def] identifier[StorageAttachmentLife] ( identifier[self] , identifier[ids] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[ids]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply]
|
async def StorageAttachmentLife(self, ids):
"""
ids : typing.Sequence[~StorageAttachmentId]
Returns -> typing.Sequence[~LifeResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter', request='StorageAttachmentLife', version=5, params=_params)
_params['ids'] = ids
reply = await self.rpc(msg)
return reply
|
def sync(context, provider, **kwargs):  # pylint: disable=too-many-locals
    """Sync static website to S3 bucket.

    If the hook data reports the latest version is already deployed, the
    upload is skipped.  Otherwise this:

    1. Syncs the built site directory to the target S3 bucket via the
       AWS CLI (``s3 sync --delete``).
    2. Creates a CloudFront invalidation for ``/*`` on the distribution.
    3. Unless hash tracking is disabled, records the deployed source hash
       in an SSM parameter.
    4. Prunes old site archives from the artifact bucket, deleting in
       batches of 1000 (the ``delete_objects`` per-request limit).

    Args:
        context: Hook context; ``context.hook_data['staticsite']`` must
            already be populated by earlier hooks (app directory, hash
            info, artifact bucket/prefix, deploy state).
        provider: Provider object supplying the AWS region for the session.
        **kwargs: Output-lookup strings resolved via ``OutputLookup.handle``:
            ``bucket_output_lookup``, ``distributionid_output_lookup`` and
            ``distributiondomain_output_lookup``.

    Returns:
        bool: Always ``True`` on completion.
    """
    session = get_session(provider.region)
    # Resolve the destination bucket name from the stack output lookup.
    bucket_name = OutputLookup.handle(kwargs.get('bucket_output_lookup'),
                                      provider=provider,
                                      context=context)
    if context.hook_data['staticsite']['deploy_is_current']:
        LOGGER.info('staticsite: skipping upload; latest version already '
                    'deployed')
    else:
        distribution_id = OutputLookup.handle(
            kwargs.get('distributionid_output_lookup'),
            provider=provider,
            context=context
        )
        distribution_domain = OutputLookup.handle(
            kwargs.get('distributiondomain_output_lookup'),
            provider=provider,
            context=context
        )
        # Using the awscli for s3 syncing is incredibly suboptimal, but on
        # balance it's probably the most stable/efficient option for syncing
        # the files until https://github.com/boto/boto3/issues/358 is resolved
        aws_cli(['s3',
                 'sync',
                 context.hook_data['staticsite']['app_directory'],
                 "s3://%s/" % bucket_name,
                 '--delete'])
        cf_client = session.client('cloudfront')
        # Invalidate everything; CallerReference must be unique per request,
        # so the current timestamp is used.
        cf_client.create_invalidation(
            DistributionId=distribution_id,
            InvalidationBatch={'Paths': {'Quantity': 1, 'Items': ['/*']},
                               'CallerReference': str(time.time())}
        )
        LOGGER.info("staticsite: sync & CF invalidation of %s (domain %s) "
                    "complete",
                    distribution_id,
                    distribution_domain)
        if not context.hook_data['staticsite'].get('hash_tracking_disabled'):
            # Persist the deployed source hash so a later run can detect
            # "deploy_is_current" and skip the upload.
            LOGGER.info("staticsite: updating environment SSM parameter %s "
                        "with hash %s",
                        context.hook_data['staticsite']['hash_tracking_parameter'],  # noqa
                        context.hook_data['staticsite']['hash'])
            ssm_client = session.client('ssm')
            ssm_client.put_parameter(
                Name=context.hook_data['staticsite']['hash_tracking_parameter'],  # noqa
                Description='Hash of currently deployed static website source',
                Value=context.hook_data['staticsite']['hash'],
                Type='String',
                Overwrite=True
            )
        LOGGER.info("staticsite: cleaning up old site archives...")
        archives = []
        s3_client = session.client('s3')
        # Paginate: list_objects_v2 returns at most 1000 keys per page.
        list_objects_v2_paginator = s3_client.get_paginator('list_objects_v2')
        response_iterator = list_objects_v2_paginator.paginate(
            Bucket=context.hook_data['staticsite']['artifact_bucket_name'],
            Prefix=context.hook_data['staticsite']['artifact_key_prefix']
        )
        for page in response_iterator:
            archives.extend(page.get('Contents', []))
        archives_to_prune = get_archives_to_prune(
            archives,
            context.hook_data['staticsite']
        )
        # Iterate in chunks of 1000 to match delete_objects limit
        for objects in [archives_to_prune[i:i + 1000]
                        for i in range(0, len(archives_to_prune), 1000)]:
            s3_client.delete_objects(
                Bucket=context.hook_data['staticsite']['artifact_bucket_name'],
                Delete={'Objects': [{'Key': i} for i in objects]}
            )
    return True
|
def function[sync, parameter[context, provider]]:
constant[Sync static website to S3 bucket.]
variable[session] assign[=] call[name[get_session], parameter[name[provider].region]]
variable[bucket_name] assign[=] call[name[OutputLookup].handle, parameter[call[name[kwargs].get, parameter[constant[bucket_output_lookup]]]]]
if call[call[name[context].hook_data][constant[staticsite]]][constant[deploy_is_current]] begin[:]
call[name[LOGGER].info, parameter[constant[staticsite: skipping upload; latest version already deployed]]]
call[name[LOGGER].info, parameter[constant[staticsite: cleaning up old site archives...]]]
variable[archives] assign[=] list[[]]
variable[s3_client] assign[=] call[name[session].client, parameter[constant[s3]]]
variable[list_objects_v2_paginator] assign[=] call[name[s3_client].get_paginator, parameter[constant[list_objects_v2]]]
variable[response_iterator] assign[=] call[name[list_objects_v2_paginator].paginate, parameter[]]
for taget[name[page]] in starred[name[response_iterator]] begin[:]
call[name[archives].extend, parameter[call[name[page].get, parameter[constant[Contents], list[[]]]]]]
variable[archives_to_prune] assign[=] call[name[get_archives_to_prune], parameter[name[archives], call[name[context].hook_data][constant[staticsite]]]]
for taget[name[objects]] in starred[<ast.ListComp object at 0x7da1b0786710>] begin[:]
call[name[s3_client].delete_objects, parameter[]]
return[constant[True]]
|
keyword[def] identifier[sync] ( identifier[context] , identifier[provider] ,** identifier[kwargs] ):
literal[string]
identifier[session] = identifier[get_session] ( identifier[provider] . identifier[region] )
identifier[bucket_name] = identifier[OutputLookup] . identifier[handle] ( identifier[kwargs] . identifier[get] ( literal[string] ),
identifier[provider] = identifier[provider] ,
identifier[context] = identifier[context] )
keyword[if] identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ]:
identifier[LOGGER] . identifier[info] ( literal[string]
literal[string] )
keyword[else] :
identifier[distribution_id] = identifier[OutputLookup] . identifier[handle] (
identifier[kwargs] . identifier[get] ( literal[string] ),
identifier[provider] = identifier[provider] ,
identifier[context] = identifier[context]
)
identifier[distribution_domain] = identifier[OutputLookup] . identifier[handle] (
identifier[kwargs] . identifier[get] ( literal[string] ),
identifier[provider] = identifier[provider] ,
identifier[context] = identifier[context]
)
identifier[aws_cli] ([ literal[string] ,
literal[string] ,
identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
literal[string] % identifier[bucket_name] ,
literal[string] ])
identifier[cf_client] = identifier[session] . identifier[client] ( literal[string] )
identifier[cf_client] . identifier[create_invalidation] (
identifier[DistributionId] = identifier[distribution_id] ,
identifier[InvalidationBatch] ={ literal[string] :{ literal[string] : literal[int] , literal[string] :[ literal[string] ]},
literal[string] : identifier[str] ( identifier[time] . identifier[time] ())}
)
identifier[LOGGER] . identifier[info] ( literal[string]
literal[string] ,
identifier[distribution_id] ,
identifier[distribution_domain] )
keyword[if] keyword[not] identifier[context] . identifier[hook_data] [ literal[string] ]. identifier[get] ( literal[string] ):
identifier[LOGGER] . identifier[info] ( literal[string]
literal[string] ,
identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ])
identifier[ssm_client] = identifier[session] . identifier[client] ( literal[string] )
identifier[ssm_client] . identifier[put_parameter] (
identifier[Name] = identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
identifier[Description] = literal[string] ,
identifier[Value] = identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
identifier[Type] = literal[string] ,
identifier[Overwrite] = keyword[True]
)
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[archives] =[]
identifier[s3_client] = identifier[session] . identifier[client] ( literal[string] )
identifier[list_objects_v2_paginator] = identifier[s3_client] . identifier[get_paginator] ( literal[string] )
identifier[response_iterator] = identifier[list_objects_v2_paginator] . identifier[paginate] (
identifier[Bucket] = identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
identifier[Prefix] = identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ]
)
keyword[for] identifier[page] keyword[in] identifier[response_iterator] :
identifier[archives] . identifier[extend] ( identifier[page] . identifier[get] ( literal[string] ,[]))
identifier[archives_to_prune] = identifier[get_archives_to_prune] (
identifier[archives] ,
identifier[context] . identifier[hook_data] [ literal[string] ]
)
keyword[for] identifier[objects] keyword[in] [ identifier[archives_to_prune] [ identifier[i] : identifier[i] + literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[archives_to_prune] ), literal[int] )]:
identifier[s3_client] . identifier[delete_objects] (
identifier[Bucket] = identifier[context] . identifier[hook_data] [ literal[string] ][ literal[string] ],
identifier[Delete] ={ literal[string] :[{ literal[string] : identifier[i] } keyword[for] identifier[i] keyword[in] identifier[objects] ]}
)
keyword[return] keyword[True]
|
def sync(context, provider, **kwargs): # pylint: disable=too-many-locals
'Sync static website to S3 bucket.'
session = get_session(provider.region)
bucket_name = OutputLookup.handle(kwargs.get('bucket_output_lookup'), provider=provider, context=context)
if context.hook_data['staticsite']['deploy_is_current']:
LOGGER.info('staticsite: skipping upload; latest version already deployed') # depends on [control=['if'], data=[]]
else:
distribution_id = OutputLookup.handle(kwargs.get('distributionid_output_lookup'), provider=provider, context=context)
distribution_domain = OutputLookup.handle(kwargs.get('distributiondomain_output_lookup'), provider=provider, context=context)
# Using the awscli for s3 syncing is incredibly suboptimal, but on
# balance it's probably the most stable/efficient option for syncing
# the files until https://github.com/boto/boto3/issues/358 is resolved
aws_cli(['s3', 'sync', context.hook_data['staticsite']['app_directory'], 's3://%s/' % bucket_name, '--delete'])
cf_client = session.client('cloudfront')
cf_client.create_invalidation(DistributionId=distribution_id, InvalidationBatch={'Paths': {'Quantity': 1, 'Items': ['/*']}, 'CallerReference': str(time.time())})
LOGGER.info('staticsite: sync & CF invalidation of %s (domain %s) complete', distribution_id, distribution_domain)
if not context.hook_data['staticsite'].get('hash_tracking_disabled'): # noqa
LOGGER.info('staticsite: updating environment SSM parameter %s with hash %s', context.hook_data['staticsite']['hash_tracking_parameter'], context.hook_data['staticsite']['hash'])
ssm_client = session.client('ssm') # noqa
ssm_client.put_parameter(Name=context.hook_data['staticsite']['hash_tracking_parameter'], Description='Hash of currently deployed static website source', Value=context.hook_data['staticsite']['hash'], Type='String', Overwrite=True) # depends on [control=['if'], data=[]]
LOGGER.info('staticsite: cleaning up old site archives...')
archives = []
s3_client = session.client('s3')
list_objects_v2_paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = list_objects_v2_paginator.paginate(Bucket=context.hook_data['staticsite']['artifact_bucket_name'], Prefix=context.hook_data['staticsite']['artifact_key_prefix'])
for page in response_iterator:
archives.extend(page.get('Contents', [])) # depends on [control=['for'], data=['page']]
archives_to_prune = get_archives_to_prune(archives, context.hook_data['staticsite'])
# Iterate in chunks of 1000 to match delete_objects limit
for objects in [archives_to_prune[i:i + 1000] for i in range(0, len(archives_to_prune), 1000)]:
s3_client.delete_objects(Bucket=context.hook_data['staticsite']['artifact_bucket_name'], Delete={'Objects': [{'Key': i} for i in objects]}) # depends on [control=['for'], data=['objects']]
return True
|
def get_all_firmwares(self, filter='', start=0, count=-1, query='', sort=''):
    """
    Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
    expression to select a particular server model, component name, and/or component firmware version.
    Note:
        This method is available for API version 300 or later.
    Args:
        start:
            The first item to return, using 0-based indexing.
            If not specified, the default is 0 - start with the first available item.
        count:
            The number of resources to return. A count of -1 requests all items.
            The actual number of items in the response might differ from the requested
            count if the sum of start and count exceeds the total number of items.
        filter (list or str):
            A general filter/query string to narrow the list of items returned. The
            default is no filter; all resources are returned.
        query:
            A general query string to narrow the list of resources returned. The default is no query; all resources
            are returned.
        sort:
            The sort order of the returned data set. By default, the sort order is based
            on create time with the oldest entry first.
    Returns:
        list: List of firmware inventory.
    """
    # NOTE: `filter` intentionally shadows the builtin -- it is part of the
    # public signature and cannot be renamed without breaking callers.
    firmware_uri = "{}/*/firmware".format(self.URI)
    return self._helper.get_all(start, count, filter, query, sort, '', '',
                                firmware_uri)
|
def function[get_all_firmwares, parameter[self, filter, start, count, query, sort]]:
constant[
Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory.
]
variable[uri] assign[=] binary_operation[name[self].URI + constant[/*/firmware]]
return[call[name[self]._helper.get_all, parameter[name[start], name[count], name[filter], name[query], name[sort], constant[], constant[], name[uri]]]]
|
keyword[def] identifier[get_all_firmwares] ( identifier[self] , identifier[filter] = literal[string] , identifier[start] = literal[int] , identifier[count] =- literal[int] , identifier[query] = literal[string] , identifier[sort] = literal[string] ):
literal[string]
identifier[uri] = identifier[self] . identifier[URI] + literal[string]
keyword[return] identifier[self] . identifier[_helper] . identifier[get_all] ( identifier[start] , identifier[count] , identifier[filter] , identifier[query] , identifier[sort] , literal[string] , literal[string] , identifier[uri] )
|
def get_all_firmwares(self, filter='', start=0, count=-1, query='', sort=''):
"""
Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory.
"""
uri = self.URI + '/*/firmware'
return self._helper.get_all(start, count, filter, query, sort, '', '', uri)
|
def _ftp_pwd(self):
        """Variant of `self.ftp.pwd()` that supports encoding-fallback.

        If the server-reported path cannot be encoded as UTF-8, retry the
        request once with the connection temporarily switched to Cp1252,
        then restore the previous encoding.

        Returns:
            Current working directory as native string.
        Raises:
            UnicodeEncodeError: If no fallback applies (Python 2, or the
                connection encoding is not utf-8).
        """
        try:
            return self.ftp.pwd()
        except UnicodeEncodeError:
            if compat.PY2 or self.ftp.encoding != "utf-8":
                raise  # should not happen, since Py2 does not try to encode
            # TODO: this is NOT THREAD-SAFE!
            prev_encoding = self.ftp.encoding
            try:
                write("ftp.pwd() failed with utf-8: trying Cp1252...", warning=True)
                # Bug fix: the fallback encoding was announced but never
                # applied, so the retry re-raised the same error. Switch to
                # cp1252 for the retry; `finally` restores the original.
                self.ftp.encoding = "cp1252"
                return self.ftp.pwd()
            finally:
                self.ftp.encoding = prev_encoding
|
def function[_ftp_pwd, parameter[self]]:
constant[Variant of `self.ftp.pwd()` that supports encoding-fallback.
Returns:
Current working directory as native string.
]
<ast.Try object at 0x7da1b042d660>
|
keyword[def] identifier[_ftp_pwd] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[ftp] . identifier[pwd] ()
keyword[except] identifier[UnicodeEncodeError] :
keyword[if] identifier[compat] . identifier[PY2] keyword[or] identifier[self] . identifier[ftp] . identifier[encoding] != literal[string] :
keyword[raise]
identifier[prev_encoding] = identifier[self] . identifier[ftp] . identifier[encoding]
keyword[try] :
identifier[write] ( literal[string] , identifier[warning] = keyword[True] )
keyword[return] identifier[self] . identifier[ftp] . identifier[pwd] ()
keyword[finally] :
identifier[self] . identifier[ftp] . identifier[encoding] = identifier[prev_encoding]
|
def _ftp_pwd(self):
"""Variant of `self.ftp.pwd()` that supports encoding-fallback.
Returns:
Current working directory as native string.
"""
try:
return self.ftp.pwd() # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
if compat.PY2 or self.ftp.encoding != 'utf-8':
raise # should not happen, since Py2 does not try to encode # depends on [control=['if'], data=[]] # TODO: this is NOT THREAD-SAFE!
prev_encoding = self.ftp.encoding
try:
write('ftp.pwd() failed with utf-8: trying Cp1252...', warning=True)
return self.ftp.pwd() # depends on [control=['try'], data=[]]
finally:
self.ftp.encoding = prev_encoding # depends on [control=['except'], data=[]]
|
def dnld_goa(self, species, ext='gaf', item=None, fileout=None):
    """Download GOA source file name on EMBL-EBI ftp server."""
    basename = self.get_basename(species, ext, item)
    # Remote layout: <ftp_src_goa>/<SPECIES>/<basename>.gz
    remote_path = os.path.join(self.ftp_src_goa, species.upper(),
                               "{}.gz".format(basename))
    # Default destination is the current working directory unless an
    # explicit output path was supplied.
    if fileout is None:
        local_path = os.path.join(os.getcwd(), basename)
    else:
        local_path = fileout
    dnld_file(remote_path, local_path, prt=sys.stdout, loading_bar=None)
    return local_path
|
def function[dnld_goa, parameter[self, species, ext, item, fileout]]:
constant[Download GOA source file name on EMBL-EBI ftp server.]
variable[basename] assign[=] call[name[self].get_basename, parameter[name[species], name[ext], name[item]]]
variable[src] assign[=] call[name[os].path.join, parameter[name[self].ftp_src_goa, call[name[species].upper, parameter[]], call[constant[{F}.gz].format, parameter[]]]]
variable[dst] assign[=] <ast.IfExp object at 0x7da20c6ab5b0>
call[name[dnld_file], parameter[name[src], name[dst]]]
return[name[dst]]
|
keyword[def] identifier[dnld_goa] ( identifier[self] , identifier[species] , identifier[ext] = literal[string] , identifier[item] = keyword[None] , identifier[fileout] = keyword[None] ):
literal[string]
identifier[basename] = identifier[self] . identifier[get_basename] ( identifier[species] , identifier[ext] , identifier[item] )
identifier[src] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[ftp_src_goa] , identifier[species] . identifier[upper] (), literal[string] . identifier[format] ( identifier[F] = identifier[basename] ))
identifier[dst] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[basename] ) keyword[if] identifier[fileout] keyword[is] keyword[None] keyword[else] identifier[fileout]
identifier[dnld_file] ( identifier[src] , identifier[dst] , identifier[prt] = identifier[sys] . identifier[stdout] , identifier[loading_bar] = keyword[None] )
keyword[return] identifier[dst]
|
def dnld_goa(self, species, ext='gaf', item=None, fileout=None):
"""Download GOA source file name on EMBL-EBI ftp server."""
basename = self.get_basename(species, ext, item)
src = os.path.join(self.ftp_src_goa, species.upper(), '{F}.gz'.format(F=basename))
dst = os.path.join(os.getcwd(), basename) if fileout is None else fileout
dnld_file(src, dst, prt=sys.stdout, loading_bar=None)
return dst
|
def _resolve_slices(data_columns, names):
"""
Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices.
"""
def _get_slice_cols(sc):
"""
Convert slice to list of names
"""
# Just like pandas.DataFrame.loc the stop
# column is included
idx_start = data_columns.get_loc(sc.start)
idx_stop = data_columns.get_loc(sc.stop) + 1
return data_columns[idx_start:idx_stop:sc.step]
result = []
for col in names:
if isinstance(col, slice):
result.extend(_get_slice_cols(col))
else:
result.append(col)
return tuple(result)
|
def function[_resolve_slices, parameter[data_columns, names]]:
constant[
Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices.
]
def function[_get_slice_cols, parameter[sc]]:
constant[
Convert slice to list of names
]
variable[idx_start] assign[=] call[name[data_columns].get_loc, parameter[name[sc].start]]
variable[idx_stop] assign[=] binary_operation[call[name[data_columns].get_loc, parameter[name[sc].stop]] + constant[1]]
return[call[name[data_columns]][<ast.Slice object at 0x7da2054a66e0>]]
variable[result] assign[=] list[[]]
for taget[name[col]] in starred[name[names]] begin[:]
if call[name[isinstance], parameter[name[col], name[slice]]] begin[:]
call[name[result].extend, parameter[call[name[_get_slice_cols], parameter[name[col]]]]]
return[call[name[tuple], parameter[name[result]]]]
|
keyword[def] identifier[_resolve_slices] ( identifier[data_columns] , identifier[names] ):
literal[string]
keyword[def] identifier[_get_slice_cols] ( identifier[sc] ):
literal[string]
identifier[idx_start] = identifier[data_columns] . identifier[get_loc] ( identifier[sc] . identifier[start] )
identifier[idx_stop] = identifier[data_columns] . identifier[get_loc] ( identifier[sc] . identifier[stop] )+ literal[int]
keyword[return] identifier[data_columns] [ identifier[idx_start] : identifier[idx_stop] : identifier[sc] . identifier[step] ]
identifier[result] =[]
keyword[for] identifier[col] keyword[in] identifier[names] :
keyword[if] identifier[isinstance] ( identifier[col] , identifier[slice] ):
identifier[result] . identifier[extend] ( identifier[_get_slice_cols] ( identifier[col] ))
keyword[else] :
identifier[result] . identifier[append] ( identifier[col] )
keyword[return] identifier[tuple] ( identifier[result] )
|
def _resolve_slices(data_columns, names):
"""
Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices.
"""
def _get_slice_cols(sc):
"""
Convert slice to list of names
"""
# Just like pandas.DataFrame.loc the stop
# column is included
idx_start = data_columns.get_loc(sc.start)
idx_stop = data_columns.get_loc(sc.stop) + 1
return data_columns[idx_start:idx_stop:sc.step]
result = []
for col in names:
if isinstance(col, slice):
result.extend(_get_slice_cols(col)) # depends on [control=['if'], data=[]]
else:
result.append(col) # depends on [control=['for'], data=['col']]
return tuple(result)
|
def getStuckRelayCheckEnabled(self):
    """Returns True if enabled, False if disabled"""
    # Query the device settings; the third field is a hex flag word in
    # which bit 0x0010 set means the stuck-relay check is DISABLED.
    response = self.sendCommand('$GE')
    status_flags = int(response[2], 16)
    return (status_flags & 0x0010) == 0
|
def function[getStuckRelayCheckEnabled, parameter[self]]:
constant[Returns True if enabled, False if disabled]
variable[command] assign[=] constant[$GE]
variable[settings] assign[=] call[name[self].sendCommand, parameter[name[command]]]
variable[flags] assign[=] call[name[int], parameter[call[name[settings]][constant[2]], constant[16]]]
return[<ast.UnaryOp object at 0x7da1b0abab90>]
|
keyword[def] identifier[getStuckRelayCheckEnabled] ( identifier[self] ):
literal[string]
identifier[command] = literal[string]
identifier[settings] = identifier[self] . identifier[sendCommand] ( identifier[command] )
identifier[flags] = identifier[int] ( identifier[settings] [ literal[int] ], literal[int] )
keyword[return] keyword[not] ( identifier[flags] & literal[int] )
|
def getStuckRelayCheckEnabled(self):
"""Returns True if enabled, False if disabled"""
command = '$GE'
settings = self.sendCommand(command)
flags = int(settings[2], 16)
return not flags & 16
|
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Read the data encoding the RevokeRequestPayload object and decode it
        into its constituent parts.

        Required fields (in encoding order): the unique identifier of the
        object being revoked and the revocation reason.  An optional
        compromise-occurrence date follows when present.

        Args:
            istream (Stream): A data stream containing encoded object data,
                supporting a read method; usually a BytearrayStream object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.0.
        """
        # Base read consumes the payload tag/length; self.length then bounds
        # the remaining payload bytes to pull from istream.
        super(RevokeRequestPayload, self).read(
            istream,
            kmip_version=kmip_version
        )
        # Decode from an isolated sub-stream so trailing bytes are detectable.
        tstream = BytearrayStream(istream.read(self.length))
        self.unique_identifier = attributes.UniqueIdentifier()
        self.unique_identifier.read(tstream, kmip_version=kmip_version)
        self.revocation_reason = objects.RevocationReason()
        self.revocation_reason.read(tstream, kmip_version=kmip_version)
        # Optional field: decode only if its tag is next in the sub-stream.
        if self.is_tag_next(enums.Tags.COMPROMISE_OCCURRENCE_DATE, tstream):
            self.compromise_occurrence_date = primitives.DateTime(
                tag=enums.Tags.COMPROMISE_OCCURRENCE_DATE)
            self.compromise_occurrence_date.read(
                tstream,
                kmip_version=kmip_version
            )
        # Fail if any payload bytes were left unconsumed, then sanity-check.
        self.is_oversized(tstream)
        self.validate()
|
def function[read, parameter[self, istream, kmip_version]]:
constant[
Read the data encoding the RevokeRequestPayload object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
]
call[call[name[super], parameter[name[RevokeRequestPayload], name[self]]].read, parameter[name[istream]]]
variable[tstream] assign[=] call[name[BytearrayStream], parameter[call[name[istream].read, parameter[name[self].length]]]]
name[self].unique_identifier assign[=] call[name[attributes].UniqueIdentifier, parameter[]]
call[name[self].unique_identifier.read, parameter[name[tstream]]]
name[self].revocation_reason assign[=] call[name[objects].RevocationReason, parameter[]]
call[name[self].revocation_reason.read, parameter[name[tstream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.COMPROMISE_OCCURRENCE_DATE, name[tstream]]] begin[:]
name[self].compromise_occurrence_date assign[=] call[name[primitives].DateTime, parameter[]]
call[name[self].compromise_occurrence_date.read, parameter[name[tstream]]]
call[name[self].is_oversized, parameter[name[tstream]]]
call[name[self].validate, parameter[]]
|
keyword[def] identifier[read] ( identifier[self] , identifier[istream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ):
literal[string]
identifier[super] ( identifier[RevokeRequestPayload] , identifier[self] ). identifier[read] (
identifier[istream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[tstream] = identifier[BytearrayStream] ( identifier[istream] . identifier[read] ( identifier[self] . identifier[length] ))
identifier[self] . identifier[unique_identifier] = identifier[attributes] . identifier[UniqueIdentifier] ()
identifier[self] . identifier[unique_identifier] . identifier[read] ( identifier[tstream] , identifier[kmip_version] = identifier[kmip_version] )
identifier[self] . identifier[revocation_reason] = identifier[objects] . identifier[RevocationReason] ()
identifier[self] . identifier[revocation_reason] . identifier[read] ( identifier[tstream] , identifier[kmip_version] = identifier[kmip_version] )
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[COMPROMISE_OCCURRENCE_DATE] , identifier[tstream] ):
identifier[self] . identifier[compromise_occurrence_date] = identifier[primitives] . identifier[DateTime] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[COMPROMISE_OCCURRENCE_DATE] )
identifier[self] . identifier[compromise_occurrence_date] . identifier[read] (
identifier[tstream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[is_oversized] ( identifier[tstream] )
identifier[self] . identifier[validate] ()
|
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Read the data encoding the RevokeRequestPayload object and decode it
    into its constituent parts.
    Args:
        istream (Stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.
    """
    # Base-class read consumes the TTLV header and sets self.length to
    # the byte length of this payload's value.
    super(RevokeRequestPayload, self).read(istream, kmip_version=kmip_version)
    # Isolate exactly this payload's bytes so sub-reads cannot run past
    # the end of the structure.
    tstream = BytearrayStream(istream.read(self.length))
    # Required fields, decoded in encoding order: unique identifier
    # first, then the revocation reason.
    self.unique_identifier = attributes.UniqueIdentifier()
    self.unique_identifier.read(tstream, kmip_version=kmip_version)
    self.revocation_reason = objects.RevocationReason()
    self.revocation_reason.read(tstream, kmip_version=kmip_version)
    # Optional field: only present when its tag is next in the stream.
    if self.is_tag_next(enums.Tags.COMPROMISE_OCCURRENCE_DATE, tstream):
        self.compromise_occurrence_date = primitives.DateTime(tag=enums.Tags.COMPROMISE_OCCURRENCE_DATE)
        self.compromise_occurrence_date.read(tstream, kmip_version=kmip_version)
    # Fail if unread bytes remain (payload longer than its declared
    # length), then run structural validation.
    self.is_oversized(tstream)
    self.validate()
|
def make_formatters(self):
    """Build one formatter callable per column.

    The returned list can be stored in the render spec so justification
    processing does not need to rebuild the formatters each time.
    """
    formatters = []
    for col_spec, inner_width in zip(self.colspec, self.widths):
        formatters.append(self.make_formatter(
            inner_width,
            col_spec['padding'],
            col_spec['align'],
            col_spec['overflow'],
        ))
    return formatters
|
def function[make_formatters, parameter[self]]:
constant[ Create a list formatter functions for each column. They can then
be stored in the render spec for faster justification processing. ]
return[<ast.ListComp object at 0x7da204622560>]
|
keyword[def] identifier[make_formatters] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[self] . identifier[make_formatter] ( identifier[inner_w] , identifier[spec] [ literal[string] ], identifier[spec] [ literal[string] ],
identifier[spec] [ literal[string] ])
keyword[for] identifier[spec] , identifier[inner_w] keyword[in] identifier[zip] ( identifier[self] . identifier[colspec] , identifier[self] . identifier[widths] )]
|
def make_formatters(self):
    """Create the per-column formatter callables.

    They can be cached in the render spec for faster justification
    processing.
    """
    spec_width_pairs = zip(self.colspec, self.widths)
    return [
        self.make_formatter(width, spec['padding'], spec['align'], spec['overflow'])
        for spec, width in spec_width_pairs
    ]
|
def _compute_distance_term(self, C, mag, rrup):
"""
Compute second and third terms in equation 1, p. 901.
"""
term1 = C['b'] * rrup
term2 = - np.log(rrup + C['c'] * np.exp(C['d'] * mag))
return term1 + term2
|
def function[_compute_distance_term, parameter[self, C, mag, rrup]]:
constant[
Compute second and third terms in equation 1, p. 901.
]
variable[term1] assign[=] binary_operation[call[name[C]][constant[b]] * name[rrup]]
variable[term2] assign[=] <ast.UnaryOp object at 0x7da204347970>
return[binary_operation[name[term1] + name[term2]]]
|
keyword[def] identifier[_compute_distance_term] ( identifier[self] , identifier[C] , identifier[mag] , identifier[rrup] ):
literal[string]
identifier[term1] = identifier[C] [ literal[string] ]* identifier[rrup]
identifier[term2] =- identifier[np] . identifier[log] ( identifier[rrup] + identifier[C] [ literal[string] ]* identifier[np] . identifier[exp] ( identifier[C] [ literal[string] ]* identifier[mag] ))
keyword[return] identifier[term1] + identifier[term2]
|
def _compute_distance_term(self, C, mag, rrup):
"""
Compute second and third terms in equation 1, p. 901.
"""
term1 = C['b'] * rrup
term2 = -np.log(rrup + C['c'] * np.exp(C['d'] * mag))
return term1 + term2
|
def pauli(qubo):
    """
    Convert to pauli operators of universal gate model.
    Requires blueqat.
    """
    from blueqat.pauli import qubo_bit
    size = len(qubo)
    # The QUBO matrix must be square.
    assert all(len(row) == size for row in qubo)
    hamiltonian = 0.0
    for i in range(size):
        # Diagonal entries map to single-qubit terms.
        hamiltonian += qubo_bit(i) * qubo[i][i]
        # Off-diagonal entries (i, j) and (j, i) are combined into one
        # two-qubit coupling term.
        for j in range(i + 1, size):
            hamiltonian += qubo_bit(i) * qubo_bit(j) * (qubo[i][j] + qubo[j][i])
    return hamiltonian
|
def function[pauli, parameter[qubo]]:
constant[
Convert to pauli operators of universal gate model.
Requires blueqat.
]
from relative_module[blueqat.pauli] import module[qubo_bit]
variable[h] assign[=] constant[0.0]
assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da18eb54df0>]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[qubo]]]]]] begin[:]
<ast.AugAssign object at 0x7da18eb56860>
for taget[name[j]] in starred[call[name[range], parameter[binary_operation[name[i] + constant[1]], call[name[len], parameter[name[qubo]]]]]] begin[:]
<ast.AugAssign object at 0x7da18eb56890>
return[name[h]]
|
keyword[def] identifier[pauli] ( identifier[qubo] ):
literal[string]
keyword[from] identifier[blueqat] . identifier[pauli] keyword[import] identifier[qubo_bit]
identifier[h] = literal[int]
keyword[assert] identifier[all] ( identifier[len] ( identifier[q] )== identifier[len] ( identifier[qubo] ) keyword[for] identifier[q] keyword[in] identifier[qubo] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[qubo] )):
identifier[h] += identifier[qubo_bit] ( identifier[i] )* identifier[qubo] [ identifier[i] ][ identifier[i] ]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[len] ( identifier[qubo] )):
identifier[h] += identifier[qubo_bit] ( identifier[i] )* identifier[qubo_bit] ( identifier[j] )*( identifier[qubo] [ identifier[i] ][ identifier[j] ]+ identifier[qubo] [ identifier[j] ][ identifier[i] ])
keyword[return] identifier[h]
|
def pauli(qubo):
    """
    Convert to pauli operators of universal gate model.
    Requires blueqat.
    """
    from blueqat.pauli import qubo_bit
    n = len(qubo)
    # Reject non-square QUBO matrices up front.
    assert all(len(row) == n for row in qubo)
    total = 0.0
    for i, row in enumerate(qubo):
        # Diagonal weight becomes a single-qubit term.
        total += qubo_bit(i) * row[i]
        # Symmetric pair (i, j)/(j, i) collapses into one coupling term.
        for j in range(i + 1, n):
            total += qubo_bit(i) * qubo_bit(j) * (row[j] + qubo[j][i])
    return total
|
def get_dependencies():
    """Return a list of dicts describing installed (optional) dependencies.

    Each dict carries the keys ``name``, ``min_version``, ``description``
    and ``version``; ``version`` is ``None`` when the package cannot be
    imported.  The ten original copy-pasted try/except stanzas are folded
    into one data-driven probe loop (same names, same order, same values).
    """
    def _matplotlib_version():
        # Old matplotlib releases predate matplotlib._version; fall back
        # to the public __version__ attribute for those.
        import matplotlib
        try:
            return matplotlib._version.get_versions()["version"]
        except AttributeError:
            return matplotlib.__version__

    # (display name, minimum version, why it is needed, version getter).
    # Each getter performs the import itself so an absent package simply
    # raises ImportError, which the loop below maps to version None.
    probes = [
        ("numpy", "1.1.0", "required",
         lambda: __import__("numpy").version.version),
        ("wxPython", "2.8.10.1", "required",
         lambda: __import__("wx").version()),
        ("matplotlib", "1.1.1", "required", _matplotlib_version),
        ("pycairo", "1.8.8", "required",
         lambda: __import__("cairo").version),
        ("python-gnupg", "0.3.0", "for opening own files without approval",
         lambda: __import__("gnupg").__version__),
        ("xlrd", "0.9.2", "for loading Excel files",
         lambda: __import__("xlrd").__VERSION__),
        ("xlwt", "0.7.2", "for saving Excel files",
         lambda: __import__("xlwt").__VERSION__),
        ("jedi", "0.8.0",
         "for tab completion and context help in the entry line",
         lambda: __import__("jedi").__version__),
        # pyrsvg exposes no version attribute; importability alone is
        # recorded as True, matching the historical behaviour.
        ("pyrsvg", "2.32", "for displaying SVG files in cells",
         lambda: __import__("rsvg") and True),
        ("pyenchant", "1.6.6", "for spell checking",
         lambda: __import__("enchant").__version__),
    ]
    dependencies = []
    for name, min_version, description, get_version in probes:
        try:
            version = get_version()
        except ImportError:
            version = None
        dependencies.append({
            "name": name,
            "min_version": min_version,
            "description": description,
            "version": version,
        })
    return dependencies
|
def function[get_dependencies, parameter[]]:
constant[Returns list of dicts which indicate installed dependencies]
variable[dependencies] assign[=] list[[]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b151b0a0>, <ast.Constant object at 0x7da1b15180d0>, <ast.Constant object at 0x7da1b151b5b0>], [<ast.Constant object at 0x7da1b151ba90>, <ast.Constant object at 0x7da1b151b520>, <ast.Constant object at 0x7da1b151a110>]]
<ast.Try object at 0x7da1b1518370>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1519f30>, <ast.Constant object at 0x7da1b151a440>, <ast.Constant object at 0x7da1b1519ed0>], [<ast.Constant object at 0x7da1b151b400>, <ast.Constant object at 0x7da1b1519e40>, <ast.Constant object at 0x7da1b151a050>]]
<ast.Try object at 0x7da1b151b160>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b151a020>, <ast.Constant object at 0x7da1b15199c0>, <ast.Constant object at 0x7da1b151b2b0>], [<ast.Constant object at 0x7da1b151b8b0>, <ast.Constant object at 0x7da1b151a4d0>, <ast.Constant object at 0x7da1b151bfd0>]]
<ast.Try object at 0x7da1b151a170>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1518760>, <ast.Constant object at 0x7da1b1518790>, <ast.Constant object at 0x7da1b1519ff0>], [<ast.Constant object at 0x7da1b151b700>, <ast.Constant object at 0x7da1b151afe0>, <ast.Constant object at 0x7da1b1518160>]]
<ast.Try object at 0x7da1b1518190>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b15186a0>, <ast.Constant object at 0x7da1b1518d00>, <ast.Constant object at 0x7da1b15188e0>], [<ast.Constant object at 0x7da1b1518940>, <ast.Constant object at 0x7da1b151b3d0>, <ast.Constant object at 0x7da1b15182b0>]]
<ast.Try object at 0x7da1b151a1a0>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1519c90>, <ast.Constant object at 0x7da1b151bac0>, <ast.Constant object at 0x7da1b1519ea0>], [<ast.Constant object at 0x7da1b151b490>, <ast.Constant object at 0x7da1b15184c0>, <ast.Constant object at 0x7da1b151afb0>]]
<ast.Try object at 0x7da1b15187c0>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da204622b00>, <ast.Constant object at 0x7da204621e40>, <ast.Constant object at 0x7da204623940>], [<ast.Constant object at 0x7da204623250>, <ast.Constant object at 0x7da204621d50>, <ast.Constant object at 0x7da204621540>]]
<ast.Try object at 0x7da204620040>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da204620b20>, <ast.Constant object at 0x7da204623eb0>, <ast.Constant object at 0x7da2046231f0>], [<ast.Constant object at 0x7da204621870>, <ast.Constant object at 0x7da2046223e0>, <ast.Constant object at 0x7da204623f70>]]
<ast.Try object at 0x7da204622590>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da2046215a0>, <ast.Constant object at 0x7da204620e20>, <ast.Constant object at 0x7da204623f40>], [<ast.Constant object at 0x7da204623010>, <ast.Constant object at 0x7da204620460>, <ast.Constant object at 0x7da204620730>]]
<ast.Try object at 0x7da204623ee0>
call[name[dependencies].append, parameter[name[dep_attrs]]]
variable[dep_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da2046209a0>, <ast.Constant object at 0x7da204623160>, <ast.Constant object at 0x7da204620bb0>], [<ast.Constant object at 0x7da2046225c0>, <ast.Constant object at 0x7da204621990>, <ast.Constant object at 0x7da1b1632920>]]
<ast.Try object at 0x7da1b1630e80>
call[name[dependencies].append, parameter[name[dep_attrs]]]
return[name[dependencies]]
|
keyword[def] identifier[get_dependencies] ():
literal[string]
identifier[dependencies] =[]
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[numpy]
identifier[dep_attrs] [ literal[string] ]= identifier[numpy] . identifier[version] . identifier[version]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[wx]
identifier[dep_attrs] [ literal[string] ]= identifier[wx] . identifier[version] ()
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[matplotlib]
identifier[dep_attrs] [ literal[string] ]= identifier[matplotlib] . identifier[_version] . identifier[get_versions] ()[ literal[string] ]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
keyword[except] identifier[AttributeError] :
identifier[dep_attrs] [ literal[string] ]= identifier[matplotlib] . identifier[__version__]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[cairo]
identifier[dep_attrs] [ literal[string] ]= identifier[cairo] . identifier[version]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[gnupg]
identifier[dep_attrs] [ literal[string] ]= identifier[gnupg] . identifier[__version__]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[xlrd]
identifier[dep_attrs] [ literal[string] ]= identifier[xlrd] . identifier[__VERSION__]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[xlwt]
identifier[dep_attrs] [ literal[string] ]= identifier[xlwt] . identifier[__VERSION__]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[jedi]
identifier[dep_attrs] [ literal[string] ]= identifier[jedi] . identifier[__version__]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[rsvg]
identifier[dep_attrs] [ literal[string] ]= keyword[True]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
identifier[dep_attrs] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[try] :
keyword[import] identifier[enchant]
identifier[dep_attrs] [ literal[string] ]= identifier[enchant] . identifier[__version__]
keyword[except] identifier[ImportError] :
identifier[dep_attrs] [ literal[string] ]= keyword[None]
identifier[dependencies] . identifier[append] ( identifier[dep_attrs] )
keyword[return] identifier[dependencies]
|
def get_dependencies():
    """Returns list of dicts which indicate installed dependencies"""
    def probe(name, min_version, description, getter):
        # Missing packages are reported with version None rather than
        # raising, so callers can render an availability table.
        try:
            version = getter()
        except ImportError:
            version = None
        return {"name": name, "min_version": min_version,
                "description": description, "version": version}

    def matplotlib_version():
        import matplotlib
        try:
            return matplotlib._version.get_versions()["version"]
        except AttributeError:
            # Very old matplotlib releases predate matplotlib._version.
            return matplotlib.__version__

    return [
        probe("numpy", "1.1.0", "required",
              lambda: __import__("numpy").version.version),
        probe("wxPython", "2.8.10.1", "required",
              lambda: __import__("wx").version()),
        probe("matplotlib", "1.1.1", "required", matplotlib_version),
        probe("pycairo", "1.8.8", "required",
              lambda: __import__("cairo").version),
        probe("python-gnupg", "0.3.0",
              "for opening own files without approval",
              lambda: __import__("gnupg").__version__),
        probe("xlrd", "0.9.2", "for loading Excel files",
              lambda: __import__("xlrd").__VERSION__),
        probe("xlwt", "0.7.2", "for saving Excel files",
              lambda: __import__("xlwt").__VERSION__),
        probe("jedi", "0.8.0",
              "for tab completion and context help in the entry line",
              lambda: __import__("jedi").__version__),
        # pyrsvg has no version attribute; True just records importability.
        probe("pyrsvg", "2.32", "for displaying SVG files in cells",
              lambda: __import__("rsvg") and True),
        probe("pyenchant", "1.6.6", "for spell checking",
              lambda: __import__("enchant").__version__),
    ]
|
def notify(cls, user_or_email, instance):
    """Create, save, and return a watch which fires when something
    happens to ``instance``."""
    # Key the watch to this specific model instance via its primary key.
    parent = super(InstanceEvent, cls)
    return parent.notify(user_or_email, object_id=instance.pk)
|
def function[notify, parameter[cls, user_or_email, instance]]:
constant[Create, save, and return a watch which fires when something
happens to ``instance``.]
return[call[call[name[super], parameter[name[InstanceEvent], name[cls]]].notify, parameter[name[user_or_email]]]]
|
keyword[def] identifier[notify] ( identifier[cls] , identifier[user_or_email] , identifier[instance] ):
literal[string]
keyword[return] identifier[super] ( identifier[InstanceEvent] , identifier[cls] ). identifier[notify] ( identifier[user_or_email] ,
identifier[object_id] = identifier[instance] . identifier[pk] )
|
def notify(cls, user_or_email, instance):
    """Create, save, and return a watch which fires when something
    happens to ``instance``."""
    # Delegate to the generic event notify, keying the watch to this
    # specific model instance via its primary key.
    return super(InstanceEvent, cls).notify(user_or_email, object_id=instance.pk)
|
def version_calc(dist, attr, value):
    """
    Handler for parameter to setup(use_vcs_version=value)
    attr should be 'use_vcs_version' (also allows use_hg_version for
    compatibility).
    bool(value) should be true to invoke this plugin.
    value may optionally be a dict and supply options to the plugin.
    """
    # Guard clauses: only act for the recognized attribute names, and
    # only when the value is truthy (plugin explicitly enabled).
    if attr not in ('use_hg_version', 'use_vcs_version'):
        return
    if not value:
        return
    # A dict value supplies options to the plugin; any other truthy
    # value just enables it with defaults.
    if isinstance(value, dict):
        options = value
    else:
        options = {}
    dist.metadata.version = calculate_version(options)
    patch_egg_info()
|
def function[version_calc, parameter[dist, attr, value]]:
constant[
Handler for parameter to setup(use_vcs_version=value)
attr should be 'use_vcs_version' (also allows use_hg_version for
compatibility).
bool(value) should be true to invoke this plugin.
value may optionally be a dict and supply options to the plugin.
]
variable[expected_attrs] assign[=] tuple[[<ast.Constant object at 0x7da1b0bf3550>, <ast.Constant object at 0x7da1b0bf0a30>]]
if <ast.BoolOp object at 0x7da1b0bf1e40> begin[:]
return[None]
variable[options] assign[=] <ast.IfExp object at 0x7da1b0b0e1d0>
name[dist].metadata.version assign[=] call[name[calculate_version], parameter[name[options]]]
call[name[patch_egg_info], parameter[]]
|
keyword[def] identifier[version_calc] ( identifier[dist] , identifier[attr] , identifier[value] ):
literal[string]
identifier[expected_attrs] = literal[string] , literal[string]
keyword[if] keyword[not] identifier[value] keyword[or] identifier[attr] keyword[not] keyword[in] identifier[expected_attrs] :
keyword[return]
identifier[options] = identifier[value] keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[else] {}
identifier[dist] . identifier[metadata] . identifier[version] = identifier[calculate_version] ( identifier[options] )
identifier[patch_egg_info] ()
|
def version_calc(dist, attr, value):
    """
    Handler for parameter to setup(use_vcs_version=value)
    attr should be 'use_vcs_version' (also allows use_hg_version for
    compatibility).
    bool(value) should be true to invoke this plugin.
    value may optionally be a dict and supply options to the plugin.
    """
    handled = attr in ('use_hg_version', 'use_vcs_version')
    # Do nothing unless the plugin is addressed and explicitly enabled.
    if not (value and handled):
        return
    options = value if isinstance(value, dict) else {}
    dist.metadata.version = calculate_version(options)
    patch_egg_info()
|
def get_queryset(self):
    """
    Returns queryset limited to categories with live Entry instances.
    :rtype: django.db.models.query.QuerySet.
    """
    base = super(LiveEntryCategoryManager, self).get_queryset()
    # Collect the tag of every EntryTag attached to a live Entry, then
    # restrict the base queryset to those tags.
    live_tags = []
    for entry_tag in EntryTag.objects.filter(entry__live=True):
        live_tags.append(entry_tag.tag)
    return base.filter(tag__in=live_tags)
|
def function[get_queryset, parameter[self]]:
constant[
Returns queryset limited to categories with live Entry instances.
:rtype: django.db.models.query.QuerySet.
]
variable[queryset] assign[=] call[call[name[super], parameter[name[LiveEntryCategoryManager], name[self]]].get_queryset, parameter[]]
return[call[name[queryset].filter, parameter[]]]
|
keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[queryset] = identifier[super] ( identifier[LiveEntryCategoryManager] , identifier[self] ). identifier[get_queryset] ()
keyword[return] identifier[queryset] . identifier[filter] ( identifier[tag__in] =[
identifier[entry_tag] . identifier[tag]
keyword[for] identifier[entry_tag]
keyword[in] identifier[EntryTag] . identifier[objects] . identifier[filter] ( identifier[entry__live] = keyword[True] )
])
|
def get_queryset(self):
    """
    Returns queryset limited to categories with live Entry instances.
    :rtype: django.db.models.query.QuerySet.
    """
    queryset = super(LiveEntryCategoryManager, self).get_queryset()
    # Gather the tag of every EntryTag attached to a live Entry and keep
    # only categories whose tag appears in that list.
    return queryset.filter(tag__in=[entry_tag.tag for entry_tag in EntryTag.objects.filter(entry__live=True)])
|
def _parse_common(tag):
"""Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
"""
if "modifiers" in tag.attrib:
modifiers = re.split(",\s*", tag.attrib["modifiers"].strip())
if "" in modifiers:
modifiers.remove("")
else:
modifiers = None
if "name" in tag.attrib:
name = tag.attrib["name"]
if "type" in tag.attrib:
dtype = tag.attrib["type"]
else:
dtype = None
if "kind" in tag.attrib:
kind = tag.attrib["kind"]
else:
kind = None
return (name, modifiers, dtype, kind)
|
def function[_parse_common, parameter[tag]]:
constant[Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
]
if compare[constant[modifiers] in name[tag].attrib] begin[:]
variable[modifiers] assign[=] call[name[re].split, parameter[constant[,\s*], call[call[name[tag].attrib][constant[modifiers]].strip, parameter[]]]]
if compare[constant[] in name[modifiers]] begin[:]
call[name[modifiers].remove, parameter[constant[]]]
if compare[constant[name] in name[tag].attrib] begin[:]
variable[name] assign[=] call[name[tag].attrib][constant[name]]
if compare[constant[type] in name[tag].attrib] begin[:]
variable[dtype] assign[=] call[name[tag].attrib][constant[type]]
if compare[constant[kind] in name[tag].attrib] begin[:]
variable[kind] assign[=] call[name[tag].attrib][constant[kind]]
return[tuple[[<ast.Name object at 0x7da18bcc9ab0>, <ast.Name object at 0x7da18bcca950>, <ast.Name object at 0x7da18bcc8520>, <ast.Name object at 0x7da18bcc9300>]]]
|
keyword[def] identifier[_parse_common] ( identifier[tag] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[tag] . identifier[attrib] :
identifier[modifiers] = identifier[re] . identifier[split] ( literal[string] , identifier[tag] . identifier[attrib] [ literal[string] ]. identifier[strip] ())
keyword[if] literal[string] keyword[in] identifier[modifiers] :
identifier[modifiers] . identifier[remove] ( literal[string] )
keyword[else] :
identifier[modifiers] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[tag] . identifier[attrib] :
identifier[name] = identifier[tag] . identifier[attrib] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[tag] . identifier[attrib] :
identifier[dtype] = identifier[tag] . identifier[attrib] [ literal[string] ]
keyword[else] :
identifier[dtype] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[tag] . identifier[attrib] :
identifier[kind] = identifier[tag] . identifier[attrib] [ literal[string] ]
keyword[else] :
identifier[kind] = keyword[None]
keyword[return] ( identifier[name] , identifier[modifiers] , identifier[dtype] , identifier[kind] )
|
def _parse_common(tag):
"""Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
"""
if 'modifiers' in tag.attrib:
modifiers = re.split(',\\s*', tag.attrib['modifiers'].strip())
if '' in modifiers:
modifiers.remove('') # depends on [control=['if'], data=['modifiers']] # depends on [control=['if'], data=[]]
else:
modifiers = None
if 'name' in tag.attrib:
name = tag.attrib['name'] # depends on [control=['if'], data=[]]
if 'type' in tag.attrib:
dtype = tag.attrib['type'] # depends on [control=['if'], data=[]]
else:
dtype = None
if 'kind' in tag.attrib:
kind = tag.attrib['kind'] # depends on [control=['if'], data=[]]
else:
kind = None
return (name, modifiers, dtype, kind)
|
def fetch(self):
    """
    Fetch & return a new `DomainRecord` object representing the domain
    record's current state
    :rtype: DomainRecord
    :raises DOAPIError: if the API endpoint replies with an error (e.g., if
        the domain record no longer exists)
    """
    # Re-query the API, then wrap the fresh payload in a new record
    # object attached to the same domain.
    response = self.doapi_manager.request(self.url)
    return self.domain._record(response["domain_record"])
|
def function[fetch, parameter[self]]:
constant[
Fetch & return a new `DomainRecord` object representing the domain
record's current state
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the domain record no longer exists)
]
return[call[name[self].domain._record, parameter[call[call[name[self].doapi_manager.request, parameter[name[self].url]]][constant[domain_record]]]]]
|
keyword[def] identifier[fetch] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[domain] . identifier[_record] ( identifier[self] . identifier[doapi_manager] . identifier[request] ( identifier[self] . identifier[url] )[ literal[string] ])
|
def fetch(self):
    """
    Fetch & return a new `DomainRecord` object representing the domain
    record's current state
    :rtype: DomainRecord
    :raises DOAPIError: if the API endpoint replies with an error (e.g., if
        the domain record no longer exists)
    """
    # Re-query the API and wrap the fresh "domain_record" payload in a
    # new record object attached to the same domain.
    return self.domain._record(self.doapi_manager.request(self.url)['domain_record'])
|
def primary_spin(mass1, mass2, spin1, spin2):
    """Returns the dimensionless spin of the primary mass."""
    mass1, mass2, spin1, spin2, input_is_array = ensurearray(
        mass1, mass2, spin1, spin2)
    # Start from spin1 and overwrite the entries where body 2 is in
    # fact the more massive (primary) one.
    primary = copy.copy(spin1)
    body2_is_primary = mass1 < mass2
    primary[body2_is_primary] = spin2[body2_is_primary]
    return formatreturn(primary, input_is_array)
|
def function[primary_spin, parameter[mass1, mass2, spin1, spin2]]:
constant[Returns the dimensionless spin of the primary mass.]
<ast.Tuple object at 0x7da18dc04460> assign[=] call[name[ensurearray], parameter[name[mass1], name[mass2], name[spin1], name[spin2]]]
variable[sp] assign[=] call[name[copy].copy, parameter[name[spin1]]]
variable[mask] assign[=] compare[name[mass1] less[<] name[mass2]]
call[name[sp]][name[mask]] assign[=] call[name[spin2]][name[mask]]
return[call[name[formatreturn], parameter[name[sp], name[input_is_array]]]]
|
keyword[def] identifier[primary_spin] ( identifier[mass1] , identifier[mass2] , identifier[spin1] , identifier[spin2] ):
literal[string]
identifier[mass1] , identifier[mass2] , identifier[spin1] , identifier[spin2] , identifier[input_is_array] = identifier[ensurearray] (
identifier[mass1] , identifier[mass2] , identifier[spin1] , identifier[spin2] )
identifier[sp] = identifier[copy] . identifier[copy] ( identifier[spin1] )
identifier[mask] = identifier[mass1] < identifier[mass2]
identifier[sp] [ identifier[mask] ]= identifier[spin2] [ identifier[mask] ]
keyword[return] identifier[formatreturn] ( identifier[sp] , identifier[input_is_array] )
|
def primary_spin(mass1, mass2, spin1, spin2):
"""Returns the dimensionless spin of the primary mass."""
(mass1, mass2, spin1, spin2, input_is_array) = ensurearray(mass1, mass2, spin1, spin2)
sp = copy.copy(spin1)
mask = mass1 < mass2
sp[mask] = spin2[mask]
return formatreturn(sp, input_is_array)
|
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
                 filterset='kepler_2mass',area=1,maglim=27,binaries=False,
                 trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
    """Runs get_trilegal perl script; optionally saves output into .h5 file

    Depends on a perl script provided by L. Girardi; calls the
    web form simulation, downloads the file, and (optionally) converts
    to HDF format.

    Uses A_V at infinity from :func:`utils.get_AV_infinity`.

    .. note::

        Would be desirable to re-write the get_trilegal script
        all in python.

    :param filename:
        Desired output filename. If extension not provided, it will
        be added.

    :param ra,dec:
        Coordinates (ecliptic) for line-of-sight simulation.

    :param folder: (optional)
        Folder to which to save file.  *Acknowledged, file control
        in this function is a bit wonky.*

    :param galactic: (optional)
        If ``True``, ``ra``/``dec`` are interpreted as galactic ``l``/``b``
        and no coordinate conversion is performed.

    :param filterset: (optional)
        Filter set for which to call TRILEGAL.

    :param area: (optional)
        Area of TRILEGAL simulation [sq. deg]

    :param maglim: (optional)
        Limiting magnitude in first mag (by default will be Kepler band)
        If want to limit in different band, then you have to
        got directly to the ``get_trilegal`` perl script.

    :param binaries: (optional)
        Whether to have TRILEGAL include binary stars.  Default ``False``.

    :param trilegal_version: (optional)
        Default ``'1.6'``.

    :param sigma_AV: (optional)
        Fractional spread in A_V along the line of sight.

    :param convert_h5: (optional)
        If true, text file downloaded from TRILEGAL will be converted
        into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
        path.
    """
    if galactic:
        l, b = ra, dec
    else:
        # Accept either Quantity coordinates or bare floats (assumed deg).
        try:
            c = SkyCoord(ra, dec)
        except UnitsError:
            c = SkyCoord(ra, dec, unit='deg')
        l, b = (c.galactic.l.value, c.galactic.b.value)
    # An absolute filename overrides the folder argument.
    if os.path.isabs(filename):
        folder = ''
    # Raw strings: '\.' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning now, SyntaxError in future Python versions).
    if not re.search(r'\.dat$', filename):
        outfile = '{}/{}.dat'.format(folder, filename)
    else:
        outfile = '{}/{}'.format(folder, filename)
    AV = get_AV_infinity(l, b, frame='galactic')
    trilegal_webcall(trilegal_version, l, b, area, binaries, AV, sigma_AV,
                     filterset, maglim, outfile)
    if convert_h5:
        df = pd.read_table(outfile, sep=r'\s+', skipfooter=1, engine='python')
        df = df.rename(columns={'#Gc': 'Gc'})
        # Tag every photometric column with a '_mag' suffix.
        for col in df.columns:
            if col not in NONMAG_COLS:
                df.rename(columns={col: '{}_mag'.format(col)}, inplace=True)
        if not re.search(r'\.h5$', filename):
            h5file = '{}/{}.h5'.format(folder, filename)
        else:
            h5file = '{}/{}'.format(folder, filename)
        df.to_hdf(h5file, 'df')
        # Record the simulation parameters as metadata on the stored frame.
        with pd.HDFStore(h5file) as store:
            attrs = store.get_storer('df').attrs
            attrs.trilegal_args = {'version': trilegal_version,
                                   'ra': ra, 'dec': dec,
                                   'l': l, 'b': b, 'area': area,
                                   'AV': AV, 'sigma_AV': sigma_AV,
                                   'filterset': filterset,
                                   'maglim': maglim,
                                   'binaries': binaries}
        # The raw text download is superseded by the HDF file.
        os.remove(outfile)
|
def function[get_trilegal, parameter[filename, ra, dec, folder, galactic, filterset, area, maglim, binaries, trilegal_version, sigma_AV, convert_h5]]:
constant[Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
:param ra,dec:
Coordinates (ecliptic) for line-of-sight simulation.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
:param maglim: (optional)
Limiting magnitude in first mag (by default will be Kepler band)
If want to limit in different band, then you have to
got directly to the ``get_trilegal`` perl script.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
]
if name[galactic] begin[:]
<ast.Tuple object at 0x7da1b265b3d0> assign[=] tuple[[<ast.Name object at 0x7da1b2658bb0>, <ast.Name object at 0x7da1b265f430>]]
if call[name[os].path.isabs, parameter[name[filename]]] begin[:]
variable[folder] assign[=] constant[]
if <ast.UnaryOp object at 0x7da1b265e710> begin[:]
variable[outfile] assign[=] call[constant[{}/{}.dat].format, parameter[name[folder], name[filename]]]
variable[AV] assign[=] call[name[get_AV_infinity], parameter[name[l], name[b]]]
call[name[trilegal_webcall], parameter[name[trilegal_version], name[l], name[b], name[area], name[binaries], name[AV], name[sigma_AV], name[filterset], name[maglim], name[outfile]]]
if name[convert_h5] begin[:]
variable[df] assign[=] call[name[pd].read_table, parameter[name[outfile]]]
variable[df] assign[=] call[name[df].rename, parameter[]]
for taget[name[col]] in starred[name[df].columns] begin[:]
if compare[name[col] <ast.NotIn object at 0x7da2590d7190> name[NONMAG_COLS]] begin[:]
call[name[df].rename, parameter[]]
if <ast.UnaryOp object at 0x7da1b268f580> begin[:]
variable[h5file] assign[=] call[constant[{}/{}.h5].format, parameter[name[folder], name[filename]]]
call[name[df].to_hdf, parameter[name[h5file], constant[df]]]
with call[name[pd].HDFStore, parameter[name[h5file]]] begin[:]
variable[attrs] assign[=] call[name[store].get_storer, parameter[constant[df]]].attrs
name[attrs].trilegal_args assign[=] dictionary[[<ast.Constant object at 0x7da1b268e3b0>, <ast.Constant object at 0x7da1b268e380>, <ast.Constant object at 0x7da1b268e350>, <ast.Constant object at 0x7da1b268e320>, <ast.Constant object at 0x7da1b268e2f0>, <ast.Constant object at 0x7da1b268e2c0>, <ast.Constant object at 0x7da1b268e290>, <ast.Constant object at 0x7da1b268e260>, <ast.Constant object at 0x7da1b268e230>, <ast.Constant object at 0x7da1b268e200>, <ast.Constant object at 0x7da1b268e1d0>], [<ast.Name object at 0x7da1b268e1a0>, <ast.Name object at 0x7da1b268e170>, <ast.Name object at 0x7da1b268e140>, <ast.Name object at 0x7da1b268e110>, <ast.Name object at 0x7da1b268e0e0>, <ast.Name object at 0x7da1b268e0b0>, <ast.Name object at 0x7da1b268e080>, <ast.Name object at 0x7da1b268e050>, <ast.Name object at 0x7da1b268e020>, <ast.Name object at 0x7da1b268dff0>, <ast.Name object at 0x7da1b268dfc0>]]
call[name[os].remove, parameter[name[outfile]]]
|
keyword[def] identifier[get_trilegal] ( identifier[filename] , identifier[ra] , identifier[dec] , identifier[folder] = literal[string] , identifier[galactic] = keyword[False] ,
identifier[filterset] = literal[string] , identifier[area] = literal[int] , identifier[maglim] = literal[int] , identifier[binaries] = keyword[False] ,
identifier[trilegal_version] = literal[string] , identifier[sigma_AV] = literal[int] , identifier[convert_h5] = keyword[True] ):
literal[string]
keyword[if] identifier[galactic] :
identifier[l] , identifier[b] = identifier[ra] , identifier[dec]
keyword[else] :
keyword[try] :
identifier[c] = identifier[SkyCoord] ( identifier[ra] , identifier[dec] )
keyword[except] identifier[UnitsError] :
identifier[c] = identifier[SkyCoord] ( identifier[ra] , identifier[dec] , identifier[unit] = literal[string] )
identifier[l] , identifier[b] =( identifier[c] . identifier[galactic] . identifier[l] . identifier[value] , identifier[c] . identifier[galactic] . identifier[b] . identifier[value] )
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ):
identifier[folder] = literal[string]
keyword[if] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[filename] ):
identifier[outfile] = literal[string] . identifier[format] ( identifier[folder] , identifier[filename] )
keyword[else] :
identifier[outfile] = literal[string] . identifier[format] ( identifier[folder] , identifier[filename] )
identifier[AV] = identifier[get_AV_infinity] ( identifier[l] , identifier[b] , identifier[frame] = literal[string] )
identifier[trilegal_webcall] ( identifier[trilegal_version] , identifier[l] , identifier[b] , identifier[area] , identifier[binaries] , identifier[AV] , identifier[sigma_AV] , identifier[filterset] , identifier[maglim] , identifier[outfile] )
keyword[if] identifier[convert_h5] :
identifier[df] = identifier[pd] . identifier[read_table] ( identifier[outfile] , identifier[sep] = literal[string] , identifier[skipfooter] = literal[int] , identifier[engine] = literal[string] )
identifier[df] = identifier[df] . identifier[rename] ( identifier[columns] ={ literal[string] : literal[string] })
keyword[for] identifier[col] keyword[in] identifier[df] . identifier[columns] :
keyword[if] identifier[col] keyword[not] keyword[in] identifier[NONMAG_COLS] :
identifier[df] . identifier[rename] ( identifier[columns] ={ identifier[col] : literal[string] . identifier[format] ( identifier[col] )}, identifier[inplace] = keyword[True] )
keyword[if] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[filename] ):
identifier[h5file] = literal[string] . identifier[format] ( identifier[folder] , identifier[filename] )
keyword[else] :
identifier[h5file] = literal[string] . identifier[format] ( identifier[folder] , identifier[filename] )
identifier[df] . identifier[to_hdf] ( identifier[h5file] , literal[string] )
keyword[with] identifier[pd] . identifier[HDFStore] ( identifier[h5file] ) keyword[as] identifier[store] :
identifier[attrs] = identifier[store] . identifier[get_storer] ( literal[string] ). identifier[attrs]
identifier[attrs] . identifier[trilegal_args] ={ literal[string] : identifier[trilegal_version] ,
literal[string] : identifier[ra] , literal[string] : identifier[dec] ,
literal[string] : identifier[l] , literal[string] : identifier[b] , literal[string] : identifier[area] ,
literal[string] : identifier[AV] , literal[string] : identifier[sigma_AV] ,
literal[string] : identifier[filterset] ,
literal[string] : identifier[maglim] ,
literal[string] : identifier[binaries] }
identifier[os] . identifier[remove] ( identifier[outfile] )
|
def get_trilegal(filename, ra, dec, folder='.', galactic=False, filterset='kepler_2mass', area=1, maglim=27, binaries=False, trilegal_version='1.6', sigma_AV=0.1, convert_h5=True):
"""Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
:param ra,dec:
Coordinates (ecliptic) for line-of-sight simulation.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
:param maglim: (optional)
Limiting magnitude in first mag (by default will be Kepler band)
If want to limit in different band, then you have to
got directly to the ``get_trilegal`` perl script.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
"""
if galactic:
(l, b) = (ra, dec) # depends on [control=['if'], data=[]]
else:
try:
c = SkyCoord(ra, dec) # depends on [control=['try'], data=[]]
except UnitsError:
c = SkyCoord(ra, dec, unit='deg') # depends on [control=['except'], data=[]]
(l, b) = (c.galactic.l.value, c.galactic.b.value)
if os.path.isabs(filename):
folder = '' # depends on [control=['if'], data=[]]
if not re.search('\\.dat$', filename):
outfile = '{}/{}.dat'.format(folder, filename) # depends on [control=['if'], data=[]]
else:
outfile = '{}/{}'.format(folder, filename)
AV = get_AV_infinity(l, b, frame='galactic')
#cmd = 'get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
# area,binaries,AV,sigma_AV,
# filterset,maglim,outfile)
#sp.Popen(cmd,shell=True).wait()
trilegal_webcall(trilegal_version, l, b, area, binaries, AV, sigma_AV, filterset, maglim, outfile)
if convert_h5:
df = pd.read_table(outfile, sep='\\s+', skipfooter=1, engine='python')
df = df.rename(columns={'#Gc': 'Gc'})
for col in df.columns:
if col not in NONMAG_COLS:
df.rename(columns={col: '{}_mag'.format(col)}, inplace=True) # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']]
if not re.search('\\.h5$', filename):
h5file = '{}/{}.h5'.format(folder, filename) # depends on [control=['if'], data=[]]
else:
h5file = '{}/{}'.format(folder, filename)
df.to_hdf(h5file, 'df')
with pd.HDFStore(h5file) as store:
attrs = store.get_storer('df').attrs
attrs.trilegal_args = {'version': trilegal_version, 'ra': ra, 'dec': dec, 'l': l, 'b': b, 'area': area, 'AV': AV, 'sigma_AV': sigma_AV, 'filterset': filterset, 'maglim': maglim, 'binaries': binaries} # depends on [control=['with'], data=['store']]
os.remove(outfile) # depends on [control=['if'], data=[]]
|
def get_all_events(self):
    """Make a list of all events in the TRIPS EKB.

    Collects the id of every EVENT and CC element found in self.tree,
    grouped by the text of its <type> child, and stores the result in
    self.all_events as {event_type: [event_id, ...]}. Ids listed in
    self._static_events are skipped.
    """
    self.all_events = {}
    # CC (causal-connective) elements are collected alongside EVENTs.
    events = self.tree.findall('EVENT')
    events += self.tree.findall('CC')
    for e in events:
        event_id = e.attrib['id']
        if event_id in self._static_events:
            continue
        event_type = e.find('type').text
        # setdefault is the idiomatic replacement for the
        # try/append/except-KeyError grouping pattern.
        self.all_events.setdefault(event_type, []).append(event_id)
|
def function[get_all_events, parameter[self]]:
constant[Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events.
]
name[self].all_events assign[=] dictionary[[], []]
variable[events] assign[=] call[name[self].tree.findall, parameter[constant[EVENT]]]
<ast.AugAssign object at 0x7da2054a4df0>
for taget[name[e]] in starred[name[events]] begin[:]
variable[event_id] assign[=] call[name[e].attrib][constant[id]]
if compare[name[event_id] in name[self]._static_events] begin[:]
continue
variable[event_type] assign[=] call[name[e].find, parameter[constant[type]]].text
<ast.Try object at 0x7da18f00ee60>
|
keyword[def] identifier[get_all_events] ( identifier[self] ):
literal[string]
identifier[self] . identifier[all_events] ={}
identifier[events] = identifier[self] . identifier[tree] . identifier[findall] ( literal[string] )
identifier[events] += identifier[self] . identifier[tree] . identifier[findall] ( literal[string] )
keyword[for] identifier[e] keyword[in] identifier[events] :
identifier[event_id] = identifier[e] . identifier[attrib] [ literal[string] ]
keyword[if] identifier[event_id] keyword[in] identifier[self] . identifier[_static_events] :
keyword[continue]
identifier[event_type] = identifier[e] . identifier[find] ( literal[string] ). identifier[text]
keyword[try] :
identifier[self] . identifier[all_events] [ identifier[event_type] ]. identifier[append] ( identifier[event_id] )
keyword[except] identifier[KeyError] :
identifier[self] . identifier[all_events] [ identifier[event_type] ]=[ identifier[event_id] ]
|
def get_all_events(self):
"""Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events.
"""
self.all_events = {}
events = self.tree.findall('EVENT')
events += self.tree.findall('CC')
for e in events:
event_id = e.attrib['id']
if event_id in self._static_events:
continue # depends on [control=['if'], data=[]]
event_type = e.find('type').text
try:
self.all_events[event_type].append(event_id) # depends on [control=['try'], data=[]]
except KeyError:
self.all_events[event_type] = [event_id] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['e']]
|
def order_by(self, **kwargs):
    """
    Orders the query by the key passed in +kwargs+. Only pass one key, as
    it cannot sort by multiple columns at once. Raises QueryInvalid if this
    method is called when there is already a custom order (i.e. this
    method was already called on this query). Analog to "ORDER BY" in SQL.
    """
    # A second call would silently discard the first ordering, so refuse it.
    if self._order_with:
        raise QueryInvalid("Cannot order by more than one column")
    # Take exactly one (column, direction) pair from kwargs.
    column, direction = kwargs.popitem()
    self._order_with = {column: direction}
    return self
|
def function[order_by, parameter[self]]:
constant[
Orders the query by the key passed in +kwargs+. Only pass one key, as
it cannot sort by multiple columns at once. Raises QueryInvalid if this
method is called when there is already a custom order (i.e. this
method was already called on this query). Analog to "ORDER BY" in SQL.
]
if name[self]._order_with begin[:]
<ast.Raise object at 0x7da1b149ce20>
name[self]._order_with assign[=] call[name[dict], parameter[list[[<ast.Call object at 0x7da1b149ec80>]]]]
return[name[self]]
|
keyword[def] identifier[order_by] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_order_with] :
keyword[raise] identifier[QueryInvalid] ( literal[string] )
identifier[self] . identifier[_order_with] = identifier[dict] ([ identifier[kwargs] . identifier[popitem] ()])
keyword[return] identifier[self]
|
def order_by(self, **kwargs):
"""
Orders the query by the key passed in +kwargs+. Only pass one key, as
it cannot sort by multiple columns at once. Raises QueryInvalid if this
method is called when there is already a custom order (i.e. this
method was already called on this query). Analog to "ORDER BY" in SQL.
"""
# Only get one thing from kwargs (we can only order by one thing...)
if self._order_with:
raise QueryInvalid('Cannot order by more than one column') # depends on [control=['if'], data=[]]
self._order_with = dict([kwargs.popitem()])
return self
|
def niceStringify(self, level):
    """Returns a string representation with new lines and shifts"""
    shift = level * "    "
    # Header line: position info, line/pos of the colon, and the name.
    header = (shift + "Function[" +
              ":".join([str(self.keywordLine),
                        str(self.keywordPos),
                        self._getLPA(),
                        str(self.colonLine),
                        str(self.colonPos)]) +
              "]: '" + self.name + "'")
    if self.isAsync:
        header += " (async)"
    if self.returnAnnotation is not None:
        header += " -> '" + self.returnAnnotation + "'"
    pieces = [header]
    for arg in self.arguments:
        pieces.append(shift + "Argument: '" + str(arg) + "'")
    for decor in self.decorators:
        pieces.append(shift + str(decor))
    if self.docstring is not None:
        pieces.append(shift + str(self.docstring))
    # Nested scopes render themselves one indent level deeper.
    for nested in self.functions:
        pieces.append(nested.niceStringify(level + 1))
    for nested in self.classes:
        pieces.append(nested.niceStringify(level + 1))
    return "\n".join(pieces)
|
def function[niceStringify, parameter[self, level]]:
constant[Returns a string representation with new lines and shifts]
variable[out] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[level] * constant[ ]] + constant[Function[]] + call[name[str], parameter[name[self].keywordLine]]] + constant[:]] + call[name[str], parameter[name[self].keywordPos]]] + constant[:]] + call[name[self]._getLPA, parameter[]]] + constant[:]] + call[name[str], parameter[name[self].colonLine]]] + constant[:]] + call[name[str], parameter[name[self].colonPos]]] + constant[]: ']] + name[self].name] + constant[']]
if name[self].isAsync begin[:]
<ast.AugAssign object at 0x7da1b244a4a0>
if compare[name[self].returnAnnotation is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b244a230>
for taget[name[item]] in starred[name[self].arguments] begin[:]
<ast.AugAssign object at 0x7da1b2449570>
for taget[name[item]] in starred[name[self].decorators] begin[:]
<ast.AugAssign object at 0x7da1b24495d0>
if compare[name[self].docstring is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b244b0d0>
for taget[name[item]] in starred[name[self].functions] begin[:]
<ast.AugAssign object at 0x7da1b244b610>
for taget[name[item]] in starred[name[self].classes] begin[:]
<ast.AugAssign object at 0x7da1b2448730>
return[name[out]]
|
keyword[def] identifier[niceStringify] ( identifier[self] , identifier[level] ):
literal[string]
identifier[out] = identifier[level] * literal[string] + literal[string] + identifier[str] ( identifier[self] . identifier[keywordLine] )+ literal[string] + identifier[str] ( identifier[self] . identifier[keywordPos] )+ literal[string] + identifier[self] . identifier[_getLPA] ()+ literal[string] + identifier[str] ( identifier[self] . identifier[colonLine] )+ literal[string] + identifier[str] ( identifier[self] . identifier[colonPos] )+ literal[string] + identifier[self] . identifier[name] + literal[string]
keyword[if] identifier[self] . identifier[isAsync] :
identifier[out] += literal[string]
keyword[if] identifier[self] . identifier[returnAnnotation] keyword[is] keyword[not] keyword[None] :
identifier[out] += literal[string] + identifier[self] . identifier[returnAnnotation] + literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[arguments] :
identifier[out] += literal[string] + identifier[level] * literal[string] + literal[string] + identifier[str] ( identifier[item] )+ literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[decorators] :
identifier[out] += literal[string] + identifier[level] * literal[string] + identifier[str] ( identifier[item] )
keyword[if] identifier[self] . identifier[docstring] keyword[is] keyword[not] keyword[None] :
identifier[out] += literal[string] + identifier[level] * literal[string] + identifier[str] ( identifier[self] . identifier[docstring] )
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[functions] :
identifier[out] += literal[string] + identifier[item] . identifier[niceStringify] ( identifier[level] + literal[int] )
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[classes] :
identifier[out] += literal[string] + identifier[item] . identifier[niceStringify] ( identifier[level] + literal[int] )
keyword[return] identifier[out]
|
def niceStringify(self, level):
"""Returns a string representation with new lines and shifts"""
out = level * ' ' + 'Function[' + str(self.keywordLine) + ':' + str(self.keywordPos) + ':' + self._getLPA() + ':' + str(self.colonLine) + ':' + str(self.colonPos) + "]: '" + self.name + "'"
if self.isAsync:
out += ' (async)' # depends on [control=['if'], data=[]]
if self.returnAnnotation is not None:
out += " -> '" + self.returnAnnotation + "'" # depends on [control=['if'], data=[]]
for item in self.arguments:
out += '\n' + level * ' ' + "Argument: '" + str(item) + "'" # depends on [control=['for'], data=['item']]
for item in self.decorators:
out += '\n' + level * ' ' + str(item) # depends on [control=['for'], data=['item']]
if self.docstring is not None:
out += '\n' + level * ' ' + str(self.docstring) # depends on [control=['if'], data=[]]
for item in self.functions:
out += '\n' + item.niceStringify(level + 1) # depends on [control=['for'], data=['item']]
for item in self.classes:
out += '\n' + item.niceStringify(level + 1) # depends on [control=['for'], data=['item']]
return out
|
def make_empty_table(row_count, column_count):
    """
    Make an empty table

    Parameters
    ----------
    row_count : int
        The number of rows in the new table
    column_count : int
        The number of columns in the new table

    Returns
    -------
    table : list of lists of str
        Each cell will be an empty str ('')
    """
    # Comprehension replaces the manual while-loop with a decrementing
    # counter; each iteration builds a distinct row list so mutating one
    # row never affects another.
    return [[''] * column_count for _ in range(row_count)]
|
def function[make_empty_table, parameter[row_count, column_count]]:
constant[
Make an empty table
Parameters
----------
row_count : int
The number of rows in the new table
column_count : int
The number of columns in the new table
Returns
-------
table : list of lists of str
Each cell will be an empty str ('')
]
variable[table] assign[=] list[[]]
while compare[name[row_count] greater[>] constant[0]] begin[:]
variable[row] assign[=] list[[]]
for taget[name[column]] in starred[call[name[range], parameter[name[column_count]]]] begin[:]
call[name[row].append, parameter[constant[]]]
call[name[table].append, parameter[name[row]]]
<ast.AugAssign object at 0x7da1b10c4e50>
return[name[table]]
|
keyword[def] identifier[make_empty_table] ( identifier[row_count] , identifier[column_count] ):
literal[string]
identifier[table] =[]
keyword[while] identifier[row_count] > literal[int] :
identifier[row] =[]
keyword[for] identifier[column] keyword[in] identifier[range] ( identifier[column_count] ):
identifier[row] . identifier[append] ( literal[string] )
identifier[table] . identifier[append] ( identifier[row] )
identifier[row_count] -= literal[int]
keyword[return] identifier[table]
|
def make_empty_table(row_count, column_count):
"""
Make an empty table
Parameters
----------
row_count : int
The number of rows in the new table
column_count : int
The number of columns in the new table
Returns
-------
table : list of lists of str
Each cell will be an empty str ('')
"""
table = []
while row_count > 0:
row = []
for column in range(column_count):
row.append('') # depends on [control=['for'], data=[]]
table.append(row)
row_count -= 1 # depends on [control=['while'], data=['row_count']]
return table
|
def find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq, promoter = False):
    """
    Compare two aligned nucleotide sequences and collect their differences.

    This function takes two alligned sequence (subject and query), and the
    position on the subject where the alignment starts. The sequences are
    compared one nucleotide at a time. If mis matches are found they are
    saved. If a gap is found the function find_nuc_indel is called to find
    the entire indel and it is also saved into the list mis_matches. If
    promoter sequences are given as arguments, these are reversed the and
    the absolut value of the sequence position used, but when mutations
    are saved the negative value and det reverse sequences are saved in
    mis_mathces.

    Returns a list of entries of the form
    [mut_type, start_pos, end_pos, alt_seq, mut_name, ref, alt],
    where mut_type is "sub", "ins" or "del".
    """
    # Initiate the mis_matches list that will store all found mismatches
    mis_matches = []
    sbjct_start = abs(sbjct_start)
    seq_pos = sbjct_start
    # Set variables depending on promoter status: promoter positions are
    # reported as negative numbers with an "n." prefix, coding as "r."
    factor = 1
    mut_prefix = "r."
    if promoter == True:
        factor = (-1)
        mut_prefix = "n."
        # Reverse promoter sequences
        sbjct_seq = sbjct_seq[::-1]
        qry_seq = qry_seq[::-1]
    # Go through sequences one nucleotide at a time
    # 'shift' accumulates how far the index has jumped past indels already
    # consumed, so 'index' itself still advances one step per iteration.
    shift = 0
    for index in range(sbjct_start - 1, len(sbjct_seq)):
        mut_name = mut_prefix
        mut = ""
        # Shift index according to gaps
        i = index + shift
        # If the end of the sequence is reached, stop
        if i == len(sbjct_seq):
            break
        sbjct_nuc = sbjct_seq[i]
        qry_nuc = qry_seq[i]
        # Check for mis matches (case-insensitive comparison)
        if sbjct_nuc.upper() != qry_nuc.upper():
            # check for insertions and deletions
            if sbjct_nuc == "-" or qry_nuc == "-":
                if sbjct_nuc == "-":
                    # Gap in subject = insertion in query; the insertion is
                    # named between the flanking subject positions.
                    mut = "ins"
                    indel_start_pos = (seq_pos -1) *factor
                    indel_end_pos = seq_pos * factor
                    indel = find_nuc_indel(sbjct_seq[i:], qry_seq[i:])
                else:
                    # Gap in query = deletion; positions span the deleted
                    # subject bases, and seq_pos advances past them.
                    mut = "del"
                    indel_start_pos = seq_pos * factor
                    indel = find_nuc_indel(qry_seq[i:], sbjct_seq[i:])
                    indel_end_pos = (seq_pos + len(indel) - 1) * factor
                    seq_pos += len(indel) - 1
                # Shift the index to the end of the indel
                shift += len(indel) - 1
                # Write mutation name, depending on sequence
                if len(indel) == 1 and mut == "del":
                    mut_name += str(indel_start_pos) + mut + indel
                else:
                    if promoter == True:
                        # Reverse the sequence and the start and end positions
                        indel = indel[::-1]
                        temp = indel_start_pos
                        indel_start_pos = indel_end_pos
                        indel_end_pos = temp
                    mut_name += str(indel_start_pos) + "_" +str(indel_end_pos) + mut + indel
                mis_matches += [[mut, seq_pos * factor, seq_pos * factor, indel, mut_name, mut, indel]]
            # Check for substitutions mutations
            else:
                mut = "sub"
                mut_name += str(seq_pos * factor) + sbjct_nuc + ">" + qry_nuc
                mis_matches += [[mut, seq_pos * factor, seq_pos * factor, qry_nuc, mut_name, sbjct_nuc, qry_nuc]]
        # Increment sequence position (insertions consume no subject bases)
        if mut != "ins":
            seq_pos += 1
    return mis_matches
|
def function[find_nucleotid_mismatches, parameter[sbjct_start, sbjct_seq, qry_seq, promoter]]:
constant[
This function takes two alligned sequence (subject and query), and the
position on the subject where the alignment starts. The sequences are
compared one nucleotide at a time. If mis matches are found they are
saved. If a gap is found the function find_nuc_indel is called to find
the entire indel and it is also saved into the list mis_matches. If
promoter sequences are given as arguments, these are reversed the and
the absolut value of the sequence position used, but when mutations
are saved the negative value and det reverse sequences are saved in
mis_mathces.
]
variable[mis_matches] assign[=] list[[]]
variable[sbjct_start] assign[=] call[name[abs], parameter[name[sbjct_start]]]
variable[seq_pos] assign[=] name[sbjct_start]
variable[factor] assign[=] constant[1]
variable[mut_prefix] assign[=] constant[r.]
if compare[name[promoter] equal[==] constant[True]] begin[:]
variable[factor] assign[=] <ast.UnaryOp object at 0x7da1b11450f0>
variable[mut_prefix] assign[=] constant[n.]
variable[sbjct_seq] assign[=] call[name[sbjct_seq]][<ast.Slice object at 0x7da1b1144ee0>]
variable[qry_seq] assign[=] call[name[qry_seq]][<ast.Slice object at 0x7da1b1145120>]
variable[shift] assign[=] constant[0]
for taget[name[index]] in starred[call[name[range], parameter[binary_operation[name[sbjct_start] - constant[1]], call[name[len], parameter[name[sbjct_seq]]]]]] begin[:]
variable[mut_name] assign[=] name[mut_prefix]
variable[mut] assign[=] constant[]
variable[i] assign[=] binary_operation[name[index] + name[shift]]
if compare[name[i] equal[==] call[name[len], parameter[name[sbjct_seq]]]] begin[:]
break
variable[sbjct_nuc] assign[=] call[name[sbjct_seq]][name[i]]
variable[qry_nuc] assign[=] call[name[qry_seq]][name[i]]
if compare[call[name[sbjct_nuc].upper, parameter[]] not_equal[!=] call[name[qry_nuc].upper, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1144910> begin[:]
if compare[name[sbjct_nuc] equal[==] constant[-]] begin[:]
variable[mut] assign[=] constant[ins]
variable[indel_start_pos] assign[=] binary_operation[binary_operation[name[seq_pos] - constant[1]] * name[factor]]
variable[indel_end_pos] assign[=] binary_operation[name[seq_pos] * name[factor]]
variable[indel] assign[=] call[name[find_nuc_indel], parameter[call[name[sbjct_seq]][<ast.Slice object at 0x7da1b10c42b0>], call[name[qry_seq]][<ast.Slice object at 0x7da1b10c7820>]]]
<ast.AugAssign object at 0x7da1b113f4f0>
if <ast.BoolOp object at 0x7da1b113fa60> begin[:]
<ast.AugAssign object at 0x7da1b113feb0>
<ast.AugAssign object at 0x7da1b113fdc0>
if compare[name[mut] not_equal[!=] constant[ins]] begin[:]
<ast.AugAssign object at 0x7da1b1192410>
return[name[mis_matches]]
|
keyword[def] identifier[find_nucleotid_mismatches] ( identifier[sbjct_start] , identifier[sbjct_seq] , identifier[qry_seq] , identifier[promoter] = keyword[False] ):
literal[string]
identifier[mis_matches] =[]
identifier[sbjct_start] = identifier[abs] ( identifier[sbjct_start] )
identifier[seq_pos] = identifier[sbjct_start]
identifier[factor] = literal[int]
identifier[mut_prefix] = literal[string]
keyword[if] identifier[promoter] == keyword[True] :
identifier[factor] =(- literal[int] )
identifier[mut_prefix] = literal[string]
identifier[sbjct_seq] = identifier[sbjct_seq] [::- literal[int] ]
identifier[qry_seq] = identifier[qry_seq] [::- literal[int] ]
identifier[shift] = literal[int]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[sbjct_start] - literal[int] , identifier[len] ( identifier[sbjct_seq] )):
identifier[mut_name] = identifier[mut_prefix]
identifier[mut] = literal[string]
identifier[i] = identifier[index] + identifier[shift]
keyword[if] identifier[i] == identifier[len] ( identifier[sbjct_seq] ):
keyword[break]
identifier[sbjct_nuc] = identifier[sbjct_seq] [ identifier[i] ]
identifier[qry_nuc] = identifier[qry_seq] [ identifier[i] ]
keyword[if] identifier[sbjct_nuc] . identifier[upper] ()!= identifier[qry_nuc] . identifier[upper] ():
keyword[if] identifier[sbjct_nuc] == literal[string] keyword[or] identifier[qry_nuc] == literal[string] :
keyword[if] identifier[sbjct_nuc] == literal[string] :
identifier[mut] = literal[string]
identifier[indel_start_pos] =( identifier[seq_pos] - literal[int] )* identifier[factor]
identifier[indel_end_pos] = identifier[seq_pos] * identifier[factor]
identifier[indel] = identifier[find_nuc_indel] ( identifier[sbjct_seq] [ identifier[i] :], identifier[qry_seq] [ identifier[i] :])
keyword[else] :
identifier[mut] = literal[string]
identifier[indel_start_pos] = identifier[seq_pos] * identifier[factor]
identifier[indel] = identifier[find_nuc_indel] ( identifier[qry_seq] [ identifier[i] :], identifier[sbjct_seq] [ identifier[i] :])
identifier[indel_end_pos] =( identifier[seq_pos] + identifier[len] ( identifier[indel] )- literal[int] )* identifier[factor]
identifier[seq_pos] += identifier[len] ( identifier[indel] )- literal[int]
identifier[shift] += identifier[len] ( identifier[indel] )- literal[int]
keyword[if] identifier[len] ( identifier[indel] )== literal[int] keyword[and] identifier[mut] == literal[string] :
identifier[mut_name] += identifier[str] ( identifier[indel_start_pos] )+ identifier[mut] + identifier[indel]
keyword[else] :
keyword[if] identifier[promoter] == keyword[True] :
identifier[indel] = identifier[indel] [::- literal[int] ]
identifier[temp] = identifier[indel_start_pos]
identifier[indel_start_pos] = identifier[indel_end_pos]
identifier[indel_end_pos] = identifier[temp]
identifier[mut_name] += identifier[str] ( identifier[indel_start_pos] )+ literal[string] + identifier[str] ( identifier[indel_end_pos] )+ identifier[mut] + identifier[indel]
identifier[mis_matches] +=[[ identifier[mut] , identifier[seq_pos] * identifier[factor] , identifier[seq_pos] * identifier[factor] , identifier[indel] , identifier[mut_name] , identifier[mut] , identifier[indel] ]]
keyword[else] :
identifier[mut] = literal[string]
identifier[mut_name] += identifier[str] ( identifier[seq_pos] * identifier[factor] )+ identifier[sbjct_nuc] + literal[string] + identifier[qry_nuc]
identifier[mis_matches] +=[[ identifier[mut] , identifier[seq_pos] * identifier[factor] , identifier[seq_pos] * identifier[factor] , identifier[qry_nuc] , identifier[mut_name] , identifier[sbjct_nuc] , identifier[qry_nuc] ]]
keyword[if] identifier[mut] != literal[string] :
identifier[seq_pos] += literal[int]
keyword[return] identifier[mis_matches]
|
def find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq, promoter=False):
    """
    This function takes two aligned sequences (subject and query), and the
    position on the subject where the alignment starts. The sequences are
    compared one nucleotide at a time. If mismatches are found they are
    saved. If a gap is found the function find_nuc_indel is called to find
    the entire indel and it is also saved into the list mis_matches. If
    promoter sequences are given as arguments, these are reversed and the
    absolute value of the sequence position is used, but when mutations
    are saved the negative position and the reversed sequences are saved
    in mis_matches.

    Each entry of the returned list has the shape
    [mut_type, start_pos, end_pos, alt, mut_name, ref, alt].
    """
    # Initiate the mis_matches list that will store all found mismatches
    mis_matches = []
    sbjct_start = abs(sbjct_start)
    seq_pos = sbjct_start
    # Set variables depending on promoter status; positions of promoter
    # mutations are reported as negative numbers with an 'n.' prefix.
    factor = 1
    mut_prefix = 'r.'
    if promoter == True:
        factor = -1
        mut_prefix = 'n.'
        # Reverse promoter sequences
        sbjct_seq = sbjct_seq[::-1]
        qry_seq = qry_seq[::-1] # depends on [control=['if'], data=[]]
    # Go through sequences one nucleotide at a time
    shift = 0
    for index in range(sbjct_start - 1, len(sbjct_seq)):
        mut_name = mut_prefix
        mut = ''
        # Shift index according to gaps
        i = index + shift
        # If the end of the sequence is reached, stop
        if i == len(sbjct_seq):
            break # depends on [control=['if'], data=[]]
        sbjct_nuc = sbjct_seq[i]
        qry_nuc = qry_seq[i]
        # Check for mismatches
        if sbjct_nuc.upper() != qry_nuc.upper():
            # check for insertions and deletions (a '-' marks a gap)
            if sbjct_nuc == '-' or qry_nuc == '-':
                if sbjct_nuc == '-':
                    mut = 'ins'
                    indel_start_pos = (seq_pos - 1) * factor
                    indel_end_pos = seq_pos * factor
                    indel = find_nuc_indel(sbjct_seq[i:], qry_seq[i:]) # depends on [control=['if'], data=[]]
                else:
                    mut = 'del'
                    indel_start_pos = seq_pos * factor
                    indel = find_nuc_indel(qry_seq[i:], sbjct_seq[i:])
                    indel_end_pos = (seq_pos + len(indel) - 1) * factor
                    # Deletions consume subject positions; advance past them.
                    seq_pos += len(indel) - 1
                # Shift the index to the end of the indel
                shift += len(indel) - 1
                # Write mutation name, depending on sequence
                if len(indel) == 1 and mut == 'del':
                    mut_name += str(indel_start_pos) + mut + indel # depends on [control=['if'], data=[]]
                else:
                    if promoter == True:
                        # Reverse the sequence and swap the start and end
                        # positions so the name reads in promoter orientation.
                        indel = indel[::-1]
                        temp = indel_start_pos
                        indel_start_pos = indel_end_pos
                        indel_end_pos = temp # depends on [control=['if'], data=[]]
                    mut_name += str(indel_start_pos) + '_' + str(indel_end_pos) + mut + indel
                mis_matches += [[mut, seq_pos * factor, seq_pos * factor, indel, mut_name, mut, indel]] # depends on [control=['if'], data=[]]
            else:
                # Check for substitution mutations
                mut = 'sub'
                mut_name += str(seq_pos * factor) + sbjct_nuc + '>' + qry_nuc
                mis_matches += [[mut, seq_pos * factor, seq_pos * factor, qry_nuc, mut_name, sbjct_nuc, qry_nuc]] # depends on [control=['if'], data=[]]
        # Increment sequence position (insertions do not consume a
        # subject position, so they leave seq_pos unchanged)
        if mut != 'ins':
            seq_pos += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
    return mis_matches
|
def build_local_filename(download_url=None, filename=None, decompress=False):
    """
    Determine which local filename to use based on the file's source URL,
    an optional desired filename, and whether a compression suffix needs
    to be removed
    """
    assert download_url or filename, "Either filename or URL must be specified"
    if not filename:
        # No explicit name given: derive a deterministic, sanitized name
        # from the URL so the same URL always maps to the same local file.
        url_digest = hashlib.md5(download_url.encode('utf-8')).hexdigest()
        joined_parts = "_".join(split(download_url))
        filename = normalize_filename(url_digest + "." + joined_parts)
    if decompress:
        # Drop a trailing compression suffix, if one is present.
        stem, suffix = splitext(filename)
        if suffix == ".gz" or suffix == ".zip":
            filename = stem
    return filename
|
def function[build_local_filename, parameter[download_url, filename, decompress]]:
constant[
Determine which local filename to use based on the file's source URL,
an optional desired filename, and whether a compression suffix needs
to be removed
]
assert[<ast.BoolOp object at 0x7da1b0bdb760>]
if <ast.UnaryOp object at 0x7da1b0bd89a0> begin[:]
variable[digest] assign[=] call[call[name[hashlib].md5, parameter[call[name[download_url].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]
variable[parts] assign[=] call[name[split], parameter[name[download_url]]]
variable[filename] assign[=] binary_operation[binary_operation[name[digest] + constant[.]] + call[constant[_].join, parameter[name[parts]]]]
variable[filename] assign[=] call[name[normalize_filename], parameter[name[filename]]]
if name[decompress] begin[:]
<ast.Tuple object at 0x7da1b0baef80> assign[=] call[name[splitext], parameter[name[filename]]]
if compare[name[ext] in tuple[[<ast.Constant object at 0x7da1b0bac220>, <ast.Constant object at 0x7da1b0bace80>]]] begin[:]
variable[filename] assign[=] name[base]
return[name[filename]]
|
keyword[def] identifier[build_local_filename] ( identifier[download_url] = keyword[None] , identifier[filename] = keyword[None] , identifier[decompress] = keyword[False] ):
literal[string]
keyword[assert] identifier[download_url] keyword[or] identifier[filename] , literal[string]
keyword[if] keyword[not] identifier[filename] :
identifier[digest] = identifier[hashlib] . identifier[md5] ( identifier[download_url] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()
identifier[parts] = identifier[split] ( identifier[download_url] )
identifier[filename] = identifier[digest] + literal[string] + literal[string] . identifier[join] ( identifier[parts] )
identifier[filename] = identifier[normalize_filename] ( identifier[filename] )
keyword[if] identifier[decompress] :
( identifier[base] , identifier[ext] )= identifier[splitext] ( identifier[filename] )
keyword[if] identifier[ext] keyword[in] ( literal[string] , literal[string] ):
identifier[filename] = identifier[base]
keyword[return] identifier[filename]
|
def build_local_filename(download_url=None, filename=None, decompress=False):
"""
Determine which local filename to use based on the file's source URL,
an optional desired filename, and whether a compression suffix needs
to be removed
"""
assert download_url or filename, 'Either filename or URL must be specified'
# if no filename provided, use the original filename on the server
if not filename:
digest = hashlib.md5(download_url.encode('utf-8')).hexdigest()
parts = split(download_url)
filename = digest + '.' + '_'.join(parts) # depends on [control=['if'], data=[]]
filename = normalize_filename(filename)
if decompress:
(base, ext) = splitext(filename)
if ext in ('.gz', '.zip'):
filename = base # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return filename
|
def _package_path(package):
    """Return the full path to the default configuration file for a package.

    Args:
        package (str): name of the python package to return a path for.
    """
    from os import path
    config_file = "{}.cfg".format(package)
    return path.join(config_dir(), config_file)
|
def function[_package_path, parameter[package]]:
constant[Returns the full path to the default package configuration file.
Args:
package (str): name of the python package to return a path for.
]
from relative_module[os] import module[path]
variable[confdir] assign[=] call[name[config_dir], parameter[]]
return[call[name[path].join, parameter[name[confdir], call[constant[{}.cfg].format, parameter[name[package]]]]]]
|
keyword[def] identifier[_package_path] ( identifier[package] ):
literal[string]
keyword[from] identifier[os] keyword[import] identifier[path]
identifier[confdir] = identifier[config_dir] ()
keyword[return] identifier[path] . identifier[join] ( identifier[confdir] , literal[string] . identifier[format] ( identifier[package] ))
|
def _package_path(package):
"""Returns the full path to the default package configuration file.
Args:
package (str): name of the python package to return a path for.
"""
from os import path
confdir = config_dir()
return path.join(confdir, '{}.cfg'.format(package))
|
def get_proficiency_lookup_session(self):
    """Gets the OsidSession associated with the proficiency lookup
    service.
    return: (osid.learning.ProficiencyLookupSession) - a
            ProficiencyLookupSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_proficiency_lookup() is false
    compliance: optional - This method must be implemented if
                supports_proficiency_lookup() is true.
    """
    # The provider must advertise support before a session is handed out.
    if not self.supports_proficiency_lookup():
        raise Unimplemented()
    # A missing sessions module is reported to the caller as a failure to
    # complete the request.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    # Likewise, an absent session class surfaces as OperationFailed.
    try:
        return sessions.ProficiencyLookupSession(runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
|
def function[get_proficiency_lookup_session, parameter[self]]:
constant[Gets the OsidSession associated with the proficiency lookup
service.
return: (osid.learning.ProficiencyLookupSession) - a
ProficiencyLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_proficiency_lookup() is false
compliance: optional - This method must be implemented if
supports_proficiency_lookup() is true.
]
if <ast.UnaryOp object at 0x7da1b0a22770> begin[:]
<ast.Raise object at 0x7da1b0a202e0>
<ast.Try object at 0x7da1b0a23580>
<ast.Try object at 0x7da1b0a22f50>
return[name[session]]
|
keyword[def] identifier[get_proficiency_lookup_session] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_proficiency_lookup] ():
keyword[raise] identifier[Unimplemented] ()
keyword[try] :
keyword[from] . keyword[import] identifier[sessions]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[OperationFailed] ()
keyword[try] :
identifier[session] = identifier[sessions] . identifier[ProficiencyLookupSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[OperationFailed] ()
keyword[return] identifier[session]
|
def get_proficiency_lookup_session(self):
"""Gets the OsidSession associated with the proficiency lookup
service.
return: (osid.learning.ProficiencyLookupSession) - a
ProficiencyLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_proficiency_lookup() is false
compliance: optional - This method must be implemented if
supports_proficiency_lookup() is true.
"""
if not self.supports_proficiency_lookup():
raise Unimplemented() # depends on [control=['if'], data=[]]
try:
from . import sessions # depends on [control=['try'], data=[]]
except ImportError:
raise OperationFailed() # depends on [control=['except'], data=[]]
try:
session = sessions.ProficiencyLookupSession(runtime=self._runtime) # depends on [control=['try'], data=[]]
except AttributeError:
raise OperationFailed() # depends on [control=['except'], data=[]]
return session
|
def create_header_data(coord, radius=10., **kwargs):
    """ Make an empty sky region at the location given by coord.

    coord : sky direction object, forwarded to create_header
    radius : size of region (deg.)
    kwargs : arguments passed to create_header

    Returns a (header, data) pair where data is a zero-filled array whose
    shape is taken from the header's NAXIS keywords.
    """
    hdr = create_header(coord, radius=radius, **kwargs)
    # NOTE(review): numpy shapes are (rows, cols); (NAXIS1, NAXIS2) ordering
    # is kept here to match the original behavior -- confirm axis order
    # against the FITS convention used by the callers.
    shape = (hdr['NAXIS1'], hdr['NAXIS2'])
    return hdr, np.zeros(shape)
|
def function[create_header_data, parameter[coord, radius]]:
constant[ Make an empty sky region at location of skydir
skydir : skymaps.SkyDir object
size : size of region (deg.)
kwargs : arguments passed to create_header
]
variable[header] assign[=] call[name[create_header], parameter[name[coord]]]
variable[data] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da18fe92710>, <ast.Subscript object at 0x7da18fe91e40>]]]]
return[tuple[[<ast.Name object at 0x7da18fe920b0>, <ast.Name object at 0x7da18fe93820>]]]
|
keyword[def] identifier[create_header_data] ( identifier[coord] , identifier[radius] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[header] = identifier[create_header] ( identifier[coord] , identifier[radius] = identifier[radius] ,** identifier[kwargs] )
identifier[data] = identifier[np] . identifier[zeros] (( identifier[header] [ literal[string] ], identifier[header] [ literal[string] ]))
keyword[return] identifier[header] , identifier[data]
|
def create_header_data(coord, radius=10.0, **kwargs):
""" Make an empty sky region at location of skydir
skydir : skymaps.SkyDir object
size : size of region (deg.)
kwargs : arguments passed to create_header
"""
header = create_header(coord, radius=radius, **kwargs)
data = np.zeros((header['NAXIS1'], header['NAXIS2']))
return (header, data)
|
def get_task_tree(white_list=None):
    """Returns a tree of Task instances

    The tree is comprised of dictionaries containing strings for
    keys and either dictionaries or Task instances for values.

    When WHITE_LIST is given, only the tasks and plugins in this
    list will become part of the task tree.  The WHITE_LIST may
    contain either strings, corresponding to the task or plugin
    __qualname__, or, preferably, links to the task function or
    plugin class itself.
    """
    assert white_list is None or isinstance(white_list, list), type(white_list)
    # Normalize the white list to a set of qualified names so the
    # membership tests below are O(1).
    if white_list is not None:
        white_list = set(item if isinstance(item, str) else item.__qualname__ for item in white_list)

    # Start with the plain (non-plugin) tasks, keyed by qualified name.
    tree = {task.qualified_name: task
            for task in _task_list.values()
            if white_list is None or task.qualified_name in white_list}

    plugins = get_plugin_list()
    for plugin in [plugin for plugin in plugins.values() if white_list is None or plugin.__qualname__ in white_list]:
        # Collect the plugin functions that were marked as yaz tasks.
        tasks = [func
                 for _, func
                 in inspect.getmembers(plugin)
                 if inspect.isfunction(func) and hasattr(func, "yaz_task_config")]
        if len(tasks) == 0:
            continue

        # Walk (creating nodes where needed) the nested dictionaries that
        # mirror the plugin's dotted __qualname__.
        node = tree
        for name in plugin.__qualname__.split("."):
            if name not in node:
                node[name] = {}
            node = node[name]

        for func in tasks:
            logger.debug("Found task %s", func)
            node[func.__name__] = Task(plugin_class=plugin, func=func, config=func.yaz_task_config)
    return tree
|
def function[get_task_tree, parameter[white_list]]:
constant[Returns a tree of Task instances
The tree is comprised of dictionaries containing strings for
keys and either dictionaries or Task instances for values.
When WHITE_LIST is given, only the tasks and plugins in this
list will become part of the task tree. The WHITE_LIST may
contain either strings, corresponding to the task of plugin
__qualname__, or, preferable, the WHITE_LIST contains
links to the task function or plugin class instead.
]
assert[<ast.BoolOp object at 0x7da1aff6fbb0>]
if compare[name[white_list] is_not constant[None]] begin[:]
variable[white_list] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1aff6f280>]]
variable[tree] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1aff6d630>]]
variable[plugins] assign[=] call[name[get_plugin_list], parameter[]]
for taget[name[plugin]] in starred[<ast.ListComp object at 0x7da1aff6efe0>] begin[:]
variable[tasks] assign[=] <ast.ListComp object at 0x7da1aff6f070>
if compare[call[name[len], parameter[name[tasks]]] equal[==] constant[0]] begin[:]
continue
variable[node] assign[=] name[tree]
for taget[name[name]] in starred[call[name[plugin].__qualname__.split, parameter[constant[.]]]] begin[:]
if <ast.UnaryOp object at 0x7da1aff6dc60> begin[:]
call[name[node]][name[name]] assign[=] dictionary[[], []]
variable[node] assign[=] call[name[node]][name[name]]
for taget[name[func]] in starred[name[tasks]] begin[:]
call[name[logger].debug, parameter[constant[Found task %s], name[func]]]
call[name[node]][name[func].__name__] assign[=] call[name[Task], parameter[]]
return[name[tree]]
|
keyword[def] identifier[get_task_tree] ( identifier[white_list] = keyword[None] ):
literal[string]
keyword[assert] identifier[white_list] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[white_list] , identifier[list] ), identifier[type] ( identifier[white_list] )
keyword[if] identifier[white_list] keyword[is] keyword[not] keyword[None] :
identifier[white_list] = identifier[set] ( identifier[item] keyword[if] identifier[isinstance] ( identifier[item] , identifier[str] ) keyword[else] identifier[item] . identifier[__qualname__] keyword[for] identifier[item] keyword[in] identifier[white_list] )
identifier[tree] = identifier[dict] (( identifier[task] . identifier[qualified_name] , identifier[task] )
keyword[for] identifier[task]
keyword[in] identifier[_task_list] . identifier[values] ()
keyword[if] identifier[white_list] keyword[is] keyword[None] keyword[or] identifier[task] . identifier[qualified_name] keyword[in] identifier[white_list] )
identifier[plugins] = identifier[get_plugin_list] ()
keyword[for] identifier[plugin] keyword[in] [ identifier[plugin] keyword[for] identifier[plugin] keyword[in] identifier[plugins] . identifier[values] () keyword[if] identifier[white_list] keyword[is] keyword[None] keyword[or] identifier[plugin] . identifier[__qualname__] keyword[in] identifier[white_list] ]:
identifier[tasks] =[ identifier[func]
keyword[for] identifier[_] , identifier[func]
keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[plugin] )
keyword[if] identifier[inspect] . identifier[isfunction] ( identifier[func] ) keyword[and] identifier[hasattr] ( identifier[func] , literal[string] )]
keyword[if] identifier[len] ( identifier[tasks] )== literal[int] :
keyword[continue]
identifier[node] = identifier[tree]
keyword[for] identifier[name] keyword[in] identifier[plugin] . identifier[__qualname__] . identifier[split] ( literal[string] ):
keyword[if] keyword[not] identifier[name] keyword[in] identifier[node] :
identifier[node] [ identifier[name] ]={}
identifier[node] = identifier[node] [ identifier[name] ]
keyword[for] identifier[func] keyword[in] identifier[tasks] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[func] )
identifier[node] [ identifier[func] . identifier[__name__] ]= identifier[Task] ( identifier[plugin_class] = identifier[plugin] , identifier[func] = identifier[func] , identifier[config] = identifier[func] . identifier[yaz_task_config] )
keyword[return] identifier[tree]
|
def get_task_tree(white_list=None):
"""Returns a tree of Task instances
The tree is comprised of dictionaries containing strings for
keys and either dictionaries or Task instances for values.
When WHITE_LIST is given, only the tasks and plugins in this
list will become part of the task tree. The WHITE_LIST may
contain either strings, corresponding to the task of plugin
__qualname__, or, preferable, the WHITE_LIST contains
links to the task function or plugin class instead.
"""
assert white_list is None or isinstance(white_list, list), type(white_list)
if white_list is not None:
white_list = set((item if isinstance(item, str) else item.__qualname__ for item in white_list)) # depends on [control=['if'], data=['white_list']]
tree = dict(((task.qualified_name, task) for task in _task_list.values() if white_list is None or task.qualified_name in white_list))
plugins = get_plugin_list()
for plugin in [plugin for plugin in plugins.values() if white_list is None or plugin.__qualname__ in white_list]:
tasks = [func for (_, func) in inspect.getmembers(plugin) if inspect.isfunction(func) and hasattr(func, 'yaz_task_config')]
if len(tasks) == 0:
continue # depends on [control=['if'], data=[]]
node = tree
for name in plugin.__qualname__.split('.'):
if not name in node:
node[name] = {} # depends on [control=['if'], data=[]]
node = node[name] # depends on [control=['for'], data=['name']]
for func in tasks:
logger.debug('Found task %s', func)
node[func.__name__] = Task(plugin_class=plugin, func=func, config=func.yaz_task_config) # depends on [control=['for'], data=['func']] # depends on [control=['for'], data=['plugin']]
return tree
|
def rotate(self, vecs):
    """Rotate the given vector(s) with this instance's rotation matrix.

    Args:
        vecs (np.ndarray): float32 input, either a single vector of shape
            (D, ) or a batch of vectors of shape (N, D).

    Returns:
        np.ndarray: rotated vectors with the same shape and dtype as the
        input vecs.
    """
    assert vecs.dtype == np.float32
    assert vecs.ndim in [1, 2]
    if vecs.ndim == 1:
        # Promote to a 1xD batch, rotate, then flatten back to (D, ).
        return (vecs.reshape(1, -1) @ self.R).reshape(-1)
    return vecs @ self.R
|
def function[rotate, parameter[self, vecs]]:
constant[Rotate input vector(s) by the rotation matrix.`
Args:
vecs (np.ndarray): Input vector(s) with dtype=np.float32.
The shape can be a single vector (D, ) or several vectors (N, D)
Returns:
np.ndarray: Rotated vectors with the same shape and dtype to the input vecs.
]
assert[compare[name[vecs].dtype equal[==] name[np].float32]]
assert[compare[name[vecs].ndim in list[[<ast.Constant object at 0x7da1b10707f0>, <ast.Constant object at 0x7da1b10711b0>]]]]
if compare[name[vecs].ndim equal[==] constant[2]] begin[:]
return[binary_operation[name[vecs] <ast.MatMult object at 0x7da2590d6860> name[self].R]]
|
keyword[def] identifier[rotate] ( identifier[self] , identifier[vecs] ):
literal[string]
keyword[assert] identifier[vecs] . identifier[dtype] == identifier[np] . identifier[float32]
keyword[assert] identifier[vecs] . identifier[ndim] keyword[in] [ literal[int] , literal[int] ]
keyword[if] identifier[vecs] . identifier[ndim] == literal[int] :
keyword[return] identifier[vecs] @ identifier[self] . identifier[R]
keyword[elif] identifier[vecs] . identifier[ndim] == literal[int] :
keyword[return] ( identifier[vecs] . identifier[reshape] ( literal[int] ,- literal[int] )@ identifier[self] . identifier[R] ). identifier[reshape] (- literal[int] )
|
def rotate(self, vecs):
"""Rotate input vector(s) by the rotation matrix.`
Args:
vecs (np.ndarray): Input vector(s) with dtype=np.float32.
The shape can be a single vector (D, ) or several vectors (N, D)
Returns:
np.ndarray: Rotated vectors with the same shape and dtype to the input vecs.
"""
assert vecs.dtype == np.float32
assert vecs.ndim in [1, 2]
if vecs.ndim == 2:
return vecs @ self.R # depends on [control=['if'], data=[]]
elif vecs.ndim == 1:
return (vecs.reshape(1, -1) @ self.R).reshape(-1) # depends on [control=['if'], data=[]]
|
def main(argv=None):
    """
    Entry point for the command line tool 'uflash'.
    Will print help text if the optional first argument is "help". Otherwise
    it will ensure the optional first argument ends in ".py" (the source
    Python script).
    An optional second argument is used to reference the path to the micro:bit
    device. Any more arguments are ignored.
    Exceptions are caught and printed for the user.
    """
    # Fall back to the process's own command line (minus the program name)
    # when no explicit argument list is supplied.
    if not argv:
        argv = sys.argv[1:]
    # Build the CLI: a positional source script, zero or more target device
    # paths, and the mode/option flags.
    parser = argparse.ArgumentParser(description=_HELP_TEXT)
    parser.add_argument('source', nargs='?', default=None)
    parser.add_argument('target', nargs='*', default=None)
    parser.add_argument('-r', '--runtime', default=None,
                        help="Use the referenced MicroPython runtime.")
    parser.add_argument('-e', '--extract',
                        action='store_true',
                        help=("Extract python source from a hex file"
                              " instead of creating the hex file."), )
    parser.add_argument('-w', '--watch',
                        action='store_true',
                        help='Watch the source file for changes.')
    parser.add_argument('-m', '--minify',
                        action='store_true',
                        help='Minify the source')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + get_version())
    args = parser.parse_args(argv)
    # Dispatch on the requested mode.  Every branch reports failures on
    # stderr and exits with status 1 instead of letting exceptions escape.
    if args.extract:
        # Recover the Python source embedded in an existing hex file.
        try:
            extract(args.source, args.target)
        except Exception as ex:
            error_message = "Error extracting {source}: {error!s}"
            print(error_message.format(source=args.source, error=ex),
                  file=sys.stderr)
            sys.exit(1)
    elif args.watch:
        # Re-flash the device whenever the source file changes on disk.
        try:
            watch_file(args.source, flash,
                       path_to_python=args.source,
                       paths_to_microbits=args.target,
                       path_to_runtime=args.runtime)
        except Exception as ex:
            error_message = "Error watching {source}: {error!s}"
            print(error_message.format(source=args.source, error=ex),
                  file=sys.stderr)
            sys.exit(1)
    else:
        # Default mode: flash the script (optionally minified, optionally
        # with a custom MicroPython runtime) onto the attached device(s).
        try:
            flash(path_to_python=args.source, paths_to_microbits=args.target,
                  path_to_runtime=args.runtime, minify=args.minify)
        except Exception as ex:
            error_message = (
                "Error flashing {source} to {target}{runtime}: {error!s}"
            )
            source = args.source
            target = args.target if args.target else "microbit"
            if args.runtime:
                runtime = "with runtime {runtime}".format(runtime=args.runtime)
            else:
                runtime = ""
            print(error_message.format(source=source, target=target,
                                       runtime=runtime, error=ex),
                  file=sys.stderr)
            sys.exit(1)
|
def function[main, parameter[argv]]:
constant[
Entry point for the command line tool 'uflash'.
Will print help text if the optional first argument is "help". Otherwise
it will ensure the optional first argument ends in ".py" (the source
Python script).
An optional second argument is used to reference the path to the micro:bit
device. Any more arguments are ignored.
Exceptions are caught and printed for the user.
]
if <ast.UnaryOp object at 0x7da18ede6c20> begin[:]
variable[argv] assign[=] call[name[sys].argv][<ast.Slice object at 0x7da18ede5d80>]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[source]]]
call[name[parser].add_argument, parameter[constant[target]]]
call[name[parser].add_argument, parameter[constant[-r], constant[--runtime]]]
call[name[parser].add_argument, parameter[constant[-e], constant[--extract]]]
call[name[parser].add_argument, parameter[constant[-w], constant[--watch]]]
call[name[parser].add_argument, parameter[constant[-m], constant[--minify]]]
call[name[parser].add_argument, parameter[constant[--version]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[argv]]]
if name[args].extract begin[:]
<ast.Try object at 0x7da18ede51b0>
|
keyword[def] identifier[main] ( identifier[argv] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[argv] :
identifier[argv] = identifier[sys] . identifier[argv] [ literal[int] :]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[_HELP_TEXT] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[action] = literal[string] ,
identifier[help] =( literal[string]
literal[string] ),)
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[version] = literal[string] + identifier[get_version] ())
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[argv] )
keyword[if] identifier[args] . identifier[extract] :
keyword[try] :
identifier[extract] ( identifier[args] . identifier[source] , identifier[args] . identifier[target] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[error_message] = literal[string]
identifier[print] ( identifier[error_message] . identifier[format] ( identifier[source] = identifier[args] . identifier[source] , identifier[error] = identifier[ex] ),
identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[elif] identifier[args] . identifier[watch] :
keyword[try] :
identifier[watch_file] ( identifier[args] . identifier[source] , identifier[flash] ,
identifier[path_to_python] = identifier[args] . identifier[source] ,
identifier[paths_to_microbits] = identifier[args] . identifier[target] ,
identifier[path_to_runtime] = identifier[args] . identifier[runtime] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[error_message] = literal[string]
identifier[print] ( identifier[error_message] . identifier[format] ( identifier[source] = identifier[args] . identifier[source] , identifier[error] = identifier[ex] ),
identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
keyword[try] :
identifier[flash] ( identifier[path_to_python] = identifier[args] . identifier[source] , identifier[paths_to_microbits] = identifier[args] . identifier[target] ,
identifier[path_to_runtime] = identifier[args] . identifier[runtime] , identifier[minify] = identifier[args] . identifier[minify] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[error_message] =(
literal[string]
)
identifier[source] = identifier[args] . identifier[source]
identifier[target] = identifier[args] . identifier[target] keyword[if] identifier[args] . identifier[target] keyword[else] literal[string]
keyword[if] identifier[args] . identifier[runtime] :
identifier[runtime] = literal[string] . identifier[format] ( identifier[runtime] = identifier[args] . identifier[runtime] )
keyword[else] :
identifier[runtime] = literal[string]
identifier[print] ( identifier[error_message] . identifier[format] ( identifier[source] = identifier[source] , identifier[target] = identifier[target] ,
identifier[runtime] = identifier[runtime] , identifier[error] = identifier[ex] ),
identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] )
|
def main(argv=None):
"""
Entry point for the command line tool 'uflash'.
Will print help text if the optional first argument is "help". Otherwise
it will ensure the optional first argument ends in ".py" (the source
Python script).
An optional second argument is used to reference the path to the micro:bit
device. Any more arguments are ignored.
Exceptions are caught and printed for the user.
"""
if not argv:
argv = sys.argv[1:] # depends on [control=['if'], data=[]]
parser = argparse.ArgumentParser(description=_HELP_TEXT)
parser.add_argument('source', nargs='?', default=None)
parser.add_argument('target', nargs='*', default=None)
parser.add_argument('-r', '--runtime', default=None, help='Use the referenced MicroPython runtime.')
parser.add_argument('-e', '--extract', action='store_true', help='Extract python source from a hex file instead of creating the hex file.')
parser.add_argument('-w', '--watch', action='store_true', help='Watch the source file for changes.')
parser.add_argument('-m', '--minify', action='store_true', help='Minify the source')
parser.add_argument('--version', action='version', version='%(prog)s ' + get_version())
args = parser.parse_args(argv)
if args.extract:
try:
extract(args.source, args.target) # depends on [control=['try'], data=[]]
except Exception as ex:
error_message = 'Error extracting {source}: {error!s}'
print(error_message.format(source=args.source, error=ex), file=sys.stderr)
sys.exit(1) # depends on [control=['except'], data=['ex']] # depends on [control=['if'], data=[]]
elif args.watch:
try:
watch_file(args.source, flash, path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime) # depends on [control=['try'], data=[]]
except Exception as ex:
error_message = 'Error watching {source}: {error!s}'
print(error_message.format(source=args.source, error=ex), file=sys.stderr)
sys.exit(1) # depends on [control=['except'], data=['ex']] # depends on [control=['if'], data=[]]
else:
try:
flash(path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime, minify=args.minify) # depends on [control=['try'], data=[]]
except Exception as ex:
error_message = 'Error flashing {source} to {target}{runtime}: {error!s}'
source = args.source
target = args.target if args.target else 'microbit'
if args.runtime:
runtime = 'with runtime {runtime}'.format(runtime=args.runtime) # depends on [control=['if'], data=[]]
else:
runtime = ''
print(error_message.format(source=source, target=target, runtime=runtime, error=ex), file=sys.stderr)
sys.exit(1) # depends on [control=['except'], data=['ex']]
|
def handle_display_options(self, option_order):
    """If there were any non-global "display-only" options
    (--help-commands or the metadata display options) on the command
    line, display the requested info and return true; else return
    false.
    """
    import sys
    import io

    def delegate():
        # The actual display work is done by the base class; this wrapper
        # only decides whether stdout must be re-wrapped as UTF-8 first.
        return _Distribution.handle_display_options(self, option_order)

    # Python 2, or --help-commands, needs no re-encoding.
    if sys.version_info < (3,) or self.help_commands:
        return delegate()
    # Stdout may be StringIO (e.g. in tests) rather than a real text stream.
    if not isinstance(sys.stdout, io.TextIOWrapper):
        return delegate()
    # Don't wrap stdout if utf-8 is already the encoding. Provides
    # workaround for #334.
    if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
        return delegate()
    # Print metadata in UTF-8 no matter the platform, then restore the
    # previous stdout configuration afterwards.
    old_encoding = sys.stdout.encoding
    old_errors = sys.stdout.errors
    newline = '\n' if sys.platform != 'win32' else None
    line_buffering = sys.stdout.line_buffering
    sys.stdout = io.TextIOWrapper(
        sys.stdout.detach(), 'utf-8', old_errors, newline, line_buffering)
    try:
        return delegate()
    finally:
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), old_encoding, old_errors, newline,
            line_buffering)
|
def function[handle_display_options, parameter[self, option_order]]:
constant[If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
]
import module[sys]
if <ast.BoolOp object at 0x7da20e960f40> begin[:]
return[call[name[_Distribution].handle_display_options, parameter[name[self], name[option_order]]]]
import module[io]
if <ast.UnaryOp object at 0x7da20e9627a0> begin[:]
return[call[name[_Distribution].handle_display_options, parameter[name[self], name[option_order]]]]
if compare[call[name[sys].stdout.encoding.lower, parameter[]] in tuple[[<ast.Constant object at 0x7da20e961ff0>, <ast.Constant object at 0x7da20e962b90>]]] begin[:]
return[call[name[_Distribution].handle_display_options, parameter[name[self], name[option_order]]]]
variable[encoding] assign[=] name[sys].stdout.encoding
variable[errors] assign[=] name[sys].stdout.errors
variable[newline] assign[=] <ast.BoolOp object at 0x7da20e960c10>
variable[line_buffering] assign[=] name[sys].stdout.line_buffering
name[sys].stdout assign[=] call[name[io].TextIOWrapper, parameter[call[name[sys].stdout.detach, parameter[]], constant[utf-8], name[errors], name[newline], name[line_buffering]]]
<ast.Try object at 0x7da18f811150>
|
keyword[def] identifier[handle_display_options] ( identifier[self] , identifier[option_order] ):
literal[string]
keyword[import] identifier[sys]
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] ,) keyword[or] identifier[self] . identifier[help_commands] :
keyword[return] identifier[_Distribution] . identifier[handle_display_options] ( identifier[self] , identifier[option_order] )
keyword[import] identifier[io]
keyword[if] keyword[not] identifier[isinstance] ( identifier[sys] . identifier[stdout] , identifier[io] . identifier[TextIOWrapper] ):
keyword[return] identifier[_Distribution] . identifier[handle_display_options] ( identifier[self] , identifier[option_order] )
keyword[if] identifier[sys] . identifier[stdout] . identifier[encoding] . identifier[lower] () keyword[in] ( literal[string] , literal[string] ):
keyword[return] identifier[_Distribution] . identifier[handle_display_options] ( identifier[self] , identifier[option_order] )
identifier[encoding] = identifier[sys] . identifier[stdout] . identifier[encoding]
identifier[errors] = identifier[sys] . identifier[stdout] . identifier[errors]
identifier[newline] = identifier[sys] . identifier[platform] != literal[string] keyword[and] literal[string] keyword[or] keyword[None]
identifier[line_buffering] = identifier[sys] . identifier[stdout] . identifier[line_buffering]
identifier[sys] . identifier[stdout] = identifier[io] . identifier[TextIOWrapper] (
identifier[sys] . identifier[stdout] . identifier[detach] (), literal[string] , identifier[errors] , identifier[newline] , identifier[line_buffering] )
keyword[try] :
keyword[return] identifier[_Distribution] . identifier[handle_display_options] ( identifier[self] , identifier[option_order] )
keyword[finally] :
identifier[sys] . identifier[stdout] = identifier[io] . identifier[TextIOWrapper] (
identifier[sys] . identifier[stdout] . identifier[detach] (), identifier[encoding] , identifier[errors] , identifier[newline] , identifier[line_buffering] )
|
def handle_display_options(self, option_order):
    """If there were any non-global "display-only" options
    (--help-commands or the metadata display options) on the command
    line, display the requested info and return true; else return
    false.
    """
    import sys
    # Python 2, or --help-commands, needs no stdout re-encoding.
    if sys.version_info < (3,) or self.help_commands:
        return _Distribution.handle_display_options(self, option_order)
    # Stdout may be StringIO (e.g. in tests)
    import io
    if not isinstance(sys.stdout, io.TextIOWrapper):
        return _Distribution.handle_display_options(self, option_order)
    # Don't wrap stdout if utf-8 is already the encoding (workaround for #334).
    if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
        return _Distribution.handle_display_options(self, option_order)
    # Temporarily re-wrap stdout as UTF-8 so metadata prints identically on
    # every platform; `saved` keeps the settings needed to restore it.
    saved = (sys.stdout.encoding,
             sys.stdout.errors,
             '\n' if sys.platform != 'win32' else None,
             sys.stdout.line_buffering)
    _encoding, errors, newline, line_buffering = saved
    sys.stdout = io.TextIOWrapper(
        sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
    try:
        return _Distribution.handle_display_options(self, option_order)
    finally:
        sys.stdout = io.TextIOWrapper(sys.stdout.detach(), *saved)
|
def get_name(modality_type, value=None):
  """Gets default name for transformations; if none available, return value."""
  # For legacy reasons, modalities vary in their naming scheme. Future plans are
  # to remove any need for get_name. We do not recommend using it.

  def _fixed(name_str):
    # Bind name_str now so the returned callable keeps the
    # (model_hparams, vocab_size) interface of the other branches.
    return lambda model_hparams, vocab_size: name_str

  def _sized(template):
    # Names that embed the vocab size and the model's hidden size.
    def name(model_hparams, vocab_size):
      return template % (vocab_size, model_hparams.hidden_size)
    return name

  # Modalities whose name is a constant string; scanned with == in the same
  # order as the legacy if/elif chain.
  fixed = (
      (ModalityType.AUDIO, "audio_modality"),
      (ModalityType.AUDIO_SPECTRAL, "audio_spectral_modality"),
      (ModalityType.GENERIC_L2_LOSS, "generic_l2_loss_modality"),
      (ModalityType.IDENTITY, "identity_modality"),
      (ModalityType.IMAGE, "image_modality"),
      (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
       "image_channel_bottom_identity_modality"),
      (ModalityType.IMAGE_CHANNEL_COMPRESS, "image_channel_compress_modality"),
      (ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,
       "image_channel_embeddings_bottom"),
      (ModalityType.REAL, "real_modality"),
      (ModalityType.REAL_L2_LOSS, "real_l2_loss_modality"),
      (ModalityType.REAL_LOG_POISSON_LOSS, "real_log_poisson_loss_modality"),
      (ModalityType.SPEECH_RECOGNITION, "speech_recognition_modality"),
      (ModalityType.VIDEO, "video_modality"),
      (ModalityType.VIDEO_BITWISE, "video_modality_bitwise"),
      (ModalityType.VIDEO_IDENTITY, "video_modality_identity"),
      (ModalityType.VIDEO_L1, "video_modality_l1"),
      (ModalityType.VIDEO_L1_RAW, "video_modality_l1_raw"),
      (ModalityType.VIDEO_L2, "video_modality_l2"),
      (ModalityType.VIDEO_L2_RAW, "video_modality_l2_raw"),
      (ModalityType.VIDEO_PIXEL_NOISE, "video_modality_pixel_noise"),
  )
  for modality, modality_name in fixed:
    if modality_type == modality:
      return _fixed(modality_name)

  # Modalities whose name is parameterized by (vocab_size, hidden_size).
  sized = (
      ((ModalityType.CLASS_LABEL,
        ModalityType.MULTI_LABEL,
        ModalityType.ONE_HOT_CLASS_LABEL),
       "class_label_modality_%d_%d"),
      ((ModalityType.CTC_SYMBOL,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL,
        ModalityType.SYMBOL_ONE_HOT),
       "symbol_modality_%d_%d"),
      ((ModalityType.SIGMOID_CLASS_LABEL,),
       "sigmoid_class_symbol_modality_%d_%d"),
      ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
       "sigmoid_max_pooling_class_symbol_modality_%d_%d"),
      ((ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,),
       "softmax_average_pooling_onehot_class_label_modality_%d_%d"),
      ((ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,),
       "softmax_last_timestep_onehot_class_label_modality_%d_%d"),
      ((ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL,),
       "softmax_max_pooling_onehot_class_label_modality_%d_%d"),
  )
  for group, template in sized:
    if modality_type in group:
      return _sized(template)
  # Unknown modality: fall back to the caller-supplied default.
  return value
|
def function[get_name, parameter[modality_type, value]]:
constant[Gets default name for transformations; if none available, return value.]
if compare[name[modality_type] equal[==] name[ModalityType].AUDIO] begin[:]
return[<ast.Lambda object at 0x7da20c6e74c0>]
return[name[value]]
|
keyword[def] identifier[get_name] ( identifier[modality_type] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[modality_type] == identifier[ModalityType] . identifier[AUDIO] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[AUDIO_SPECTRAL] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[GENERIC_L2_LOSS] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[IDENTITY] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[IMAGE] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[IMAGE_CHANNEL_BOTTOM_IDENTITY] :
keyword[return] ( keyword[lambda] identifier[model_hparams] , identifier[vocab_size] :
literal[string] )
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[IMAGE_CHANNEL_COMPRESS] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[IMAGE_CHANNEL_EMBEDDINGS_BOTTOM] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[REAL] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[REAL_L2_LOSS] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[REAL_LOG_POISSON_LOSS] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SPEECH_RECOGNITION] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_BITWISE] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_IDENTITY] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_L1] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_L1_RAW] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_L2] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_L2_RAW] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[VIDEO_PIXEL_NOISE] :
keyword[return] keyword[lambda] identifier[model_hparams] , identifier[vocab_size] : literal[string]
keyword[elif] identifier[modality_type] keyword[in] ( identifier[ModalityType] . identifier[CLASS_LABEL] ,
identifier[ModalityType] . identifier[MULTI_LABEL] ,
identifier[ModalityType] . identifier[ONE_HOT_CLASS_LABEL] ):
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %( identifier[vocab_size] ,
identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] keyword[in] ( identifier[ModalityType] . identifier[CTC_SYMBOL] ,
identifier[ModalityType] . identifier[IDENTITY_SYMBOL] ,
identifier[ModalityType] . identifier[SYMBOL] ,
identifier[ModalityType] . identifier[SYMBOL_WEIGHTS_ALL] ,
identifier[ModalityType] . identifier[SYMBOL_ONE_HOT] ):
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %( identifier[vocab_size] , identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SIGMOID_CLASS_LABEL] :
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %( identifier[vocab_size] ,
identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SIGMOID_MAX_POOLING_CLASS_LABEL] :
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %(
identifier[vocab_size] , identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SOFTMAX_AVERAGE_POOLING_CLASS_LABEL] :
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %(
identifier[vocab_size] , identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SOFTMAX_LAST_TIMESTEP_CLASS_LABEL] :
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %(
identifier[vocab_size] , identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[elif] identifier[modality_type] == identifier[ModalityType] . identifier[SOFTMAX_MAX_POOLING_CLASS_LABEL] :
keyword[def] identifier[name] ( identifier[model_hparams] , identifier[vocab_size] ):
keyword[return] literal[string] %(
identifier[vocab_size] , identifier[model_hparams] . identifier[hidden_size] )
keyword[return] identifier[name]
keyword[return] identifier[value]
|
def get_name(modality_type, value=None):
    """Gets default name for transformations; if none available, return value."""
    # For legacy reasons, modalities vary in their naming scheme. Future plans are
    # to remove any need for get_name. We do not recommend using it.

    def fixed(text):
        # Factory for modalities whose name is a constant string; the returned
        # callable keeps the (model_hparams, vocab_size) interface.
        return lambda model_hparams, vocab_size: text

    def vocab_sized(template):
        # Factory for names embedding vocab size and the model's hidden size.
        def name(model_hparams, vocab_size):
            return template % (vocab_size, model_hparams.hidden_size)
        return name

    # (matching modality types, name factory) in the legacy chain's order.
    entries = (
        ((ModalityType.AUDIO,), fixed('audio_modality')),
        ((ModalityType.AUDIO_SPECTRAL,), fixed('audio_spectral_modality')),
        ((ModalityType.GENERIC_L2_LOSS,), fixed('generic_l2_loss_modality')),
        ((ModalityType.IDENTITY,), fixed('identity_modality')),
        ((ModalityType.IMAGE,), fixed('image_modality')),
        ((ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,),
         fixed('image_channel_bottom_identity_modality')),
        ((ModalityType.IMAGE_CHANNEL_COMPRESS,),
         fixed('image_channel_compress_modality')),
        ((ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,),
         fixed('image_channel_embeddings_bottom')),
        ((ModalityType.REAL,), fixed('real_modality')),
        ((ModalityType.REAL_L2_LOSS,), fixed('real_l2_loss_modality')),
        ((ModalityType.REAL_LOG_POISSON_LOSS,),
         fixed('real_log_poisson_loss_modality')),
        ((ModalityType.SPEECH_RECOGNITION,),
         fixed('speech_recognition_modality')),
        ((ModalityType.VIDEO,), fixed('video_modality')),
        ((ModalityType.VIDEO_BITWISE,), fixed('video_modality_bitwise')),
        ((ModalityType.VIDEO_IDENTITY,), fixed('video_modality_identity')),
        ((ModalityType.VIDEO_L1,), fixed('video_modality_l1')),
        ((ModalityType.VIDEO_L1_RAW,), fixed('video_modality_l1_raw')),
        ((ModalityType.VIDEO_L2,), fixed('video_modality_l2')),
        ((ModalityType.VIDEO_L2_RAW,), fixed('video_modality_l2_raw')),
        ((ModalityType.VIDEO_PIXEL_NOISE,),
         fixed('video_modality_pixel_noise')),
        ((ModalityType.CLASS_LABEL, ModalityType.MULTI_LABEL,
          ModalityType.ONE_HOT_CLASS_LABEL),
         vocab_sized('class_label_modality_%d_%d')),
        ((ModalityType.CTC_SYMBOL, ModalityType.IDENTITY_SYMBOL,
          ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL,
          ModalityType.SYMBOL_ONE_HOT),
         vocab_sized('symbol_modality_%d_%d')),
        ((ModalityType.SIGMOID_CLASS_LABEL,),
         vocab_sized('sigmoid_class_symbol_modality_%d_%d')),
        ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
         vocab_sized('sigmoid_max_pooling_class_symbol_modality_%d_%d')),
        ((ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,),
         vocab_sized('softmax_average_pooling_onehot_class_label_modality_%d_%d')),
        ((ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,),
         vocab_sized('softmax_last_timestep_onehot_class_label_modality_%d_%d')),
        ((ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL,),
         vocab_sized('softmax_max_pooling_onehot_class_label_modality_%d_%d')),
    )
    for group, factory in entries:
        if modality_type in group:
            return factory
    # No naming convention registered for this modality type.
    return value
|
def _zforce(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _zforce
    PURPOSE:
       evaluate the vertical force for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the vertical force
    HISTORY:
       2015-02-13 - Written - Trick (MPIA)
    """
    lam, nu = bovy_coords.Rz_to_lambdanu(R, z, ac=self._ac,
                                         Delta=self._Delta)
    jacobian = bovy_coords.Rz_to_lambdanu_jac(R, z, Delta=self._Delta)
    # Chain rule: F_z = -(dPhi/dlambda * dlambda/dz + dPhi/dnu * dnu/dz);
    # the d/dz column of the (lambda, nu) Jacobian is column 1.
    return -(jacobian[0, 1] * self._lderiv(lam, nu)
             + jacobian[1, 1] * self._nderiv(lam, nu))
|
def function[_zforce, parameter[self, R, z, phi, t]]:
constant[
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA)
]
<ast.Tuple object at 0x7da1b0c406a0> assign[=] call[name[bovy_coords].Rz_to_lambdanu, parameter[name[R], name[z]]]
variable[jac] assign[=] call[name[bovy_coords].Rz_to_lambdanu_jac, parameter[name[R], name[z]]]
variable[dldz] assign[=] call[name[jac]][tuple[[<ast.Constant object at 0x7da1b0c40a60>, <ast.Constant object at 0x7da1b0cba560>]]]
variable[dndz] assign[=] call[name[jac]][tuple[[<ast.Constant object at 0x7da1b0cba710>, <ast.Constant object at 0x7da1b0da1990>]]]
return[<ast.UnaryOp object at 0x7da1b0da20b0>]
|
keyword[def] identifier[_zforce] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
identifier[l] , identifier[n] = identifier[bovy_coords] . identifier[Rz_to_lambdanu] ( identifier[R] , identifier[z] , identifier[ac] = identifier[self] . identifier[_ac] , identifier[Delta] = identifier[self] . identifier[_Delta] )
identifier[jac] = identifier[bovy_coords] . identifier[Rz_to_lambdanu_jac] ( identifier[R] , identifier[z] , identifier[Delta] = identifier[self] . identifier[_Delta] )
identifier[dldz] = identifier[jac] [ literal[int] , literal[int] ]
identifier[dndz] = identifier[jac] [ literal[int] , literal[int] ]
keyword[return] -( identifier[dldz] * identifier[self] . identifier[_lderiv] ( identifier[l] , identifier[n] )+ identifier[dndz] * identifier[self] . identifier[_nderiv] ( identifier[l] , identifier[n] ))
|
def _zforce(self, R, z, phi=0.0, t=0.0):
    """
    NAME:
       _zforce
    PURPOSE:
       evaluate the vertical force for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the vertical force
    HISTORY:
       2015-02-13 - Written - Trick (MPIA)
    """
    coords = bovy_coords.Rz_to_lambdanu(R, z, ac=self._ac, Delta=self._Delta)
    jac = bovy_coords.Rz_to_lambdanu_jac(R, z, Delta=self._Delta)
    # Apply the chain rule through the (lambda, nu) coordinates; jac[:, 1]
    # holds the derivatives with respect to z.
    force = jac[0, 1] * self._lderiv(*coords) + jac[1, 1] * self._nderiv(*coords)
    return -force
|
def get_state_machine(self):
    """Get a reference of the state_machine the state belongs to

    Walks up the parent chain: the root state's parent is the state
    machine itself; a state without a parent belongs to no machine.

    :rtype rafcon.core.state_machine.StateMachine
    :return: respective state machine
    """
    if not self.parent:
        return None
    return self.parent if self.is_root_state else self.parent.get_state_machine()
|
def function[get_state_machine, parameter[self]]:
constant[Get a reference of the state_machine the state belongs to
:rtype rafcon.core.state_machine.StateMachine
:return: respective state machine
]
if name[self].parent begin[:]
if name[self].is_root_state begin[:]
return[name[self].parent]
return[constant[None]]
|
keyword[def] identifier[get_state_machine] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] :
keyword[if] identifier[self] . identifier[is_root_state] :
keyword[return] identifier[self] . identifier[parent]
keyword[else] :
keyword[return] identifier[self] . identifier[parent] . identifier[get_state_machine] ()
keyword[return] keyword[None]
|
def get_state_machine(self):
    """Get a reference of the state_machine the state belongs to
    :rtype rafcon.core.state_machine.StateMachine
    :return: respective state machine
    """
    if self.parent:
        # The root state is owned directly by the state machine; any other
        # state delegates the lookup to its parent.
        if self.is_root_state:
            return self.parent
        return self.parent.get_state_machine()
    # Detached state: no enclosing state machine.
    return None
|
def loads(self, s, salt=None, return_header=False):
    """Reverse of :meth:`dumps`. If requested via `return_header` it will
    return a tuple of payload and header.
    """
    signer = self.make_signer(salt, self.algorithm)
    payload, header = self.load_payload(
        signer.unsign(want_bytes(s)), return_header=True)
    # A value signed under a different algorithm must be rejected even if
    # its signature happens to verify.
    if header.get('alg') != self.algorithm_name:
        raise BadHeader('Algorithm mismatch', header=header,
                        payload=payload)
    return (payload, header) if return_header else payload
|
def function[loads, parameter[self, s, salt, return_header]]:
constant[Reverse of :meth:`dumps`. If requested via `return_header` it will
return a tuple of payload and header.
]
<ast.Tuple object at 0x7da2045645e0> assign[=] call[name[self].load_payload, parameter[call[call[name[self].make_signer, parameter[name[salt], name[self].algorithm]].unsign, parameter[call[name[want_bytes], parameter[name[s]]]]]]]
if compare[call[name[header].get, parameter[constant[alg]]] not_equal[!=] name[self].algorithm_name] begin[:]
<ast.Raise object at 0x7da204345240>
if name[return_header] begin[:]
return[tuple[[<ast.Name object at 0x7da204345ba0>, <ast.Name object at 0x7da204346830>]]]
return[name[payload]]
|
keyword[def] identifier[loads] ( identifier[self] , identifier[s] , identifier[salt] = keyword[None] , identifier[return_header] = keyword[False] ):
literal[string]
identifier[payload] , identifier[header] = identifier[self] . identifier[load_payload] (
identifier[self] . identifier[make_signer] ( identifier[salt] , identifier[self] . identifier[algorithm] ). identifier[unsign] ( identifier[want_bytes] ( identifier[s] )),
identifier[return_header] = keyword[True] )
keyword[if] identifier[header] . identifier[get] ( literal[string] )!= identifier[self] . identifier[algorithm_name] :
keyword[raise] identifier[BadHeader] ( literal[string] , identifier[header] = identifier[header] ,
identifier[payload] = identifier[payload] )
keyword[if] identifier[return_header] :
keyword[return] identifier[payload] , identifier[header]
keyword[return] identifier[payload]
|
def loads(self, s, salt=None, return_header=False):
    """Reverse of :meth:`dumps`. If requested via `return_header` it will
    return a tuple of payload and header.
    """
    unsigned = self.make_signer(salt, self.algorithm).unsign(want_bytes(s))
    payload, header = self.load_payload(unsigned, return_header=True)
    if header.get('alg') != self.algorithm_name:
        # Declared algorithm differs from ours: do not trust the token.
        raise BadHeader('Algorithm mismatch', header=header, payload=payload)
    if return_header:
        return (payload, header)
    return payload
|
def QA_util_random_with_zh_stock_code(stockNumber=10):
    """Randomly generate China A-share style stock codes.

    :param stockNumber: number of codes to generate
    :return: list of zero-padded 6-character codes,
             e.g. ['60XXXX', '00XXXX', '300XXX']
    """
    # Numeric ranges cycled through per generated code, kept byte-for-byte
    # identical to the original 5-way branch order so that seeded runs
    # reproduce the same sequence.
    # NOTE(review): the second range was labelled "00XXXX" upstream but draws
    # 600000-600999 (i.e. more 60xxxx codes) -- behavior preserved; confirm
    # intent before changing.
    spans = (
        (600000, 609999),  # formats as '60xxxx'
        (600000, 600999),  # formats as '600xxx'
        (2000, 9999),      # zero-pads to '00xxxx'
        (300000, 300999),  # formats as '300xxx'
        (2000, 2999),      # zero-pads to '002xxx'
    )
    codeList = []
    for idx in range(stockNumber):
        low, high = spans[idx % 5]
        codeList.append("%06d" % random.randint(low, high))
    return codeList
|
def function[QA_util_random_with_zh_stock_code, parameter[stockNumber]]:
constant[
随机生成股票代码
:param stockNumber: 生成个数
:return: ['60XXXX', '00XXXX', '300XXX']
]
variable[codeList] assign[=] list[[]]
variable[pt] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[stockNumber]]]] begin[:]
if compare[name[pt] equal[==] constant[0]] begin[:]
variable[iCode] assign[=] call[name[random].randint, parameter[constant[600000], constant[609999]]]
variable[aCode] assign[=] binary_operation[constant[%06d] <ast.Mod object at 0x7da2590d6920> name[iCode]]
variable[pt] assign[=] binary_operation[binary_operation[name[pt] + constant[1]] <ast.Mod object at 0x7da2590d6920> constant[5]]
call[name[codeList].append, parameter[name[aCode]]]
return[name[codeList]]
|
keyword[def] identifier[QA_util_random_with_zh_stock_code] ( identifier[stockNumber] = literal[int] ):
literal[string]
identifier[codeList] =[]
identifier[pt] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[stockNumber] ):
keyword[if] identifier[pt] == literal[int] :
identifier[iCode] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[aCode] = literal[string] % identifier[iCode]
keyword[elif] identifier[pt] == literal[int] :
identifier[iCode] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[aCode] = literal[string] % identifier[iCode]
keyword[elif] identifier[pt] == literal[int] :
identifier[iCode] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[aCode] = literal[string] % identifier[iCode]
keyword[elif] identifier[pt] == literal[int] :
identifier[iCode] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[aCode] = literal[string] % identifier[iCode]
keyword[elif] identifier[pt] == literal[int] :
identifier[iCode] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[aCode] = literal[string] % identifier[iCode]
identifier[pt] =( identifier[pt] + literal[int] )% literal[int]
identifier[codeList] . identifier[append] ( identifier[aCode] )
keyword[return] identifier[codeList]
|
def QA_util_random_with_zh_stock_code(stockNumber=10):
"""
随机生成股票代码
:param stockNumber: 生成个数
:return: ['60XXXX', '00XXXX', '300XXX']
"""
codeList = []
pt = 0
for i in range(stockNumber):
if pt == 0:
#print("random 60XXXX")
iCode = random.randint(600000, 609999)
aCode = '%06d' % iCode # depends on [control=['if'], data=[]]
elif pt == 1:
#print("random 00XXXX")
iCode = random.randint(600000, 600999)
aCode = '%06d' % iCode # depends on [control=['if'], data=[]]
elif pt == 2:
#print("random 00XXXX")
iCode = random.randint(2000, 9999)
aCode = '%06d' % iCode # depends on [control=['if'], data=[]]
elif pt == 3:
#print("random 300XXX")
iCode = random.randint(300000, 300999)
aCode = '%06d' % iCode # depends on [control=['if'], data=[]]
elif pt == 4:
#print("random 00XXXX")
iCode = random.randint(2000, 2999)
aCode = '%06d' % iCode # depends on [control=['if'], data=[]]
pt = (pt + 1) % 5
codeList.append(aCode) # depends on [control=['for'], data=[]]
return codeList
|
def find(self, haystack, needle):
    """
    Return True if needle occurs in haystack, else False.

    Uses ``haystack.find`` (the string protocol) when available; for
    objects without a ``find`` method (e.g. lists), falls back to a
    plain membership test — same behavior as before, without the
    redundant ``== -1`` / ``!= -1`` branch pair.

    Required arguments:
    * haystack - Text (or container) to search in.
    * needle - Text (or item) to search for.
    """
    try:
        # str.find returns -1 if and only if needle is absent.
        return haystack.find(needle) != -1
    except AttributeError:
        # No .find method — use the membership operator instead.
        return needle in haystack
|
def function[find, parameter[self, haystack, needle]]:
constant[
Finds needle in haystack.
If needle is found return True, if not return False.
Required arguments:
* haystack - Text to search in.
* needle - Text to search for.
]
<ast.Try object at 0x7da2054a4d90>
if compare[name[qstatus] equal[==] <ast.UnaryOp object at 0x7da2054a75b0>] begin[:]
return[constant[False]]
|
keyword[def] identifier[find] ( identifier[self] , identifier[haystack] , identifier[needle] ):
literal[string]
keyword[try] :
identifier[qstatus] = identifier[haystack] . identifier[find] ( identifier[needle] )
keyword[except] identifier[AttributeError] :
keyword[if] identifier[needle] keyword[in] identifier[haystack] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[if] identifier[qstatus] ==- literal[int] :
keyword[return] keyword[False]
keyword[elif] identifier[qstatus] !=- literal[int] :
keyword[return] keyword[True]
|
def find(self, haystack, needle):
"""
Finds needle in haystack.
If needle is found return True, if not return False.
Required arguments:
* haystack - Text to search in.
* needle - Text to search for.
"""
try:
qstatus = haystack.find(needle) # depends on [control=['try'], data=[]]
except AttributeError:
if needle in haystack:
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['except'], data=[]]
if qstatus == -1:
return False # depends on [control=['if'], data=[]]
elif qstatus != -1:
return True # depends on [control=['if'], data=[]]
|
def renew_lease(self, lease_id, increment=None):
    """Request an extension of an existing lease.

    Supported methods:
        PUT: /sys/leases/renew. Produces: 200 application/json

    :param lease_id: The ID of the lease to extend.
    :type lease_id: str | unicode
    :param increment: The requested amount of time (in seconds) to extend the lease.
    :type increment: int
    :return: The JSON response of the request
    :rtype: dict
    """
    payload = {
        'lease_id': lease_id,
        'increment': increment,
    }
    return self._adapter.put(
        url='/v1/sys/leases/renew',
        json=payload,
    ).json()
|
def function[renew_lease, parameter[self, lease_id, increment]]:
constant[Renew a lease, requesting to extend the lease.
Supported methods:
PUT: /sys/leases/renew. Produces: 200 application/json
:param lease_id: The ID of the lease to extend.
:type lease_id: str | unicode
:param increment: The requested amount of time (in seconds) to extend the lease.
:type increment: int
:return: The JSON response of the request
:rtype: dict
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70e50>, <ast.Constant object at 0x7da18bc70580>], [<ast.Name object at 0x7da18bc73190>, <ast.Name object at 0x7da18bc71540>]]
variable[api_path] assign[=] constant[/v1/sys/leases/renew]
variable[response] assign[=] call[name[self]._adapter.put, parameter[]]
return[call[name[response].json, parameter[]]]
|
keyword[def] identifier[renew_lease] ( identifier[self] , identifier[lease_id] , identifier[increment] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : identifier[lease_id] ,
literal[string] : identifier[increment] ,
}
identifier[api_path] = literal[string]
identifier[response] = identifier[self] . identifier[_adapter] . identifier[put] (
identifier[url] = identifier[api_path] ,
identifier[json] = identifier[params] ,
)
keyword[return] identifier[response] . identifier[json] ()
|
def renew_lease(self, lease_id, increment=None):
"""Renew a lease, requesting to extend the lease.
Supported methods:
PUT: /sys/leases/renew. Produces: 200 application/json
:param lease_id: The ID of the lease to extend.
:type lease_id: str | unicode
:param increment: The requested amount of time (in seconds) to extend the lease.
:type increment: int
:return: The JSON response of the request
:rtype: dict
"""
params = {'lease_id': lease_id, 'increment': increment}
api_path = '/v1/sys/leases/renew'
response = self._adapter.put(url=api_path, json=params)
return response.json()
|
def handle_exception(exc_info=None, source_hint=None, tb_override=_NO):
    """Exception handling helper. This is used internally to either raise
    rewritten exceptions or return a rendered traceback for the template.

    :param exc_info: ``(type, value, traceback)`` triple as returned by
        ``sys.exc_info()``; when ``None`` the currently handled
        exception is used.
    :param source_hint: Optional source text forwarded to the traceback
        rewriter.
    :param tb_override: Replacement traceback object. The module-level
        ``_NO`` sentinel means "no override" — a sentinel is required
        because ``None`` is itself a meaningful traceback value here.
    """
    # The lazy import below rebinds this module-level name on first use.
    global _make_traceback
    if exc_info is None: # pragma: no cover
        exc_info = sys.exc_info()
    # the debugging module is imported when it's used for the first time.
    # we're doing a lot of stuff there and for applications that do not
    # get any exceptions in template rendering there is no need to load
    # all of that.
    if _make_traceback is None:
        # Binds the *global* _make_traceback thanks to the declaration above.
        from .runtime.debug import make_traceback as _make_traceback
    exc_type, exc_value, tb = exc_info
    if tb_override is not _NO: # pragma: no cover
        tb = tb_override
    # Rewrite the traceback so it points at template source, then pull the
    # standard (type, value, tb) triple back out of the wrapper.
    traceback = _make_traceback((exc_type, exc_value, tb), source_hint)
    exc_type, exc_value, tb = traceback.standard_exc_info
    # NOTE(review): ``reraise`` appears to be the py2/py3-compat raise
    # helper — it re-raises with the rewritten traceback; confirm at its
    # definition site.
    reraise(exc_type, exc_value, tb)
|
def function[handle_exception, parameter[exc_info, source_hint, tb_override]]:
constant[Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
]
<ast.Global object at 0x7da1b0acbd90>
if compare[name[exc_info] is constant[None]] begin[:]
variable[exc_info] assign[=] call[name[sys].exc_info, parameter[]]
if compare[name[_make_traceback] is constant[None]] begin[:]
from relative_module[runtime.debug] import module[make_traceback]
<ast.Tuple object at 0x7da1b0af8580> assign[=] name[exc_info]
if compare[name[tb_override] is_not name[_NO]] begin[:]
variable[tb] assign[=] name[tb_override]
variable[traceback] assign[=] call[name[_make_traceback], parameter[tuple[[<ast.Name object at 0x7da1b0b6f0d0>, <ast.Name object at 0x7da1b0b6cd60>, <ast.Name object at 0x7da1b0b6feb0>]], name[source_hint]]]
<ast.Tuple object at 0x7da1b0b6fca0> assign[=] name[traceback].standard_exc_info
call[name[reraise], parameter[name[exc_type], name[exc_value], name[tb]]]
|
keyword[def] identifier[handle_exception] ( identifier[exc_info] = keyword[None] , identifier[source_hint] = keyword[None] , identifier[tb_override] = identifier[_NO] ):
literal[string]
keyword[global] identifier[_make_traceback]
keyword[if] identifier[exc_info] keyword[is] keyword[None] :
identifier[exc_info] = identifier[sys] . identifier[exc_info] ()
keyword[if] identifier[_make_traceback] keyword[is] keyword[None] :
keyword[from] . identifier[runtime] . identifier[debug] keyword[import] identifier[make_traceback] keyword[as] identifier[_make_traceback]
identifier[exc_type] , identifier[exc_value] , identifier[tb] = identifier[exc_info]
keyword[if] identifier[tb_override] keyword[is] keyword[not] identifier[_NO] :
identifier[tb] = identifier[tb_override]
identifier[traceback] = identifier[_make_traceback] (( identifier[exc_type] , identifier[exc_value] , identifier[tb] ), identifier[source_hint] )
identifier[exc_type] , identifier[exc_value] , identifier[tb] = identifier[traceback] . identifier[standard_exc_info]
identifier[reraise] ( identifier[exc_type] , identifier[exc_value] , identifier[tb] )
|
def handle_exception(exc_info=None, source_hint=None, tb_override=_NO):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None: # pragma: no cover
exc_info = sys.exc_info() # depends on [control=['if'], data=['exc_info']]
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from .runtime.debug import make_traceback as _make_traceback # depends on [control=['if'], data=[]]
(exc_type, exc_value, tb) = exc_info
if tb_override is not _NO: # pragma: no cover
tb = tb_override # depends on [control=['if'], data=['tb_override']]
traceback = _make_traceback((exc_type, exc_value, tb), source_hint)
(exc_type, exc_value, tb) = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
|
def reject(self, delivery_tag, requeue=False):
    """
    Reject a message.
    """
    # class-id 60 / method-id 90 is basic.reject in AMQP 0-9-1.
    frame_args = Writer()
    frame_args.write_longlong(delivery_tag).write_bit(requeue)
    self.send_frame(MethodFrame(self.channel_id, 60, 90, frame_args))
|
def function[reject, parameter[self, delivery_tag, requeue]]:
constant[
Reject a message.
]
variable[args] assign[=] call[name[Writer], parameter[]]
call[call[name[args].write_longlong, parameter[name[delivery_tag]]].write_bit, parameter[name[requeue]]]
call[name[self].send_frame, parameter[call[name[MethodFrame], parameter[name[self].channel_id, constant[60], constant[90], name[args]]]]]
|
keyword[def] identifier[reject] ( identifier[self] , identifier[delivery_tag] , identifier[requeue] = keyword[False] ):
literal[string]
identifier[args] = identifier[Writer] ()
identifier[args] . identifier[write_longlong] ( identifier[delivery_tag] ). identifier[write_bit] ( identifier[requeue] )
identifier[self] . identifier[send_frame] ( identifier[MethodFrame] ( identifier[self] . identifier[channel_id] , literal[int] , literal[int] , identifier[args] ))
|
def reject(self, delivery_tag, requeue=False):
"""
Reject a message.
"""
args = Writer()
args.write_longlong(delivery_tag).write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 90, args))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.