code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _mount_repo(repo, wait_for_server=False):
    """
    This function will create the VM directory where a repo will be mounted, if it
    doesn't exist. If wait_for_server is set, it will wait up to 10 seconds for
    the nfs server to start, by retrying mounts that fail with 'Connection Refused'.
    If wait_for_server is not set, it will attempt to run the mount command once
    """
    # Make sure the mount point exists inside the VM before mounting.
    check_call_on_vm('sudo mkdir -p {}'.format(repo.vm_path))
    if wait_for_server:
        # Up to 10 attempts, 1 s apart (~10 s total), while nfsd comes up.
        for i in range(0,10):
            try:
                _run_mount_command(repo)
                return
            except CalledProcessError as e:
                if 'Connection refused' in e.output:
                    # Server not accepting connections yet; back off and retry.
                    logging.info('Failed to mount repo; waiting for nfsd to restart')
                    time.sleep(1)
                else:
                    # Any other mount failure is unexpected; log output and re-raise.
                    logging.info(e.output)
                    raise e
        # All retries exhausted without a successful mount.
        log_to_client('Failed to mount repo {}'.format(repo.short_name))
        raise RuntimeError('Unable to mount repo with NFS')
    else:
        _run_mount_command(repo) | def function[_mount_repo, parameter[repo, wait_for_server]]:
constant[
This function will create the VM directory where a repo will be mounted, if it
doesn't exist. If wait_for_server is set, it will wait up to 10 seconds for
the nfs server to start, by retrying mounts that fail with 'Connection Refused'.
If wait_for_server is not set, it will attempt to run the mount command once
]
call[name[check_call_on_vm], parameter[call[constant[sudo mkdir -p {}].format, parameter[name[repo].vm_path]]]]
if name[wait_for_server] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[10]]]] begin[:]
<ast.Try object at 0x7da18f00fa30>
call[name[log_to_client], parameter[call[constant[Failed to mount repo {}].format, parameter[name[repo].short_name]]]]
<ast.Raise object at 0x7da18f00e9b0> | keyword[def] identifier[_mount_repo] ( identifier[repo] , identifier[wait_for_server] = keyword[False] ):
literal[string]
identifier[check_call_on_vm] ( literal[string] . identifier[format] ( identifier[repo] . identifier[vm_path] ))
keyword[if] identifier[wait_for_server] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[try] :
identifier[_run_mount_command] ( identifier[repo] )
keyword[return]
keyword[except] identifier[CalledProcessError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[output] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
identifier[logging] . identifier[info] ( identifier[e] . identifier[output] )
keyword[raise] identifier[e]
identifier[log_to_client] ( literal[string] . identifier[format] ( identifier[repo] . identifier[short_name] ))
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[else] :
identifier[_run_mount_command] ( identifier[repo] ) | def _mount_repo(repo, wait_for_server=False):
"""
This function will create the VM directory where a repo will be mounted, if it
doesn't exist. If wait_for_server is set, it will wait up to 10 seconds for
the nfs server to start, by retrying mounts that fail with 'Connection Refused'.
If wait_for_server is not set, it will attempt to run the mount command once
"""
check_call_on_vm('sudo mkdir -p {}'.format(repo.vm_path))
if wait_for_server:
for i in range(0, 10):
try:
_run_mount_command(repo)
return # depends on [control=['try'], data=[]]
except CalledProcessError as e:
if 'Connection refused' in e.output:
logging.info('Failed to mount repo; waiting for nfsd to restart')
time.sleep(1) # depends on [control=['if'], data=[]]
else:
logging.info(e.output)
raise e # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
log_to_client('Failed to mount repo {}'.format(repo.short_name))
raise RuntimeError('Unable to mount repo with NFS') # depends on [control=['if'], data=[]]
else:
_run_mount_command(repo) |
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    # (Re)build the cache entry when the template is unknown, or always in DEBUG
    # mode so edits to templates take effect without a restart.
    if tpl not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            # Already an adapter instance: cache it as-is.
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Heuristic: these characters indicate an inline template source string.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            # Otherwise tpl is a template name/filename resolved via the lookup path.
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    # Positional dicts after the template are merged into the render arguments.
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs) | def function[template, parameter[]]:
constant[
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
]
variable[tpl] assign[=] <ast.IfExp object at 0x7da1b1982080>
variable[template_adapter] assign[=] call[name[kwargs].pop, parameter[constant[template_adapter], name[SimpleTemplate]]]
if <ast.BoolOp object at 0x7da1b1980f40> begin[:]
variable[settings] assign[=] call[name[kwargs].pop, parameter[constant[template_settings], dictionary[[], []]]]
variable[lookup] assign[=] call[name[kwargs].pop, parameter[constant[template_lookup], name[TEMPLATE_PATH]]]
if call[name[isinstance], parameter[name[tpl], name[template_adapter]]] begin[:]
call[name[TEMPLATES]][name[tpl]] assign[=] name[tpl]
if name[settings] begin[:]
call[call[name[TEMPLATES]][name[tpl]].prepare, parameter[]]
if <ast.UnaryOp object at 0x7da1b1980580> begin[:]
call[name[abort], parameter[constant[500], binary_operation[constant[Template (%s) not found] <ast.Mod object at 0x7da2590d6920> name[tpl]]]]
for taget[name[dictarg]] in starred[call[name[args]][<ast.Slice object at 0x7da1b194c730>]] begin[:]
call[name[kwargs].update, parameter[name[dictarg]]]
return[call[call[name[TEMPLATES]][name[tpl]].render, parameter[name[kwargs]]]] | keyword[def] identifier[template] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[tpl] = identifier[args] [ literal[int] ] keyword[if] identifier[args] keyword[else] keyword[None]
identifier[template_adapter] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[SimpleTemplate] )
keyword[if] identifier[tpl] keyword[not] keyword[in] identifier[TEMPLATES] keyword[or] identifier[DEBUG] :
identifier[settings] = identifier[kwargs] . identifier[pop] ( literal[string] ,{})
identifier[lookup] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[TEMPLATE_PATH] )
keyword[if] identifier[isinstance] ( identifier[tpl] , identifier[template_adapter] ):
identifier[TEMPLATES] [ identifier[tpl] ]= identifier[tpl]
keyword[if] identifier[settings] : identifier[TEMPLATES] [ identifier[tpl] ]. identifier[prepare] (** identifier[settings] )
keyword[elif] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] :
identifier[TEMPLATES] [ identifier[tpl] ]= identifier[template_adapter] ( identifier[source] = identifier[tpl] , identifier[lookup] = identifier[lookup] ,** identifier[settings] )
keyword[else] :
identifier[TEMPLATES] [ identifier[tpl] ]= identifier[template_adapter] ( identifier[name] = identifier[tpl] , identifier[lookup] = identifier[lookup] ,** identifier[settings] )
keyword[if] keyword[not] identifier[TEMPLATES] [ identifier[tpl] ]:
identifier[abort] ( literal[int] , literal[string] % identifier[tpl] )
keyword[for] identifier[dictarg] keyword[in] identifier[args] [ literal[int] :]: identifier[kwargs] . identifier[update] ( identifier[dictarg] )
keyword[return] identifier[TEMPLATES] [ identifier[tpl] ]. identifier[render] ( identifier[kwargs] ) | def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings:
TEMPLATES[tpl].prepare(**settings) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif '\n' in tpl or '{' in tpl or '%' in tpl or ('$' in tpl):
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings) # depends on [control=['if'], data=[]]
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings) # depends on [control=['if'], data=[]]
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl) # depends on [control=['if'], data=[]]
for dictarg in args[1:]:
kwargs.update(dictarg) # depends on [control=['for'], data=['dictarg']]
return TEMPLATES[tpl].render(kwargs) |
def hours_minutes_seconds(value):
    """converts a timestamp to seconds
    - hours:minutes:seconds to seconds
    - minutes:seconds to seconds
    - 11h22m33s to seconds
    - 11h to seconds
    - 20h15m to seconds
    - seconds to seconds
    :param value: hh:mm:ss ; 00h00m00s ; seconds
    :return: seconds
    """
    # Plain integer input passes straight through as a number of seconds.
    try:
        return int(value)
    except ValueError:
        pass
    # Try both supported formats: colon-separated and h/m/s-suffixed.
    match = (_hours_minutes_seconds_re.match(value)
             or _hours_minutes_seconds_2_re.match(value))
    if not match:
        raise ValueError
    s = 0
    # Missing groups default to "0" so partial forms (e.g. '11h') still parse.
    s += int(match.group("hours") or "0") * 60 * 60
    s += int(match.group("minutes") or "0") * 60
    s += int(match.group("seconds") or "0")
    return s | def function[hours_minutes_seconds, parameter[value]]:
constant[converts a timestamp to seconds
- hours:minutes:seconds to seconds
- minutes:seconds to seconds
- 11h22m33s to seconds
- 11h to seconds
- 20h15m to seconds
- seconds to seconds
:param value: hh:mm:ss ; 00h00m00s ; seconds
:return: seconds
]
<ast.Try object at 0x7da20c6e6380>
variable[match] assign[=] <ast.BoolOp object at 0x7da18f09f6d0>
if <ast.UnaryOp object at 0x7da18f09d7e0> begin[:]
<ast.Raise object at 0x7da18f09f790>
variable[s] assign[=] constant[0]
<ast.AugAssign object at 0x7da18f09c1f0>
<ast.AugAssign object at 0x7da18f09d570>
<ast.AugAssign object at 0x7da18f09f250>
return[name[s]] | keyword[def] identifier[hours_minutes_seconds] ( identifier[value] ):
literal[string]
keyword[try] :
keyword[return] identifier[int] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[pass]
identifier[match] =( identifier[_hours_minutes_seconds_re] . identifier[match] ( identifier[value] )
keyword[or] identifier[_hours_minutes_seconds_2_re] . identifier[match] ( identifier[value] ))
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[ValueError]
identifier[s] = literal[int]
identifier[s] += identifier[int] ( identifier[match] . identifier[group] ( literal[string] ) keyword[or] literal[string] )* literal[int] * literal[int]
identifier[s] += identifier[int] ( identifier[match] . identifier[group] ( literal[string] ) keyword[or] literal[string] )* literal[int]
identifier[s] += identifier[int] ( identifier[match] . identifier[group] ( literal[string] ) keyword[or] literal[string] )
keyword[return] identifier[s] | def hours_minutes_seconds(value):
"""converts a timestamp to seconds
- hours:minutes:seconds to seconds
- minutes:seconds to seconds
- 11h22m33s to seconds
- 11h to seconds
- 20h15m to seconds
- seconds to seconds
:param value: hh:mm:ss ; 00h00m00s ; seconds
:return: seconds
"""
try:
return int(value) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
match = _hours_minutes_seconds_re.match(value) or _hours_minutes_seconds_2_re.match(value)
if not match:
raise ValueError # depends on [control=['if'], data=[]]
s = 0
s += int(match.group('hours') or '0') * 60 * 60
s += int(match.group('minutes') or '0') * 60
s += int(match.group('seconds') or '0')
return s |
def store_to_file(self, filename):
    """Write vocab file to disk.
    Vocab files have one token per line. The file ends in a newline. Reserved
    tokens are written to the vocab file as well.
    Args:
      filename: Full path of the file to store the vocab to.
    """
    # Tokens are written in id order so line number == token id.
    with tf.gfile.Open(filename, "w") as f:
      for i in range(len(self._id_to_token)):
        f.write(self._id_to_token[i] + "\n") | def function[store_to_file, parameter[self, filename]]:
constant[Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
]
with call[name[tf].gfile.Open, parameter[name[filename], constant[w]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._id_to_token]]]]] begin[:]
call[name[f].write, parameter[binary_operation[call[name[self]._id_to_token][name[i]] + constant[
]]]] | keyword[def] identifier[store_to_file] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[with] identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_id_to_token] )):
identifier[f] . identifier[write] ( identifier[self] . identifier[_id_to_token] [ identifier[i] ]+ literal[string] ) | def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, 'w') as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + '\n') # depends on [control=['for'], data=['i']] # depends on [control=['with'], data=['f']] |
def get_pids_in_revision_chain(client, did):
    """Args: client: d1_client.cnclient.CoordinatingNodeClient or
    d1_client.mnclient.MemberNodeClient.
    did : str
        SID or a PID of any object in a revision chain.
    Returns:
    list of str:
        All PIDs in the chain. The returned list is in the same order as the chain. The
        initial PID is typically obtained by resolving a SID. If the given PID is not in
        a chain, a list containing the single object is returned.
    """
    # Extract a required value from a pyxb element.
    def _req(p):
        return d1_common.xml.get_req_val(p)
    # Extract an optional attribute; falsy when the attribute is absent.
    def _opt(p, a):
        return d1_common.xml.get_opt_val(p, a)
    sysmeta_pyxb = client.getSystemMetadata(did)
    # Walk to tail
    while _opt(sysmeta_pyxb, 'obsoletes'):
        sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletes'))
    chain_pid_list = [_req(sysmeta_pyxb.identifier)]
    # Walk from tail to head, recording traversed PIDs
    while _opt(sysmeta_pyxb, 'obsoletedBy'):
        sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletedBy'))
        chain_pid_list.append(_req(sysmeta_pyxb.identifier))
    return chain_pid_list | def function[get_pids_in_revision_chain, parameter[client, did]]:
constant[Args: client: d1_client.cnclient.CoordinatingNodeClient or
d1_client.mnclient.MemberNodeClient.
did : str
SID or a PID of any object in a revision chain.
Returns:
list of str:
All PIDs in the chain. The returned list is in the same order as the chain. The
initial PID is typically obtained by resolving a SID. If the given PID is not in
a chain, a list containing the single object is returned.
]
def function[_req, parameter[p]]:
return[call[name[d1_common].xml.get_req_val, parameter[name[p]]]]
def function[_opt, parameter[p, a]]:
return[call[name[d1_common].xml.get_opt_val, parameter[name[p], name[a]]]]
variable[sysmeta_pyxb] assign[=] call[name[client].getSystemMetadata, parameter[name[did]]]
while call[name[_opt], parameter[name[sysmeta_pyxb], constant[obsoletes]]] begin[:]
variable[sysmeta_pyxb] assign[=] call[name[client].getSystemMetadata, parameter[call[name[_opt], parameter[name[sysmeta_pyxb], constant[obsoletes]]]]]
variable[chain_pid_list] assign[=] list[[<ast.Call object at 0x7da18dc052a0>]]
while call[name[_opt], parameter[name[sysmeta_pyxb], constant[obsoletedBy]]] begin[:]
variable[sysmeta_pyxb] assign[=] call[name[client].getSystemMetadata, parameter[call[name[_opt], parameter[name[sysmeta_pyxb], constant[obsoletedBy]]]]]
call[name[chain_pid_list].append, parameter[call[name[_req], parameter[name[sysmeta_pyxb].identifier]]]]
return[name[chain_pid_list]] | keyword[def] identifier[get_pids_in_revision_chain] ( identifier[client] , identifier[did] ):
literal[string]
keyword[def] identifier[_req] ( identifier[p] ):
keyword[return] identifier[d1_common] . identifier[xml] . identifier[get_req_val] ( identifier[p] )
keyword[def] identifier[_opt] ( identifier[p] , identifier[a] ):
keyword[return] identifier[d1_common] . identifier[xml] . identifier[get_opt_val] ( identifier[p] , identifier[a] )
identifier[sysmeta_pyxb] = identifier[client] . identifier[getSystemMetadata] ( identifier[did] )
keyword[while] identifier[_opt] ( identifier[sysmeta_pyxb] , literal[string] ):
identifier[sysmeta_pyxb] = identifier[client] . identifier[getSystemMetadata] ( identifier[_opt] ( identifier[sysmeta_pyxb] , literal[string] ))
identifier[chain_pid_list] =[ identifier[_req] ( identifier[sysmeta_pyxb] . identifier[identifier] )]
keyword[while] identifier[_opt] ( identifier[sysmeta_pyxb] , literal[string] ):
identifier[sysmeta_pyxb] = identifier[client] . identifier[getSystemMetadata] ( identifier[_opt] ( identifier[sysmeta_pyxb] , literal[string] ))
identifier[chain_pid_list] . identifier[append] ( identifier[_req] ( identifier[sysmeta_pyxb] . identifier[identifier] ))
keyword[return] identifier[chain_pid_list] | def get_pids_in_revision_chain(client, did):
"""Args: client: d1_client.cnclient.CoordinatingNodeClient or
d1_client.mnclient.MemberNodeClient.
did : str
SID or a PID of any object in a revision chain.
Returns:
list of str:
All PIDs in the chain. The returned list is in the same order as the chain. The
initial PID is typically obtained by resolving a SID. If the given PID is not in
a chain, a list containing the single object is returned.
"""
def _req(p):
return d1_common.xml.get_req_val(p)
def _opt(p, a):
return d1_common.xml.get_opt_val(p, a)
sysmeta_pyxb = client.getSystemMetadata(did)
# Walk to tail
while _opt(sysmeta_pyxb, 'obsoletes'):
sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletes')) # depends on [control=['while'], data=[]]
chain_pid_list = [_req(sysmeta_pyxb.identifier)]
# Walk from tail to head, recording traversed PIDs
while _opt(sysmeta_pyxb, 'obsoletedBy'):
sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletedBy'))
chain_pid_list.append(_req(sysmeta_pyxb.identifier)) # depends on [control=['while'], data=[]]
return chain_pid_list |
def service_count(self, block_identifier: BlockSpecification) -> int:
        """Get the number of registered services.

        :param block_identifier: block at which to evaluate the contract state
        :return: value of the contract's serviceCount() view function
        """
        # Read-only eth_call against the contract at the given block height.
        result = self.proxy.contract.functions.serviceCount().call(
            block_identifier=block_identifier,
        )
        return result | def function[service_count, parameter[self, block_identifier]]:
constant[Get the number of registered services]
variable[result] assign[=] call[call[name[self].proxy.contract.functions.serviceCount, parameter[]].call, parameter[]]
return[name[result]] | keyword[def] identifier[service_count] ( identifier[self] , identifier[block_identifier] : identifier[BlockSpecification] )-> identifier[int] :
literal[string]
identifier[result] = identifier[self] . identifier[proxy] . identifier[contract] . identifier[functions] . identifier[serviceCount] (). identifier[call] (
identifier[block_identifier] = identifier[block_identifier] ,
)
keyword[return] identifier[result] | def service_count(self, block_identifier: BlockSpecification) -> int:
"""Get the number of registered services"""
result = self.proxy.contract.functions.serviceCount().call(block_identifier=block_identifier)
return result |
    def sim(self, src, tar, qval=2):
        r"""Return the Jaccard similarity of two strings.
        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison
        qval : int
            The length of each q-gram; 0 for non-q-gram version
        Returns
        -------
        float
            Jaccard similarity
        Examples
        --------
        >>> cmp = Jaccard()
        >>> cmp.sim('cat', 'hat')
        0.3333333333333333
        >>> cmp.sim('Niall', 'Neil')
        0.2222222222222222
        >>> cmp.sim('aluminum', 'Catalan')
        0.0625
        >>> cmp.sim('ATCG', 'TAGC')
        0.0
        """
        # Jaccard is the Tversky index with alpha = beta = 1; delegate to the base class.
        return super(self.__class__, self).sim(src, tar, qval, 1, 1) | def function[sim, parameter[self, src, tar, qval]]:
constant[Return the Jaccard similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Jaccard similarity
Examples
--------
>>> cmp = Jaccard()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
]
return[call[call[name[super], parameter[name[self].__class__, name[self]]].sim, parameter[name[src], name[tar], name[qval], constant[1], constant[1]]]] | keyword[def] identifier[sim] ( identifier[self] , identifier[src] , identifier[tar] , identifier[qval] = literal[int] ):
literal[string]
keyword[return] identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[sim] ( identifier[src] , identifier[tar] , identifier[qval] , literal[int] , literal[int] ) | def sim(self, src, tar, qval=2):
"""Return the Jaccard similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Jaccard similarity
Examples
--------
>>> cmp = Jaccard()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
return super(self.__class__, self).sim(src, tar, qval, 1, 1) |
    def _generate_base_lsid(self):
        """
        Generates and returns a base LSID of the form
        ``urn:lsid:<domain>:<namespace>``.
        :return: base LSID string
        """
        domain = self._generate_domain()
        namespace = self._generate_namespace()
        # Return the base LSID
        return "urn:lsid:" + domain + ":" + namespace | def function[_generate_base_lsid, parameter[self]]:
constant[
Generates and returns a base LSID
:return:
]
variable[domain] assign[=] call[name[self]._generate_domain, parameter[]]
variable[namespace] assign[=] call[name[self]._generate_namespace, parameter[]]
return[binary_operation[binary_operation[binary_operation[constant[urn:lsid:] + name[domain]] + constant[:]] + name[namespace]]] | keyword[def] identifier[_generate_base_lsid] ( identifier[self] ):
literal[string]
identifier[domain] = identifier[self] . identifier[_generate_domain] ()
identifier[namespace] = identifier[self] . identifier[_generate_namespace] ()
keyword[return] literal[string] + identifier[domain] + literal[string] + identifier[namespace] | def _generate_base_lsid(self):
"""
Generates and returns a base LSID
:return:
"""
domain = self._generate_domain()
namespace = self._generate_namespace()
# Return the base LSID
return 'urn:lsid:' + domain + ':' + namespace |
    def getScreenDims(self):
        """returns a tuple that contains (screen_width,screen_height)
        """
        # Dimensions come from the underlying ALE C library handle.
        width = ale_lib.getScreenWidth(self.obj)
        height = ale_lib.getScreenHeight(self.obj)
        return (width,height) | def function[getScreenDims, parameter[self]]:
constant[returns a tuple that contains (screen_width,screen_height)
]
variable[width] assign[=] call[name[ale_lib].getScreenWidth, parameter[name[self].obj]]
variable[height] assign[=] call[name[ale_lib].getScreenHeight, parameter[name[self].obj]]
return[tuple[[<ast.Name object at 0x7da1b0f41bd0>, <ast.Name object at 0x7da1b0f435b0>]]] | keyword[def] identifier[getScreenDims] ( identifier[self] ):
literal[string]
identifier[width] = identifier[ale_lib] . identifier[getScreenWidth] ( identifier[self] . identifier[obj] )
identifier[height] = identifier[ale_lib] . identifier[getScreenHeight] ( identifier[self] . identifier[obj] )
keyword[return] ( identifier[width] , identifier[height] ) | def getScreenDims(self):
"""returns a tuple that contains (screen_width,screen_height)
"""
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
return (width, height) |
    def get_total_result_count(self):
        """Returns the total count of all the results."""
        # Re-issue the search request (cacheable=True) and pull totalResults
        # from the response document.
        doc = self._request(self._ws_prefix + ".search", True)
        return _extract(doc, "totalResults") | def function[get_total_result_count, parameter[self]]:
constant[Returns the total count of all the results.]
variable[doc] assign[=] call[name[self]._request, parameter[binary_operation[name[self]._ws_prefix + constant[.search]], constant[True]]]
return[call[name[_extract], parameter[name[doc], constant[totalResults]]]] | keyword[def] identifier[get_total_result_count] ( identifier[self] ):
literal[string]
identifier[doc] = identifier[self] . identifier[_request] ( identifier[self] . identifier[_ws_prefix] + literal[string] , keyword[True] )
keyword[return] identifier[_extract] ( identifier[doc] , literal[string] ) | def get_total_result_count(self):
"""Returns the total count of all the results."""
doc = self._request(self._ws_prefix + '.search', True)
return _extract(doc, 'totalResults') |
    def get(self, request, id=None, **kwargs):
        """
        Handles get requests for either the collection or an object detail.

        With an ``id`` the matching object's detail view is returned (404 if
        absent from the queryset); without one the collection view is returned.
        Returns 403 when the permission check fails.
        """
        if not self.has_get_permission(request):
            return HttpResponseForbidden(_('You do not have permission to perform this action.'))
        if id:
            # Detail view: 404 if the object is not in this view's queryset.
            obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
            return self.get_object_detail(request, obj)
        else:
            return self.get_collection(request, **kwargs) | def function[get, parameter[self, request, id]]:
constant[
Handles get requests for either the collection or an object detail.
]
if <ast.UnaryOp object at 0x7da18ede7340> begin[:]
return[call[name[HttpResponseForbidden], parameter[call[name[_], parameter[constant[You do not have permission to perform this action.]]]]]]
if name[id] begin[:]
variable[obj] assign[=] call[name[get_object_or_404], parameter[call[name[self].queryset, parameter[name[request]]]]]
return[call[name[self].get_object_detail, parameter[name[request], name[obj]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[request] , identifier[id] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_get_permission] ( identifier[request] ):
keyword[return] identifier[HttpResponseForbidden] ( identifier[_] ( literal[string] ))
keyword[if] identifier[id] :
identifier[obj] = identifier[get_object_or_404] ( identifier[self] . identifier[queryset] ( identifier[request] ,** identifier[kwargs] ), identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[get_object_detail] ( identifier[request] , identifier[obj] )
keyword[else] :
keyword[return] identifier[self] . identifier[get_collection] ( identifier[request] ,** identifier[kwargs] ) | def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.')) # depends on [control=['if'], data=[]]
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj) # depends on [control=['if'], data=[]]
else:
return self.get_collection(request, **kwargs) |
    def dual_quaternion(self):
        """:obj:`DualQuaternion`: The DualQuaternion corresponding to this
        transform.
        """
        qr = self.quaternion
        # Pure quaternion (0, t/2) built from the translation.
        # NOTE(review): the textbook dual part is 0.5 * t * qr; presumably
        # DualQuaternion performs that product internally — confirm.
        qd = np.append([0], self.translation / 2.)
        return DualQuaternion(qr, qd) | def function[dual_quaternion, parameter[self]]:
constant[:obj:`DualQuaternion`: The DualQuaternion corresponding to this
transform.
]
variable[qr] assign[=] name[self].quaternion
variable[qd] assign[=] call[name[np].append, parameter[list[[<ast.Constant object at 0x7da1b12b5300>]], binary_operation[name[self].translation / constant[2.0]]]]
return[call[name[DualQuaternion], parameter[name[qr], name[qd]]]] | keyword[def] identifier[dual_quaternion] ( identifier[self] ):
literal[string]
identifier[qr] = identifier[self] . identifier[quaternion]
identifier[qd] = identifier[np] . identifier[append] ([ literal[int] ], identifier[self] . identifier[translation] / literal[int] )
keyword[return] identifier[DualQuaternion] ( identifier[qr] , identifier[qd] ) | def dual_quaternion(self):
""":obj:`DualQuaternion`: The DualQuaternion corresponding to this
transform.
"""
qr = self.quaternion
qd = np.append([0], self.translation / 2.0)
return DualQuaternion(qr, qd) |
    def _apply_labels(
        self, autolabel="none", xlabel=None, ylabel=None, data=None, channel_index=0
    ):
        """Apply x and y labels to axes.
        Parameters
        ----------
        autolabel : {'none', 'both', 'xy', 'x', 'y'} (optional)
            Label(s) to apply from data ('xy' is equivalent to 'both').
            Default is none.
        xlabel : string (optional)
            x label. Default is None.
        ylabel : string (optional)
            y label. Default is None.
        data : WrightTools.data.Data object (optional)
            data to read labels from. Default is None.
        channel_index : integer (optional)
            Channel index. Default is 0.
        """
        # read from data
        if autolabel in ["xy", "both", "x"] and not xlabel:
            xlabel = data.axes[0].label
        if autolabel in ["xy", "both", "y"] and not ylabel:
            # 1D data is labeled with the channel; 2D data with the second axis.
            if data.ndim == 1:
                ylabel = data.channels[channel_index].label
            elif data.ndim == 2:
                ylabel = data.axes[1].label
        # apply
        if xlabel:
            # A bare True means "use the data's own axis label".
            if isinstance(xlabel, bool):
                xlabel = data.axes[0].label
            self.set_xlabel(xlabel, fontsize=18)
        if ylabel:
            if isinstance(ylabel, bool):
                ylabel = data.axes[1].label
            self.set_ylabel(ylabel, fontsize=18) | def function[_apply_labels, parameter[self, autolabel, xlabel, ylabel, data, channel_index]]:
constant[Apply x and y labels to axes.
Parameters
----------
autolabel : {'none', 'both', 'x', 'y'} (optional)
Label(s) to apply from data. Default is none.
xlabel : string (optional)
x label. Default is None.
ylabel : string (optional)
y label. Default is None.
data : WrightTools.data.Data object (optional)
data to read labels from. Default is None.
channel_index : integer (optional)
Channel index. Default is 0.
]
if <ast.BoolOp object at 0x7da18eb54100> begin[:]
variable[xlabel] assign[=] call[name[data].axes][constant[0]].label
if <ast.BoolOp object at 0x7da18eb55660> begin[:]
if compare[name[data].ndim equal[==] constant[1]] begin[:]
variable[ylabel] assign[=] call[name[data].channels][name[channel_index]].label
if name[xlabel] begin[:]
if call[name[isinstance], parameter[name[xlabel], name[bool]]] begin[:]
variable[xlabel] assign[=] call[name[data].axes][constant[0]].label
call[name[self].set_xlabel, parameter[name[xlabel]]]
if name[ylabel] begin[:]
if call[name[isinstance], parameter[name[ylabel], name[bool]]] begin[:]
variable[ylabel] assign[=] call[name[data].axes][constant[1]].label
call[name[self].set_ylabel, parameter[name[ylabel]]] | keyword[def] identifier[_apply_labels] (
identifier[self] , identifier[autolabel] = literal[string] , identifier[xlabel] = keyword[None] , identifier[ylabel] = keyword[None] , identifier[data] = keyword[None] , identifier[channel_index] = literal[int]
):
literal[string]
keyword[if] identifier[autolabel] keyword[in] [ literal[string] , literal[string] , literal[string] ] keyword[and] keyword[not] identifier[xlabel] :
identifier[xlabel] = identifier[data] . identifier[axes] [ literal[int] ]. identifier[label]
keyword[if] identifier[autolabel] keyword[in] [ literal[string] , literal[string] , literal[string] ] keyword[and] keyword[not] identifier[ylabel] :
keyword[if] identifier[data] . identifier[ndim] == literal[int] :
identifier[ylabel] = identifier[data] . identifier[channels] [ identifier[channel_index] ]. identifier[label]
keyword[elif] identifier[data] . identifier[ndim] == literal[int] :
identifier[ylabel] = identifier[data] . identifier[axes] [ literal[int] ]. identifier[label]
keyword[if] identifier[xlabel] :
keyword[if] identifier[isinstance] ( identifier[xlabel] , identifier[bool] ):
identifier[xlabel] = identifier[data] . identifier[axes] [ literal[int] ]. identifier[label]
identifier[self] . identifier[set_xlabel] ( identifier[xlabel] , identifier[fontsize] = literal[int] )
keyword[if] identifier[ylabel] :
keyword[if] identifier[isinstance] ( identifier[ylabel] , identifier[bool] ):
identifier[ylabel] = identifier[data] . identifier[axes] [ literal[int] ]. identifier[label]
identifier[self] . identifier[set_ylabel] ( identifier[ylabel] , identifier[fontsize] = literal[int] ) | def _apply_labels(self, autolabel='none', xlabel=None, ylabel=None, data=None, channel_index=0):
"""Apply x and y labels to axes.
Parameters
----------
autolabel : {'none', 'both', 'x', 'y'} (optional)
Label(s) to apply from data. Default is none.
xlabel : string (optional)
x label. Default is None.
ylabel : string (optional)
y label. Default is None.
data : WrightTools.data.Data object (optional)
data to read labels from. Default is None.
channel_index : integer (optional)
Channel index. Default is 0.
"""
# read from data
if autolabel in ['xy', 'both', 'x'] and (not xlabel):
xlabel = data.axes[0].label # depends on [control=['if'], data=[]]
if autolabel in ['xy', 'both', 'y'] and (not ylabel):
if data.ndim == 1:
ylabel = data.channels[channel_index].label # depends on [control=['if'], data=[]]
elif data.ndim == 2:
ylabel = data.axes[1].label # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# apply
if xlabel:
if isinstance(xlabel, bool):
xlabel = data.axes[0].label # depends on [control=['if'], data=[]]
self.set_xlabel(xlabel, fontsize=18) # depends on [control=['if'], data=[]]
if ylabel:
if isinstance(ylabel, bool):
ylabel = data.axes[1].label # depends on [control=['if'], data=[]]
self.set_ylabel(ylabel, fontsize=18) # depends on [control=['if'], data=[]] |
# Build a flat subject identifier of the form "<study_id>/<token>".
# NOTE(review): reads self.study_id — assumed to be set by the enclosing
# class's initializer; confirm against the class definition.
def generate_subid(self, token=None):
'''assumes a flat (file system) database, organized by experiment id, and
subject id, with data (json) organized by subject identifier
'''
# Not headless auto-increments
# No token supplied: mint a random UUID4 string so each subject still
# gets a unique folder name.
if not token:
token = str(uuid.uuid4())
# Headless doesn't use any folder_id, just generated token folder
return "%s/%s" % (self.study_id, token) | def function[generate_subid, parameter[self, token]]:
constant[assumes a flat (file system) database, organized by experiment id, and
subject id, with data (json) organized by subject identifier
]
if <ast.UnaryOp object at 0x7da18fe93280> begin[:]
variable[token] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
return[binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18fe93a90>, <ast.Name object at 0x7da18fe91060>]]]] | keyword[def] identifier[generate_subid] ( identifier[self] , identifier[token] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[token] :
identifier[token] = identifier[str] ( identifier[uuid] . identifier[uuid4] ())
keyword[return] literal[string] %( identifier[self] . identifier[study_id] , identifier[token] ) | def generate_subid(self, token=None):
"""assumes a flat (file system) database, organized by experiment id, and
subject id, with data (json) organized by subject identifier
"""
# Not headless auto-increments
if not token:
token = str(uuid.uuid4()) # depends on [control=['if'], data=[]]
# Headless doesn't use any folder_id, just generated token folder
return '%s/%s' % (self.study_id, token) |
# Flush a byte to the device's GPIO register, optionally updating the
# locally buffered value first.
def write_gpio(self, gpio=None):
"""Write the specified byte value to the GPIO registor. If no value
specified the current buffered value will be written.
"""
# Update the buffered copy only when an explicit value was given;
# otherwise re-send whatever was last buffered in self.gpio.
if gpio is not None:
self.gpio = gpio
# Write the buffered byte(s) to the GPIO register address on the device.
self._device.writeList(self.GPIO, self.gpio) | def function[write_gpio, parameter[self, gpio]]:
constant[Write the specified byte value to the GPIO registor. If no value
specified the current buffered value will be written.
]
if compare[name[gpio] is_not constant[None]] begin[:]
name[self].gpio assign[=] name[gpio]
call[name[self]._device.writeList, parameter[name[self].GPIO, name[self].gpio]] | keyword[def] identifier[write_gpio] ( identifier[self] , identifier[gpio] = keyword[None] ):
literal[string]
keyword[if] identifier[gpio] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[gpio] = identifier[gpio]
identifier[self] . identifier[_device] . identifier[writeList] ( identifier[self] . identifier[GPIO] , identifier[self] . identifier[gpio] ) | def write_gpio(self, gpio=None):
"""Write the specified byte value to the GPIO registor. If no value
specified the current buffered value will be written.
"""
if gpio is not None:
self.gpio = gpio # depends on [control=['if'], data=['gpio']]
self._device.writeList(self.GPIO, self.gpio) |
# Return the current local time, either as a naive datetime object or,
# when the flag is truthy, formatted as "YYYY-MM-DD HH:MM:SS".
# NOTE(review): the parameter name `str` shadows the builtin; renaming it
# would break callers using `str=True` as a keyword, so it is left as-is.
def now_time(str=False):
"""Get the current time."""
if str:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return datetime.datetime.now() | def function[now_time, parameter[str]]:
constant[Get the current time.]
if name[str] begin[:]
return[call[call[name[datetime].datetime.now, parameter[]].strftime, parameter[constant[%Y-%m-%d %H:%M:%S]]]]
return[call[name[datetime].datetime.now, parameter[]]] | keyword[def] identifier[now_time] ( identifier[str] = keyword[False] ):
literal[string]
keyword[if] identifier[str] :
keyword[return] identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )
keyword[return] identifier[datetime] . identifier[datetime] . identifier[now] () | def now_time(str=False):
"""Get the current time."""
if str:
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # depends on [control=['if'], data=[]]
return datetime.datetime.now() |
# Append raw Gmsh code line(s) to the accumulated code buffer.
def add_raw_code(self, string_or_list):
"""Add raw Gmsh code.
Accepts either a single string or a list of strings; each string is
appended verbatim to the internal _GMSH_CODE buffer.
"""
# Single string: append it directly.
if _is_string(string_or_list):
self._GMSH_CODE.append(string_or_list)
else:
# Anything else must be a list of strings; append each in order.
assert isinstance(string_or_list, list)
for string in string_or_list:
self._GMSH_CODE.append(string)
return | def function[add_raw_code, parameter[self, string_or_list]]:
constant[Add raw Gmsh code.
]
if call[name[_is_string], parameter[name[string_or_list]]] begin[:]
call[name[self]._GMSH_CODE.append, parameter[name[string_or_list]]]
return[None] | keyword[def] identifier[add_raw_code] ( identifier[self] , identifier[string_or_list] ):
literal[string]
keyword[if] identifier[_is_string] ( identifier[string_or_list] ):
identifier[self] . identifier[_GMSH_CODE] . identifier[append] ( identifier[string_or_list] )
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[string_or_list] , identifier[list] )
keyword[for] identifier[string] keyword[in] identifier[string_or_list] :
identifier[self] . identifier[_GMSH_CODE] . identifier[append] ( identifier[string] )
keyword[return] | def add_raw_code(self, string_or_list):
"""Add raw Gmsh code.
"""
if _is_string(string_or_list):
self._GMSH_CODE.append(string_or_list) # depends on [control=['if'], data=[]]
else:
assert isinstance(string_or_list, list)
for string in string_or_list:
self._GMSH_CODE.append(string) # depends on [control=['for'], data=['string']]
return |
def add_new_directory(self, node):
    """
    Adds a new directory next to given Node associated path.

    :param node: Node.
    :type node: ProjectNode or DirectoryNode or FileNode
    :return: Method success.
    :rtype: bool
    """

    # Editor-internal ("authoring") nodes cannot receive new directories.
    if self.__script_editor.model.is_authoring_node(node):
        return False

    # Prompt the user for the new directory name; state is False on cancel.
    directory, state = QInputDialog.getText(self, "Add Directory", "Enter your new directory name:")
    if not state:
        return False

    # Resolve the parent directory: project/directory nodes are used
    # directly, file nodes fall back to their containing directory.
    # NOTE(review): parent_directory is unbound for any other node family —
    # assumed unreachable given the documented node types; confirm.
    if node.family in ("Project", "Directory"):
        parent_directory = node.path
    elif node.family == "File":
        parent_directory = os.path.dirname(node.path)

    directory = foundations.strings.to_string(directory)
    if not directory in os.listdir(parent_directory):
        directory = os.path.join(parent_directory, directory)
        LOGGER.info("{0} | Adding '{1}' directory!".format(self.__class__.__name__, directory))
        os.makedirs(directory)
    else:
        # Bug fix: the original passed the undefined name 'file' here
        # (a NameError on Python 3); report the conflicting directory
        # name instead.
        self.__raise_file_system_exception(directory, parent_directory)
return True | def function[add_new_directory, parameter[self, node]]:
constant[
Adds a new directory next to given Node associated path.
:param node: Node.
:type node: ProjectNode or DirectoryNode or FileNode
:return: Method success.
:rtype: bool
]
if call[name[self].__script_editor.model.is_authoring_node, parameter[name[node]]] begin[:]
return[constant[False]]
<ast.Tuple object at 0x7da1b08acac0> assign[=] call[name[QInputDialog].getText, parameter[name[self], constant[Add Directory], constant[Enter your new directory name:]]]
if <ast.UnaryOp object at 0x7da1b08ad810> begin[:]
return[constant[False]]
if compare[name[node].family in tuple[[<ast.Constant object at 0x7da1b08ae350>, <ast.Constant object at 0x7da1b08acb80>]]] begin[:]
variable[parent_directory] assign[=] name[node].path
variable[directory] assign[=] call[name[foundations].strings.to_string, parameter[name[directory]]]
if <ast.UnaryOp object at 0x7da1b08acfd0> begin[:]
variable[directory] assign[=] call[name[os].path.join, parameter[name[parent_directory], name[directory]]]
call[name[LOGGER].info, parameter[call[constant[{0} | Adding '{1}' directory!].format, parameter[name[self].__class__.__name__, name[directory]]]]]
call[name[os].makedirs, parameter[name[directory]]]
return[constant[True]] | keyword[def] identifier[add_new_directory] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[self] . identifier[__script_editor] . identifier[model] . identifier[is_authoring_node] ( identifier[node] ):
keyword[return] keyword[False]
identifier[directory] , identifier[state] = identifier[QInputDialog] . identifier[getText] ( identifier[self] , literal[string] , literal[string] )
keyword[if] keyword[not] identifier[state] :
keyword[return] keyword[False]
keyword[if] identifier[node] . identifier[family] keyword[in] ( literal[string] , literal[string] ):
identifier[parent_directory] = identifier[node] . identifier[path]
keyword[elif] identifier[node] . identifier[family] == literal[string] :
identifier[parent_directory] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[node] . identifier[path] )
identifier[directory] = identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[directory] )
keyword[if] keyword[not] identifier[directory] keyword[in] identifier[os] . identifier[listdir] ( identifier[parent_directory] ):
identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[parent_directory] , identifier[directory] )
identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[directory] ))
identifier[os] . identifier[makedirs] ( identifier[directory] )
keyword[else] :
identifier[self] . identifier[__raise_file_system_exception] ( identifier[file] , identifier[parent_directory] )
keyword[return] keyword[True] | def add_new_directory(self, node):
"""
Adds a new directory next to given Node associated path.
:param node: Node.
:type node: ProjectNode or DirectoryNode or FileNode
:return: Method success.
:rtype: bool
"""
if self.__script_editor.model.is_authoring_node(node):
return False # depends on [control=['if'], data=[]]
(directory, state) = QInputDialog.getText(self, 'Add Directory', 'Enter your new directory name:')
if not state:
return False # depends on [control=['if'], data=[]]
if node.family in ('Project', 'Directory'):
parent_directory = node.path # depends on [control=['if'], data=[]]
elif node.family == 'File':
parent_directory = os.path.dirname(node.path) # depends on [control=['if'], data=[]]
directory = foundations.strings.to_string(directory)
if not directory in os.listdir(parent_directory):
directory = os.path.join(parent_directory, directory)
LOGGER.info("{0} | Adding '{1}' directory!".format(self.__class__.__name__, directory))
os.makedirs(directory) # depends on [control=['if'], data=[]]
else:
self.__raise_file_system_exception(file, parent_directory)
return True |
# Generate a short random suffix of k lowercase letters and digits.
# NOTE(review): uses the `random` module, which is not cryptographically
# secure — fine for resource-name uniqueness, not for secrets (use
# `secrets.choice` if these ids ever become security-sensitive).
def random_id(k=5):
"""Random id to use for AWS identifiers."""
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k)) | def function[random_id, parameter[k]]:
constant[Random id to use for AWS identifiers.]
return[call[constant[].join, parameter[call[name[random].choices, parameter[binary_operation[name[string].ascii_lowercase + name[string].digits]]]]]] | keyword[def] identifier[random_id] ( identifier[k] = literal[int] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[random] . identifier[choices] ( identifier[string] . identifier[ascii_lowercase] + identifier[string] . identifier[digits] , identifier[k] = identifier[k] )) | def random_id(k=5):
"""Random id to use for AWS identifiers."""
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k)) |
# Open the built HTML documentation index in the default web browser.
def view(ctx):
""" Build and view docs.
"""
report.info(ctx, "docs.view", f"viewing documentation")
# Locate the Sphinx-style build output under <docs dir>/build/html.
build_path = ctx.docs.directory / "build" / "html" / "index.html"
# Convert the filesystem path to a URL path so it is browser-safe
# (handles spaces and platform separators).
build_path = pathname2url(build_path.as_posix())
webbrowser.open(f"file:{build_path!s}") | def function[view, parameter[ctx]]:
constant[ Build and view docs.
]
call[name[report].info, parameter[name[ctx], constant[docs.view], <ast.JoinedStr object at 0x7da1b0e38a60>]]
variable[build_path] assign[=] binary_operation[binary_operation[binary_operation[name[ctx].docs.directory / constant[build]] / constant[html]] / constant[index.html]]
variable[build_path] assign[=] call[name[pathname2url], parameter[call[name[build_path].as_posix, parameter[]]]]
call[name[webbrowser].open, parameter[<ast.JoinedStr object at 0x7da18eb543a0>]] | keyword[def] identifier[view] ( identifier[ctx] ):
literal[string]
identifier[report] . identifier[info] ( identifier[ctx] , literal[string] , literal[string] )
identifier[build_path] = identifier[ctx] . identifier[docs] . identifier[directory] / literal[string] / literal[string] / literal[string]
identifier[build_path] = identifier[pathname2url] ( identifier[build_path] . identifier[as_posix] ())
identifier[webbrowser] . identifier[open] ( literal[string] ) | def view(ctx):
""" Build and view docs.
"""
report.info(ctx, 'docs.view', f'viewing documentation')
build_path = ctx.docs.directory / 'build' / 'html' / 'index.html'
build_path = pathname2url(build_path.as_posix())
webbrowser.open(f'file:{build_path!s}') |
# Linear layer whose weight columns are rescaled to unit L2 norm before
# the matrix multiply: weight / sqrt(sum(weight**2, dim=0)).
# NOTE(review): no bias term is passed to F.linear; assumed intentional.
def forward(self, input):
"""Feed-forward through the network."""
return th.nn.functional.linear(input, self.weight.div(self.weight.pow(2).sum(0).sqrt())) | def function[forward, parameter[self, input]]:
constant[Feed-forward through the network.]
return[call[name[th].nn.functional.linear, parameter[name[input], call[name[self].weight.div, parameter[call[call[call[name[self].weight.pow, parameter[constant[2]]].sum, parameter[constant[0]]].sqrt, parameter[]]]]]]] | keyword[def] identifier[forward] ( identifier[self] , identifier[input] ):
literal[string]
keyword[return] identifier[th] . identifier[nn] . identifier[functional] . identifier[linear] ( identifier[input] , identifier[self] . identifier[weight] . identifier[div] ( identifier[self] . identifier[weight] . identifier[pow] ( literal[int] ). identifier[sum] ( literal[int] ). identifier[sqrt] ())) | def forward(self, input):
"""Feed-forward through the network."""
return th.nn.functional.linear(input, self.weight.div(self.weight.pow(2).sum(0).sqrt())) |
# Fetch the current user from the server by session token and wrap the
# response in a User instance.
def become(cls, session_token):
"""
Fetch the user object associated with a session token.
:param session_token: the user's session token
:return: leancloud.User
"""
response = client.get('/users/me', params={'session_token': session_token})
content = response.json()
user = cls()
# Populate the instance from the server payload and mark it as saved.
user._update_data(content)
user._handle_save_result(True)
# Drop any stale smsCode attribute that is not present in the server
# response, so the local object mirrors the server state.
if 'smsCode' not in content:
user._attributes.pop('smsCode', None)
return user | def function[become, parameter[cls, session_token]]:
constant[
通过 session token 获取用户对象
:param session_token: 用户的 session token
:return: leancloud.User
]
variable[response] assign[=] call[name[client].get, parameter[constant[/users/me]]]
variable[content] assign[=] call[name[response].json, parameter[]]
variable[user] assign[=] call[name[cls], parameter[]]
call[name[user]._update_data, parameter[name[content]]]
call[name[user]._handle_save_result, parameter[constant[True]]]
if compare[constant[smsCode] <ast.NotIn object at 0x7da2590d7190> name[content]] begin[:]
call[name[user]._attributes.pop, parameter[constant[smsCode], constant[None]]]
return[name[user]] | keyword[def] identifier[become] ( identifier[cls] , identifier[session_token] ):
literal[string]
identifier[response] = identifier[client] . identifier[get] ( literal[string] , identifier[params] ={ literal[string] : identifier[session_token] })
identifier[content] = identifier[response] . identifier[json] ()
identifier[user] = identifier[cls] ()
identifier[user] . identifier[_update_data] ( identifier[content] )
identifier[user] . identifier[_handle_save_result] ( keyword[True] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[content] :
identifier[user] . identifier[_attributes] . identifier[pop] ( literal[string] , keyword[None] )
keyword[return] identifier[user] | def become(cls, session_token):
"""
通过 session token 获取用户对象
:param session_token: 用户的 session token
:return: leancloud.User
"""
response = client.get('/users/me', params={'session_token': session_token})
content = response.json()
user = cls()
user._update_data(content)
user._handle_save_result(True)
if 'smsCode' not in content:
user._attributes.pop('smsCode', None) # depends on [control=['if'], data=[]]
return user |
# Run the config through each registered transform in order; each
# transform's output feeds the next (a simple pipeline).
def serialize(self, config):
"""
:param config:
:type config: yowsup.config.base.config.Config
:return:
:rtype: bytes
"""
for transform in self._transforms:
config = transform.transform(config)
return config | def function[serialize, parameter[self, config]]:
constant[
:param config:
:type config: yowsup.config.base.config.Config
:return:
:rtype: bytes
]
for taget[name[transform]] in starred[name[self]._transforms] begin[:]
variable[config] assign[=] call[name[transform].transform, parameter[name[config]]]
return[name[config]] | keyword[def] identifier[serialize] ( identifier[self] , identifier[config] ):
literal[string]
keyword[for] identifier[transform] keyword[in] identifier[self] . identifier[_transforms] :
identifier[config] = identifier[transform] . identifier[transform] ( identifier[config] )
keyword[return] identifier[config] | def serialize(self, config):
"""
:param config:
:type config: yowsup.config.base.config.Config
:return:
:rtype: bytes
"""
for transform in self._transforms:
config = transform.transform(config) # depends on [control=['for'], data=['transform']]
return config |
# POST a new top-level comment to a Bitbucket Server pull request via
# the 1.0 REST API.
def add_pull_request_comment(self, project, repository, pull_request_id, text):
"""
Add comment into pull request
:param project: project key
:param repository: repository slug
:param pull_request_id: the ID of the pull request within the repository
:param text: comment text
:return: the parsed response of the POST request
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/pull-requests/{pullRequestId}/comments'.format(
project=project,
repository=repository,
pullRequestId=pull_request_id)
body = {'text': text}
return self.post(url, data=body) | def function[add_pull_request_comment, parameter[self, project, repository, pull_request_id, text]]:
constant[
Add comment into pull request
:param project:
:param repository:
:param pull_request_id: the ID of the pull request within the repository
:param text comment text
:return:
]
variable[url] assign[=] call[constant[rest/api/1.0/projects/{project}/repos/{repository}/pull-requests/{pullRequestId}/comments].format, parameter[]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da20e955930>], [<ast.Name object at 0x7da20e956950>]]
return[call[name[self].post, parameter[name[url]]]] | keyword[def] identifier[add_pull_request_comment] ( identifier[self] , identifier[project] , identifier[repository] , identifier[pull_request_id] , identifier[text] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[project] = identifier[project] ,
identifier[repository] = identifier[repository] ,
identifier[pullRequestId] = identifier[pull_request_id] )
identifier[body] ={ literal[string] : identifier[text] }
keyword[return] identifier[self] . identifier[post] ( identifier[url] , identifier[data] = identifier[body] ) | def add_pull_request_comment(self, project, repository, pull_request_id, text):
"""
Add comment into pull request
:param project:
:param repository:
:param pull_request_id: the ID of the pull request within the repository
:param text comment text
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/pull-requests/{pullRequestId}/comments'.format(project=project, repository=repository, pullRequestId=pull_request_id)
body = {'text': text}
return self.post(url, data=body) |
# Decorator factory: tags the decorated class or function with an
# execution-group identifier (appended to an `execution_groups` list
# attribute, created on first use).
# NOTE(review): the parameter name `id` shadows the builtin; renaming it
# could break keyword callers, so it is left as-is.
def execution_group(id):
"""A decorator designed to be used with both classes and functions. Pass the
decorator some object that represents the Execution Group the decorated object
should be added to. If one test function in an Execution Group fails then no
more tests from that Execution Group will run."""
def add_execution_group(class_or_func):
# Accumulate onto any existing tag list so an object can belong to
# several execution groups.
execution_groups = getattr(class_or_func, 'execution_groups', [])
execution_groups.append(id)
class_or_func.execution_groups = execution_groups
return class_or_func
return add_execution_group | def function[execution_group, parameter[id]]:
constant[A decorator designed to be used with both classes and functions. Pass the
decorator some object that represents the Execution Group the decorated object
should be added to. If one test function in an Execution Group fails then no
more tests from that Execution Group will run.]
def function[add_execution_group, parameter[class_or_func]]:
variable[execution_groups] assign[=] call[name[getattr], parameter[name[class_or_func], constant[execution_groups], list[[]]]]
call[name[execution_groups].append, parameter[name[id]]]
name[class_or_func].execution_groups assign[=] name[execution_groups]
return[name[class_or_func]]
return[name[add_execution_group]] | keyword[def] identifier[execution_group] ( identifier[id] ):
literal[string]
keyword[def] identifier[add_execution_group] ( identifier[class_or_func] ):
identifier[execution_groups] = identifier[getattr] ( identifier[class_or_func] , literal[string] ,[])
identifier[execution_groups] . identifier[append] ( identifier[id] )
identifier[class_or_func] . identifier[execution_groups] = identifier[execution_groups]
keyword[return] identifier[class_or_func]
keyword[return] identifier[add_execution_group] | def execution_group(id):
"""A decorator designed to be used with both classes and functions. Pass the
decorator some object that represents the Execution Group the decorated object
should be added to. If one test function in an Execution Group fails then no
more tests from that Execution Group will run."""
def add_execution_group(class_or_func):
execution_groups = getattr(class_or_func, 'execution_groups', [])
execution_groups.append(id)
class_or_func.execution_groups = execution_groups
return class_or_func
return add_execution_group |
# Map an array of JSON dicts to model instances of `cls`, resolving each
# object's id either from the standard 'id' key or from the class's
# declared `id_attribute`. Objects with neither are silently skipped.
def make_list(json_arr, client, cls, parent_id=None):
"""
Returns a list of Populated objects of the given class type. This
should not be called outside of the :any:`LinodeClient` class.
:param json_arr: The array of JSON data to make into a list
:param client: The LinodeClient to pass to new objects
:param parent_id: The parent id for derived objects
:returns: A list of models from the JSON
"""
result = []
for obj in json_arr:
id_val = None
if 'id' in obj:
id_val = obj['id']
elif hasattr(cls, 'id_attribute') and getattr(cls, 'id_attribute') in obj:
# Fall back to the model's custom identifier field.
id_val = obj[getattr(cls, 'id_attribute')]
else:
# No usable identifier: skip this entry entirely.
continue
o = cls.make_instance(id_val, client, parent_id=parent_id, json=obj)
result.append(o)
return result | def function[make_list, parameter[json_arr, client, cls, parent_id]]:
constant[
Returns a list of Populated objects of the given class type. This
should not be called outside of the :any:`LinodeClient` class.
:param json_arr: The array of JSON data to make into a list
:param client: The LinodeClient to pass to new objects
:param parent_id: The parent id for derived objects
:returns: A list of models from the JSON
]
variable[result] assign[=] list[[]]
for taget[name[obj]] in starred[name[json_arr]] begin[:]
variable[id_val] assign[=] constant[None]
if compare[constant[id] in name[obj]] begin[:]
variable[id_val] assign[=] call[name[obj]][constant[id]]
variable[o] assign[=] call[name[cls].make_instance, parameter[name[id_val], name[client]]]
call[name[result].append, parameter[name[o]]]
return[name[result]] | keyword[def] identifier[make_list] ( identifier[json_arr] , identifier[client] , identifier[cls] , identifier[parent_id] = keyword[None] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[obj] keyword[in] identifier[json_arr] :
identifier[id_val] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[obj] :
identifier[id_val] = identifier[obj] [ literal[string] ]
keyword[elif] identifier[hasattr] ( identifier[cls] , literal[string] ) keyword[and] identifier[getattr] ( identifier[cls] , literal[string] ) keyword[in] identifier[obj] :
identifier[id_val] = identifier[obj] [ identifier[getattr] ( identifier[cls] , literal[string] )]
keyword[else] :
keyword[continue]
identifier[o] = identifier[cls] . identifier[make_instance] ( identifier[id_val] , identifier[client] , identifier[parent_id] = identifier[parent_id] , identifier[json] = identifier[obj] )
identifier[result] . identifier[append] ( identifier[o] )
keyword[return] identifier[result] | def make_list(json_arr, client, cls, parent_id=None):
"""
Returns a list of Populated objects of the given class type. This
should not be called outside of the :any:`LinodeClient` class.
:param json_arr: The array of JSON data to make into a list
:param client: The LinodeClient to pass to new objects
:param parent_id: The parent id for derived objects
:returns: A list of models from the JSON
"""
result = []
for obj in json_arr:
id_val = None
if 'id' in obj:
id_val = obj['id'] # depends on [control=['if'], data=['obj']]
elif hasattr(cls, 'id_attribute') and getattr(cls, 'id_attribute') in obj:
id_val = obj[getattr(cls, 'id_attribute')] # depends on [control=['if'], data=[]]
else:
continue
o = cls.make_instance(id_val, client, parent_id=parent_id, json=obj)
result.append(o) # depends on [control=['for'], data=['obj']]
return result |
# Emit `text` on a fresh line indented to the current level when
# indentation is enabled; otherwise emit it inline as-is.
def _fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
if self._do_indent:
self._write("\n"+" "*self._indent + text)
else:
self._write(text) | def function[_fill, parameter[self, text]]:
constant[Indent a piece of text, according to the current indentation level]
if name[self]._do_indent begin[:]
call[name[self]._write, parameter[binary_operation[binary_operation[constant[
] + binary_operation[constant[ ] * name[self]._indent]] + name[text]]]] | keyword[def] identifier[_fill] ( identifier[self] , identifier[text] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[_do_indent] :
identifier[self] . identifier[_write] ( literal[string] + literal[string] * identifier[self] . identifier[_indent] + identifier[text] )
keyword[else] :
identifier[self] . identifier[_write] ( identifier[text] ) | def _fill(self, text=''):
"""Indent a piece of text, according to the current indentation level"""
if self._do_indent:
self._write('\n' + ' ' * self._indent + text) # depends on [control=['if'], data=[]]
else:
self._write(text) |
# Version-portable check for typing.Union[...], probing the private
# typing internals that differ across Python 3.5/3.6/3.7+.
def is_generic_union(type_: Type) -> bool:
"""Determines whether a type is a Union[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Union[...something...].
"""
if hasattr(typing, '_GenericAlias'):
# 3.7
return (isinstance(type_, typing._GenericAlias) and # type: ignore
type_.__origin__ is Union)
else:
if hasattr(typing, '_Union'):
# 3.6
return isinstance(type_, typing._Union) # type: ignore
else:
# 3.5 and earlier (?)
return isinstance(type_, typing.UnionMeta) # type: ignore
# NOTE(review): unreachable — every branch above returns; kept as a
# defensive guard in case a future typing version matches none of them.
raise RuntimeError('Could not determine whether type is a Union. Is this'
' a YAtiML-supported Python version?') | def function[is_generic_union, parameter[type_]]:
constant[Determines whether a type is a Union[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Union[...something...].
]
if call[name[hasattr], parameter[name[typing], constant[_GenericAlias]]] begin[:]
return[<ast.BoolOp object at 0x7da2041d9a80>]
<ast.Raise object at 0x7da2041d8f40> | keyword[def] identifier[is_generic_union] ( identifier[type_] : identifier[Type] )-> identifier[bool] :
literal[string]
keyword[if] identifier[hasattr] ( identifier[typing] , literal[string] ):
keyword[return] ( identifier[isinstance] ( identifier[type_] , identifier[typing] . identifier[_GenericAlias] ) keyword[and]
identifier[type_] . identifier[__origin__] keyword[is] identifier[Union] )
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[typing] , literal[string] ):
keyword[return] identifier[isinstance] ( identifier[type_] , identifier[typing] . identifier[_Union] )
keyword[else] :
keyword[return] identifier[isinstance] ( identifier[type_] , identifier[typing] . identifier[UnionMeta] )
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] ) | def is_generic_union(type_: Type) -> bool:
"""Determines whether a type is a Union[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Union[...something...].
"""
if hasattr(typing, '_GenericAlias'):
# 3.7
# type: ignore
return isinstance(type_, typing._GenericAlias) and type_.__origin__ is Union # depends on [control=['if'], data=[]]
elif hasattr(typing, '_Union'):
# 3.6
return isinstance(type_, typing._Union) # type: ignore # depends on [control=['if'], data=[]]
else:
# 3.5 and earlier (?)
return isinstance(type_, typing.UnionMeta) # type: ignore
raise RuntimeError('Could not determine whether type is a Union. Is this a YAtiML-supported Python version?') |
# Drop all Airflow and Flask-AppBuilder tables (plus the alembic version
# table) from the configured database engine, then re-initialize it.
def resetdb():
"""
Clear out the database
"""
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info("Dropping tables that exist")
models.base.Base.metadata.drop_all(settings.engine)
# Also drop alembic's migration-version bookkeeping table, if present,
# so initdb() starts from a clean slate.
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb() | def function[resetdb, parameter[]]:
constant[
Clear out the database
]
from relative_module[airflow] import module[models]
from relative_module[alembic.migration] import module[MigrationContext]
call[name[log].info, parameter[constant[Dropping tables that exist]]]
call[name[models].base.Base.metadata.drop_all, parameter[name[settings].engine]]
variable[mc] assign[=] call[name[MigrationContext].configure, parameter[name[settings].engine]]
if call[name[mc]._version.exists, parameter[name[settings].engine]] begin[:]
call[name[mc]._version.drop, parameter[name[settings].engine]]
from relative_module[flask_appbuilder.models.sqla] import module[Base]
call[name[Base].metadata.drop_all, parameter[name[settings].engine]]
call[name[initdb], parameter[]] | keyword[def] identifier[resetdb] ():
literal[string]
keyword[from] identifier[airflow] keyword[import] identifier[models]
keyword[from] identifier[alembic] . identifier[migration] keyword[import] identifier[MigrationContext]
identifier[log] . identifier[info] ( literal[string] )
identifier[models] . identifier[base] . identifier[Base] . identifier[metadata] . identifier[drop_all] ( identifier[settings] . identifier[engine] )
identifier[mc] = identifier[MigrationContext] . identifier[configure] ( identifier[settings] . identifier[engine] )
keyword[if] identifier[mc] . identifier[_version] . identifier[exists] ( identifier[settings] . identifier[engine] ):
identifier[mc] . identifier[_version] . identifier[drop] ( identifier[settings] . identifier[engine] )
keyword[from] identifier[flask_appbuilder] . identifier[models] . identifier[sqla] keyword[import] identifier[Base]
identifier[Base] . identifier[metadata] . identifier[drop_all] ( identifier[settings] . identifier[engine] )
identifier[initdb] () | def resetdb():
"""
Clear out the database
"""
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info('Dropping tables that exist')
models.base.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine) # depends on [control=['if'], data=[]]
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb() |
def path(self, filename, folder=None):
    """Return the absolute path a file would have inside this upload set.

    No check is made that the file actually exists on disk.

    :param filename: The filename to build the path for.
    :param folder: Optional subfolder (relative to the set's destination)
        previously used when saving the file.
    """
    base = self.config.destination
    if folder is None:
        return os.path.join(base, filename)
    # os.path.join(base, folder, filename) is equivalent to joining the
    # folder first and the filename second.
    return os.path.join(base, folder, filename)
constant[
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
]
if compare[name[folder] is_not constant[None]] begin[:]
variable[target_folder] assign[=] call[name[os].path.join, parameter[name[self].config.destination, name[folder]]]
return[call[name[os].path.join, parameter[name[target_folder], name[filename]]]] | keyword[def] identifier[path] ( identifier[self] , identifier[filename] , identifier[folder] = keyword[None] ):
literal[string]
keyword[if] identifier[folder] keyword[is] keyword[not] keyword[None] :
identifier[target_folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[config] . identifier[destination] , identifier[folder] )
keyword[else] :
identifier[target_folder] = identifier[self] . identifier[config] . identifier[destination]
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[target_folder] , identifier[filename] ) | def path(self, filename, folder=None):
"""
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
"""
if folder is not None:
target_folder = os.path.join(self.config.destination, folder) # depends on [control=['if'], data=['folder']]
else:
target_folder = self.config.destination
return os.path.join(target_folder, filename) |
def raise_for_redefined_namespace(self, line: str, position: int, namespace: str) -> None:
    """Raise an exception if *namespace* has already been defined.

    The check is a no-op when redefinition is permitted
    (``self.disallow_redefinition`` is falsy).

    :raises: RedefinedNamespaceError
    """
    if not self.disallow_redefinition:
        return
    if not self.has_namespace(namespace):
        return
    raise RedefinedNamespaceError(self.get_line_number(), line, position, namespace)
constant[Raise an exception if a namespace is already defined.
:raises: RedefinedNamespaceError
]
if <ast.BoolOp object at 0x7da1b0ebcb20> begin[:]
<ast.Raise object at 0x7da1b0ebd570> | keyword[def] identifier[raise_for_redefined_namespace] ( identifier[self] , identifier[line] : identifier[str] , identifier[position] : identifier[int] , identifier[namespace] : identifier[str] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[disallow_redefinition] keyword[and] identifier[self] . identifier[has_namespace] ( identifier[namespace] ):
keyword[raise] identifier[RedefinedNamespaceError] ( identifier[self] . identifier[get_line_number] (), identifier[line] , identifier[position] , identifier[namespace] ) | def raise_for_redefined_namespace(self, line: str, position: int, namespace: str) -> None:
"""Raise an exception if a namespace is already defined.
:raises: RedefinedNamespaceError
"""
if self.disallow_redefinition and self.has_namespace(namespace):
raise RedefinedNamespaceError(self.get_line_number(), line, position, namespace) # depends on [control=['if'], data=[]] |
def named_entities(self):
    """The elements of ``named_entities`` layer."""
    # Lazily run the tagger the first time the layer is requested.
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    # One space-joined lemma string per detected entity phrase.
    return [' '.join(entity.lemmas) for entity in self.split_by(NAMED_ENTITIES)]
constant[The elements of ``named_entities`` layer.]
if <ast.UnaryOp object at 0x7da18f58fd00> begin[:]
call[name[self].tag_named_entities, parameter[]]
variable[phrases] assign[=] call[name[self].split_by, parameter[name[NAMED_ENTITIES]]]
return[<ast.ListComp object at 0x7da18f00dff0>] | keyword[def] identifier[named_entities] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_tagged] ( identifier[NAMED_ENTITIES] ):
identifier[self] . identifier[tag_named_entities] ()
identifier[phrases] = identifier[self] . identifier[split_by] ( identifier[NAMED_ENTITIES] )
keyword[return] [ literal[string] . identifier[join] ( identifier[phrase] . identifier[lemmas] ) keyword[for] identifier[phrase] keyword[in] identifier[phrases] ] | def named_entities(self):
"""The elements of ``named_entities`` layer."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities() # depends on [control=['if'], data=[]]
phrases = self.split_by(NAMED_ENTITIES)
return [' '.join(phrase.lemmas) for phrase in phrases] |
def mapped_repr_stripping_underscores(
        obj: Any, attrnames: List[str],
        with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
    """
    Convenience function for :func:`__repr__`.

    Here, you pass a list of internal attributes, and it assumes that the
    :func:`__init__` parameter names have the leading underscore dropped.

    Args:
        obj: object to display
        attrnames: list of attribute names
        with_addr: include the memory address of ``obj``
        joiner: string with which to join the elements

    Returns:
        string: :func:`repr`-style representation
    """
    # Map each attribute name to its presumed __init__ parameter name:
    # strip a single leading underscore; leave other names unchanged.
    attributes = [
        (name, name[1:] if name.startswith('_') else name)
        for name in attrnames
    ]
    return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner)
constant[
Convenience function for :func:`__repr__`.
Here, you pass a list of internal attributes, and it assumes that the
:func:`__init__` parameter names have the leading underscore dropped.
Args:
obj: object to display
attrnames: list of attribute names
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
]
variable[attributes] assign[=] list[[]]
for taget[name[attr_name]] in starred[name[attrnames]] begin[:]
if call[name[attr_name].startswith, parameter[constant[_]]] begin[:]
variable[init_param_name] assign[=] call[name[attr_name]][<ast.Slice object at 0x7da1b1734f10>]
call[name[attributes].append, parameter[tuple[[<ast.Name object at 0x7da1b17364d0>, <ast.Name object at 0x7da1b1736320>]]]]
return[call[name[mapped_repr], parameter[name[obj], name[attributes]]]] | keyword[def] identifier[mapped_repr_stripping_underscores] (
identifier[obj] : identifier[Any] , identifier[attrnames] : identifier[List] [ identifier[str] ],
identifier[with_addr] : identifier[bool] = keyword[False] , identifier[joiner] : identifier[str] = identifier[COMMA_SPACE] )-> identifier[str] :
literal[string]
identifier[attributes] =[]
keyword[for] identifier[attr_name] keyword[in] identifier[attrnames] :
keyword[if] identifier[attr_name] . identifier[startswith] ( literal[string] ):
identifier[init_param_name] = identifier[attr_name] [ literal[int] :]
keyword[else] :
identifier[init_param_name] = identifier[attr_name]
identifier[attributes] . identifier[append] (( identifier[attr_name] , identifier[init_param_name] ))
keyword[return] identifier[mapped_repr] ( identifier[obj] , identifier[attributes] , identifier[with_addr] = identifier[with_addr] , identifier[joiner] = identifier[joiner] ) | def mapped_repr_stripping_underscores(obj: Any, attrnames: List[str], with_addr: bool=False, joiner: str=COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Here, you pass a list of internal attributes, and it assumes that the
:func:`__init__` parameter names have the leading underscore dropped.
Args:
obj: object to display
attrnames: list of attribute names
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
attributes = []
for attr_name in attrnames:
if attr_name.startswith('_'):
init_param_name = attr_name[1:] # depends on [control=['if'], data=[]]
else:
init_param_name = attr_name
attributes.append((attr_name, init_param_name)) # depends on [control=['for'], data=['attr_name']]
return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner) |
def to_binary_string(obj, encoding=None):
    """Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
    if PY2:
        # Python 2: plain str() when no encoding is given, explicit
        # encode otherwise.
        return str(obj) if encoding is None else obj.encode(encoding)
    # Python 3: bytes() requires an encoding; default to UTF-8.
    return bytes(obj, encoding if encoding is not None else 'utf-8')
return bytes(obj, 'utf-8' if encoding is None else encoding) | def function[to_binary_string, parameter[obj, encoding]]:
constant[Convert `obj` to binary string (bytes in Python 3, str in Python 2)]
if name[PY2] begin[:]
if compare[name[encoding] is constant[None]] begin[:]
return[call[name[str], parameter[name[obj]]]] | keyword[def] identifier[to_binary_string] ( identifier[obj] , identifier[encoding] = keyword[None] ):
literal[string]
keyword[if] identifier[PY2] :
keyword[if] identifier[encoding] keyword[is] keyword[None] :
keyword[return] identifier[str] ( identifier[obj] )
keyword[else] :
keyword[return] identifier[obj] . identifier[encode] ( identifier[encoding] )
keyword[else] :
keyword[return] identifier[bytes] ( identifier[obj] , literal[string] keyword[if] identifier[encoding] keyword[is] keyword[None] keyword[else] identifier[encoding] ) | def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2: # Python 2
if encoding is None:
return str(obj) # depends on [control=['if'], data=[]]
else:
return obj.encode(encoding) # depends on [control=['if'], data=[]]
else: # Python 3
return bytes(obj, 'utf-8' if encoding is None else encoding) |
def visit(self, node):
    """ Add OMPDirective from the old node to the new one. """
    # Capture any OMP directives attached to the node *before* the generic
    # visit, which may replace the node with a new object.
    previous_directives = metadata.get(node, OMPDirective)
    node = super(DeadCodeElimination, self).visit(node)
    # Re-attach the captured directives only if the transformed node
    # carries none of its own.
    if not metadata.get(node, OMPDirective):
        for directive in previous_directives:
            metadata.add(node, directive)
    return node
constant[ Add OMPDirective from the old node to the new one. ]
variable[old_omp] assign[=] call[name[metadata].get, parameter[name[node], name[OMPDirective]]]
variable[node] assign[=] call[call[name[super], parameter[name[DeadCodeElimination], name[self]]].visit, parameter[name[node]]]
if <ast.UnaryOp object at 0x7da1b15dbf10> begin[:]
for taget[name[omp_directive]] in starred[name[old_omp]] begin[:]
call[name[metadata].add, parameter[name[node], name[omp_directive]]]
return[name[node]] | keyword[def] identifier[visit] ( identifier[self] , identifier[node] ):
literal[string]
identifier[old_omp] = identifier[metadata] . identifier[get] ( identifier[node] , identifier[OMPDirective] )
identifier[node] = identifier[super] ( identifier[DeadCodeElimination] , identifier[self] ). identifier[visit] ( identifier[node] )
keyword[if] keyword[not] identifier[metadata] . identifier[get] ( identifier[node] , identifier[OMPDirective] ):
keyword[for] identifier[omp_directive] keyword[in] identifier[old_omp] :
identifier[metadata] . identifier[add] ( identifier[node] , identifier[omp_directive] )
keyword[return] identifier[node] | def visit(self, node):
""" Add OMPDirective from the old node to the new one. """
old_omp = metadata.get(node, OMPDirective)
node = super(DeadCodeElimination, self).visit(node)
if not metadata.get(node, OMPDirective):
for omp_directive in old_omp:
metadata.add(node, omp_directive) # depends on [control=['for'], data=['omp_directive']] # depends on [control=['if'], data=[]]
return node |
def sign(self, message):
    """
    Generates a signature for the supplied message using NTLM2 Session Security
    Note: [MS-NLMP] Section 3.4.4
    The message signature for NTLM with extended session security is a 16-byte value that contains the following
    components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
    - A 4-byte version-number value that is set to 1
    - The first eight bytes of the message's HMAC_MD5
    - The 4-byte sequence number (SeqNum)
    :param message: The message to be signed
    :return: The signature for supplied message
    """
    # NOTE(review): hmac.new() is called without a digestmod, relying on
    # the MD5 default -- that default exists on Python 2 only; Python >= 3.8
    # raises. Together with str(mac) below this appears to target Python 2;
    # confirm before porting.
    hmac_context = hmac.new(self.outgoing_signing_key)
    # HMAC input is the 4-byte little-endian sequence number concatenated
    # with the message itself.
    hmac_context.update(struct.pack('<i', self.outgoing_sequence) + message)
    # If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4
    # (self.outgoing_seal is a stateful stream cipher, so sealing order
    # must stay in lock-step with the peer).
    if self.key_exchange:
        checksum = self.outgoing_seal.update(hmac_context.digest()[:8])
    else:
        checksum = hmac_context.digest()[:8]
    mac = _Ntlm2MessageSignature()
    # Pack the 8 checksum bytes into the signed 64-bit checksum field.
    mac['checksum'] = struct.unpack('<q', checksum)[0]
    mac['sequence'] = self.outgoing_sequence
    #logger.debug("Signing Sequence Number: %s", str(self.outgoing_sequence))
    # Increment the sequence number after signing each message
    self.outgoing_sequence += 1
    return str(mac)
constant[
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
]
variable[hmac_context] assign[=] call[name[hmac].new, parameter[name[self].outgoing_signing_key]]
call[name[hmac_context].update, parameter[binary_operation[call[name[struct].pack, parameter[constant[<i], name[self].outgoing_sequence]] + name[message]]]]
if name[self].key_exchange begin[:]
variable[checksum] assign[=] call[name[self].outgoing_seal.update, parameter[call[call[name[hmac_context].digest, parameter[]]][<ast.Slice object at 0x7da20e955cc0>]]]
variable[mac] assign[=] call[name[_Ntlm2MessageSignature], parameter[]]
call[name[mac]][constant[checksum]] assign[=] call[call[name[struct].unpack, parameter[constant[<q], name[checksum]]]][constant[0]]
call[name[mac]][constant[sequence]] assign[=] name[self].outgoing_sequence
<ast.AugAssign object at 0x7da20c7c9e40>
return[call[name[str], parameter[name[mac]]]] | keyword[def] identifier[sign] ( identifier[self] , identifier[message] ):
literal[string]
identifier[hmac_context] = identifier[hmac] . identifier[new] ( identifier[self] . identifier[outgoing_signing_key] )
identifier[hmac_context] . identifier[update] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[outgoing_sequence] )+ identifier[message] )
keyword[if] identifier[self] . identifier[key_exchange] :
identifier[checksum] = identifier[self] . identifier[outgoing_seal] . identifier[update] ( identifier[hmac_context] . identifier[digest] ()[: literal[int] ])
keyword[else] :
identifier[checksum] = identifier[hmac_context] . identifier[digest] ()[: literal[int] ]
identifier[mac] = identifier[_Ntlm2MessageSignature] ()
identifier[mac] [ literal[string] ]= identifier[struct] . identifier[unpack] ( literal[string] , identifier[checksum] )[ literal[int] ]
identifier[mac] [ literal[string] ]= identifier[self] . identifier[outgoing_sequence]
identifier[self] . identifier[outgoing_sequence] += literal[int]
keyword[return] identifier[str] ( identifier[mac] ) | def sign(self, message):
"""
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
"""
hmac_context = hmac.new(self.outgoing_signing_key)
hmac_context.update(struct.pack('<i', self.outgoing_sequence) + message)
# If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4
if self.key_exchange:
checksum = self.outgoing_seal.update(hmac_context.digest()[:8]) # depends on [control=['if'], data=[]]
else:
checksum = hmac_context.digest()[:8]
mac = _Ntlm2MessageSignature()
mac['checksum'] = struct.unpack('<q', checksum)[0]
mac['sequence'] = self.outgoing_sequence
#logger.debug("Signing Sequence Number: %s", str(self.outgoing_sequence))
# Increment the sequence number after signing each message
self.outgoing_sequence += 1
return str(mac) |
def split_key(key):
    """Splits a node key."""
    # The root key is the bare separator and yields no chunks at all.
    if key == KEY_SEP:
        return ()
    chunks = tuple(key.strip(KEY_SEP).split(KEY_SEP))
    head = chunks[0]
    if not head.startswith(KEY_SEP):
        return chunks
    # Drop one leading separator from the first chunk.
    return (head[len(KEY_SEP):],) + chunks[1:]
constant[Splits a node key.]
if compare[name[key] equal[==] name[KEY_SEP]] begin[:]
return[tuple[[]]]
variable[key_chunks] assign[=] call[name[tuple], parameter[call[call[name[key].strip, parameter[name[KEY_SEP]]].split, parameter[name[KEY_SEP]]]]]
if call[call[name[key_chunks]][constant[0]].startswith, parameter[name[KEY_SEP]]] begin[:]
return[binary_operation[tuple[[<ast.Subscript object at 0x7da20c6e53f0>]] + call[name[key_chunks]][<ast.Slice object at 0x7da20c6e6410>]]] | keyword[def] identifier[split_key] ( identifier[key] ):
literal[string]
keyword[if] identifier[key] == identifier[KEY_SEP] :
keyword[return] ()
identifier[key_chunks] = identifier[tuple] ( identifier[key] . identifier[strip] ( identifier[KEY_SEP] ). identifier[split] ( identifier[KEY_SEP] ))
keyword[if] identifier[key_chunks] [ literal[int] ]. identifier[startswith] ( identifier[KEY_SEP] ):
keyword[return] ( identifier[key_chunks] [ literal[int] ][ identifier[len] ( identifier[KEY_SEP] ):],)+ identifier[key_chunks] [ literal[int] :]
keyword[else] :
keyword[return] identifier[key_chunks] | def split_key(key):
"""Splits a node key."""
if key == KEY_SEP:
return () # depends on [control=['if'], data=[]]
key_chunks = tuple(key.strip(KEY_SEP).split(KEY_SEP))
if key_chunks[0].startswith(KEY_SEP):
return (key_chunks[0][len(KEY_SEP):],) + key_chunks[1:] # depends on [control=['if'], data=[]]
else:
return key_chunks |
def incr(self, key, delta=1):
    """
    Add delta to value in the cache. If the key does not exist, raise a
    ValueError exception.
    """
    current = self.get(key)
    if current is None:
        raise ValueError("Key '%s' not found" % key)
    updated = current + delta
    # Write the incremented value back so subsequent reads observe it.
    self.set(key, updated)
    return updated
return new_value | def function[incr, parameter[self, key, delta]]:
constant[
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
]
variable[value] assign[=] call[name[self].get, parameter[name[key]]]
if compare[name[value] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76c820>
variable[new_value] assign[=] binary_operation[name[value] + name[delta]]
call[name[self].set, parameter[name[key], name[new_value]]]
return[name[new_value]] | keyword[def] identifier[incr] ( identifier[self] , identifier[key] , identifier[delta] = literal[int] ):
literal[string]
identifier[value] = identifier[self] . identifier[get] ( identifier[key] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[key] )
identifier[new_value] = identifier[value] + identifier[delta]
identifier[self] . identifier[set] ( identifier[key] , identifier[new_value] )
keyword[return] identifier[new_value] | def incr(self, key, delta=1):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
value = self.get(key)
if value is None:
raise ValueError("Key '%s' not found" % key) # depends on [control=['if'], data=[]]
new_value = value + delta
self.set(key, new_value)
return new_value |
def _build_doc(func_name,
               desc,
               arg_names,
               arg_types,
               arg_desc,
               key_var_num_args=None,
               ret_type=None):
    """Build docstring for symbolic functions.

    Assembles a numpydoc-style docstring from the operator description and
    the per-argument metadata, then appends any hand-written examples found
    on a ``SymbolDoc`` subclass named ``<func_name>Doc``.

    Parameters
    ----------
    func_name : str
        Operator name; used to locate the matching ``<func_name>Doc`` class.
    desc : str
        Operator description that heads the docstring.
    arg_names, arg_types, arg_desc
        Parallel sequences describing each argument.
    key_var_num_args : optional
        If truthy, a note about variable-length positional input is added.
    ret_type : optional
        Unused by this function; kept for signature compatibility.

    Returns
    -------
    str
        The assembled docstring.
    """
    param_str = _build_param_doc(arg_names, arg_types, arg_desc)
    if key_var_num_args:
        desc += '\nThis function support variable length of positional input.'
    doc_str = ('%s\n\n' +
               '%s\n' +
               'name : string, optional.\n' +
               '    Name of the resulting symbol.\n\n' +
               'Returns\n' +
               '-------\n' +
               'Symbol\n' +
               '    The result symbol.')
    doc_str = doc_str % (desc, param_str)
    # Pull in hand-written extra documentation from any SymbolDoc subclass
    # whose name is "<func_name>Doc".
    extra_doc = "\n" + '\n'.join([x.__doc__ for x in type.__subclasses__(SymbolDoc)
                                  if x.__name__ == '%sDoc' % func_name])
    # Dedent the extra documentation by stripping the indentation runs.
    doc_str += _re.sub(_re.compile("    "), "", extra_doc)
    # These docs describe Symbol outputs only, not NDArray.
    doc_str = _re.sub('NDArray-or-Symbol', 'Symbol', doc_str)
    return doc_str
constant[Build docstring for symbolic functions.]
variable[param_str] assign[=] call[name[_build_param_doc], parameter[name[arg_names], name[arg_types], name[arg_desc]]]
if name[key_var_num_args] begin[:]
<ast.AugAssign object at 0x7da1b1f22020>
variable[doc_str] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[%s
] + constant[%s
]] + constant[name : string, optional.
]] + constant[ Name of the resulting symbol.
]] + constant[Returns
]] + constant[-------
]] + constant[Symbol
]] + constant[ The result symbol.]]
variable[doc_str] assign[=] binary_operation[name[doc_str] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1f23580>, <ast.Name object at 0x7da1b1f21f90>]]]
variable[extra_doc] assign[=] binary_operation[constant[
] + call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b1f20e20>]]]
<ast.AugAssign object at 0x7da1b1f223b0>
variable[doc_str] assign[=] call[name[_re].sub, parameter[constant[NDArray-or-Symbol], constant[Symbol], name[doc_str]]]
return[name[doc_str]] | keyword[def] identifier[_build_doc] ( identifier[func_name] ,
identifier[desc] ,
identifier[arg_names] ,
identifier[arg_types] ,
identifier[arg_desc] ,
identifier[key_var_num_args] = keyword[None] ,
identifier[ret_type] = keyword[None] ):
literal[string]
identifier[param_str] = identifier[_build_param_doc] ( identifier[arg_names] , identifier[arg_types] , identifier[arg_desc] )
keyword[if] identifier[key_var_num_args] :
identifier[desc] += literal[string]
identifier[doc_str] =( literal[string] +
literal[string] +
literal[string] +
literal[string] +
literal[string] +
literal[string] +
literal[string] +
literal[string] )
identifier[doc_str] = identifier[doc_str] %( identifier[desc] , identifier[param_str] )
identifier[extra_doc] = literal[string] + literal[string] . identifier[join] ([ identifier[x] . identifier[__doc__] keyword[for] identifier[x] keyword[in] identifier[type] . identifier[__subclasses__] ( identifier[SymbolDoc] )
keyword[if] identifier[x] . identifier[__name__] == literal[string] % identifier[func_name] ])
identifier[doc_str] += identifier[_re] . identifier[sub] ( identifier[_re] . identifier[compile] ( literal[string] ), literal[string] , identifier[extra_doc] )
identifier[doc_str] = identifier[_re] . identifier[sub] ( literal[string] , literal[string] , identifier[doc_str] )
keyword[return] identifier[doc_str] | def _build_doc(func_name, desc, arg_names, arg_types, arg_desc, key_var_num_args=None, ret_type=None):
"""Build docstring for symbolic functions."""
param_str = _build_param_doc(arg_names, arg_types, arg_desc)
if key_var_num_args:
desc += '\nThis function support variable length of positional input.' # depends on [control=['if'], data=[]]
doc_str = '%s\n\n' + '%s\n' + 'name : string, optional.\n' + ' Name of the resulting symbol.\n\n' + 'Returns\n' + '-------\n' + 'Symbol\n' + ' The result symbol.'
doc_str = doc_str % (desc, param_str)
extra_doc = '\n' + '\n'.join([x.__doc__ for x in type.__subclasses__(SymbolDoc) if x.__name__ == '%sDoc' % func_name])
doc_str += _re.sub(_re.compile(' '), '', extra_doc)
doc_str = _re.sub('NDArray-or-Symbol', 'Symbol', doc_str)
return doc_str |
def subscribe_account(self, username, password, service):
    """Subscribe an account for a service.
    """
    # Keep the original field order in the payload in case the transport
    # serializes it order-sensitively.
    payload = {
        'service': service,
        'username': username,
        'password': password,
    }
    return self._perform_post_request(
        self.subscribe_account_endpoint, payload, self.token_header)
constant[Subscribe an account for a service.
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0de5b40>, <ast.Constant object at 0x7da1b0de4fd0>, <ast.Constant object at 0x7da1b0de4fa0>], [<ast.Name object at 0x7da1b0de4670>, <ast.Name object at 0x7da1b0de4610>, <ast.Name object at 0x7da1b0de5960>]]
return[call[name[self]._perform_post_request, parameter[name[self].subscribe_account_endpoint, name[data], name[self].token_header]]] | keyword[def] identifier[subscribe_account] ( identifier[self] , identifier[username] , identifier[password] , identifier[service] ):
literal[string]
identifier[data] ={
literal[string] : identifier[service] ,
literal[string] : identifier[username] ,
literal[string] : identifier[password] ,
}
keyword[return] identifier[self] . identifier[_perform_post_request] ( identifier[self] . identifier[subscribe_account_endpoint] , identifier[data] , identifier[self] . identifier[token_header] ) | def subscribe_account(self, username, password, service):
"""Subscribe an account for a service.
"""
data = {'service': service, 'username': username, 'password': password}
return self._perform_post_request(self.subscribe_account_endpoint, data, self.token_header) |
def islitlet_progress(islitlet, islitlet_max):
    """Auxiliary function to print out progress in loop of slitlets.

    Parameters
    ----------
    islitlet : int
        Current slitlet number.
    islitlet_max : int
        Maximum slitlet number.
    """
    # Print the tens digit on multiples of ten and a dot otherwise, so the
    # progress line reads like "1.........2.........3".
    if islitlet % 10:
        marker = '.'
    else:
        marker = str(islitlet // 10)
    sys.stdout.write(marker)
    # Terminate the progress line after the last slitlet.
    if islitlet == islitlet_max:
        sys.stdout.write('\n')
    sys.stdout.flush()
constant[Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
]
if compare[binary_operation[name[islitlet] <ast.Mod object at 0x7da2590d6920> constant[10]] equal[==] constant[0]] begin[:]
variable[cout] assign[=] call[name[str], parameter[binary_operation[name[islitlet] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]]]]
call[name[sys].stdout.write, parameter[name[cout]]]
if compare[name[islitlet] equal[==] name[islitlet_max]] begin[:]
call[name[sys].stdout.write, parameter[constant[
]]]
call[name[sys].stdout.flush, parameter[]] | keyword[def] identifier[islitlet_progress] ( identifier[islitlet] , identifier[islitlet_max] ):
literal[string]
keyword[if] identifier[islitlet] % literal[int] == literal[int] :
identifier[cout] = identifier[str] ( identifier[islitlet] // literal[int] )
keyword[else] :
identifier[cout] = literal[string]
identifier[sys] . identifier[stdout] . identifier[write] ( identifier[cout] )
keyword[if] identifier[islitlet] == identifier[islitlet_max] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] () | def islitlet_progress(islitlet, islitlet_max):
"""Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
"""
if islitlet % 10 == 0:
cout = str(islitlet // 10) # depends on [control=['if'], data=[]]
else:
cout = '.'
sys.stdout.write(cout)
if islitlet == islitlet_max:
sys.stdout.write('\n') # depends on [control=['if'], data=[]]
sys.stdout.flush() |
def get_kafka_brokers():
    """
    Parses the KAFKA_URL environment variable and returns a list of
    hostname:port pairs in the format that kafka-python expects.

    Returns:
        list of str: ``"hostname:port"`` for every broker URL in the
        comma-separated ``KAFKA_URL`` value.

    Raises:
        RuntimeError: if ``KAFKA_URL`` is unset or empty.
    """
    # NOTE: The Kafka environment variables need to be present. If using
    # Apache Kafka on Heroku, they will be available in your app configuration.
    # Read the variable once instead of fetching it from the environment twice.
    kafka_url = os.environ.get('KAFKA_URL')
    if not kafka_url:
        raise RuntimeError('The KAFKA_URL config variable is not set.')
    return ['{}:{}'.format(parsed.hostname, parsed.port)
            for parsed in (urlparse(url) for url in kafka_url.split(','))]
constant[
Parses the KAKFA_URL and returns a list of hostname:port pairs in the format
that kafka-python expects.
]
if <ast.UnaryOp object at 0x7da1b0c4ef80> begin[:]
<ast.Raise object at 0x7da1b0c4e230>
return[<ast.ListComp object at 0x7da1b0c4d000>] | keyword[def] identifier[get_kafka_brokers] ():
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] [ literal[string] . identifier[format] ( identifier[parsedUrl] . identifier[hostname] , identifier[parsedUrl] . identifier[port] ) keyword[for] identifier[parsedUrl] keyword[in]
[ identifier[urlparse] ( identifier[url] ) keyword[for] identifier[url] keyword[in] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] )]] | def get_kafka_brokers():
"""
Parses the KAKFA_URL and returns a list of hostname:port pairs in the format
that kafka-python expects.
"""
# NOTE: The Kafka environment variables need to be present. If using
# Apache Kafka on Heroku, they will be available in your app configuration.
if not os.environ.get('KAFKA_URL'):
raise RuntimeError('The KAFKA_URL config variable is not set.') # depends on [control=['if'], data=[]]
return ['{}:{}'.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in [urlparse(url) for url in os.environ.get('KAFKA_URL').split(',')]] |
def phonenumber(anon, obj, field, val):
    """
    Generates a random US-style phone number
    """
    # Delegate to the anonymizer's faker; obj and val are unused here but
    # part of the common anonymizer callback signature.
    faker = anon.faker
    return faker.phone_number(field=field)
constant[
Generates a random US-style phone number
]
return[call[name[anon].faker.phone_number, parameter[]]] | keyword[def] identifier[phonenumber] ( identifier[anon] , identifier[obj] , identifier[field] , identifier[val] ):
literal[string]
keyword[return] identifier[anon] . identifier[faker] . identifier[phone_number] ( identifier[field] = identifier[field] ) | def phonenumber(anon, obj, field, val):
"""
Generates a random US-style phone number
"""
return anon.faker.phone_number(field=field) |
def linspace_pix(self, start=None, stop=None, pixel_step=1, y_vs_x=None):
"""Return x,y values evaluated with a given pixel step.
The returned values are computed within the corresponding
bounding box of the line.
Parameters
----------
start : float
Minimum pixel coordinate to evaluate the independent
variable.
stop : float
Maximum pixel coordinate to evaluate the independent
variable.
pixel_step : float
Pixel step employed to evaluate the independent variable.
y_vs_x : bool
If True, the polynomial fit is assumed to be Y vs X.
Otherwise, X vs Y is employed.
Returns
-------
x : 1d numpy array
X coordinates.
y : 1d numpy array
Y coordinates.
"""
if y_vs_x:
if start is None:
xmin = self.bb_nc1_orig
else:
xmin = start
if stop is None:
xmax = self.bb_nc2_orig
else:
xmax = stop
num = int(float(xmax-xmin+1)/float(pixel_step)+0.5)
x = np.linspace(start=xmin, stop=xmax, num=num)
y = self.poly_funct(x)
else:
if start is None:
ymin = self.bb_ns1_orig
else:
ymin = start
if stop is None:
ymax = self.bb_ns2_orig
else:
ymax = stop
num = int(float(ymax-ymin+1)/float(pixel_step)+0.5)
y = np.linspace(start=ymin, stop=ymax, num=num)
x = self.poly_funct(y)
return x, y | def function[linspace_pix, parameter[self, start, stop, pixel_step, y_vs_x]]:
constant[Return x,y values evaluated with a given pixel step.
The returned values are computed within the corresponding
bounding box of the line.
Parameters
----------
start : float
Minimum pixel coordinate to evaluate the independent
variable.
stop : float
Maximum pixel coordinate to evaluate the independent
variable.
pixel_step : float
Pixel step employed to evaluate the independent variable.
y_vs_x : bool
If True, the polynomial fit is assumed to be Y vs X.
Otherwise, X vs Y is employed.
Returns
-------
x : 1d numpy array
X coordinates.
y : 1d numpy array
Y coordinates.
]
if name[y_vs_x] begin[:]
if compare[name[start] is constant[None]] begin[:]
variable[xmin] assign[=] name[self].bb_nc1_orig
if compare[name[stop] is constant[None]] begin[:]
variable[xmax] assign[=] name[self].bb_nc2_orig
variable[num] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[float], parameter[binary_operation[binary_operation[name[xmax] - name[xmin]] + constant[1]]]] / call[name[float], parameter[name[pixel_step]]]] + constant[0.5]]]]
variable[x] assign[=] call[name[np].linspace, parameter[]]
variable[y] assign[=] call[name[self].poly_funct, parameter[name[x]]]
return[tuple[[<ast.Name object at 0x7da1b26adf60>, <ast.Name object at 0x7da1b26acb80>]]] | keyword[def] identifier[linspace_pix] ( identifier[self] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] , identifier[pixel_step] = literal[int] , identifier[y_vs_x] = keyword[None] ):
literal[string]
keyword[if] identifier[y_vs_x] :
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[xmin] = identifier[self] . identifier[bb_nc1_orig]
keyword[else] :
identifier[xmin] = identifier[start]
keyword[if] identifier[stop] keyword[is] keyword[None] :
identifier[xmax] = identifier[self] . identifier[bb_nc2_orig]
keyword[else] :
identifier[xmax] = identifier[stop]
identifier[num] = identifier[int] ( identifier[float] ( identifier[xmax] - identifier[xmin] + literal[int] )/ identifier[float] ( identifier[pixel_step] )+ literal[int] )
identifier[x] = identifier[np] . identifier[linspace] ( identifier[start] = identifier[xmin] , identifier[stop] = identifier[xmax] , identifier[num] = identifier[num] )
identifier[y] = identifier[self] . identifier[poly_funct] ( identifier[x] )
keyword[else] :
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[ymin] = identifier[self] . identifier[bb_ns1_orig]
keyword[else] :
identifier[ymin] = identifier[start]
keyword[if] identifier[stop] keyword[is] keyword[None] :
identifier[ymax] = identifier[self] . identifier[bb_ns2_orig]
keyword[else] :
identifier[ymax] = identifier[stop]
identifier[num] = identifier[int] ( identifier[float] ( identifier[ymax] - identifier[ymin] + literal[int] )/ identifier[float] ( identifier[pixel_step] )+ literal[int] )
identifier[y] = identifier[np] . identifier[linspace] ( identifier[start] = identifier[ymin] , identifier[stop] = identifier[ymax] , identifier[num] = identifier[num] )
identifier[x] = identifier[self] . identifier[poly_funct] ( identifier[y] )
keyword[return] identifier[x] , identifier[y] | def linspace_pix(self, start=None, stop=None, pixel_step=1, y_vs_x=None):
"""Return x,y values evaluated with a given pixel step.
The returned values are computed within the corresponding
bounding box of the line.
Parameters
----------
start : float
Minimum pixel coordinate to evaluate the independent
variable.
stop : float
Maximum pixel coordinate to evaluate the independent
variable.
pixel_step : float
Pixel step employed to evaluate the independent variable.
y_vs_x : bool
If True, the polynomial fit is assumed to be Y vs X.
Otherwise, X vs Y is employed.
Returns
-------
x : 1d numpy array
X coordinates.
y : 1d numpy array
Y coordinates.
"""
if y_vs_x:
if start is None:
xmin = self.bb_nc1_orig # depends on [control=['if'], data=[]]
else:
xmin = start
if stop is None:
xmax = self.bb_nc2_orig # depends on [control=['if'], data=[]]
else:
xmax = stop
num = int(float(xmax - xmin + 1) / float(pixel_step) + 0.5)
x = np.linspace(start=xmin, stop=xmax, num=num)
y = self.poly_funct(x) # depends on [control=['if'], data=[]]
else:
if start is None:
ymin = self.bb_ns1_orig # depends on [control=['if'], data=[]]
else:
ymin = start
if stop is None:
ymax = self.bb_ns2_orig # depends on [control=['if'], data=[]]
else:
ymax = stop
num = int(float(ymax - ymin + 1) / float(pixel_step) + 0.5)
y = np.linspace(start=ymin, stop=ymax, num=num)
x = self.poly_funct(y)
return (x, y) |
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) | def function[_DoubleDecoder, parameter[]]:
constant[Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
]
variable[local_unpack] assign[=] name[struct].unpack
def function[InnerDecode, parameter[buffer, pos]]:
variable[new_pos] assign[=] binary_operation[name[pos] + constant[8]]
variable[double_bytes] assign[=] call[name[buffer]][<ast.Slice object at 0x7da1b20ee440>]
if <ast.BoolOp object at 0x7da1b20edf30> begin[:]
return[tuple[[<ast.Name object at 0x7da1b20eea40>, <ast.Name object at 0x7da1b20eeb00>]]]
variable[result] assign[=] call[call[name[local_unpack], parameter[constant[<d], name[double_bytes]]]][constant[0]]
return[tuple[[<ast.Name object at 0x7da1b20ec2e0>, <ast.Name object at 0x7da1b20ec310>]]]
return[call[name[_SimpleDecoder], parameter[name[wire_format].WIRETYPE_FIXED64, name[InnerDecode]]]] | keyword[def] identifier[_DoubleDecoder] ():
literal[string]
identifier[local_unpack] = identifier[struct] . identifier[unpack]
keyword[def] identifier[InnerDecode] ( identifier[buffer] , identifier[pos] ):
identifier[new_pos] = identifier[pos] + literal[int]
identifier[double_bytes] = identifier[buffer] [ identifier[pos] : identifier[new_pos] ]
keyword[if] (( identifier[double_bytes] [ literal[int] : literal[int] ] keyword[in] literal[string] )
keyword[and] ( identifier[double_bytes] [ literal[int] : literal[int] ]>= literal[string] )
keyword[and] ( identifier[double_bytes] [ literal[int] : literal[int] ]!= literal[string] )):
keyword[return] ( identifier[_NAN] , identifier[new_pos] )
identifier[result] = identifier[local_unpack] ( literal[string] , identifier[double_bytes] )[ literal[int] ]
keyword[return] ( identifier[result] , identifier[new_pos] )
keyword[return] identifier[_SimpleDecoder] ( identifier[wire_format] . identifier[WIRETYPE_FIXED64] , identifier[InnerDecode] ) | def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if double_bytes[7:8] in b'\x7f\xff' and double_bytes[6:7] >= b'\xf0' and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xf0'):
return (_NAN, new_pos) # depends on [control=['if'], data=[]]
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) |
def set_quantities(self, product_quantities):
''' Sets the quantities on each of the products on each of the
products specified. Raises an exception (ValidationError) if a limit
is violated. `product_quantities` is an iterable of (product, quantity)
pairs. '''
items_in_cart = commerce.ProductItem.objects.filter(cart=self.cart)
items_in_cart = items_in_cart.select_related(
"product",
"product__category",
)
product_quantities = list(product_quantities)
# n.b need to add have the existing items first so that the new
# items override the old ones.
all_product_quantities = dict(itertools.chain(
((i.product, i.quantity) for i in items_in_cart.all()),
product_quantities,
)).items()
# Validate that the limits we're adding are OK
products = set(product for product, q in product_quantities)
try:
self._test_limits(all_product_quantities)
except CartValidationError as ve:
# Only raise errors for products that we're explicitly
# Manipulating here.
for ve_field in ve.error_list:
product, message = ve_field.message
if product in products:
raise ve
new_items = []
products = []
for product, quantity in product_quantities:
products.append(product)
if quantity == 0:
continue
item = commerce.ProductItem(
cart=self.cart,
product=product,
quantity=quantity,
)
new_items.append(item)
to_delete = (
Q(quantity=0) |
Q(product__in=products)
)
items_in_cart.filter(to_delete).delete()
commerce.ProductItem.objects.bulk_create(new_items) | def function[set_quantities, parameter[self, product_quantities]]:
constant[ Sets the quantities on each of the products on each of the
products specified. Raises an exception (ValidationError) if a limit
is violated. `product_quantities` is an iterable of (product, quantity)
pairs. ]
variable[items_in_cart] assign[=] call[name[commerce].ProductItem.objects.filter, parameter[]]
variable[items_in_cart] assign[=] call[name[items_in_cart].select_related, parameter[constant[product], constant[product__category]]]
variable[product_quantities] assign[=] call[name[list], parameter[name[product_quantities]]]
variable[all_product_quantities] assign[=] call[call[name[dict], parameter[call[name[itertools].chain, parameter[<ast.GeneratorExp object at 0x7da18bc73610>, name[product_quantities]]]]].items, parameter[]]
variable[products] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18bc72560>]]
<ast.Try object at 0x7da18bc71630>
variable[new_items] assign[=] list[[]]
variable[products] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b01ba0b0>, <ast.Name object at 0x7da1b01bbcd0>]]] in starred[name[product_quantities]] begin[:]
call[name[products].append, parameter[name[product]]]
if compare[name[quantity] equal[==] constant[0]] begin[:]
continue
variable[item] assign[=] call[name[commerce].ProductItem, parameter[]]
call[name[new_items].append, parameter[name[item]]]
variable[to_delete] assign[=] binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]]
call[call[name[items_in_cart].filter, parameter[name[to_delete]]].delete, parameter[]]
call[name[commerce].ProductItem.objects.bulk_create, parameter[name[new_items]]] | keyword[def] identifier[set_quantities] ( identifier[self] , identifier[product_quantities] ):
literal[string]
identifier[items_in_cart] = identifier[commerce] . identifier[ProductItem] . identifier[objects] . identifier[filter] ( identifier[cart] = identifier[self] . identifier[cart] )
identifier[items_in_cart] = identifier[items_in_cart] . identifier[select_related] (
literal[string] ,
literal[string] ,
)
identifier[product_quantities] = identifier[list] ( identifier[product_quantities] )
identifier[all_product_quantities] = identifier[dict] ( identifier[itertools] . identifier[chain] (
(( identifier[i] . identifier[product] , identifier[i] . identifier[quantity] ) keyword[for] identifier[i] keyword[in] identifier[items_in_cart] . identifier[all] ()),
identifier[product_quantities] ,
)). identifier[items] ()
identifier[products] = identifier[set] ( identifier[product] keyword[for] identifier[product] , identifier[q] keyword[in] identifier[product_quantities] )
keyword[try] :
identifier[self] . identifier[_test_limits] ( identifier[all_product_quantities] )
keyword[except] identifier[CartValidationError] keyword[as] identifier[ve] :
keyword[for] identifier[ve_field] keyword[in] identifier[ve] . identifier[error_list] :
identifier[product] , identifier[message] = identifier[ve_field] . identifier[message]
keyword[if] identifier[product] keyword[in] identifier[products] :
keyword[raise] identifier[ve]
identifier[new_items] =[]
identifier[products] =[]
keyword[for] identifier[product] , identifier[quantity] keyword[in] identifier[product_quantities] :
identifier[products] . identifier[append] ( identifier[product] )
keyword[if] identifier[quantity] == literal[int] :
keyword[continue]
identifier[item] = identifier[commerce] . identifier[ProductItem] (
identifier[cart] = identifier[self] . identifier[cart] ,
identifier[product] = identifier[product] ,
identifier[quantity] = identifier[quantity] ,
)
identifier[new_items] . identifier[append] ( identifier[item] )
identifier[to_delete] =(
identifier[Q] ( identifier[quantity] = literal[int] )|
identifier[Q] ( identifier[product__in] = identifier[products] )
)
identifier[items_in_cart] . identifier[filter] ( identifier[to_delete] ). identifier[delete] ()
identifier[commerce] . identifier[ProductItem] . identifier[objects] . identifier[bulk_create] ( identifier[new_items] ) | def set_quantities(self, product_quantities):
""" Sets the quantities on each of the products on each of the
products specified. Raises an exception (ValidationError) if a limit
is violated. `product_quantities` is an iterable of (product, quantity)
pairs. """
items_in_cart = commerce.ProductItem.objects.filter(cart=self.cart)
items_in_cart = items_in_cart.select_related('product', 'product__category')
product_quantities = list(product_quantities)
# n.b need to add have the existing items first so that the new
# items override the old ones.
all_product_quantities = dict(itertools.chain(((i.product, i.quantity) for i in items_in_cart.all()), product_quantities)).items()
# Validate that the limits we're adding are OK
products = set((product for (product, q) in product_quantities))
try:
self._test_limits(all_product_quantities) # depends on [control=['try'], data=[]]
except CartValidationError as ve:
# Only raise errors for products that we're explicitly
# Manipulating here.
for ve_field in ve.error_list:
(product, message) = ve_field.message
if product in products:
raise ve # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ve_field']] # depends on [control=['except'], data=['ve']]
new_items = []
products = []
for (product, quantity) in product_quantities:
products.append(product)
if quantity == 0:
continue # depends on [control=['if'], data=[]]
item = commerce.ProductItem(cart=self.cart, product=product, quantity=quantity)
new_items.append(item) # depends on [control=['for'], data=[]]
to_delete = Q(quantity=0) | Q(product__in=products)
items_in_cart.filter(to_delete).delete()
commerce.ProductItem.objects.bulk_create(new_items) |
def merge(cls, *args, **kwargs):
"""Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys.
"""
newkeys = bool(kwargs.get('newkeys', False))
ignore = kwargs.get('ignore', list())
if len(args) < 1:
raise ValueError('no ents given to Ent.merge()')
elif not all(isinstance(s, Ent) for s in args):
raise ValueError('all positional arguments to Ent.merge() must '
'be instances of Ent')
ent = args[0]
data = cls.load(ent)
for ent in args[1:]:
for key, value in ent.__dict__.items():
if key in ignore:
continue
if key in data.__dict__:
v1 = data.__dict__[key]
if type(value) == type(v1):
if isinstance(v1, Ent):
data.__dict__[key] = cls.merge(v1, value, **kwargs)
else:
data.__dict__[key] = cls.load(value)
elif newkeys:
data.__dict__[key] = value
return data | def function[merge, parameter[cls]]:
constant[Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys.
]
variable[newkeys] assign[=] call[name[bool], parameter[call[name[kwargs].get, parameter[constant[newkeys], constant[False]]]]]
variable[ignore] assign[=] call[name[kwargs].get, parameter[constant[ignore], call[name[list], parameter[]]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da2044c3c70>
variable[ent] assign[=] call[name[args]][constant[0]]
variable[data] assign[=] call[name[cls].load, parameter[name[ent]]]
for taget[name[ent]] in starred[call[name[args]][<ast.Slice object at 0x7da2044c0e20>]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2044c0d60>, <ast.Name object at 0x7da2044c0d00>]]] in starred[call[name[ent].__dict__.items, parameter[]]] begin[:]
if compare[name[key] in name[ignore]] begin[:]
continue
if compare[name[key] in name[data].__dict__] begin[:]
variable[v1] assign[=] call[name[data].__dict__][name[key]]
if compare[call[name[type], parameter[name[value]]] equal[==] call[name[type], parameter[name[v1]]]] begin[:]
if call[name[isinstance], parameter[name[v1], name[Ent]]] begin[:]
call[name[data].__dict__][name[key]] assign[=] call[name[cls].merge, parameter[name[v1], name[value]]]
return[name[data]] | keyword[def] identifier[merge] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[newkeys] = identifier[bool] ( identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ))
identifier[ignore] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[list] ())
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] keyword[not] identifier[all] ( identifier[isinstance] ( identifier[s] , identifier[Ent] ) keyword[for] identifier[s] keyword[in] identifier[args] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[ent] = identifier[args] [ literal[int] ]
identifier[data] = identifier[cls] . identifier[load] ( identifier[ent] )
keyword[for] identifier[ent] keyword[in] identifier[args] [ literal[int] :]:
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[ent] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[ignore] :
keyword[continue]
keyword[if] identifier[key] keyword[in] identifier[data] . identifier[__dict__] :
identifier[v1] = identifier[data] . identifier[__dict__] [ identifier[key] ]
keyword[if] identifier[type] ( identifier[value] )== identifier[type] ( identifier[v1] ):
keyword[if] identifier[isinstance] ( identifier[v1] , identifier[Ent] ):
identifier[data] . identifier[__dict__] [ identifier[key] ]= identifier[cls] . identifier[merge] ( identifier[v1] , identifier[value] ,** identifier[kwargs] )
keyword[else] :
identifier[data] . identifier[__dict__] [ identifier[key] ]= identifier[cls] . identifier[load] ( identifier[value] )
keyword[elif] identifier[newkeys] :
identifier[data] . identifier[__dict__] [ identifier[key] ]= identifier[value]
keyword[return] identifier[data] | def merge(cls, *args, **kwargs):
"""Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys.
"""
newkeys = bool(kwargs.get('newkeys', False))
ignore = kwargs.get('ignore', list())
if len(args) < 1:
raise ValueError('no ents given to Ent.merge()') # depends on [control=['if'], data=[]]
elif not all((isinstance(s, Ent) for s in args)):
raise ValueError('all positional arguments to Ent.merge() must be instances of Ent') # depends on [control=['if'], data=[]]
ent = args[0]
data = cls.load(ent)
for ent in args[1:]:
for (key, value) in ent.__dict__.items():
if key in ignore:
continue # depends on [control=['if'], data=[]]
if key in data.__dict__:
v1 = data.__dict__[key]
if type(value) == type(v1):
if isinstance(v1, Ent):
data.__dict__[key] = cls.merge(v1, value, **kwargs) # depends on [control=['if'], data=[]]
else:
data.__dict__[key] = cls.load(value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['key']]
elif newkeys:
data.__dict__[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['ent']]
return data |
def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
"""
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
"""
try:
filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
if __umr__.enabled:
__umr__.run()
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
namespace = _get_globals()
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in shlex.split(args):
sys.argv.append(arg)
if wdir is not None:
try:
wdir = wdir.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
os.chdir(wdir)
if post_mortem:
set_post_mortem()
if __umr__.has_cython:
# Cython files
with io.open(filename, encoding='utf-8') as f:
ipython_shell = get_ipython()
ipython_shell.run_cell_magic('cython', '', f.read())
else:
execfile(filename, namespace)
clear_post_mortem()
sys.argv = ['']
# Avoid error when running `%reset -f` programmatically
# See issue spyder-ide/spyder-kernels#91
try:
namespace.pop('__file__')
except KeyError:
pass | def function[runfile, parameter[filename, args, wdir, namespace, post_mortem]]:
constant[
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
]
<ast.Try object at 0x7da1b054af80>
if name[__umr__].enabled begin[:]
call[name[__umr__].run, parameter[]]
if <ast.BoolOp object at 0x7da1b054bc40> begin[:]
<ast.Raise object at 0x7da1b054b520>
if compare[name[namespace] is constant[None]] begin[:]
variable[namespace] assign[=] call[name[_get_globals], parameter[]]
call[name[namespace]][constant[__file__]] assign[=] name[filename]
name[sys].argv assign[=] list[[<ast.Name object at 0x7da2044c1ba0>]]
if compare[name[args] is_not constant[None]] begin[:]
for taget[name[arg]] in starred[call[name[shlex].split, parameter[name[args]]]] begin[:]
call[name[sys].argv.append, parameter[name[arg]]]
if compare[name[wdir] is_not constant[None]] begin[:]
<ast.Try object at 0x7da2044c3f40>
call[name[os].chdir, parameter[name[wdir]]]
if name[post_mortem] begin[:]
call[name[set_post_mortem], parameter[]]
if name[__umr__].has_cython begin[:]
with call[name[io].open, parameter[name[filename]]] begin[:]
variable[ipython_shell] assign[=] call[name[get_ipython], parameter[]]
call[name[ipython_shell].run_cell_magic, parameter[constant[cython], constant[], call[name[f].read, parameter[]]]]
call[name[clear_post_mortem], parameter[]]
name[sys].argv assign[=] list[[<ast.Constant object at 0x7da1b05c9330>]]
<ast.Try object at 0x7da1b05ca4d0> | keyword[def] identifier[runfile] ( identifier[filename] , identifier[args] = keyword[None] , identifier[wdir] = keyword[None] , identifier[namespace] = keyword[None] , identifier[post_mortem] = keyword[False] ):
literal[string]
keyword[try] :
identifier[filename] = identifier[filename] . identifier[decode] ( literal[string] )
keyword[except] ( identifier[UnicodeError] , identifier[TypeError] , identifier[AttributeError] ):
keyword[pass]
keyword[if] identifier[__umr__] . identifier[enabled] :
identifier[__umr__] . identifier[run] ()
keyword[if] identifier[args] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[args] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[namespace] keyword[is] keyword[None] :
identifier[namespace] = identifier[_get_globals] ()
identifier[namespace] [ literal[string] ]= identifier[filename]
identifier[sys] . identifier[argv] =[ identifier[filename] ]
keyword[if] identifier[args] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[arg] keyword[in] identifier[shlex] . identifier[split] ( identifier[args] ):
identifier[sys] . identifier[argv] . identifier[append] ( identifier[arg] )
keyword[if] identifier[wdir] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[wdir] = identifier[wdir] . identifier[decode] ( literal[string] )
keyword[except] ( identifier[UnicodeError] , identifier[TypeError] , identifier[AttributeError] ):
keyword[pass]
identifier[os] . identifier[chdir] ( identifier[wdir] )
keyword[if] identifier[post_mortem] :
identifier[set_post_mortem] ()
keyword[if] identifier[__umr__] . identifier[has_cython] :
keyword[with] identifier[io] . identifier[open] ( identifier[filename] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[ipython_shell] = identifier[get_ipython] ()
identifier[ipython_shell] . identifier[run_cell_magic] ( literal[string] , literal[string] , identifier[f] . identifier[read] ())
keyword[else] :
identifier[execfile] ( identifier[filename] , identifier[namespace] )
identifier[clear_post_mortem] ()
identifier[sys] . identifier[argv] =[ literal[string] ]
keyword[try] :
identifier[namespace] . identifier[pop] ( literal[string] )
keyword[except] identifier[KeyError] :
keyword[pass] | def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
"""
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
"""
try:
filename = filename.decode('utf-8') # depends on [control=['try'], data=[]]
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass # depends on [control=['except'], data=[]]
if __umr__.enabled:
__umr__.run() # depends on [control=['if'], data=[]]
if args is not None and (not isinstance(args, basestring)):
raise TypeError('expected a character buffer object') # depends on [control=['if'], data=[]]
if namespace is None:
namespace = _get_globals() # depends on [control=['if'], data=['namespace']]
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in shlex.split(args):
sys.argv.append(arg) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=['args']]
if wdir is not None:
try:
wdir = wdir.decode('utf-8') # depends on [control=['try'], data=[]]
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass # depends on [control=['except'], data=[]]
os.chdir(wdir) # depends on [control=['if'], data=['wdir']]
if post_mortem:
set_post_mortem() # depends on [control=['if'], data=[]]
if __umr__.has_cython:
# Cython files
with io.open(filename, encoding='utf-8') as f:
ipython_shell = get_ipython()
ipython_shell.run_cell_magic('cython', '', f.read()) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
execfile(filename, namespace)
clear_post_mortem()
sys.argv = ['']
# Avoid error when running `%reset -f` programmatically
# See issue spyder-ide/spyder-kernels#91
try:
namespace.pop('__file__') # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] |
async def create_payment_address(seed: str = None) -> str:
    """
    Creates a payment address inside the wallet.

    :param seed: optional seed string handed through to the wallet backend
        (e.g. '00000000000000000000000001234567'); when falsy, NULL is
        passed to the C layer instead.
        NOTE(review): presumably a NULL seed makes libvcx generate a random
        address — confirm against the libvcx docs.
    Example:
    address = await Wallet.create_payment_address('00000000000000000000000001234567')
    :return: String — the newly created payment address.
    """
    logger = logging.getLogger(__name__)
    # Create the C callback once and cache it on the function object so every
    # call reuses the same CFUNCTYPE wrapper (also keeps a live reference to it
    # for the duration of the program).
    if not hasattr(Wallet.create_payment_address, "cb"):
        logger.debug("vcx_wallet_create_payment_address: Creating callback")
        Wallet.create_payment_address.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    # Encode the seed as UTF-8 for the FFI boundary; falsy seed -> NULL pointer.
    if seed:
        c_seed = c_char_p(seed.encode('utf-8'))
    else:
        c_seed = None
    result = await do_call('vcx_wallet_create_payment_address',
                           c_seed,
                           Wallet.create_payment_address.cb)
    logger.debug("vcx_wallet_create_payment_address completed")
    return result | <ast.AsyncFunctionDef object at 0x7da18f00f0d0>
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[Wallet] . identifier[create_payment_address] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[Wallet] . identifier[create_payment_address] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_uint32] , identifier[c_uint32] , identifier[c_char_p] ))
keyword[if] identifier[seed] :
identifier[c_seed] = identifier[c_char_p] ( identifier[seed] . identifier[encode] ( literal[string] ))
keyword[else] :
identifier[c_seed] = keyword[None]
identifier[result] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_seed] ,
identifier[Wallet] . identifier[create_payment_address] . identifier[cb] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[result] | async def create_payment_address(seed: str=None) -> str:
"""
Creates a payment address inside the wallet.
:param seed: String
Example:
address = await Wallet.create_payment_address('00000000000000000000000001234567')
:return: String
"""
logger = logging.getLogger(__name__)
if not hasattr(Wallet.create_payment_address, 'cb'):
logger.debug('vcx_wallet_create_payment_address: Creating callback')
Wallet.create_payment_address.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p)) # depends on [control=['if'], data=[]]
if seed:
c_seed = c_char_p(seed.encode('utf-8')) # depends on [control=['if'], data=[]]
else:
c_seed = None
result = await do_call('vcx_wallet_create_payment_address', c_seed, Wallet.create_payment_address.cb)
logger.debug('vcx_wallet_create_payment_address completed')
return result |
def _connect(self):
    """Creates a websocket connection.

    Runs the websocket client until the connection drops, then — for as
    long as ``reconnect_required`` is set and the user has not called
    disconnect — retries every ``reconnect_interval`` seconds.

    :return:
    """
    self.log.debug("_connect(): Initializing Connection..")
    self.socket = websocket.WebSocketApp(
        self.url,
        on_open=self._on_open,
        on_message=self._on_message,
        on_error=self._on_error,
        on_close=self._on_close
    )
    # Fall back to the system default CA bundle when the caller supplied no
    # ca_certs, so TLS certificate verification still works out of the box.
    if 'ca_certs' not in self.sslopt.keys():
        ssl_defaults = ssl.get_default_verify_paths()
        self.sslopt['ca_certs'] = ssl_defaults.cafile
    self.log.debug("_connect(): Starting Connection..")
    # run_forever() blocks until the connection is closed or errors out.
    self.socket.run_forever(sslopt=self.sslopt,
                            http_proxy_host=self.http_proxy_host,
                            http_proxy_port=self.http_proxy_port,
                            http_proxy_auth=self.http_proxy_auth,
                            http_no_proxy=self.http_no_proxy)
    # stop outstanding ping/pong timers
    self._stop_timers()
    # Reconnect loop: only re-dial when the disconnect was not requested
    # explicitly by the user (disconnect_called).
    while self.reconnect_required.is_set():
        if not self.disconnect_called.is_set():
            self.log.info("Attempting to connect again in %s seconds."
                          % self.reconnect_interval)
            self.state = "unavailable"
            time.sleep(self.reconnect_interval)
            # We need to set this flag since closing the socket will
            # set it to False
            self.socket.keep_running = True
            self.socket.sock = None
            self.socket.run_forever(sslopt=self.sslopt,
                                    http_proxy_host=self.http_proxy_host,
                                    http_proxy_port=self.http_proxy_port,
                                    http_proxy_auth=self.http_proxy_auth,
                                    http_no_proxy=self.http_no_proxy)
        else:
            break | def function[_connect, parameter[self]]:
constant[Creates a websocket connection.
:return:
]
call[name[self].log.debug, parameter[constant[_connect(): Initializing Connection..]]]
name[self].socket assign[=] call[name[websocket].WebSocketApp, parameter[name[self].url]]
if compare[constant[ca_certs] <ast.NotIn object at 0x7da2590d7190> call[name[self].sslopt.keys, parameter[]]] begin[:]
variable[ssl_defaults] assign[=] call[name[ssl].get_default_verify_paths, parameter[]]
call[name[self].sslopt][constant[ca_certs]] assign[=] name[ssl_defaults].cafile
call[name[self].log.debug, parameter[constant[_connect(): Starting Connection..]]]
call[name[self].socket.run_forever, parameter[]]
call[name[self]._stop_timers, parameter[]]
while call[name[self].reconnect_required.is_set, parameter[]] begin[:]
if <ast.UnaryOp object at 0x7da1b0533580> begin[:]
call[name[self].log.info, parameter[binary_operation[constant[Attempting to connect again in %s seconds.] <ast.Mod object at 0x7da2590d6920> name[self].reconnect_interval]]]
name[self].state assign[=] constant[unavailable]
call[name[time].sleep, parameter[name[self].reconnect_interval]]
name[self].socket.keep_running assign[=] constant[True]
name[self].socket.sock assign[=] constant[None]
call[name[self].socket.run_forever, parameter[]] | keyword[def] identifier[_connect] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[socket] = identifier[websocket] . identifier[WebSocketApp] (
identifier[self] . identifier[url] ,
identifier[on_open] = identifier[self] . identifier[_on_open] ,
identifier[on_message] = identifier[self] . identifier[_on_message] ,
identifier[on_error] = identifier[self] . identifier[_on_error] ,
identifier[on_close] = identifier[self] . identifier[_on_close]
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[sslopt] . identifier[keys] ():
identifier[ssl_defaults] = identifier[ssl] . identifier[get_default_verify_paths] ()
identifier[self] . identifier[sslopt] [ literal[string] ]= identifier[ssl_defaults] . identifier[cafile]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[socket] . identifier[run_forever] ( identifier[sslopt] = identifier[self] . identifier[sslopt] ,
identifier[http_proxy_host] = identifier[self] . identifier[http_proxy_host] ,
identifier[http_proxy_port] = identifier[self] . identifier[http_proxy_port] ,
identifier[http_proxy_auth] = identifier[self] . identifier[http_proxy_auth] ,
identifier[http_no_proxy] = identifier[self] . identifier[http_no_proxy] )
identifier[self] . identifier[_stop_timers] ()
keyword[while] identifier[self] . identifier[reconnect_required] . identifier[is_set] ():
keyword[if] keyword[not] identifier[self] . identifier[disconnect_called] . identifier[is_set] ():
identifier[self] . identifier[log] . identifier[info] ( literal[string]
% identifier[self] . identifier[reconnect_interval] )
identifier[self] . identifier[state] = literal[string]
identifier[time] . identifier[sleep] ( identifier[self] . identifier[reconnect_interval] )
identifier[self] . identifier[socket] . identifier[keep_running] = keyword[True]
identifier[self] . identifier[socket] . identifier[sock] = keyword[None]
identifier[self] . identifier[socket] . identifier[run_forever] ( identifier[sslopt] = identifier[self] . identifier[sslopt] ,
identifier[http_proxy_host] = identifier[self] . identifier[http_proxy_host] ,
identifier[http_proxy_port] = identifier[self] . identifier[http_proxy_port] ,
identifier[http_proxy_auth] = identifier[self] . identifier[http_proxy_auth] ,
identifier[http_no_proxy] = identifier[self] . identifier[http_no_proxy] )
keyword[else] :
keyword[break] | def _connect(self):
"""Creates a websocket connection.
:return:
"""
self.log.debug('_connect(): Initializing Connection..')
self.socket = websocket.WebSocketApp(self.url, on_open=self._on_open, on_message=self._on_message, on_error=self._on_error, on_close=self._on_close)
if 'ca_certs' not in self.sslopt.keys():
ssl_defaults = ssl.get_default_verify_paths()
self.sslopt['ca_certs'] = ssl_defaults.cafile # depends on [control=['if'], data=[]]
self.log.debug('_connect(): Starting Connection..')
self.socket.run_forever(sslopt=self.sslopt, http_proxy_host=self.http_proxy_host, http_proxy_port=self.http_proxy_port, http_proxy_auth=self.http_proxy_auth, http_no_proxy=self.http_no_proxy)
# stop outstanding ping/pong timers
self._stop_timers()
while self.reconnect_required.is_set():
if not self.disconnect_called.is_set():
self.log.info('Attempting to connect again in %s seconds.' % self.reconnect_interval)
self.state = 'unavailable'
time.sleep(self.reconnect_interval)
# We need to set this flag since closing the socket will
# set it to False
self.socket.keep_running = True
self.socket.sock = None
self.socket.run_forever(sslopt=self.sslopt, http_proxy_host=self.http_proxy_host, http_proxy_port=self.http_proxy_port, http_proxy_auth=self.http_proxy_auth, http_no_proxy=self.http_no_proxy) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] |
def stop(self):
    """
    Close the window and stops the worker thread. The main thread will
    resume with the next command after the `start()` call.
    """
    # Guard: must be invoked from the worker thread itself, and only while
    # the state machine is actually running.
    assert threading.current_thread() == self.thread
    assert self.state.running
    # NOTE(review): presumably the worker loop polls state.running and exits
    # once it is cleared, which unblocks the caller of start() — confirm.
    self.state.running = False | def function[stop, parameter[self]]:
constant[
Close the window and stops the worker thread. The main thread will
resume with the next command after the `start()` call.
]
assert[compare[call[name[threading].current_thread, parameter[]] equal[==] name[self].thread]]
assert[name[self].state.running]
name[self].state.running assign[=] constant[False] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[assert] identifier[threading] . identifier[current_thread] ()== identifier[self] . identifier[thread]
keyword[assert] identifier[self] . identifier[state] . identifier[running]
identifier[self] . identifier[state] . identifier[running] = keyword[False] | def stop(self):
"""
Close the window and stops the worker thread. The main thread will
resume with the next command after the `start()` call.
"""
assert threading.current_thread() == self.thread
assert self.state.running
self.state.running = False |
def _dispatch(self, method, params):
    """Dispatches the XML-RPC method.

    XML-RPC calls are forwarded to a registered function that
    matches the called XML-RPC method name. If no such function
    exists then the call is forwarded to the registered instance,
    if available.
    If the registered instance has a _dispatch method then that
    method will be called with the name of the XML-RPC method and
    its parameters as a tuple
    e.g. instance._dispatch('add',(2,3))
    If the registered instance does not have a _dispatch method
    then the instance will be searched to find a matching method
    and, if found, will be called.
    Methods beginning with an '_' are considered private and will
    not be called.
    """
    func = None
    try:
        # check to see if a matching function has been registered
        func = self.funcs[method]
    except KeyError:
        if self.instance is not None:
            # check for a _dispatch method
            if hasattr(self.instance, '_dispatch'):
                return self.instance._dispatch(method, params)
            else:
                # call instance method directly
                try:
                    func = resolve_dotted_attribute(
                        self.instance,
                        method,
                        self.allow_dotted_names
                    )
                except AttributeError:
                    # No matching attribute on the instance: leave func as
                    # None so the "not supported" error below is raised.
                    pass
    if func is not None:
        return func(*params)
    else:
        raise Exception('method "%s" is not supported' % method) | def function[_dispatch, parameter[self, method, params]]:
constant[Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
]
variable[func] assign[=] constant[None]
<ast.Try object at 0x7da18ede5990>
if compare[name[func] is_not constant[None]] begin[:]
return[call[name[func], parameter[<ast.Starred object at 0x7da18f58d180>]]] | keyword[def] identifier[_dispatch] ( identifier[self] , identifier[method] , identifier[params] ):
literal[string]
identifier[func] = keyword[None]
keyword[try] :
identifier[func] = identifier[self] . identifier[funcs] [ identifier[method] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[self] . identifier[instance] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[self] . identifier[instance] , literal[string] ):
keyword[return] identifier[self] . identifier[instance] . identifier[_dispatch] ( identifier[method] , identifier[params] )
keyword[else] :
keyword[try] :
identifier[func] = identifier[resolve_dotted_attribute] (
identifier[self] . identifier[instance] ,
identifier[method] ,
identifier[self] . identifier[allow_dotted_names]
)
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[func] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[func] (* identifier[params] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[method] ) | def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method] # depends on [control=['try'], data=[]]
except KeyError:
if self.instance is not None:
# check for a _dispatch method
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params) # depends on [control=['if'], data=[]]
else:
# call instance method directly
try:
func = resolve_dotted_attribute(self.instance, method, self.allow_dotted_names) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
if func is not None:
return func(*params) # depends on [control=['if'], data=['func']]
else:
raise Exception('method "%s" is not supported' % method) |
def _get_fitnesses(self,
                   problem,
                   population,
                   cache_encoded=True,
                   cache_solution=False,
                   pool=None):
    """Get the fitness for every solution in a population.

    Args:
        problem: Problem; The problem that defines fitness.
        population: list; List of potential solutions.
        cache_encoded: bool; Look up / store fitnesses keyed by the
            encoded solution, skipping decode+evaluate on cache hits.
        cache_solution: bool; Additionally look up / store fitnesses
            keyed by the decoded solution.
        pool: None/multiprocessing.Pool; Pool of processes for parallel
            decoding and evaluation.

    Returns:
        tuple; (solutions, fitnesses, finished) where solutions[i] is
        None for population members satisfied from the encoded cache.
    """
    fitnesses = [None] * len(population)
    #############################
    # Decoding
    #############################
    if cache_encoded:
        try:
            # NOTE(review): in Python 3, map() returns a one-shot iterator;
            # the enumerate() below exhausts it and the later
            # encoded_keys[i] subscripts would fail. This is valid Python 2
            # (map returns a list) — consider list(map(...)) for Py3.
            encoded_keys = map(self._get_encoded_key, population)
            # Get all fitnesses from encoded_solution cache
            to_decode_indices = []
            for i, encoded_key in enumerate(encoded_keys):
                try:
                    fitnesses[i] = self.__encoded_cache[encoded_key]
                # Note that this fitness will never be better than the current best
                # because we have already evaluted it,
                # Therefore, we do not need to worry about decoding the solution
                except KeyError: # Cache miss
                    to_decode_indices.append(i)
        except UnhashableError: # Cannot hash encoded solution
            encoded_keys = None
            to_decode_indices = range(len(population))
    else:
        encoded_keys = None
        to_decode_indices = range(len(population))
    # Decode all that need to be decoded, and combine back into list the same length
    # as population
    if encoded_keys is None:
        to_decode_keys = None
    else:
        to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
    solutions = [None] * len(population)
    for i, solution in zip(to_decode_indices,
                           self._pmap(
                               problem.decode_solution,
                               [population[i] for i in to_decode_indices],
                               to_decode_keys,
                               pool)):
        solutions[i] = solution
    #############################
    # Evaluating
    #############################
    if cache_solution:
        try:
            # Try to make solutions hashable
            # Use user provided hash function if available
            if problem.hash_solution:
                hash_solution_func = problem.hash_solution
            else:
                # Otherwise, default to built in "smart" hash function
                hash_solution_func = self._get_solution_key
            solution_keys = [
                hash_solution_func(solution)
                # None corresponds to encoded_solutions found in cache
                if solution is not None else None for solution in solutions
            ]
            # Get all fitnesses from solution cache
            to_eval_indices = []
            for i, solution_key in enumerate(solution_keys):
                if solution_key is not None: # Otherwise, fitness already found in encoded cache
                    try:
                        fitnesses[i] = self.__solution_cache[solution_key]
                    except KeyError: # Cache miss
                        to_eval_indices.append(i)
        except UnhashableError: # Cannot hash solution
            solution_keys = None
            to_eval_indices = to_decode_indices[:]
    else:
        solution_keys = None
        to_eval_indices = to_decode_indices[:]
    # Evaluate all that need to be evaluated, and combine back into fitnesses list
    if solution_keys is None:
        if encoded_keys is None:
            # No way to detect duplicates
            to_eval_keys = None
        else:
            # Cannot use decoded keys, default to encoded keys
            to_eval_keys = [encoded_keys[i] for i in to_eval_indices]
    else:
        to_eval_keys = [solution_keys[i] for i in to_eval_indices]
    finished = False
    eval_bookkeeping = {}
    # problem.get_fitness may return either a bare fitness or a
    # (fitness, finished) tuple; either form is handled below.
    for i, fitness_finished in zip(to_eval_indices,
                                   self._pmap(
                                       problem.get_fitness,
                                       [solutions[i] for i in to_eval_indices],
                                       to_eval_keys,
                                       pool,
                                       bookkeeping_dict=eval_bookkeeping)):
        # Unpack fitness_finished tuple
        try:
            fitness, maybe_finished = fitness_finished
            if maybe_finished:
                finished = True
        except TypeError: # Not (fitness, finished) tuple
            fitness = fitness_finished
        fitnesses[i] = fitness
    #############################
    # Finishing
    #############################
    # Bookkeeping
    # keep track of how many times fitness is evaluated
    self.fitness_runs += len(eval_bookkeeping['key_indices']) # Evaled once for each unique key
    # Add evaluated fitnesses to caches (both of them)
    if cache_encoded and encoded_keys is not None:
        for i in to_decode_indices: # Encoded cache misses
            self.__encoded_cache[encoded_keys[i]] = fitnesses[i]
    if cache_solution and solution_keys is not None:
        for i in to_eval_indices: # Decoded cache misses
            self.__solution_cache[solution_keys[i]] = fitnesses[i]
    # Return
    # assert None not in fitnesses # Un-comment for debugging
    return solutions, fitnesses, finished | def function[_get_fitnesses, parameter[self, problem, population, cache_encoded, cache_solution, pool]]:
constant[Get the fitness for every solution in a population.
Args:
problem: Problem; The problem that defines fitness.
population: list; List of potential solutions.
pool: None/multiprocessing.Pool; Pool of processes for parallel
decoding and evaluation.
]
variable[fitnesses] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b04ca4a0>]] * call[name[len], parameter[name[population]]]]
if name[cache_encoded] begin[:]
<ast.Try object at 0x7da1b04cb700>
if compare[name[encoded_keys] is constant[None]] begin[:]
variable[to_decode_keys] assign[=] constant[None]
variable[solutions] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0549300>]] * call[name[len], parameter[name[population]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0548c10>, <ast.Name object at 0x7da1b054b400>]]] in starred[call[name[zip], parameter[name[to_decode_indices], call[name[self]._pmap, parameter[name[problem].decode_solution, <ast.ListComp object at 0x7da1b05dbd90>, name[to_decode_keys], name[pool]]]]]] begin[:]
call[name[solutions]][name[i]] assign[=] name[solution]
if name[cache_solution] begin[:]
<ast.Try object at 0x7da1b05dbe50>
if compare[name[solution_keys] is constant[None]] begin[:]
if compare[name[encoded_keys] is constant[None]] begin[:]
variable[to_eval_keys] assign[=] constant[None]
variable[finished] assign[=] constant[False]
variable[eval_bookkeeping] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b04c91b0>, <ast.Name object at 0x7da1b04cbc70>]]] in starred[call[name[zip], parameter[name[to_eval_indices], call[name[self]._pmap, parameter[name[problem].get_fitness, <ast.ListComp object at 0x7da1b04c8df0>, name[to_eval_keys], name[pool]]]]]] begin[:]
<ast.Try object at 0x7da1b04cb5e0>
call[name[fitnesses]][name[i]] assign[=] name[fitness]
<ast.AugAssign object at 0x7da1b04c91e0>
if <ast.BoolOp object at 0x7da1b04cbac0> begin[:]
for taget[name[i]] in starred[name[to_decode_indices]] begin[:]
call[name[self].__encoded_cache][call[name[encoded_keys]][name[i]]] assign[=] call[name[fitnesses]][name[i]]
if <ast.BoolOp object at 0x7da1b04ca1a0> begin[:]
for taget[name[i]] in starred[name[to_eval_indices]] begin[:]
call[name[self].__solution_cache][call[name[solution_keys]][name[i]]] assign[=] call[name[fitnesses]][name[i]]
return[tuple[[<ast.Name object at 0x7da1b04c8400>, <ast.Name object at 0x7da1b04cb970>, <ast.Name object at 0x7da1b04cbe50>]]] | keyword[def] identifier[_get_fitnesses] ( identifier[self] ,
identifier[problem] ,
identifier[population] ,
identifier[cache_encoded] = keyword[True] ,
identifier[cache_solution] = keyword[False] ,
identifier[pool] = keyword[None] ):
literal[string]
identifier[fitnesses] =[ keyword[None] ]* identifier[len] ( identifier[population] )
keyword[if] identifier[cache_encoded] :
keyword[try] :
identifier[encoded_keys] = identifier[map] ( identifier[self] . identifier[_get_encoded_key] , identifier[population] )
identifier[to_decode_indices] =[]
keyword[for] identifier[i] , identifier[encoded_key] keyword[in] identifier[enumerate] ( identifier[encoded_keys] ):
keyword[try] :
identifier[fitnesses] [ identifier[i] ]= identifier[self] . identifier[__encoded_cache] [ identifier[encoded_key] ]
keyword[except] identifier[KeyError] :
identifier[to_decode_indices] . identifier[append] ( identifier[i] )
keyword[except] identifier[UnhashableError] :
identifier[encoded_keys] = keyword[None]
identifier[to_decode_indices] = identifier[range] ( identifier[len] ( identifier[population] ))
keyword[else] :
identifier[encoded_keys] = keyword[None]
identifier[to_decode_indices] = identifier[range] ( identifier[len] ( identifier[population] ))
keyword[if] identifier[encoded_keys] keyword[is] keyword[None] :
identifier[to_decode_keys] = keyword[None]
keyword[else] :
identifier[to_decode_keys] =[ identifier[encoded_keys] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[to_decode_indices] ]
identifier[solutions] =[ keyword[None] ]* identifier[len] ( identifier[population] )
keyword[for] identifier[i] , identifier[solution] keyword[in] identifier[zip] ( identifier[to_decode_indices] ,
identifier[self] . identifier[_pmap] (
identifier[problem] . identifier[decode_solution] ,
[ identifier[population] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[to_decode_indices] ],
identifier[to_decode_keys] ,
identifier[pool] )):
identifier[solutions] [ identifier[i] ]= identifier[solution]
keyword[if] identifier[cache_solution] :
keyword[try] :
keyword[if] identifier[problem] . identifier[hash_solution] :
identifier[hash_solution_func] = identifier[problem] . identifier[hash_solution]
keyword[else] :
identifier[hash_solution_func] = identifier[self] . identifier[_get_solution_key]
identifier[solution_keys] =[
identifier[hash_solution_func] ( identifier[solution] )
keyword[if] identifier[solution] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] keyword[for] identifier[solution] keyword[in] identifier[solutions]
]
identifier[to_eval_indices] =[]
keyword[for] identifier[i] , identifier[solution_key] keyword[in] identifier[enumerate] ( identifier[solution_keys] ):
keyword[if] identifier[solution_key] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[fitnesses] [ identifier[i] ]= identifier[self] . identifier[__solution_cache] [ identifier[solution_key] ]
keyword[except] identifier[KeyError] :
identifier[to_eval_indices] . identifier[append] ( identifier[i] )
keyword[except] identifier[UnhashableError] :
identifier[solution_keys] = keyword[None]
identifier[to_eval_indices] = identifier[to_decode_indices] [:]
keyword[else] :
identifier[solution_keys] = keyword[None]
identifier[to_eval_indices] = identifier[to_decode_indices] [:]
keyword[if] identifier[solution_keys] keyword[is] keyword[None] :
keyword[if] identifier[encoded_keys] keyword[is] keyword[None] :
identifier[to_eval_keys] = keyword[None]
keyword[else] :
identifier[to_eval_keys] =[ identifier[encoded_keys] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[to_eval_indices] ]
keyword[else] :
identifier[to_eval_keys] =[ identifier[solution_keys] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[to_eval_indices] ]
identifier[finished] = keyword[False]
identifier[eval_bookkeeping] ={}
keyword[for] identifier[i] , identifier[fitness_finished] keyword[in] identifier[zip] ( identifier[to_eval_indices] ,
identifier[self] . identifier[_pmap] (
identifier[problem] . identifier[get_fitness] ,
[ identifier[solutions] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[to_eval_indices] ],
identifier[to_eval_keys] ,
identifier[pool] ,
identifier[bookkeeping_dict] = identifier[eval_bookkeeping] )):
keyword[try] :
identifier[fitness] , identifier[maybe_finished] = identifier[fitness_finished]
keyword[if] identifier[maybe_finished] :
identifier[finished] = keyword[True]
keyword[except] identifier[TypeError] :
identifier[fitness] = identifier[fitness_finished]
identifier[fitnesses] [ identifier[i] ]= identifier[fitness]
identifier[self] . identifier[fitness_runs] += identifier[len] ( identifier[eval_bookkeeping] [ literal[string] ])
keyword[if] identifier[cache_encoded] keyword[and] identifier[encoded_keys] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[i] keyword[in] identifier[to_decode_indices] :
identifier[self] . identifier[__encoded_cache] [ identifier[encoded_keys] [ identifier[i] ]]= identifier[fitnesses] [ identifier[i] ]
keyword[if] identifier[cache_solution] keyword[and] identifier[solution_keys] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[i] keyword[in] identifier[to_eval_indices] :
identifier[self] . identifier[__solution_cache] [ identifier[solution_keys] [ identifier[i] ]]= identifier[fitnesses] [ identifier[i] ]
keyword[return] identifier[solutions] , identifier[fitnesses] , identifier[finished] | def _get_fitnesses(self, problem, population, cache_encoded=True, cache_solution=False, pool=None):
"""Get the fitness for every solution in a population.
Args:
problem: Problem; The problem that defines fitness.
population: list; List of potential solutions.
pool: None/multiprocessing.Pool; Pool of processes for parallel
decoding and evaluation.
"""
fitnesses = [None] * len(population)
#############################
# Decoding
#############################
if cache_encoded:
try:
encoded_keys = map(self._get_encoded_key, population)
# Get all fitnesses from encoded_solution cache
to_decode_indices = []
for (i, encoded_key) in enumerate(encoded_keys):
try:
fitnesses[i] = self.__encoded_cache[encoded_key] # depends on [control=['try'], data=[]]
# Note that this fitness will never be better than the current best
# because we have already evaluted it,
# Therefore, we do not need to worry about decoding the solution
except KeyError: # Cache miss
to_decode_indices.append(i) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except UnhashableError: # Cannot hash encoded solution
encoded_keys = None
to_decode_indices = range(len(population)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
encoded_keys = None
to_decode_indices = range(len(population))
# Decode all that need to be decoded, and combine back into list the same length
# as population
if encoded_keys is None:
to_decode_keys = None # depends on [control=['if'], data=[]]
else:
to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
solutions = [None] * len(population)
for (i, solution) in zip(to_decode_indices, self._pmap(problem.decode_solution, [population[i] for i in to_decode_indices], to_decode_keys, pool)):
solutions[i] = solution # depends on [control=['for'], data=[]]
#############################
# Evaluating
#############################
if cache_solution:
try:
# Try to make solutions hashable
# Use user provided hash function if available
if problem.hash_solution:
hash_solution_func = problem.hash_solution # depends on [control=['if'], data=[]]
else:
# Otherwise, default to built in "smart" hash function
hash_solution_func = self._get_solution_key
# None corresponds to encoded_solutions found in cache
solution_keys = [hash_solution_func(solution) if solution is not None else None for solution in solutions]
# Get all fitnesses from solution cache
to_eval_indices = []
for (i, solution_key) in enumerate(solution_keys):
if solution_key is not None: # Otherwise, fitness already found in encoded cache
try:
fitnesses[i] = self.__solution_cache[solution_key] # depends on [control=['try'], data=[]]
except KeyError: # Cache miss
to_eval_indices.append(i) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['solution_key']] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except UnhashableError: # Cannot hash solution
solution_keys = None
to_eval_indices = to_decode_indices[:] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
solution_keys = None
to_eval_indices = to_decode_indices[:]
# Evaluate all that need to be evaluated, and combine back into fitnesses list
if solution_keys is None:
if encoded_keys is None:
# No way to detect duplicates
to_eval_keys = None # depends on [control=['if'], data=[]]
else:
# Cannot use decoded keys, default to encoded keys
to_eval_keys = [encoded_keys[i] for i in to_eval_indices] # depends on [control=['if'], data=[]]
else:
to_eval_keys = [solution_keys[i] for i in to_eval_indices]
finished = False
eval_bookkeeping = {}
for (i, fitness_finished) in zip(to_eval_indices, self._pmap(problem.get_fitness, [solutions[i] for i in to_eval_indices], to_eval_keys, pool, bookkeeping_dict=eval_bookkeeping)):
# Unpack fitness_finished tuple
try:
(fitness, maybe_finished) = fitness_finished
if maybe_finished:
finished = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError: # Not (fitness, finished) tuple
fitness = fitness_finished # depends on [control=['except'], data=[]]
fitnesses[i] = fitness # depends on [control=['for'], data=[]]
#############################
# Finishing
#############################
# Bookkeeping
# keep track of how many times fitness is evaluated
self.fitness_runs += len(eval_bookkeeping['key_indices']) # Evaled once for each unique key
# Add evaluated fitnesses to caches (both of them)
if cache_encoded and encoded_keys is not None:
for i in to_decode_indices: # Encoded cache misses
self.__encoded_cache[encoded_keys[i]] = fitnesses[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
if cache_solution and solution_keys is not None:
for i in to_eval_indices: # Decoded cache misses
self.__solution_cache[solution_keys[i]] = fitnesses[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
# Return
# assert None not in fitnesses # Un-comment for debugging
return (solutions, fitnesses, finished) |
def laplacian_reordering(G):
    """Reorder the vertices of *G* along the Laplacian eigenvector that
    belongs to the smallest positive eigenvalue, and return the permuted
    graph."""
    # eigh returns eigenvalues of the symmetric Laplacian in ascending order
    eigvals, eigvecs = np.linalg.eigh(G.laplacian())
    smallest_positive = eigvals[eigvals > 0].min()
    # index of the first eigenvalue equal to the smallest positive one
    idx = np.flatnonzero(eigvals == smallest_positive)[0]
    ordering = np.argsort(eigvecs[:, idx])
    return permute_graph(G, ordering)
constant[Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue.]
variable[L] assign[=] call[name[G].laplacian, parameter[]]
<ast.Tuple object at 0x7da2046237c0> assign[=] call[name[np].linalg.eigh, parameter[name[L]]]
variable[min_positive_idx] assign[=] call[name[np].argmax, parameter[compare[name[vals] equal[==] call[call[name[vals]][compare[name[vals] greater[>] constant[0]]].min, parameter[]]]]]
variable[vec] assign[=] call[name[vecs]][tuple[[<ast.Slice object at 0x7da1b26acaf0>, <ast.Name object at 0x7da1b26aea70>]]]
return[call[name[permute_graph], parameter[name[G], call[name[np].argsort, parameter[name[vec]]]]]] | keyword[def] identifier[laplacian_reordering] ( identifier[G] ):
literal[string]
identifier[L] = identifier[G] . identifier[laplacian] ()
identifier[vals] , identifier[vecs] = identifier[np] . identifier[linalg] . identifier[eigh] ( identifier[L] )
identifier[min_positive_idx] = identifier[np] . identifier[argmax] ( identifier[vals] == identifier[vals] [ identifier[vals] > literal[int] ]. identifier[min] ())
identifier[vec] = identifier[vecs] [:, identifier[min_positive_idx] ]
keyword[return] identifier[permute_graph] ( identifier[G] , identifier[np] . identifier[argsort] ( identifier[vec] )) | def laplacian_reordering(G):
"""Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue."""
L = G.laplacian()
(vals, vecs) = np.linalg.eigh(L)
min_positive_idx = np.argmax(vals == vals[vals > 0].min())
vec = vecs[:, min_positive_idx]
return permute_graph(G, np.argsort(vec)) |
def to_mapquest_str(self):
    """
    Render this Viewbox as a ``left,top,right,bottom`` string suitable for
    use as a query parameter with
    `MapQuest <http://www.mapquestapi.com/geocoding/#options>`_,
    after reprojecting the box to WGS84 (SRID 4326).
    """
    wgs84 = self.convert_srs(4326)
    corners = (wgs84.left, wgs84.top, wgs84.right, wgs84.bottom)
    return ','.join('%s' % corner for corner in corners)
constant[
Convert Viewbox object to a string that can be used by
`MapQuest <http://www.mapquestapi.com/geocoding/#options>`_
as a query parameter.
]
variable[vb] assign[=] call[name[self].convert_srs, parameter[constant[4326]]]
return[binary_operation[constant[%s,%s,%s,%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204567e20>, <ast.Attribute object at 0x7da204564490>, <ast.Attribute object at 0x7da20c9937f0>, <ast.Attribute object at 0x7da20c991870>]]]] | keyword[def] identifier[to_mapquest_str] ( identifier[self] ):
literal[string]
identifier[vb] = identifier[self] . identifier[convert_srs] ( literal[int] )
keyword[return] literal[string] %( identifier[vb] . identifier[left] , identifier[vb] . identifier[top] , identifier[vb] . identifier[right] , identifier[vb] . identifier[bottom] ) | def to_mapquest_str(self):
"""
Convert Viewbox object to a string that can be used by
`MapQuest <http://www.mapquestapi.com/geocoding/#options>`_
as a query parameter.
"""
vb = self.convert_srs(4326)
return '%s,%s,%s,%s' % (vb.left, vb.top, vb.right, vb.bottom) |
def _generate_union_properties(self, fields):
    """Emits union instance properties from the given fields."""
    note_template = (' @note Ensure the `is{}` method returns true before accessing, '
                     'otherwise a runtime exception will be raised.')
    for field in fields:
        # void types do not need properties to store additional state
        # information
        if is_void_type(field.data_type):
            continue
        if field.doc:
            doc = self.process_doc(field.doc, self._docf)
        else:
            doc = undocumented
        doc += note_template.format(fmt_camel_upper(field.name))
        self.emit_wrapped_text(
            self.process_doc(doc, self._docf), prefix=comment_prefix)
        self.emit(fmt_property(field=field))
        self.emit()
constant[Emits union instance properties from the given fields.]
for taget[name[field]] in starred[name[fields]] begin[:]
if <ast.UnaryOp object at 0x7da20c76d8a0> begin[:]
variable[doc] assign[=] <ast.IfExp object at 0x7da20c76f190>
variable[warning_str] assign[=] constant[ @note Ensure the `is{}` method returns true before accessing, otherwise a runtime exception will be raised.]
<ast.AugAssign object at 0x7da20c76d240>
call[name[self].emit_wrapped_text, parameter[call[name[self].process_doc, parameter[name[doc], name[self]._docf]]]]
call[name[self].emit, parameter[call[name[fmt_property], parameter[]]]]
call[name[self].emit, parameter[]] | keyword[def] identifier[_generate_union_properties] ( identifier[self] , identifier[fields] ):
literal[string]
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[if] keyword[not] identifier[is_void_type] ( identifier[field] . identifier[data_type] ):
identifier[doc] = identifier[self] . identifier[process_doc] (
identifier[field] . identifier[doc] , identifier[self] . identifier[_docf] ) keyword[if] identifier[field] . identifier[doc] keyword[else] identifier[undocumented]
identifier[warning_str] =(
literal[string]
literal[string] )
identifier[doc] += identifier[warning_str] . identifier[format] ( identifier[fmt_camel_upper] ( identifier[field] . identifier[name] ))
identifier[self] . identifier[emit_wrapped_text] (
identifier[self] . identifier[process_doc] ( identifier[doc] , identifier[self] . identifier[_docf] ), identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[fmt_property] ( identifier[field] = identifier[field] ))
identifier[self] . identifier[emit] () | def _generate_union_properties(self, fields):
"""Emits union instance properties from the given fields."""
for field in fields:
# void types do not need properties to store additional state
# information
if not is_void_type(field.data_type):
doc = self.process_doc(field.doc, self._docf) if field.doc else undocumented
warning_str = ' @note Ensure the `is{}` method returns true before accessing, otherwise a runtime exception will be raised.'
doc += warning_str.format(fmt_camel_upper(field.name))
self.emit_wrapped_text(self.process_doc(doc, self._docf), prefix=comment_prefix)
self.emit(fmt_property(field=field))
self.emit() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] |
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Print *text* after colorizing it with ``colored``.
    Any extra keyword arguments are forwarded to the builtin ``print``.
    """
    colorized = colored(text, color, on_color, attrs)
    print(colorized, **kwargs)
constant[Print colorize text.
It accepts arguments of print function.
]
call[name[print], parameter[call[name[colored], parameter[name[text], name[color], name[on_color], name[attrs]]]]] | keyword[def] identifier[cprint] ( identifier[text] , identifier[color] = keyword[None] , identifier[on_color] = keyword[None] , identifier[attrs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[print] (( identifier[colored] ( identifier[text] , identifier[color] , identifier[on_color] , identifier[attrs] )),** identifier[kwargs] ) | def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print function.
"""
print(colored(text, color, on_color, attrs), **kwargs) |
def read_astropy_ascii (self, **kwargs):
    """Open as an ASCII table, returning a :class:`astropy.table.Table` object.
    Keyword arguments are forwarded to :func:`astropy.io.ascii.read`; valid
    ones likely include:
    - ``names = <list>`` (column names)
    - ``format`` ('basic', 'cds', 'csv', 'ipac', ...)
    - ``guess = True`` (guess table format)
    - ``delimiter`` (column delimiter)
    - ``comment = <regex>``
    - ``header_start = <int>`` (line number of header, ignoring blank and comment lines)
    - ``data_start = <int>``
    - ``data_end = <int>``
    - ``converters = <dict>``
    - ``include_names = <list>`` (names of columns to include)
    - ``exclude_names = <list>`` (names of columns to exclude; applied after include)
    - ``fill_values = <dict>`` (filler values)
    """
    # Import lazily so astropy is only required when this method is used.
    from astropy.io import ascii as astropy_ascii
    return astropy_ascii.read (text_type (self), **kwargs)
constant[Open as an ASCII table, returning a :class:`astropy.table.Table` object.
Keyword arguments are passed to :func:`astropy.io.ascii.open`; valid
ones likely include:
- ``names = <list>`` (column names)
- ``format`` ('basic', 'cds', 'csv', 'ipac', ...)
- ``guess = True`` (guess table format)
- ``delimiter`` (column delimiter)
- ``comment = <regex>``
- ``header_start = <int>`` (line number of header, ignoring blank and comment lines)
- ``data_start = <int>``
- ``data_end = <int>``
- ``converters = <dict>``
- ``include_names = <list>`` (names of columns to include)
- ``exclude_names = <list>`` (names of columns to exclude; applied after include)
- ``fill_values = <dict>`` (filler values)
]
from relative_module[astropy.io] import module[ascii]
return[call[name[ascii].read, parameter[call[name[text_type], parameter[name[self]]]]]] | keyword[def] identifier[read_astropy_ascii] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[astropy] . identifier[io] keyword[import] identifier[ascii]
keyword[return] identifier[ascii] . identifier[read] ( identifier[text_type] ( identifier[self] ),** identifier[kwargs] ) | def read_astropy_ascii(self, **kwargs):
"""Open as an ASCII table, returning a :class:`astropy.table.Table` object.
Keyword arguments are passed to :func:`astropy.io.ascii.open`; valid
ones likely include:
- ``names = <list>`` (column names)
- ``format`` ('basic', 'cds', 'csv', 'ipac', ...)
- ``guess = True`` (guess table format)
- ``delimiter`` (column delimiter)
- ``comment = <regex>``
- ``header_start = <int>`` (line number of header, ignoring blank and comment lines)
- ``data_start = <int>``
- ``data_end = <int>``
- ``converters = <dict>``
- ``include_names = <list>`` (names of columns to include)
- ``exclude_names = <list>`` (names of columns to exclude; applied after include)
- ``fill_values = <dict>`` (filler values)
"""
from astropy.io import ascii
return ascii.read(text_type(self), **kwargs) |
def _run_cmd(cmd, ctx, glob, loc):  # pragma: no cover
    """Run a command with optionally a debugger, IPython, or profiling."""
    if PDB:
        _enable_pdb()
    if IPYTHON:
        from IPython import start_ipython
        # Seed the interactive namespace with both globals and locals.
        namespace = glob.copy()
        namespace.update(loc)
        return start_ipython(['-i', '--gui=qt'], user_ns=namespace)
    # Profiling. The builtin `profile` is added in __init__.
    profiler = __builtins__.get('profile', None)
    if profiler:
        return _profile(profiler, cmd, glob, loc)
    return exec_(cmd, glob, loc)
constant[Run a command with optionally a debugger, IPython, or profiling.]
if name[PDB] begin[:]
call[name[_enable_pdb], parameter[]]
if name[IPYTHON] begin[:]
from relative_module[IPython] import module[start_ipython]
variable[args_ipy] assign[=] list[[<ast.Constant object at 0x7da2044c00d0>, <ast.Constant object at 0x7da2044c1960>]]
variable[ns] assign[=] call[name[glob].copy, parameter[]]
call[name[ns].update, parameter[name[loc]]]
return[call[name[start_ipython], parameter[name[args_ipy]]]]
variable[prof] assign[=] call[name[__builtins__].get, parameter[constant[profile], constant[None]]]
if name[prof] begin[:]
variable[prof] assign[=] call[name[__builtins__]][constant[profile]]
return[call[name[_profile], parameter[name[prof], name[cmd], name[glob], name[loc]]]]
return[call[name[exec_], parameter[name[cmd], name[glob], name[loc]]]] | keyword[def] identifier[_run_cmd] ( identifier[cmd] , identifier[ctx] , identifier[glob] , identifier[loc] ):
literal[string]
keyword[if] identifier[PDB] :
identifier[_enable_pdb] ()
keyword[if] identifier[IPYTHON] :
keyword[from] identifier[IPython] keyword[import] identifier[start_ipython]
identifier[args_ipy] =[ literal[string] , literal[string] ]
identifier[ns] = identifier[glob] . identifier[copy] ()
identifier[ns] . identifier[update] ( identifier[loc] )
keyword[return] identifier[start_ipython] ( identifier[args_ipy] , identifier[user_ns] = identifier[ns] )
identifier[prof] = identifier[__builtins__] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[prof] :
identifier[prof] = identifier[__builtins__] [ literal[string] ]
keyword[return] identifier[_profile] ( identifier[prof] , identifier[cmd] , identifier[glob] , identifier[loc] )
keyword[return] identifier[exec_] ( identifier[cmd] , identifier[glob] , identifier[loc] ) | def _run_cmd(cmd, ctx, glob, loc): # pragma: no cover
'Run a command with optionally a debugger, IPython, or profiling.'
if PDB:
_enable_pdb() # depends on [control=['if'], data=[]]
if IPYTHON:
from IPython import start_ipython
args_ipy = ['-i', '--gui=qt']
ns = glob.copy()
ns.update(loc)
return start_ipython(args_ipy, user_ns=ns) # depends on [control=['if'], data=[]]
# Profiling. The builtin `profile` is added in __init__.
prof = __builtins__.get('profile', None)
if prof:
prof = __builtins__['profile']
return _profile(prof, cmd, glob, loc) # depends on [control=['if'], data=[]]
return exec_(cmd, glob, loc) |
def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0):
    """Yield ``(index, x0, y0, x1, y1)`` rectangles for a rows x cols grid.
    Each cell is ``colw`` wide and ``rowh`` tall, with the whole grid
    offset by ``(ox, oy)``."""
    for idx, row, col in rowmajor(rows, cols):
        x0 = ox + col * colw
        y0 = oy + row * rowh
        yield (idx, x0, y0, x0 + colw, y0 + rowh)
constant[Generate coords for a matrix of rects]
for taget[tuple[[<ast.Name object at 0x7da204960e80>, <ast.Name object at 0x7da204963bb0>, <ast.Name object at 0x7da204961a50>]]] in starred[call[name[rowmajor], parameter[name[rows], name[cols]]]] begin[:]
variable[x] assign[=] binary_operation[name[ox] + binary_operation[name[c] * name[colw]]]
variable[y] assign[=] binary_operation[name[oy] + binary_operation[name[f] * name[rowh]]]
variable[x1] assign[=] binary_operation[name[x] + name[colw]]
variable[y1] assign[=] binary_operation[name[y] + name[rowh]]
<ast.Yield object at 0x7da1b16b6e30> | keyword[def] identifier[matrix_coords] ( identifier[rows] , identifier[cols] , identifier[rowh] , identifier[colw] , identifier[ox] = literal[int] , identifier[oy] = literal[int] ):
literal[string]
keyword[for] identifier[i] , identifier[f] , identifier[c] keyword[in] identifier[rowmajor] ( identifier[rows] , identifier[cols] ):
identifier[x] = identifier[ox] + identifier[c] * identifier[colw]
identifier[y] = identifier[oy] + identifier[f] * identifier[rowh]
identifier[x1] = identifier[x] + identifier[colw]
identifier[y1] = identifier[y] + identifier[rowh]
keyword[yield] ( identifier[i] , identifier[x] , identifier[y] , identifier[x1] , identifier[y1] ) | def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0):
"""Generate coords for a matrix of rects"""
for (i, f, c) in rowmajor(rows, cols):
x = ox + c * colw
y = oy + f * rowh
x1 = x + colw
y1 = y + rowh
yield (i, x, y, x1, y1) # depends on [control=['for'], data=[]] |
def covfilter(args):
    """
    %prog covfilter blastfile fastafile
    Fastafile is used to get the sizes of the queries. Two filters can be
    applied, the id% and cov%.
    """
    from jcvi.algorithms.supermap import supermap
    from jcvi.utils.range import range_union
    allowed_iterby = ("query", "query_sbjct")
    # Command-line interface; the docstring above doubles as the usage text
    p = OptionParser(covfilter.__doc__)
    p.set_align(pctid=95, pctcov=50)
    p.add_option("--scov", default=False, action="store_true",
                 help="Subject coverage instead of query [default: %default]")
    p.add_option("--supermap", action="store_true",
                 help="Use supermap instead of union")
    p.add_option("--ids", dest="ids", default=None,
                 help="Print out the ids that satisfy [default: %default]")
    p.add_option("--list", dest="list", default=False, action="store_true",
                 help="List the id% and cov% per gene [default: %default]")
    p.add_option("--iterby", dest="iterby", default="query", choices=allowed_iterby,
                 help="Choose how to iterate through BLAST [default: %default]")
    p.set_outfile(outfile=None)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    blastfile, fastafile = args
    pctid = opts.pctid
    pctcov = opts.pctcov
    union = not opts.supermap
    scov = opts.scov
    # Sequence lengths serve as the denominators for the coverage percentage
    sz = Sizes(fastafile)
    sizes = sz.mapping
    iterby = opts.iterby
    qspair = iterby == "query_sbjct"
    # With --supermap, pre-filter the BLAST file per query and work on the
    # filtered file instead of merging ranges with range_union below
    if not union:
        querysupermap = blastfile + ".query.supermap"
        if not op.exists(querysupermap):
            supermap(blastfile, filter="query")
        blastfile = querysupermap
    assert op.exists(blastfile)
    # Global tallies accumulated across all queries
    covered = 0
    mismatches = 0
    gaps = 0
    alignlen = 0
    queries = set()
    valid = set()
    blast = BlastSlow(blastfile)
    # Iterate per query, or per (query, subject) pair with --iterby=query_sbjct
    iterator = blast.iter_hits_pair if qspair else blast.iter_hits
    # Maps query (or pair) -> (identity%, coverage%)
    covidstore = {}
    for query, blines in iterator():
        blines = list(blines)
        queries.add(query)
        # per gene report
        this_covered = 0
        this_alignlen = 0
        this_mismatches = 0
        this_gaps = 0
        this_identity = 0
        ranges = []
        for b in blines:
            # Measure coverage on the subject side (--scov) or the query side
            if scov:
                s, start, stop = b.subject, b.sstart, b.sstop
            else:
                s, start, stop = b.query, b.qstart, b.qstop
            cov_id = s
            # Hits below the identity cutoff contribute nothing
            if b.pctid < pctid:
                continue
            # Normalize so start <= stop (minus-strand hits are reversed)
            if start > stop:
                start, stop = stop, start
            this_covered += stop - start + 1
            this_alignlen += b.hitlen
            this_mismatches += b.nmismatch
            this_gaps += b.ngaps
            # Dummy seqid "1": range_union only needs intervals on one axis
            ranges.append(("1", start, stop))
        if ranges:
            # identity% = 100 minus the mismatch+gap fraction of aligned length
            this_identity = 100. - (this_mismatches + this_gaps) * 100. / this_alignlen
            if union:
                # Merge overlapping intervals so overlaps are not double-counted
                this_covered = range_union(ranges)
            this_coverage = this_covered * 100. / sizes[cov_id]
            covidstore[query] = (this_identity, this_coverage)
            # A query is "valid" only if it clears both cutoffs
            if this_identity >= pctid and this_coverage >= pctcov:
                valid.add(query)
            covered += this_covered
            mismatches += this_mismatches
            gaps += this_gaps
            alignlen += this_alignlen
    # Optionally print a per-gene (or per-pair) id%/cov% table on stdout
    if opts.list:
        if qspair:
            # Index the (query, subject) stats by each member of the pair
            allpairs = defaultdict(list)
            for (q, s) in covidstore:
                allpairs[q].append((q, s))
                allpairs[s].append((q, s))
            for id, size in sz.iter_sizes():
                if id not in allpairs:
                    # Sequence had no qualifying hits at all
                    print("\t".join((id, "na", "0", "0")))
                else:
                    for qs in allpairs[id]:
                        this_identity, this_coverage = covidstore[qs]
                        print("{0}\t{1:.1f}\t{2:.1f}".format("\t".join(qs), this_identity, this_coverage))
        else:
            for query, size in sz.iter_sizes():
                this_identity, this_coverage = covidstore.get(query, (0, 0))
                print("{0}\t{1:.1f}\t{2:.1f}".format(query, this_identity, this_coverage))
    # Summarize mapping statistics, then write them to stderr and a log file
    mapped_count = len(queries)
    valid_count = len(valid)
    cutoff_message = "(id={0.pctid}% cov={0.pctcov}%)".format(opts)
    m = "Identity: {0} mismatches, {1} gaps, {2} alignlen\n".\
            format(mismatches, gaps, alignlen)
    total = len(sizes.keys())
    m += "Total mapped: {0} ({1:.1f}% of {2})\n".\
            format(mapped_count, mapped_count * 100. / total, total)
    m += "Total valid {0}: {1} ({2:.1f}% of {3})\n".\
            format(cutoff_message, valid_count, valid_count * 100. / total, total)
    m += "Average id = {0:.2f}%\n".\
            format(100 - (mismatches + gaps) * 100. / alignlen)
    queries_combined = sz.totalsize
    m += "Coverage: {0} covered, {1} total\n".\
            format(covered, queries_combined)
    m += "Average coverage = {0:.2f}%".\
            format(covered * 100. / queries_combined)
    logfile = blastfile + ".covfilter.log"
    fw = open(logfile, "w")
    for f in (sys.stderr, fw):
        print(m, file=f)
    fw.close()
    # --ids: dump the identifiers that passed both cutoffs to a file
    if opts.ids:
        filename = opts.ids
        fw = must_open(filename, "w")
        for id in valid:
            print(id, file=fw)
        logging.debug("Queries beyond cutoffs {0} written to `{1}`.".\
                format(cutoff_message, filename))
    # Finally, write out the BLAST lines whose query (or pair) passed
    outfile = opts.outfile
    if not outfile:
        return
    fw = must_open(outfile, "w")
    blast = Blast(blastfile)
    for b in blast:
        query = (b.query, b.subject) if qspair else b.query
        if query in valid:
            print(b, file=fw)
constant[
%prog covfilter blastfile fastafile
Fastafile is used to get the sizes of the queries. Two filters can be
applied, the id% and cov%.
]
from relative_module[jcvi.algorithms.supermap] import module[supermap]
from relative_module[jcvi.utils.range] import module[range_union]
variable[allowed_iterby] assign[=] tuple[[<ast.Constant object at 0x7da1b080dde0>, <ast.Constant object at 0x7da1b080dc90>]]
variable[p] assign[=] call[name[OptionParser], parameter[name[covfilter].__doc__]]
call[name[p].set_align, parameter[]]
call[name[p].add_option, parameter[constant[--scov]]]
call[name[p].add_option, parameter[constant[--supermap]]]
call[name[p].add_option, parameter[constant[--ids]]]
call[name[p].add_option, parameter[constant[--list]]]
call[name[p].add_option, parameter[constant[--iterby]]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da1b080d090> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b080d480>]]
<ast.Tuple object at 0x7da1b080ff10> assign[=] name[args]
variable[pctid] assign[=] name[opts].pctid
variable[pctcov] assign[=] name[opts].pctcov
variable[union] assign[=] <ast.UnaryOp object at 0x7da1b080d150>
variable[scov] assign[=] name[opts].scov
variable[sz] assign[=] call[name[Sizes], parameter[name[fastafile]]]
variable[sizes] assign[=] name[sz].mapping
variable[iterby] assign[=] name[opts].iterby
variable[qspair] assign[=] compare[name[iterby] equal[==] constant[query_sbjct]]
if <ast.UnaryOp object at 0x7da1b08c9f00> begin[:]
variable[querysupermap] assign[=] binary_operation[name[blastfile] + constant[.query.supermap]]
if <ast.UnaryOp object at 0x7da1b08cb460> begin[:]
call[name[supermap], parameter[name[blastfile]]]
variable[blastfile] assign[=] name[querysupermap]
assert[call[name[op].exists, parameter[name[blastfile]]]]
variable[covered] assign[=] constant[0]
variable[mismatches] assign[=] constant[0]
variable[gaps] assign[=] constant[0]
variable[alignlen] assign[=] constant[0]
variable[queries] assign[=] call[name[set], parameter[]]
variable[valid] assign[=] call[name[set], parameter[]]
variable[blast] assign[=] call[name[BlastSlow], parameter[name[blastfile]]]
variable[iterator] assign[=] <ast.IfExp object at 0x7da1b08c9510>
variable[covidstore] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b08c9810>, <ast.Name object at 0x7da1b08cba00>]]] in starred[call[name[iterator], parameter[]]] begin[:]
variable[blines] assign[=] call[name[list], parameter[name[blines]]]
call[name[queries].add, parameter[name[query]]]
variable[this_covered] assign[=] constant[0]
variable[this_alignlen] assign[=] constant[0]
variable[this_mismatches] assign[=] constant[0]
variable[this_gaps] assign[=] constant[0]
variable[this_identity] assign[=] constant[0]
variable[ranges] assign[=] list[[]]
for taget[name[b]] in starred[name[blines]] begin[:]
if name[scov] begin[:]
<ast.Tuple object at 0x7da1b08c9060> assign[=] tuple[[<ast.Attribute object at 0x7da1b08caf80>, <ast.Attribute object at 0x7da1b08cb220>, <ast.Attribute object at 0x7da1b08cb040>]]
variable[cov_id] assign[=] name[s]
if compare[name[b].pctid less[<] name[pctid]] begin[:]
continue
if compare[name[start] greater[>] name[stop]] begin[:]
<ast.Tuple object at 0x7da20e9557b0> assign[=] tuple[[<ast.Name object at 0x7da20e955450>, <ast.Name object at 0x7da20e954280>]]
<ast.AugAssign object at 0x7da20e954c70>
<ast.AugAssign object at 0x7da20e957580>
<ast.AugAssign object at 0x7da20e954ee0>
<ast.AugAssign object at 0x7da20e956b90>
call[name[ranges].append, parameter[tuple[[<ast.Constant object at 0x7da20e9557e0>, <ast.Name object at 0x7da20e956620>, <ast.Name object at 0x7da20e956500>]]]]
if name[ranges] begin[:]
variable[this_identity] assign[=] binary_operation[constant[100.0] - binary_operation[binary_operation[binary_operation[name[this_mismatches] + name[this_gaps]] * constant[100.0]] / name[this_alignlen]]]
if name[union] begin[:]
variable[this_covered] assign[=] call[name[range_union], parameter[name[ranges]]]
variable[this_coverage] assign[=] binary_operation[binary_operation[name[this_covered] * constant[100.0]] / call[name[sizes]][name[cov_id]]]
call[name[covidstore]][name[query]] assign[=] tuple[[<ast.Name object at 0x7da20e9564d0>, <ast.Name object at 0x7da20e956ec0>]]
if <ast.BoolOp object at 0x7da20e954ca0> begin[:]
call[name[valid].add, parameter[name[query]]]
<ast.AugAssign object at 0x7da20e956260>
<ast.AugAssign object at 0x7da20e956200>
<ast.AugAssign object at 0x7da20e954670>
<ast.AugAssign object at 0x7da20e9563e0>
if name[opts].list begin[:]
if name[qspair] begin[:]
variable[allpairs] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da20e957700>, <ast.Name object at 0x7da20e956bc0>]]] in starred[name[covidstore]] begin[:]
call[call[name[allpairs]][name[q]].append, parameter[tuple[[<ast.Name object at 0x7da1b08fcf10>, <ast.Name object at 0x7da1b08fc340>]]]]
call[call[name[allpairs]][name[s]].append, parameter[tuple[[<ast.Name object at 0x7da1b08ffd00>, <ast.Name object at 0x7da1b08fc2e0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b08ffe50>, <ast.Name object at 0x7da1b08ffdc0>]]] in starred[call[name[sz].iter_sizes, parameter[]]] begin[:]
if compare[name[id] <ast.NotIn object at 0x7da2590d7190> name[allpairs]] begin[:]
call[name[print], parameter[call[constant[ ].join, parameter[tuple[[<ast.Name object at 0x7da1b08fc040>, <ast.Constant object at 0x7da1b08fc280>, <ast.Constant object at 0x7da1b08fc130>, <ast.Constant object at 0x7da1b08ffca0>]]]]]]
variable[mapped_count] assign[=] call[name[len], parameter[name[queries]]]
variable[valid_count] assign[=] call[name[len], parameter[name[valid]]]
variable[cutoff_message] assign[=] call[constant[(id={0.pctid}% cov={0.pctcov}%)].format, parameter[name[opts]]]
variable[m] assign[=] call[constant[Identity: {0} mismatches, {1} gaps, {2} alignlen
].format, parameter[name[mismatches], name[gaps], name[alignlen]]]
variable[total] assign[=] call[name[len], parameter[call[name[sizes].keys, parameter[]]]]
<ast.AugAssign object at 0x7da1b084d7b0>
<ast.AugAssign object at 0x7da1b088fc70>
<ast.AugAssign object at 0x7da1b088dc90>
variable[queries_combined] assign[=] name[sz].totalsize
<ast.AugAssign object at 0x7da1b088f460>
<ast.AugAssign object at 0x7da1b088ee00>
variable[logfile] assign[=] binary_operation[name[blastfile] + constant[.covfilter.log]]
variable[fw] assign[=] call[name[open], parameter[name[logfile], constant[w]]]
for taget[name[f]] in starred[tuple[[<ast.Attribute object at 0x7da1b08311e0>, <ast.Name object at 0x7da1b0832e00>]]] begin[:]
call[name[print], parameter[name[m]]]
call[name[fw].close, parameter[]]
if name[opts].ids begin[:]
variable[filename] assign[=] name[opts].ids
variable[fw] assign[=] call[name[must_open], parameter[name[filename], constant[w]]]
for taget[name[id]] in starred[name[valid]] begin[:]
call[name[print], parameter[name[id]]]
call[name[logging].debug, parameter[call[constant[Queries beyond cutoffs {0} written to `{1}`.].format, parameter[name[cutoff_message], name[filename]]]]]
variable[outfile] assign[=] name[opts].outfile
if <ast.UnaryOp object at 0x7da1b08d2830> begin[:]
return[None]
variable[fw] assign[=] call[name[must_open], parameter[name[outfile], constant[w]]]
variable[blast] assign[=] call[name[Blast], parameter[name[blastfile]]]
for taget[name[b]] in starred[name[blast]] begin[:]
variable[query] assign[=] <ast.IfExp object at 0x7da1b08d2fb0>
if compare[name[query] in name[valid]] begin[:]
call[name[print], parameter[name[b]]] | keyword[def] identifier[covfilter] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[algorithms] . identifier[supermap] keyword[import] identifier[supermap]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[range] keyword[import] identifier[range_union]
identifier[allowed_iterby] =( literal[string] , literal[string] )
identifier[p] = identifier[OptionParser] ( identifier[covfilter] . identifier[__doc__] )
identifier[p] . identifier[set_align] ( identifier[pctid] = literal[int] , identifier[pctcov] = literal[int] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = literal[string] , identifier[choices] = identifier[allowed_iterby] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_outfile] ( identifier[outfile] = keyword[None] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[blastfile] , identifier[fastafile] = identifier[args]
identifier[pctid] = identifier[opts] . identifier[pctid]
identifier[pctcov] = identifier[opts] . identifier[pctcov]
identifier[union] = keyword[not] identifier[opts] . identifier[supermap]
identifier[scov] = identifier[opts] . identifier[scov]
identifier[sz] = identifier[Sizes] ( identifier[fastafile] )
identifier[sizes] = identifier[sz] . identifier[mapping]
identifier[iterby] = identifier[opts] . identifier[iterby]
identifier[qspair] = identifier[iterby] == literal[string]
keyword[if] keyword[not] identifier[union] :
identifier[querysupermap] = identifier[blastfile] + literal[string]
keyword[if] keyword[not] identifier[op] . identifier[exists] ( identifier[querysupermap] ):
identifier[supermap] ( identifier[blastfile] , identifier[filter] = literal[string] )
identifier[blastfile] = identifier[querysupermap]
keyword[assert] identifier[op] . identifier[exists] ( identifier[blastfile] )
identifier[covered] = literal[int]
identifier[mismatches] = literal[int]
identifier[gaps] = literal[int]
identifier[alignlen] = literal[int]
identifier[queries] = identifier[set] ()
identifier[valid] = identifier[set] ()
identifier[blast] = identifier[BlastSlow] ( identifier[blastfile] )
identifier[iterator] = identifier[blast] . identifier[iter_hits_pair] keyword[if] identifier[qspair] keyword[else] identifier[blast] . identifier[iter_hits]
identifier[covidstore] ={}
keyword[for] identifier[query] , identifier[blines] keyword[in] identifier[iterator] ():
identifier[blines] = identifier[list] ( identifier[blines] )
identifier[queries] . identifier[add] ( identifier[query] )
identifier[this_covered] = literal[int]
identifier[this_alignlen] = literal[int]
identifier[this_mismatches] = literal[int]
identifier[this_gaps] = literal[int]
identifier[this_identity] = literal[int]
identifier[ranges] =[]
keyword[for] identifier[b] keyword[in] identifier[blines] :
keyword[if] identifier[scov] :
identifier[s] , identifier[start] , identifier[stop] = identifier[b] . identifier[subject] , identifier[b] . identifier[sstart] , identifier[b] . identifier[sstop]
keyword[else] :
identifier[s] , identifier[start] , identifier[stop] = identifier[b] . identifier[query] , identifier[b] . identifier[qstart] , identifier[b] . identifier[qstop]
identifier[cov_id] = identifier[s]
keyword[if] identifier[b] . identifier[pctid] < identifier[pctid] :
keyword[continue]
keyword[if] identifier[start] > identifier[stop] :
identifier[start] , identifier[stop] = identifier[stop] , identifier[start]
identifier[this_covered] += identifier[stop] - identifier[start] + literal[int]
identifier[this_alignlen] += identifier[b] . identifier[hitlen]
identifier[this_mismatches] += identifier[b] . identifier[nmismatch]
identifier[this_gaps] += identifier[b] . identifier[ngaps]
identifier[ranges] . identifier[append] (( literal[string] , identifier[start] , identifier[stop] ))
keyword[if] identifier[ranges] :
identifier[this_identity] = literal[int] -( identifier[this_mismatches] + identifier[this_gaps] )* literal[int] / identifier[this_alignlen]
keyword[if] identifier[union] :
identifier[this_covered] = identifier[range_union] ( identifier[ranges] )
identifier[this_coverage] = identifier[this_covered] * literal[int] / identifier[sizes] [ identifier[cov_id] ]
identifier[covidstore] [ identifier[query] ]=( identifier[this_identity] , identifier[this_coverage] )
keyword[if] identifier[this_identity] >= identifier[pctid] keyword[and] identifier[this_coverage] >= identifier[pctcov] :
identifier[valid] . identifier[add] ( identifier[query] )
identifier[covered] += identifier[this_covered]
identifier[mismatches] += identifier[this_mismatches]
identifier[gaps] += identifier[this_gaps]
identifier[alignlen] += identifier[this_alignlen]
keyword[if] identifier[opts] . identifier[list] :
keyword[if] identifier[qspair] :
identifier[allpairs] = identifier[defaultdict] ( identifier[list] )
keyword[for] ( identifier[q] , identifier[s] ) keyword[in] identifier[covidstore] :
identifier[allpairs] [ identifier[q] ]. identifier[append] (( identifier[q] , identifier[s] ))
identifier[allpairs] [ identifier[s] ]. identifier[append] (( identifier[q] , identifier[s] ))
keyword[for] identifier[id] , identifier[size] keyword[in] identifier[sz] . identifier[iter_sizes] ():
keyword[if] identifier[id] keyword[not] keyword[in] identifier[allpairs] :
identifier[print] ( literal[string] . identifier[join] (( identifier[id] , literal[string] , literal[string] , literal[string] )))
keyword[else] :
keyword[for] identifier[qs] keyword[in] identifier[allpairs] [ identifier[id] ]:
identifier[this_identity] , identifier[this_coverage] = identifier[covidstore] [ identifier[qs] ]
identifier[print] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[qs] ), identifier[this_identity] , identifier[this_coverage] ))
keyword[else] :
keyword[for] identifier[query] , identifier[size] keyword[in] identifier[sz] . identifier[iter_sizes] ():
identifier[this_identity] , identifier[this_coverage] = identifier[covidstore] . identifier[get] ( identifier[query] ,( literal[int] , literal[int] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[query] , identifier[this_identity] , identifier[this_coverage] ))
identifier[mapped_count] = identifier[len] ( identifier[queries] )
identifier[valid_count] = identifier[len] ( identifier[valid] )
identifier[cutoff_message] = literal[string] . identifier[format] ( identifier[opts] )
identifier[m] = literal[string] . identifier[format] ( identifier[mismatches] , identifier[gaps] , identifier[alignlen] )
identifier[total] = identifier[len] ( identifier[sizes] . identifier[keys] ())
identifier[m] += literal[string] . identifier[format] ( identifier[mapped_count] , identifier[mapped_count] * literal[int] / identifier[total] , identifier[total] )
identifier[m] += literal[string] . identifier[format] ( identifier[cutoff_message] , identifier[valid_count] , identifier[valid_count] * literal[int] / identifier[total] , identifier[total] )
identifier[m] += literal[string] . identifier[format] ( literal[int] -( identifier[mismatches] + identifier[gaps] )* literal[int] / identifier[alignlen] )
identifier[queries_combined] = identifier[sz] . identifier[totalsize]
identifier[m] += literal[string] . identifier[format] ( identifier[covered] , identifier[queries_combined] )
identifier[m] += literal[string] . identifier[format] ( identifier[covered] * literal[int] / identifier[queries_combined] )
identifier[logfile] = identifier[blastfile] + literal[string]
identifier[fw] = identifier[open] ( identifier[logfile] , literal[string] )
keyword[for] identifier[f] keyword[in] ( identifier[sys] . identifier[stderr] , identifier[fw] ):
identifier[print] ( identifier[m] , identifier[file] = identifier[f] )
identifier[fw] . identifier[close] ()
keyword[if] identifier[opts] . identifier[ids] :
identifier[filename] = identifier[opts] . identifier[ids]
identifier[fw] = identifier[must_open] ( identifier[filename] , literal[string] )
keyword[for] identifier[id] keyword[in] identifier[valid] :
identifier[print] ( identifier[id] , identifier[file] = identifier[fw] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[cutoff_message] , identifier[filename] ))
identifier[outfile] = identifier[opts] . identifier[outfile]
keyword[if] keyword[not] identifier[outfile] :
keyword[return]
identifier[fw] = identifier[must_open] ( identifier[outfile] , literal[string] )
identifier[blast] = identifier[Blast] ( identifier[blastfile] )
keyword[for] identifier[b] keyword[in] identifier[blast] :
identifier[query] =( identifier[b] . identifier[query] , identifier[b] . identifier[subject] ) keyword[if] identifier[qspair] keyword[else] identifier[b] . identifier[query]
keyword[if] identifier[query] keyword[in] identifier[valid] :
identifier[print] ( identifier[b] , identifier[file] = identifier[fw] ) | def covfilter(args):
"""
%prog covfilter blastfile fastafile
Fastafile is used to get the sizes of the queries. Two filters can be
applied, the id% and cov%.
"""
from jcvi.algorithms.supermap import supermap
from jcvi.utils.range import range_union
allowed_iterby = ('query', 'query_sbjct')
p = OptionParser(covfilter.__doc__)
p.set_align(pctid=95, pctcov=50)
p.add_option('--scov', default=False, action='store_true', help='Subject coverage instead of query [default: %default]')
p.add_option('--supermap', action='store_true', help='Use supermap instead of union')
p.add_option('--ids', dest='ids', default=None, help='Print out the ids that satisfy [default: %default]')
p.add_option('--list', dest='list', default=False, action='store_true', help='List the id% and cov% per gene [default: %default]')
p.add_option('--iterby', dest='iterby', default='query', choices=allowed_iterby, help='Choose how to iterate through BLAST [default: %default]')
p.set_outfile(outfile=None)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(blastfile, fastafile) = args
pctid = opts.pctid
pctcov = opts.pctcov
union = not opts.supermap
scov = opts.scov
sz = Sizes(fastafile)
sizes = sz.mapping
iterby = opts.iterby
qspair = iterby == 'query_sbjct'
if not union:
querysupermap = blastfile + '.query.supermap'
if not op.exists(querysupermap):
supermap(blastfile, filter='query') # depends on [control=['if'], data=[]]
blastfile = querysupermap # depends on [control=['if'], data=[]]
assert op.exists(blastfile)
covered = 0
mismatches = 0
gaps = 0
alignlen = 0
queries = set()
valid = set()
blast = BlastSlow(blastfile)
iterator = blast.iter_hits_pair if qspair else blast.iter_hits
covidstore = {}
for (query, blines) in iterator():
blines = list(blines)
queries.add(query)
# per gene report
this_covered = 0
this_alignlen = 0
this_mismatches = 0
this_gaps = 0
this_identity = 0
ranges = []
for b in blines:
if scov:
(s, start, stop) = (b.subject, b.sstart, b.sstop) # depends on [control=['if'], data=[]]
else:
(s, start, stop) = (b.query, b.qstart, b.qstop)
cov_id = s
if b.pctid < pctid:
continue # depends on [control=['if'], data=[]]
if start > stop:
(start, stop) = (stop, start) # depends on [control=['if'], data=['start', 'stop']]
this_covered += stop - start + 1
this_alignlen += b.hitlen
this_mismatches += b.nmismatch
this_gaps += b.ngaps
ranges.append(('1', start, stop)) # depends on [control=['for'], data=['b']]
if ranges:
this_identity = 100.0 - (this_mismatches + this_gaps) * 100.0 / this_alignlen # depends on [control=['if'], data=[]]
if union:
this_covered = range_union(ranges) # depends on [control=['if'], data=[]]
this_coverage = this_covered * 100.0 / sizes[cov_id]
covidstore[query] = (this_identity, this_coverage)
if this_identity >= pctid and this_coverage >= pctcov:
valid.add(query) # depends on [control=['if'], data=[]]
covered += this_covered
mismatches += this_mismatches
gaps += this_gaps
alignlen += this_alignlen # depends on [control=['for'], data=[]]
if opts.list:
if qspair:
allpairs = defaultdict(list)
for (q, s) in covidstore:
allpairs[q].append((q, s))
allpairs[s].append((q, s)) # depends on [control=['for'], data=[]]
for (id, size) in sz.iter_sizes():
if id not in allpairs:
print('\t'.join((id, 'na', '0', '0'))) # depends on [control=['if'], data=['id']]
else:
for qs in allpairs[id]:
(this_identity, this_coverage) = covidstore[qs]
print('{0}\t{1:.1f}\t{2:.1f}'.format('\t'.join(qs), this_identity, this_coverage)) # depends on [control=['for'], data=['qs']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (query, size) in sz.iter_sizes():
(this_identity, this_coverage) = covidstore.get(query, (0, 0))
print('{0}\t{1:.1f}\t{2:.1f}'.format(query, this_identity, this_coverage)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
mapped_count = len(queries)
valid_count = len(valid)
cutoff_message = '(id={0.pctid}% cov={0.pctcov}%)'.format(opts)
m = 'Identity: {0} mismatches, {1} gaps, {2} alignlen\n'.format(mismatches, gaps, alignlen)
total = len(sizes.keys())
m += 'Total mapped: {0} ({1:.1f}% of {2})\n'.format(mapped_count, mapped_count * 100.0 / total, total)
m += 'Total valid {0}: {1} ({2:.1f}% of {3})\n'.format(cutoff_message, valid_count, valid_count * 100.0 / total, total)
m += 'Average id = {0:.2f}%\n'.format(100 - (mismatches + gaps) * 100.0 / alignlen)
queries_combined = sz.totalsize
m += 'Coverage: {0} covered, {1} total\n'.format(covered, queries_combined)
m += 'Average coverage = {0:.2f}%'.format(covered * 100.0 / queries_combined)
logfile = blastfile + '.covfilter.log'
fw = open(logfile, 'w')
for f in (sys.stderr, fw):
print(m, file=f) # depends on [control=['for'], data=['f']]
fw.close()
if opts.ids:
filename = opts.ids
fw = must_open(filename, 'w')
for id in valid:
print(id, file=fw) # depends on [control=['for'], data=['id']]
logging.debug('Queries beyond cutoffs {0} written to `{1}`.'.format(cutoff_message, filename)) # depends on [control=['if'], data=[]]
outfile = opts.outfile
if not outfile:
return # depends on [control=['if'], data=[]]
fw = must_open(outfile, 'w')
blast = Blast(blastfile)
for b in blast:
query = (b.query, b.subject) if qspair else b.query
if query in valid:
print(b, file=fw) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['b']] |
def geodetic2aer(lat: float, lon: float, h: float,
lat0: float, lon0: float, h0: float,
ell=None, deg: bool = True) -> Tuple[float, float, float]:
"""
gives azimuth, elevation and slant range from an Observer to a Point with geodetic coordinates.
Parameters
----------
lat : float or numpy.ndarray of float
target geodetic latitude
lon : float or numpy.ndarray of float
target geodetic longitude
h : float or numpy.ndarray of float
target altitude above geodetic ellipsoid (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
az : float or numpy.ndarray of float
azimuth
el : float or numpy.ndarray of float
elevation
srange : float or numpy.ndarray of float
slant range [meters]
"""
e, n, u = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
return enu2aer(e, n, u, deg=deg) | def function[geodetic2aer, parameter[lat, lon, h, lat0, lon0, h0, ell, deg]]:
constant[
gives azimuth, elevation and slant range from an Observer to a Point with geodetic coordinates.
Parameters
----------
lat : float or numpy.ndarray of float
target geodetic latitude
lon : float or numpy.ndarray of float
target geodetic longitude
h : float or numpy.ndarray of float
target altitude above geodetic ellipsoid (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
az : float or numpy.ndarray of float
azimuth
el : float or numpy.ndarray of float
elevation
srange : float or numpy.ndarray of float
slant range [meters]
]
<ast.Tuple object at 0x7da1b15e8760> assign[=] call[name[geodetic2enu], parameter[name[lat], name[lon], name[h], name[lat0], name[lon0], name[h0], name[ell]]]
return[call[name[enu2aer], parameter[name[e], name[n], name[u]]]] | keyword[def] identifier[geodetic2aer] ( identifier[lat] : identifier[float] , identifier[lon] : identifier[float] , identifier[h] : identifier[float] ,
identifier[lat0] : identifier[float] , identifier[lon0] : identifier[float] , identifier[h0] : identifier[float] ,
identifier[ell] = keyword[None] , identifier[deg] : identifier[bool] = keyword[True] )-> identifier[Tuple] [ identifier[float] , identifier[float] , identifier[float] ]:
literal[string]
identifier[e] , identifier[n] , identifier[u] = identifier[geodetic2enu] ( identifier[lat] , identifier[lon] , identifier[h] , identifier[lat0] , identifier[lon0] , identifier[h0] , identifier[ell] , identifier[deg] = identifier[deg] )
keyword[return] identifier[enu2aer] ( identifier[e] , identifier[n] , identifier[u] , identifier[deg] = identifier[deg] ) | def geodetic2aer(lat: float, lon: float, h: float, lat0: float, lon0: float, h0: float, ell=None, deg: bool=True) -> Tuple[float, float, float]:
"""
gives azimuth, elevation and slant range from an Observer to a Point with geodetic coordinates.
Parameters
----------
lat : float or numpy.ndarray of float
target geodetic latitude
lon : float or numpy.ndarray of float
target geodetic longitude
h : float or numpy.ndarray of float
target altitude above geodetic ellipsoid (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
az : float or numpy.ndarray of float
azimuth
el : float or numpy.ndarray of float
elevation
srange : float or numpy.ndarray of float
slant range [meters]
"""
(e, n, u) = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
return enu2aer(e, n, u, deg=deg) |
def get_logs(self):
"""
print logs from pod
:return: str or None
"""
try:
api_response = self.core_api.read_namespaced_pod_log(self.name, self.namespace)
logger.debug("Logs from pod: %s in namespace: %s", self.name, self.namespace)
for line in api_response.split('\n'):
logger.debug(line)
return api_response
except ApiException as e:
# no reason to throw exception when logs cannot be obtain, just notify user
logger.debug("Cannot get pod logs because of "
"exception during calling Kubernetes API %s\n", e)
return None | def function[get_logs, parameter[self]]:
constant[
print logs from pod
:return: str or None
]
<ast.Try object at 0x7da1b1251600>
return[constant[None]] | keyword[def] identifier[get_logs] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[api_response] = identifier[self] . identifier[core_api] . identifier[read_namespaced_pod_log] ( identifier[self] . identifier[name] , identifier[self] . identifier[namespace] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] , identifier[self] . identifier[namespace] )
keyword[for] identifier[line] keyword[in] identifier[api_response] . identifier[split] ( literal[string] ):
identifier[logger] . identifier[debug] ( identifier[line] )
keyword[return] identifier[api_response]
keyword[except] identifier[ApiException] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[e] )
keyword[return] keyword[None] | def get_logs(self):
"""
print logs from pod
:return: str or None
"""
try:
api_response = self.core_api.read_namespaced_pod_log(self.name, self.namespace)
logger.debug('Logs from pod: %s in namespace: %s', self.name, self.namespace)
for line in api_response.split('\n'):
logger.debug(line) # depends on [control=['for'], data=['line']]
return api_response # depends on [control=['try'], data=[]]
except ApiException as e:
# no reason to throw exception when logs cannot be obtain, just notify user
logger.debug('Cannot get pod logs because of exception during calling Kubernetes API %s\n', e) # depends on [control=['except'], data=['e']]
return None |
def _init_from_dt(self, data, nthread):
"""
Initialize data from a datatable Frame.
"""
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(
ptrs, feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
nthread))
self.handle = handle | def function[_init_from_dt, parameter[self, data, nthread]]:
constant[
Initialize data from a datatable Frame.
]
variable[ptrs] assign[=] call[binary_operation[name[ctypes].c_void_p * name[data].ncols], parameter[]]
if <ast.BoolOp object at 0x7da1b1e9ab90> begin[:]
for taget[name[icol]] in starred[call[name[range], parameter[name[data].ncols]]] begin[:]
variable[col] assign[=] call[name[data].internal.column, parameter[name[icol]]]
variable[ptr] assign[=] name[col].data_pointer
call[name[ptrs]][name[icol]] assign[=] call[name[ctypes].c_void_p, parameter[name[ptr]]]
variable[feature_type_strings] assign[=] call[binary_operation[name[ctypes].c_char_p * name[data].ncols], parameter[]]
for taget[name[icol]] in starred[call[name[range], parameter[name[data].ncols]]] begin[:]
call[name[feature_type_strings]][name[icol]] assign[=] call[name[ctypes].c_char_p, parameter[call[call[name[data].stypes][name[icol]].name.encode, parameter[constant[utf-8]]]]]
variable[handle] assign[=] call[name[ctypes].c_void_p, parameter[]]
call[name[_check_call], parameter[call[name[_LIB].XGDMatrixCreateFromDT, parameter[name[ptrs], name[feature_type_strings], call[name[c_bst_ulong], parameter[call[name[data].shape][constant[0]]]], call[name[c_bst_ulong], parameter[call[name[data].shape][constant[1]]]], call[name[ctypes].byref, parameter[name[handle]]], name[nthread]]]]]
name[self].handle assign[=] name[handle] | keyword[def] identifier[_init_from_dt] ( identifier[self] , identifier[data] , identifier[nthread] ):
literal[string]
identifier[ptrs] =( identifier[ctypes] . identifier[c_void_p] * identifier[data] . identifier[ncols] )()
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[data] . identifier[internal] , literal[string] ):
keyword[for] identifier[icol] keyword[in] identifier[range] ( identifier[data] . identifier[ncols] ):
identifier[col] = identifier[data] . identifier[internal] . identifier[column] ( identifier[icol] )
identifier[ptr] = identifier[col] . identifier[data_pointer]
identifier[ptrs] [ identifier[icol] ]= identifier[ctypes] . identifier[c_void_p] ( identifier[ptr] )
keyword[else] :
keyword[from] identifier[datatable] . identifier[internal] keyword[import] identifier[frame_column_data_r]
keyword[for] identifier[icol] keyword[in] identifier[range] ( identifier[data] . identifier[ncols] ):
identifier[ptrs] [ identifier[icol] ]= identifier[frame_column_data_r] ( identifier[data] , identifier[icol] )
identifier[feature_type_strings] =( identifier[ctypes] . identifier[c_char_p] * identifier[data] . identifier[ncols] )()
keyword[for] identifier[icol] keyword[in] identifier[range] ( identifier[data] . identifier[ncols] ):
identifier[feature_type_strings] [ identifier[icol] ]= identifier[ctypes] . identifier[c_char_p] ( identifier[data] . identifier[stypes] [ identifier[icol] ]. identifier[name] . identifier[encode] ( literal[string] ))
identifier[handle] = identifier[ctypes] . identifier[c_void_p] ()
identifier[_check_call] ( identifier[_LIB] . identifier[XGDMatrixCreateFromDT] (
identifier[ptrs] , identifier[feature_type_strings] ,
identifier[c_bst_ulong] ( identifier[data] . identifier[shape] [ literal[int] ]),
identifier[c_bst_ulong] ( identifier[data] . identifier[shape] [ literal[int] ]),
identifier[ctypes] . identifier[byref] ( identifier[handle] ),
identifier[nthread] ))
identifier[self] . identifier[handle] = identifier[handle] | def _init_from_dt(self, data, nthread):
"""
Initialize data from a datatable Frame.
"""
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, 'internal') and hasattr(data.internal, 'column'):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr) # depends on [control=['for'], data=['icol']] # depends on [control=['if'], data=[]]
else:
# datatable<=0.8.0
from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol) # depends on [control=['for'], data=['icol']]
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8')) # depends on [control=['for'], data=['icol']]
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(ptrs, feature_type_strings, c_bst_ulong(data.shape[0]), c_bst_ulong(data.shape[1]), ctypes.byref(handle), nthread))
self.handle = handle |
def check_method_requirements(func):
"""Check methods requirements
:param callable func: the function to decorate
:return callable: the wrapped function
"""
@wraps(func)
def wrapper(*args, **kwargs):
error_message = "You must provide {error_field} in {cls} to get access to the default {method} method"
error_data = {'cls': args[0].__class__.__name__, 'method': request.method.lower()}
if request.method != 'DELETE':
if not hasattr(args[0], 'schema'):
error_data.update({'error_field': 'a schema class'})
raise Exception(error_message.format(**error_data))
return func(*args, **kwargs)
return wrapper | def function[check_method_requirements, parameter[func]]:
constant[Check methods requirements
:param callable func: the function to decorate
:return callable: the wrapped function
]
def function[wrapper, parameter[]]:
variable[error_message] assign[=] constant[You must provide {error_field} in {cls} to get access to the default {method} method]
variable[error_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1633880>, <ast.Constant object at 0x7da1b1633670>], [<ast.Attribute object at 0x7da1b1631ab0>, <ast.Call object at 0x7da1b1631c00>]]
if compare[name[request].method not_equal[!=] constant[DELETE]] begin[:]
if <ast.UnaryOp object at 0x7da1b1630940> begin[:]
call[name[error_data].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1632350>], [<ast.Constant object at 0x7da1b1633010>]]]]
<ast.Raise object at 0x7da1b16311b0>
return[call[name[func], parameter[<ast.Starred object at 0x7da1b1631d50>]]]
return[name[wrapper]] | keyword[def] identifier[check_method_requirements] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[error_message] = literal[string]
identifier[error_data] ={ literal[string] : identifier[args] [ literal[int] ]. identifier[__class__] . identifier[__name__] , literal[string] : identifier[request] . identifier[method] . identifier[lower] ()}
keyword[if] identifier[request] . identifier[method] != literal[string] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[args] [ literal[int] ], literal[string] ):
identifier[error_data] . identifier[update] ({ literal[string] : literal[string] })
keyword[raise] identifier[Exception] ( identifier[error_message] . identifier[format] (** identifier[error_data] ))
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def check_method_requirements(func):
"""Check methods requirements
:param callable func: the function to decorate
:return callable: the wrapped function
"""
@wraps(func)
def wrapper(*args, **kwargs):
error_message = 'You must provide {error_field} in {cls} to get access to the default {method} method'
error_data = {'cls': args[0].__class__.__name__, 'method': request.method.lower()}
if request.method != 'DELETE':
if not hasattr(args[0], 'schema'):
error_data.update({'error_field': 'a schema class'})
raise Exception(error_message.format(**error_data)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return func(*args, **kwargs)
return wrapper |
def pack(self):
"""
Pack the structure data into a string
"""
data = []
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
data.append(getattr(self, field))
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
v = getattr(self, field, vtype())
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
else: # multiple struct
values = getattr(self, field, [])
for j in range(0, num):
try:
v = values[j]
except:
v = vtype()
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
elif vlen == 1:
data.append(getattr(self, field))
else:
v = getattr(self, field)
v = v[:vlen] + [0] * (vlen - len(v))
data.extend(v)
return struct.pack(self.__fmt__, *data) | def function[pack, parameter[self]]:
constant[
Pack the structure data into a string
]
variable[data] assign[=] list[[]]
for taget[name[field]] in starred[name[self].__fields__] begin[:]
<ast.Tuple object at 0x7da18f09cbe0> assign[=] call[name[self].__fields_types__][name[field]]
if compare[name[vtype] equal[==] constant[char]] begin[:]
call[name[data].append, parameter[call[name[getattr], parameter[name[self], name[field]]]]]
return[call[name[struct].pack, parameter[name[self].__fmt__, <ast.Starred object at 0x7da18f09c880>]]] | keyword[def] identifier[pack] ( identifier[self] ):
literal[string]
identifier[data] =[]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[__fields__] :
( identifier[vtype] , identifier[vlen] )= identifier[self] . identifier[__fields_types__] [ identifier[field] ]
keyword[if] identifier[vtype] == literal[string] :
identifier[data] . identifier[append] ( identifier[getattr] ( identifier[self] , identifier[field] ))
keyword[elif] identifier[isinstance] ( identifier[vtype] , identifier[CStructMeta] ):
identifier[num] = identifier[int] ( identifier[vlen] / identifier[vtype] . identifier[size] )
keyword[if] identifier[num] == literal[int] :
identifier[v] = identifier[getattr] ( identifier[self] , identifier[field] , identifier[vtype] ())
identifier[v] = identifier[v] . identifier[pack] ()
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
identifier[v] =([ identifier[bytes] ([ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[v] ])
identifier[data] . identifier[extend] ( identifier[v] )
keyword[else] :
identifier[values] = identifier[getattr] ( identifier[self] , identifier[field] ,[])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[num] ):
keyword[try] :
identifier[v] = identifier[values] [ identifier[j] ]
keyword[except] :
identifier[v] = identifier[vtype] ()
identifier[v] = identifier[v] . identifier[pack] ()
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
identifier[v] =([ identifier[bytes] ([ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[v] ])
identifier[data] . identifier[extend] ( identifier[v] )
keyword[elif] identifier[vlen] == literal[int] :
identifier[data] . identifier[append] ( identifier[getattr] ( identifier[self] , identifier[field] ))
keyword[else] :
identifier[v] = identifier[getattr] ( identifier[self] , identifier[field] )
identifier[v] = identifier[v] [: identifier[vlen] ]+[ literal[int] ]*( identifier[vlen] - identifier[len] ( identifier[v] ))
identifier[data] . identifier[extend] ( identifier[v] )
keyword[return] identifier[struct] . identifier[pack] ( identifier[self] . identifier[__fmt__] ,* identifier[data] ) | def pack(self):
"""
Pack the structure data into a string
"""
data = []
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
data.append(getattr(self, field)) # depends on [control=['if'], data=[]]
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
v = getattr(self, field, vtype())
v = v.pack()
if sys.version_info >= (3, 0):
v = [bytes([x]) for x in v] # depends on [control=['if'], data=[]]
data.extend(v) # depends on [control=['if'], data=[]]
else: # multiple struct
values = getattr(self, field, [])
for j in range(0, num):
try:
v = values[j] # depends on [control=['try'], data=[]]
except:
v = vtype() # depends on [control=['except'], data=[]]
v = v.pack()
if sys.version_info >= (3, 0):
v = [bytes([x]) for x in v] # depends on [control=['if'], data=[]]
data.extend(v) # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
elif vlen == 1:
data.append(getattr(self, field)) # depends on [control=['if'], data=[]]
else:
v = getattr(self, field)
v = v[:vlen] + [0] * (vlen - len(v))
data.extend(v) # depends on [control=['for'], data=['field']]
return struct.pack(self.__fmt__, *data) |
def build_managers(app, conf):
    """
    Build a dictionary mapping manager names to job manager objects.

    Manager definitions are read, in order of precedence, from an
    external ini file (``job_managers_config``), an inline ``managers``
    mapping, a single inline ``manager`` entry, or finally a default
    description when none of those keys are present.
    """
    # Options from the top-level config that apply to every manager.
    shared_options = _get_default_options(conf)

    descriptions = ManagerDescriptions()
    if "job_managers_config" in conf:
        ini_path = conf.get("job_managers_config", None)
        _populate_manager_descriptions_from_ini(descriptions, ini_path)
    elif "managers" in conf:
        for name, options in conf["managers"].items():
            descriptions.add(ManagerDescription.from_dict(options, name))
    elif "manager" in conf:
        descriptions.add(ManagerDescription.from_dict(conf["manager"]))
    else:
        descriptions.add(ManagerDescription())

    available_classes = _get_managers_dict()
    managers = {}
    for name, description in descriptions.descriptions.items():
        # Per-manager options override the shared defaults.
        options = dict(shared_options)
        options.update(description.manager_options)
        manager_cls = available_classes[description.manager_type]
        managers[name] = _build_manager(manager_cls, app, name, options)
    return managers
constant[
Takes in a config file as outlined in job_managers.ini.sample and builds
a dictionary of job manager objects from them.
]
variable[default_options] assign[=] call[name[_get_default_options], parameter[name[conf]]]
variable[manager_descriptions] assign[=] call[name[ManagerDescriptions], parameter[]]
if compare[constant[job_managers_config] in name[conf]] begin[:]
variable[job_managers_config] assign[=] call[name[conf].get, parameter[constant[job_managers_config], constant[None]]]
call[name[_populate_manager_descriptions_from_ini], parameter[name[manager_descriptions], name[job_managers_config]]]
variable[manager_classes] assign[=] call[name[_get_managers_dict], parameter[]]
variable[managers] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b053a980>, <ast.Name object at 0x7da1b0539600>]]] in starred[call[name[manager_descriptions].descriptions.items, parameter[]]] begin[:]
variable[manager_options] assign[=] call[name[dict], parameter[name[default_options]]]
call[name[manager_options].update, parameter[name[manager_description].manager_options]]
variable[manager_class] assign[=] call[name[manager_classes]][name[manager_description].manager_type]
variable[manager] assign[=] call[name[_build_manager], parameter[name[manager_class], name[app], name[manager_name], name[manager_options]]]
call[name[managers]][name[manager_name]] assign[=] name[manager]
return[name[managers]] | keyword[def] identifier[build_managers] ( identifier[app] , identifier[conf] ):
literal[string]
identifier[default_options] = identifier[_get_default_options] ( identifier[conf] )
identifier[manager_descriptions] = identifier[ManagerDescriptions] ()
keyword[if] literal[string] keyword[in] identifier[conf] :
identifier[job_managers_config] = identifier[conf] . identifier[get] ( literal[string] , keyword[None] )
identifier[_populate_manager_descriptions_from_ini] ( identifier[manager_descriptions] , identifier[job_managers_config] )
keyword[elif] literal[string] keyword[in] identifier[conf] :
keyword[for] identifier[manager_name] , identifier[manager_options] keyword[in] identifier[conf] [ literal[string] ]. identifier[items] ():
identifier[manager_description] = identifier[ManagerDescription] . identifier[from_dict] ( identifier[manager_options] , identifier[manager_name] )
identifier[manager_descriptions] . identifier[add] ( identifier[manager_description] )
keyword[elif] literal[string] keyword[in] identifier[conf] :
identifier[manager_description] = identifier[ManagerDescription] . identifier[from_dict] ( identifier[conf] [ literal[string] ])
identifier[manager_descriptions] . identifier[add] ( identifier[manager_description] )
keyword[else] :
identifier[manager_descriptions] . identifier[add] ( identifier[ManagerDescription] ())
identifier[manager_classes] = identifier[_get_managers_dict] ()
identifier[managers] ={}
keyword[for] identifier[manager_name] , identifier[manager_description] keyword[in] identifier[manager_descriptions] . identifier[descriptions] . identifier[items] ():
identifier[manager_options] = identifier[dict] ( identifier[default_options] )
identifier[manager_options] . identifier[update] ( identifier[manager_description] . identifier[manager_options] )
identifier[manager_class] = identifier[manager_classes] [ identifier[manager_description] . identifier[manager_type] ]
identifier[manager] = identifier[_build_manager] ( identifier[manager_class] , identifier[app] , identifier[manager_name] , identifier[manager_options] )
identifier[managers] [ identifier[manager_name] ]= identifier[manager]
keyword[return] identifier[managers] | def build_managers(app, conf):
"""
Takes in a config file as outlined in job_managers.ini.sample and builds
a dictionary of job manager objects from them.
"""
# Load default options from config file that apply to all
# managers.
default_options = _get_default_options(conf)
manager_descriptions = ManagerDescriptions()
if 'job_managers_config' in conf:
job_managers_config = conf.get('job_managers_config', None)
_populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config) # depends on [control=['if'], data=['conf']]
elif 'managers' in conf:
for (manager_name, manager_options) in conf['managers'].items():
manager_description = ManagerDescription.from_dict(manager_options, manager_name)
manager_descriptions.add(manager_description) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['conf']]
elif 'manager' in conf:
manager_description = ManagerDescription.from_dict(conf['manager'])
manager_descriptions.add(manager_description) # depends on [control=['if'], data=['conf']]
else:
manager_descriptions.add(ManagerDescription())
manager_classes = _get_managers_dict()
managers = {}
for (manager_name, manager_description) in manager_descriptions.descriptions.items():
manager_options = dict(default_options)
manager_options.update(manager_description.manager_options)
manager_class = manager_classes[manager_description.manager_type]
manager = _build_manager(manager_class, app, manager_name, manager_options)
managers[manager_name] = manager # depends on [control=['for'], data=[]]
return managers |
def resolve_sound(self, sound):
    """Try to identify a sound in the data.

    Notes
    -----
    The function tries to resolve sounds to take a sound with less complex
    features in order to yield the next approximate sound class, if the
    transcription data are sound classes.
    """
    # Normalize: look up raw input in the transcription system first.
    if not isinstance(sound, Symbol):
        sound = self.system[sound]
    if sound.name in self.data:
        return self.data[sound.name]['grapheme']
    # Diphthongs/clusters fall back to their initial component sound.
    if sound.type in ('diphthong', 'cluster'):
        return self.resolve_sound(sound.from_sound)
    # Strip features that do not affect the sound-class mapping, then
    # progressively drop leading features until a known sound is found.
    ignored = ('laminality', 'ejection', 'tone')
    features = [
        part for part in sound.name.split(' ')
        if self.system._feature_values.get(part, '') not in ignored]
    while len(features) >= 4:
        candidate = self.system.get(' '.join(features))
        if candidate and candidate.name in self.data:
            return self.resolve_sound(candidate)
        features.pop(0)
    raise KeyError(":sc:resolve_sound: No sound could be found.")
constant[Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds to take a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
]
variable[sound] assign[=] <ast.IfExp object at 0x7da1affe7f70>
if compare[name[sound].name in name[self].data] begin[:]
return[call[call[name[self].data][name[sound].name]][constant[grapheme]]]
if <ast.UnaryOp object at 0x7da1affe6a70> begin[:]
if compare[name[sound].type in list[[<ast.Constant object at 0x7da1affe4880>, <ast.Constant object at 0x7da1affe6bc0>]]] begin[:]
return[call[name[self].resolve_sound, parameter[name[sound].from_sound]]]
variable[name] assign[=] <ast.ListComp object at 0x7da1affe56f0>
while compare[call[name[len], parameter[name[name]]] greater_or_equal[>=] constant[4]] begin[:]
variable[sound] assign[=] call[name[self].system.get, parameter[call[constant[ ].join, parameter[name[name]]]]]
if <ast.BoolOp object at 0x7da1affe6f50> begin[:]
return[call[name[self].resolve_sound, parameter[name[sound]]]]
call[name[name].pop, parameter[constant[0]]]
<ast.Raise object at 0x7da1affe5e10> | keyword[def] identifier[resolve_sound] ( identifier[self] , identifier[sound] ):
literal[string]
identifier[sound] = identifier[sound] keyword[if] identifier[isinstance] ( identifier[sound] , identifier[Symbol] ) keyword[else] identifier[self] . identifier[system] [ identifier[sound] ]
keyword[if] identifier[sound] . identifier[name] keyword[in] identifier[self] . identifier[data] :
keyword[return] identifier[self] . identifier[data] [ identifier[sound] . identifier[name] ][ literal[string] ]
keyword[if] keyword[not] identifier[sound] . identifier[type] == literal[string] :
keyword[if] identifier[sound] . identifier[type] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[self] . identifier[resolve_sound] ( identifier[sound] . identifier[from_sound] )
identifier[name] =[
identifier[s] keyword[for] identifier[s] keyword[in] identifier[sound] . identifier[name] . identifier[split] ( literal[string] ) keyword[if]
identifier[self] . identifier[system] . identifier[_feature_values] . identifier[get] ( identifier[s] , literal[string] ) keyword[not] keyword[in]
[ literal[string] , literal[string] , literal[string] ]]
keyword[while] identifier[len] ( identifier[name] )>= literal[int] :
identifier[sound] = identifier[self] . identifier[system] . identifier[get] ( literal[string] . identifier[join] ( identifier[name] ))
keyword[if] identifier[sound] keyword[and] identifier[sound] . identifier[name] keyword[in] identifier[self] . identifier[data] :
keyword[return] identifier[self] . identifier[resolve_sound] ( identifier[sound] )
identifier[name] . identifier[pop] ( literal[int] )
keyword[raise] identifier[KeyError] ( literal[string] ) | def resolve_sound(self, sound):
"""Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds to take a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
"""
sound = sound if isinstance(sound, Symbol) else self.system[sound]
if sound.name in self.data:
return self.data[sound.name]['grapheme'] # depends on [control=['if'], data=[]]
if not sound.type == 'unknownsound':
if sound.type in ['diphthong', 'cluster']:
return self.resolve_sound(sound.from_sound) # depends on [control=['if'], data=[]]
name = [s for s in sound.name.split(' ') if self.system._feature_values.get(s, '') not in ['laminality', 'ejection', 'tone']]
while len(name) >= 4:
sound = self.system.get(' '.join(name))
if sound and sound.name in self.data:
return self.resolve_sound(sound) # depends on [control=['if'], data=[]]
name.pop(0) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
raise KeyError(':sc:resolve_sound: No sound could be found.') |
def collide(cls, data, params):
    """
    Calculate boundaries of geometry object

    Uses Strategy: the actual repositioning of overlapping intervals is
    delegated to ``cls.strategy``, applied per group of rows sharing an
    ``xmin``.

    Parameters
    ----------
    data : pandas.DataFrame
        Geometry data; must contain ``xmin``, ``xmax`` and ``group``
        columns and either ``y`` or ``ymax``.
    params : dict
        Position parameters. ``width`` is filled in from the setup step
        when missing; a truthy ``reverse`` flips the stacking order.

    Returns
    -------
    pandas.DataFrame
        Data with positions adjusted by the collision strategy.

    Raises
    ------
    PlotnineError
        If the data has neither ``y`` nor ``ymax``.
    """
    xminmax = ['xmin', 'xmax']
    data, width = cls._collide_setup(data, params)
    # Fall back to the width computed by the setup step when the caller
    # did not supply one.
    if params.get('width', None) is None:
        params['width'] = width
    # Reorder by x position then on group, relying on stable sort to
    # preserve existing ordering. The default stacking order reverses
    # the group in order to match the legend order.
    if params and 'reverse' in params and params['reverse']:
        idx = data.sort_values(
            ['xmin', 'group'], kind='mergesort').index
    else:
        # Negate group so the ascending mergesort yields descending
        # group order within each xmin; the helper column is dropped.
        data['-group'] = -data['group']
        idx = data.sort_values(
            ['xmin', '-group'], kind='mergesort').index
        del data['-group']
    data = data.loc[idx, :]
    # Check for overlap: warn when the distinct interval endpoints are
    # not monotonically non-decreasing (within a small float tolerance).
    intervals = data[xminmax].drop_duplicates().values.flatten()
    intervals = intervals[~np.isnan(intervals)]
    if (len(np.unique(intervals)) > 1 and
            any(np.diff(intervals - intervals.mean()) < -1e-6)):
        msg = "{} requires non-overlapping x intervals"
        warn(msg.format(cls.__name__), PlotnineWarning)
    if 'ymax' in data:
        data = groupby_apply(data, 'xmin', cls.strategy, params)
    elif 'y' in data:
        # No ymax column: mirror y into ymax so the strategy can
        # operate, then copy the adjusted values back into y.
        data['ymax'] = data['y']
        data = groupby_apply(data, 'xmin', cls.strategy, params)
        data['y'] = data['ymax']
    else:
        raise PlotnineError('Neither y nor ymax defined')
    return data
constant[
Calculate boundaries of geometry object
Uses Strategy
]
variable[xminmax] assign[=] list[[<ast.Constant object at 0x7da2044c35b0>, <ast.Constant object at 0x7da2044c17e0>]]
<ast.Tuple object at 0x7da2044c2110> assign[=] call[name[cls]._collide_setup, parameter[name[data], name[params]]]
if compare[call[name[params].get, parameter[constant[width], constant[None]]] is constant[None]] begin[:]
call[name[params]][constant[width]] assign[=] name[width]
if <ast.BoolOp object at 0x7da2044c3520> begin[:]
variable[idx] assign[=] call[name[data].sort_values, parameter[list[[<ast.Constant object at 0x7da18ede50c0>, <ast.Constant object at 0x7da18ede6410>]]]].index
variable[data] assign[=] call[name[data].loc][tuple[[<ast.Name object at 0x7da20c991f60>, <ast.Slice object at 0x7da20c9925f0>]]]
variable[intervals] assign[=] call[call[call[name[data]][name[xminmax]].drop_duplicates, parameter[]].values.flatten, parameter[]]
variable[intervals] assign[=] call[name[intervals]][<ast.UnaryOp object at 0x7da20c993310>]
if <ast.BoolOp object at 0x7da20c990760> begin[:]
variable[msg] assign[=] constant[{} requires non-overlapping x intervals]
call[name[warn], parameter[call[name[msg].format, parameter[name[cls].__name__]], name[PlotnineWarning]]]
if compare[constant[ymax] in name[data]] begin[:]
variable[data] assign[=] call[name[groupby_apply], parameter[name[data], constant[xmin], name[cls].strategy, name[params]]]
return[name[data]] | keyword[def] identifier[collide] ( identifier[cls] , identifier[data] , identifier[params] ):
literal[string]
identifier[xminmax] =[ literal[string] , literal[string] ]
identifier[data] , identifier[width] = identifier[cls] . identifier[_collide_setup] ( identifier[data] , identifier[params] )
keyword[if] identifier[params] . identifier[get] ( literal[string] , keyword[None] ) keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[width]
keyword[if] identifier[params] keyword[and] literal[string] keyword[in] identifier[params] keyword[and] identifier[params] [ literal[string] ]:
identifier[idx] = identifier[data] . identifier[sort_values] (
[ literal[string] , literal[string] ], identifier[kind] = literal[string] ). identifier[index]
keyword[else] :
identifier[data] [ literal[string] ]=- identifier[data] [ literal[string] ]
identifier[idx] = identifier[data] . identifier[sort_values] (
[ literal[string] , literal[string] ], identifier[kind] = literal[string] ). identifier[index]
keyword[del] identifier[data] [ literal[string] ]
identifier[data] = identifier[data] . identifier[loc] [ identifier[idx] ,:]
identifier[intervals] = identifier[data] [ identifier[xminmax] ]. identifier[drop_duplicates] (). identifier[values] . identifier[flatten] ()
identifier[intervals] = identifier[intervals] [~ identifier[np] . identifier[isnan] ( identifier[intervals] )]
keyword[if] ( identifier[len] ( identifier[np] . identifier[unique] ( identifier[intervals] ))> literal[int] keyword[and]
identifier[any] ( identifier[np] . identifier[diff] ( identifier[intervals] - identifier[intervals] . identifier[mean] ())<- literal[int] )):
identifier[msg] = literal[string]
identifier[warn] ( identifier[msg] . identifier[format] ( identifier[cls] . identifier[__name__] ), identifier[PlotnineWarning] )
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[data] = identifier[groupby_apply] ( identifier[data] , literal[string] , identifier[cls] . identifier[strategy] , identifier[params] )
keyword[elif] literal[string] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= identifier[data] [ literal[string] ]
identifier[data] = identifier[groupby_apply] ( identifier[data] , literal[string] , identifier[cls] . identifier[strategy] , identifier[params] )
identifier[data] [ literal[string] ]= identifier[data] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[PlotnineError] ( literal[string] )
keyword[return] identifier[data] | def collide(cls, data, params):
"""
Calculate boundaries of geometry object
Uses Strategy
"""
xminmax = ['xmin', 'xmax']
(data, width) = cls._collide_setup(data, params)
if params.get('width', None) is None:
params['width'] = width # depends on [control=['if'], data=[]]
# Reorder by x position then on group, relying on stable sort to
# preserve existing ordering. The default stacking order reverses
# the group in order to match the legend order.
if params and 'reverse' in params and params['reverse']:
idx = data.sort_values(['xmin', 'group'], kind='mergesort').index # depends on [control=['if'], data=[]]
else:
data['-group'] = -data['group']
idx = data.sort_values(['xmin', '-group'], kind='mergesort').index
del data['-group']
data = data.loc[idx, :]
# Check for overlap
intervals = data[xminmax].drop_duplicates().values.flatten()
intervals = intervals[~np.isnan(intervals)]
if len(np.unique(intervals)) > 1 and any(np.diff(intervals - intervals.mean()) < -1e-06):
msg = '{} requires non-overlapping x intervals'
warn(msg.format(cls.__name__), PlotnineWarning) # depends on [control=['if'], data=[]]
if 'ymax' in data:
data = groupby_apply(data, 'xmin', cls.strategy, params) # depends on [control=['if'], data=['data']]
elif 'y' in data:
data['ymax'] = data['y']
data = groupby_apply(data, 'xmin', cls.strategy, params)
data['y'] = data['ymax'] # depends on [control=['if'], data=['data']]
else:
raise PlotnineError('Neither y nor ymax defined')
return data |
def validate_and_convert_nexson(nexson, output_version, allow_invalid, **kwargs):
    """Runs the nexson validator and returns a converted 4 object:
        nexson, annotation, validation_log, nexson_adaptor
    `nexson` is the nexson dict.
    `output_version` is the version of nexson syntax to be used after validation.
    if `allow_invalid` is False, and the nexson validation has errors, then
        a GitWorkflowError will be generated before conversion.
    """
    try:
        if TRACE_FILES:
            _write_to_next_free('input', nexson)
        annotation, validation_log, nexson_adaptor = ot_validate(nexson, **kwargs)
        if TRACE_FILES:
            _write_to_next_free('annotation', annotation)
    # Catch Exception rather than a bare except so KeyboardInterrupt and
    # SystemExit still propagate instead of being masked as a workflow error.
    except Exception:
        msg = 'exception in ot_validate: ' + traceback.format_exc()
        raise GitWorkflowError(msg)
    if (not allow_invalid) and validation_log.has_error():
        raise GitWorkflowError('ot_validation failed: ' + json.dumps(annotation))
    nexson = convert_nexson_format(nexson, output_version)
    if TRACE_FILES:
        _write_to_next_free('converted', nexson)
    return nexson, annotation, validation_log, nexson_adaptor
constant[Runs the nexson validator and returns a converted 4 object:
nexson, annotation, validation_log, nexson_adaptor
`nexson` is the nexson dict.
`output_version` is the version of nexson syntax to be used after validation.
if `allow_invalid` is False, and the nexson validation has errors, then
a GitWorkflowError will be generated before conversion.
]
<ast.Try object at 0x7da1b24916f0>
if <ast.BoolOp object at 0x7da1b2490fd0> begin[:]
<ast.Raise object at 0x7da1b2490370>
variable[nexson] assign[=] call[name[convert_nexson_format], parameter[name[nexson], name[output_version]]]
if name[TRACE_FILES] begin[:]
call[name[_write_to_next_free], parameter[constant[converted], name[nexson]]]
return[tuple[[<ast.Name object at 0x7da1b2491ea0>, <ast.Name object at 0x7da1b2491f00>, <ast.Name object at 0x7da1b2491cc0>, <ast.Name object at 0x7da1b2491e40>]]] | keyword[def] identifier[validate_and_convert_nexson] ( identifier[nexson] , identifier[output_version] , identifier[allow_invalid] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[if] identifier[TRACE_FILES] :
identifier[_write_to_next_free] ( literal[string] , identifier[nexson] )
identifier[annotation] , identifier[validation_log] , identifier[nexson_adaptor] = identifier[ot_validate] ( identifier[nexson] ,** identifier[kwargs] )
keyword[if] identifier[TRACE_FILES] :
identifier[_write_to_next_free] ( literal[string] , identifier[annotation] )
keyword[except] :
identifier[msg] = literal[string] + identifier[traceback] . identifier[format_exc] ()
keyword[raise] identifier[GitWorkflowError] ( identifier[msg] )
keyword[if] ( keyword[not] identifier[allow_invalid] ) keyword[and] identifier[validation_log] . identifier[has_error] ():
keyword[raise] identifier[GitWorkflowError] ( literal[string] + identifier[json] . identifier[dumps] ( identifier[annotation] ))
identifier[nexson] = identifier[convert_nexson_format] ( identifier[nexson] , identifier[output_version] )
keyword[if] identifier[TRACE_FILES] :
identifier[_write_to_next_free] ( literal[string] , identifier[nexson] )
keyword[return] identifier[nexson] , identifier[annotation] , identifier[validation_log] , identifier[nexson_adaptor] | def validate_and_convert_nexson(nexson, output_version, allow_invalid, **kwargs):
"""Runs the nexson validator and returns a converted 4 object:
nexson, annotation, validation_log, nexson_adaptor
`nexson` is the nexson dict.
`output_version` is the version of nexson syntax to be used after validation.
if `allow_invalid` is False, and the nexson validation has errors, then
a GitWorkflowError will be generated before conversion.
"""
try:
if TRACE_FILES:
_write_to_next_free('input', nexson) # depends on [control=['if'], data=[]]
(annotation, validation_log, nexson_adaptor) = ot_validate(nexson, **kwargs)
if TRACE_FILES:
_write_to_next_free('annotation', annotation) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
msg = 'exception in ot_validate: ' + traceback.format_exc()
raise GitWorkflowError(msg) # depends on [control=['except'], data=[]]
if not allow_invalid and validation_log.has_error():
raise GitWorkflowError('ot_validation failed: ' + json.dumps(annotation)) # depends on [control=['if'], data=[]]
nexson = convert_nexson_format(nexson, output_version)
if TRACE_FILES:
_write_to_next_free('converted', nexson) # depends on [control=['if'], data=[]]
return (nexson, annotation, validation_log, nexson_adaptor) |
def title(value, failure_string='N/A'):
    """
    Convert a string into titlecase.

    Lifted from Django. Unlike plain ``str.title``, letters that follow
    an apostrophe inside a word or a digit are kept lowercase
    (``"don't"`` -> ``"Don't"``, ``"1st"`` -> ``"1st"``).

    :param value: the text to convert; non-string input (e.g. ``None``)
        yields the failure string.
    :param failure_string: returned when conversion fails or the result
        is empty (default ``'N/A'``).
    """
    try:
        value = value.lower()
        # str.title() uppercases the letter after an apostrophe
        # ("don't" -> "Don'T"); undo that. Raw strings keep the regex
        # escapes valid (non-raw "\d" is a SyntaxWarning on 3.12+).
        t = re.sub(r"([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
        # Likewise undo the uppercase letter after a digit ("1St" -> "1st").
        result = re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
        if not result:
            return failure_string
        return result
    # Exception (not a bare except) so KeyboardInterrupt/SystemExit still
    # propagate; non-string input falls through here via AttributeError.
    except Exception:
        return failure_string
return failure_string | def function[title, parameter[value, failure_string]]:
constant[
Converts a string into titlecase.
Lifted from Django.
]
<ast.Try object at 0x7da1b0a4f850> | keyword[def] identifier[title] ( identifier[value] , identifier[failure_string] = literal[string] ):
literal[string]
keyword[try] :
identifier[value] = identifier[value] . identifier[lower] ()
identifier[t] = identifier[re] . identifier[sub] ( literal[string] , keyword[lambda] identifier[m] : identifier[m] . identifier[group] ( literal[int] ). identifier[lower] (), identifier[value] . identifier[title] ())
identifier[result] = identifier[re] . identifier[sub] ( literal[string] , keyword[lambda] identifier[m] : identifier[m] . identifier[group] ( literal[int] ). identifier[lower] (), identifier[t] )
keyword[if] keyword[not] identifier[result] :
keyword[return] identifier[failure_string]
keyword[return] identifier[result]
keyword[except] :
keyword[return] identifier[failure_string] | def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub('\\d([A-Z])', lambda m: m.group(0).lower(), t)
if not result:
return failure_string # depends on [control=['if'], data=[]]
return result # depends on [control=['try'], data=[]]
except:
return failure_string # depends on [control=['except'], data=[]] |
def backup(self):
    """
    Store a copy of the main dataframe in ``self.backup_df``.

    On failure the error is reported via ``self.err`` and the previous
    backup (if any) is left untouched; on success ``self.ok`` is called.
    """
    try:
        snapshot = self.df.copy()
    except Exception as e:
        self.err(e, "Can not backup data")
        return
    # Only overwrite the stored backup once the copy has succeeded.
    self.backup_df = snapshot
    self.ok("Dataframe backed up")
constant[
Backup the main dataframe
]
<ast.Try object at 0x7da207f99960>
call[name[self].ok, parameter[constant[Dataframe backed up]]] | keyword[def] identifier[backup] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[backup_df] = identifier[self] . identifier[df] . identifier[copy] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , literal[string] )
keyword[return]
identifier[self] . identifier[ok] ( literal[string] ) | def backup(self):
"""
Backup the main dataframe
"""
try:
self.backup_df = self.df.copy() # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, 'Can not backup data')
return # depends on [control=['except'], data=['e']]
self.ok('Dataframe backed up') |
def add_network_ipv6_hosts(
        self,
        id_vlan,
        id_tipo_rede,
        num_hosts,
        id_ambiente_vip=None):
    """
    Add a new NetworkIPv6 sized for the expected number of hosts.

    :param id_vlan: Identifier of the Vlan. Integer value and greater than zero.
    :param id_tipo_rede: Identifier of the NetworkType. Integer value and greater than zero.
    :param num_hosts: Number of hosts expected. Integer value and greater than zero.
    :param id_ambiente_vip: Identifier of the Environment Vip. Integer value and greater than zero.

    :return: Dictionary under the ``'vlan'`` key describing the created
        network: vlan id/name/number, network type and environment ids,
        the eight network octets and eight mask octets, block, broadcast,
        description, ACL file name/validity and activation state.

    :raise TipoRedeNaoExisteError: NetworkType not found.
    :raise InvalidParameterError: Invalid ID for Vlan or NetworkType.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise IPNaoDisponivelError: Network address unavailable to create a NetworkIPv6.
    :raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Request payload expected by the networkapi endpoint.
    payload = {
        'id_vlan': id_vlan,
        'id_tipo_rede': id_tipo_rede,
        'num_hosts': num_hosts,
        'id_ambiente_vip': id_ambiente_vip,
    }
    code, xml = self.submit({'vlan': payload}, 'PUT', 'network/ipv6/add/')
    return self.response(code, xml)
constant[
Add new networkipv6
:param id_vlan: Identifier of the Vlan. Integer value and greater than zero.
:param id_tipo_rede: Identifier of the NetworkType. Integer value and greater than zero.
:param num_hosts: Number of hosts expected. Integer value and greater than zero.
:param id_ambiente_vip: Identifier of the Environment Vip. Integer value and greater than zero.
:return: Following dictionary:
::
{'vlan': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_tipo_rede': < id_tipo_rede >,
'id_ambiente': < id_ambiente >,
'rede_oct1': < rede_oct1 >,
'rede_oct2': < rede_oct2 >,
'rede_oct3': < rede_oct3 >,
'rede_oct4': < rede_oct4 >,
'rede_oct5': < rede_oct4 >,
'rede_oct6': < rede_oct4 >,
'rede_oct7': < rede_oct4 >,
'rede_oct8': < rede_oct4 >,
'bloco': < bloco >,
'mascara_oct1': < mascara_oct1 >,
'mascara_oct2': < mascara_oct2 >,
'mascara_oct3': < mascara_oct3 >,
'mascara_oct4': < mascara_oct4 >,
'mascara_oct5': < mascara_oct4 >,
'mascara_oct6': < mascara_oct4 >,
'mascara_oct7': < mascara_oct4 >,
'mascara_oct8': < mascara_oct4 >,
'broadcast': < broadcast >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'ativada': < ativada >}}
:raise TipoRedeNaoExisteError: NetworkType not found.
:raise InvalidParameterError: Invalid ID for Vlan or NetworkType.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise IPNaoDisponivelError: Network address unavailable to create a NetworkIPv6.
:raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
]
variable[vlan_map] assign[=] call[name[dict], parameter[]]
call[name[vlan_map]][constant[id_vlan]] assign[=] name[id_vlan]
call[name[vlan_map]][constant[id_tipo_rede]] assign[=] name[id_tipo_rede]
call[name[vlan_map]][constant[num_hosts]] assign[=] name[num_hosts]
call[name[vlan_map]][constant[id_ambiente_vip]] assign[=] name[id_ambiente_vip]
<ast.Tuple object at 0x7da20c6a9780> assign[=] call[name[self].submit, parameter[dictionary[[<ast.Constant object at 0x7da20c6aab00>], [<ast.Name object at 0x7da20c6a9e70>]], constant[PUT], constant[network/ipv6/add/]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[add_network_ipv6_hosts] (
identifier[self] ,
identifier[id_vlan] ,
identifier[id_tipo_rede] ,
identifier[num_hosts] ,
identifier[id_ambiente_vip] = keyword[None] ):
literal[string]
identifier[vlan_map] = identifier[dict] ()
identifier[vlan_map] [ literal[string] ]= identifier[id_vlan]
identifier[vlan_map] [ literal[string] ]= identifier[id_tipo_rede]
identifier[vlan_map] [ literal[string] ]= identifier[num_hosts]
identifier[vlan_map] [ literal[string] ]= identifier[id_ambiente_vip]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ({ literal[string] : identifier[vlan_map] }, literal[string] , literal[string] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def add_network_ipv6_hosts(self, id_vlan, id_tipo_rede, num_hosts, id_ambiente_vip=None):
"""
Add new networkipv6
:param id_vlan: Identifier of the Vlan. Integer value and greater than zero.
:param id_tipo_rede: Identifier of the NetworkType. Integer value and greater than zero.
:param num_hosts: Number of hosts expected. Integer value and greater than zero.
:param id_ambiente_vip: Identifier of the Environment Vip. Integer value and greater than zero.
:return: Following dictionary:
::
{'vlan': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_tipo_rede': < id_tipo_rede >,
'id_ambiente': < id_ambiente >,
'rede_oct1': < rede_oct1 >,
'rede_oct2': < rede_oct2 >,
'rede_oct3': < rede_oct3 >,
'rede_oct4': < rede_oct4 >,
'rede_oct5': < rede_oct4 >,
'rede_oct6': < rede_oct4 >,
'rede_oct7': < rede_oct4 >,
'rede_oct8': < rede_oct4 >,
'bloco': < bloco >,
'mascara_oct1': < mascara_oct1 >,
'mascara_oct2': < mascara_oct2 >,
'mascara_oct3': < mascara_oct3 >,
'mascara_oct4': < mascara_oct4 >,
'mascara_oct5': < mascara_oct4 >,
'mascara_oct6': < mascara_oct4 >,
'mascara_oct7': < mascara_oct4 >,
'mascara_oct8': < mascara_oct4 >,
'broadcast': < broadcast >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'ativada': < ativada >}}
:raise TipoRedeNaoExisteError: NetworkType not found.
:raise InvalidParameterError: Invalid ID for Vlan or NetworkType.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise IPNaoDisponivelError: Network address unavailable to create a NetworkIPv6.
:raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
vlan_map = dict()
vlan_map['id_vlan'] = id_vlan
vlan_map['id_tipo_rede'] = id_tipo_rede
vlan_map['num_hosts'] = num_hosts
vlan_map['id_ambiente_vip'] = id_ambiente_vip
(code, xml) = self.submit({'vlan': vlan_map}, 'PUT', 'network/ipv6/add/')
return self.response(code, xml) |
def _multicall_callback(self, values, calls):
    """
    Fires when we get information back from the XML-RPC server.
    Wraps the raw results of system.multicall in an iterator that yields
    usable values (and/or Faults), one per call we sent.
    :param values: list of data txkoji.Connection.call()
    :param calls: list of calls we sent in this multicall RPC
    :returns: KojiMultiCallIterator with the resulting values from all our
              calls.
    """
    iterator = KojiMultiCallIterator(values)
    iterator.calls = calls
    iterator.connection = self.connection
    return iterator
constant[
Fires when we get information back from the XML-RPC server.
This is processes the raw results of system.multicall into a usable
iterator of values (and/or Faults).
:param values: list of data txkoji.Connection.call()
:param calls: list of calls we sent in this multicall RPC
:returns: KojiMultiCallIterator with the resulting values from all our
calls.
]
variable[result] assign[=] call[name[KojiMultiCallIterator], parameter[name[values]]]
name[result].connection assign[=] name[self].connection
name[result].calls assign[=] name[calls]
return[name[result]] | keyword[def] identifier[_multicall_callback] ( identifier[self] , identifier[values] , identifier[calls] ):
literal[string]
identifier[result] = identifier[KojiMultiCallIterator] ( identifier[values] )
identifier[result] . identifier[connection] = identifier[self] . identifier[connection]
identifier[result] . identifier[calls] = identifier[calls]
keyword[return] identifier[result] | def _multicall_callback(self, values, calls):
"""
Fires when we get information back from the XML-RPC server.
This is processes the raw results of system.multicall into a usable
iterator of values (and/or Faults).
:param values: list of data txkoji.Connection.call()
:param calls: list of calls we sent in this multicall RPC
:returns: KojiMultiCallIterator with the resulting values from all our
calls.
"""
result = KojiMultiCallIterator(values)
result.connection = self.connection
result.calls = calls
return result |
def _astoref(ins):
    ''' Stores a floating point value into a memory address.

    Emits Z80 code that loads the floating point operand and calls the
    __STOREF runtime routine with HL pointing at the destination address.
    '''
    output = _addr(ins.quad[1])
    # A leading '*' marks an indirect operand. NOTE(review): the original
    # stripped the '*' into a local that was never read again; _float_oper()
    # receives the raw operand either way, so only the flag matters here.
    indirect = ins.quad[2][0] == '*'
    if indirect:
        # Preserve HL (the destination address) across the operand load.
        output.append('push hl')
        output.extend(_float_oper(ins.quad[2]))
        output.append('pop hl')
    else:
        output.extend(_float_oper(ins.quad[2]))
    output.append('call __STOREF')
    REQUIRES.add('storef.asm')
    return output
return output | def function[_astoref, parameter[ins]]:
constant[ Stores a floating point value into a memory address.
]
variable[output] assign[=] call[name[_addr], parameter[call[name[ins].quad][constant[1]]]]
variable[value] assign[=] call[name[ins].quad][constant[2]]
if compare[call[name[value]][constant[0]] equal[==] constant[*]] begin[:]
variable[value] assign[=] call[name[value]][<ast.Slice object at 0x7da204565420>]
variable[indirect] assign[=] constant[True]
if name[indirect] begin[:]
call[name[output].append, parameter[constant[push hl]]]
call[name[output].extend, parameter[call[name[_float_oper], parameter[call[name[ins].quad][constant[2]]]]]]
call[name[output].append, parameter[constant[pop hl]]]
call[name[output].append, parameter[constant[call __STOREF]]]
call[name[REQUIRES].add, parameter[constant[storef.asm]]]
return[name[output]] | keyword[def] identifier[_astoref] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_addr] ( identifier[ins] . identifier[quad] [ literal[int] ])
identifier[value] = identifier[ins] . identifier[quad] [ literal[int] ]
keyword[if] identifier[value] [ literal[int] ]== literal[string] :
identifier[value] = identifier[value] [ literal[int] :]
identifier[indirect] = keyword[True]
keyword[else] :
identifier[indirect] = keyword[False]
keyword[if] identifier[indirect] :
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[extend] ( identifier[_float_oper] ( identifier[ins] . identifier[quad] [ literal[int] ]))
identifier[output] . identifier[append] ( literal[string] )
keyword[else] :
identifier[output] . identifier[extend] ( identifier[_float_oper] ( identifier[ins] . identifier[quad] [ literal[int] ]))
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _astoref(ins):
""" Stores a floating point value into a memory address.
"""
output = _addr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True # depends on [control=['if'], data=[]]
else:
indirect = False
if indirect:
output.append('push hl')
output.extend(_float_oper(ins.quad[2]))
output.append('pop hl') # depends on [control=['if'], data=[]]
else:
output.extend(_float_oper(ins.quad[2]))
output.append('call __STOREF')
REQUIRES.add('storef.asm')
return output |
def asym_list_diff(list1, list2):
    """ Asymmetric list difference.

    Return the items of ``list1`` that do not appear in ``list2``,
    preserving order and duplicates. Items only need to support ``==``
    (no hashing required), so each membership test is O(len(list2)).
    """
    return [item for item in list1 if item not in list2]
constant[ Asymmetric list difference ]
variable[diff_list] assign[=] list[[]]
for taget[name[item]] in starred[name[list1]] begin[:]
if <ast.UnaryOp object at 0x7da204621de0> begin[:]
call[name[diff_list].append, parameter[name[item]]]
return[name[diff_list]] | keyword[def] identifier[asym_list_diff] ( identifier[list1] , identifier[list2] ):
literal[string]
identifier[diff_list] =[]
keyword[for] identifier[item] keyword[in] identifier[list1] :
keyword[if] keyword[not] identifier[item] keyword[in] identifier[list2] :
identifier[diff_list] . identifier[append] ( identifier[item] )
keyword[return] identifier[diff_list] | def asym_list_diff(list1, list2):
""" Asymmetric list difference """
diff_list = []
for item in list1:
if not item in list2:
diff_list.append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return diff_list |
def from_dict(cls, dct, project=None, delim=' | '):
        r"""
        This method converts a correctly formatted dictionary into OpenPNM
        objects, and returns a handle to the *project* containing them.
        Parameters
        ----------
        dct : dictionary
            The Python dictionary containing the data. The nesting and
            labeling of the dictionary is used to create the appropriate
            OpenPNM objects.
        project : OpenPNM Project Object
            The project with which the created objects should be associated.
            If not supplied, one will be created.
        delim : string
            The delimiter marking nesting levels in flattened keys.
            Defaults to ' | '.
        Returns
        -------
        An OpenPNM Project containing the objects created to store the given
        data.
        """
        if project is None:
            project = ws.new_project()
        # Uncategorize pore/throat and labels/properties, if present
        fd = FlatDict(dct, delimiter=delim)
        # If . is the delimiter, replace with | otherwise things break
        if delim == '.':
            delim = ' | '
            for key in list(fd.keys()):
                new_key = key.replace('.', delim)
                fd[new_key] = fd.pop(key)
        d = FlatDict(delimiter=delim)
        # Fold the 'pore'/'throat' level back into the data key itself and
        # drop the redundant 'labels'/'properties' level entirely.
        for key in list(fd.keys()):
            new_key = key.replace('pore' + delim, 'pore.')
            new_key = new_key.replace('throat' + delim, 'throat.')
            new_key = new_key.replace('labels' + delim, '')
            new_key = new_key.replace('properties' + delim, '')
            d[new_key] = fd.pop(key)
        # Place data into correctly categorized dicts, for later handling
        objs = {'network': NestedDict(),
                'geometry': NestedDict(),
                'physics': NestedDict(),
                'phase': NestedDict(),
                'algorithm': NestedDict(),
                'base': NestedDict()}
        for item in d.keys():
            path = item.split(delim)
            if len(path) > 2:
                if path[-3] in objs.keys():
                    # Item is categorized by type, so note it
                    objs[path[-3]][path[-2]][path[-1]] = d[item]
                else:
                    # item is nested, not categorized; make it a base
                    objs['base'][path[-2]][path[-1]] = d[item]
            else:
                # If not categorized by type, make it a base
                objs['base'][path[-2]][path[-1]] = d[item]
        # Convert to OpenPNM Objects, attempting to infer type
        for objtype in objs.keys():
            for name in objs[objtype].keys():
                # Create empty object, using dummy name to avoid error
                obj = project._new_object(objtype=objtype, name='')
                # Overwrite name
                obj._set_name(name=name, validate=False)
                # Update new object with data from dict
                obj.update(objs[objtype][name])
        return project
constant[
This method converts a correctly formatted dictionary into OpenPNM
objects, and returns a handle to the *project* containing them.
Parameters
----------
dct : dictionary
The Python dictionary containing the data. The nesting and
labeling of the dictionary is used to create the appropriate
OpenPNM objects.
project : OpenPNM Project Object
The project with which the created objects should be associated.
If not supplied, one will be created.
Returns
-------
An OpenPNM Project containing the objects created to store the given
data.
]
if compare[name[project] is constant[None]] begin[:]
variable[project] assign[=] call[name[ws].new_project, parameter[]]
variable[fd] assign[=] call[name[FlatDict], parameter[name[dct]]]
if compare[name[delim] equal[==] constant[.]] begin[:]
variable[delim] assign[=] constant[ | ]
for taget[name[key]] in starred[call[name[list], parameter[call[name[fd].keys, parameter[]]]]] begin[:]
variable[new_key] assign[=] call[name[key].replace, parameter[constant[.], name[delim]]]
call[name[fd]][name[new_key]] assign[=] call[name[fd].pop, parameter[name[key]]]
variable[d] assign[=] call[name[FlatDict], parameter[]]
for taget[name[key]] in starred[call[name[list], parameter[call[name[fd].keys, parameter[]]]]] begin[:]
variable[new_key] assign[=] call[name[key].replace, parameter[binary_operation[constant[pore] + name[delim]], constant[pore.]]]
variable[new_key] assign[=] call[name[new_key].replace, parameter[binary_operation[constant[throat] + name[delim]], constant[throat.]]]
variable[new_key] assign[=] call[name[new_key].replace, parameter[binary_operation[constant[labels] + name[delim]], constant[]]]
variable[new_key] assign[=] call[name[new_key].replace, parameter[binary_operation[constant[properties] + name[delim]], constant[]]]
call[name[d]][name[new_key]] assign[=] call[name[fd].pop, parameter[name[key]]]
variable[objs] assign[=] dictionary[[<ast.Constant object at 0x7da18f09cb80>, <ast.Constant object at 0x7da18f09d0c0>, <ast.Constant object at 0x7da18f09edd0>, <ast.Constant object at 0x7da18f09fb50>, <ast.Constant object at 0x7da18f09ee60>, <ast.Constant object at 0x7da18f09ff70>], [<ast.Call object at 0x7da18f09f6a0>, <ast.Call object at 0x7da18f09f940>, <ast.Call object at 0x7da18f09dff0>, <ast.Call object at 0x7da18f09d900>, <ast.Call object at 0x7da18f09c400>, <ast.Call object at 0x7da18f09e530>]]
for taget[name[item]] in starred[call[name[d].keys, parameter[]]] begin[:]
variable[path] assign[=] call[name[item].split, parameter[name[delim]]]
if compare[call[name[len], parameter[name[path]]] greater[>] constant[2]] begin[:]
if compare[call[name[path]][<ast.UnaryOp object at 0x7da18f09c880>] in call[name[objs].keys, parameter[]]] begin[:]
call[call[call[name[objs]][call[name[path]][<ast.UnaryOp object at 0x7da18f09ffd0>]]][call[name[path]][<ast.UnaryOp object at 0x7da18f09e140>]]][call[name[path]][<ast.UnaryOp object at 0x7da18f09fd90>]] assign[=] call[name[d]][name[item]]
for taget[name[objtype]] in starred[call[name[objs].keys, parameter[]]] begin[:]
for taget[name[name]] in starred[call[call[name[objs]][name[objtype]].keys, parameter[]]] begin[:]
variable[obj] assign[=] call[name[project]._new_object, parameter[]]
call[name[obj]._set_name, parameter[]]
call[name[obj].update, parameter[call[call[name[objs]][name[objtype]]][name[name]]]]
return[name[project]] | keyword[def] identifier[from_dict] ( identifier[cls] , identifier[dct] , identifier[project] = keyword[None] , identifier[delim] = literal[string] ):
literal[string]
keyword[if] identifier[project] keyword[is] keyword[None] :
identifier[project] = identifier[ws] . identifier[new_project] ()
identifier[fd] = identifier[FlatDict] ( identifier[dct] , identifier[delimiter] = identifier[delim] )
keyword[if] identifier[delim] == literal[string] :
identifier[delim] = literal[string]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[fd] . identifier[keys] ()):
identifier[new_key] = identifier[key] . identifier[replace] ( literal[string] , identifier[delim] )
identifier[fd] [ identifier[new_key] ]= identifier[fd] . identifier[pop] ( identifier[key] )
identifier[d] = identifier[FlatDict] ( identifier[delimiter] = identifier[delim] )
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[fd] . identifier[keys] ()):
identifier[new_key] = identifier[key] . identifier[replace] ( literal[string] + identifier[delim] , literal[string] )
identifier[new_key] = identifier[new_key] . identifier[replace] ( literal[string] + identifier[delim] , literal[string] )
identifier[new_key] = identifier[new_key] . identifier[replace] ( literal[string] + identifier[delim] , literal[string] )
identifier[new_key] = identifier[new_key] . identifier[replace] ( literal[string] + identifier[delim] , literal[string] )
identifier[d] [ identifier[new_key] ]= identifier[fd] . identifier[pop] ( identifier[key] )
identifier[objs] ={ literal[string] : identifier[NestedDict] (),
literal[string] : identifier[NestedDict] (),
literal[string] : identifier[NestedDict] (),
literal[string] : identifier[NestedDict] (),
literal[string] : identifier[NestedDict] (),
literal[string] : identifier[NestedDict] ()}
keyword[for] identifier[item] keyword[in] identifier[d] . identifier[keys] ():
identifier[path] = identifier[item] . identifier[split] ( identifier[delim] )
keyword[if] identifier[len] ( identifier[path] )> literal[int] :
keyword[if] identifier[path] [- literal[int] ] keyword[in] identifier[objs] . identifier[keys] ():
identifier[objs] [ identifier[path] [- literal[int] ]][ identifier[path] [- literal[int] ]][ identifier[path] [- literal[int] ]]= identifier[d] [ identifier[item] ]
keyword[else] :
identifier[objs] [ literal[string] ][ identifier[path] [- literal[int] ]][ identifier[path] [- literal[int] ]]= identifier[d] [ identifier[item] ]
keyword[else] :
identifier[objs] [ literal[string] ][ identifier[path] [- literal[int] ]][ identifier[path] [- literal[int] ]]= identifier[d] [ identifier[item] ]
keyword[for] identifier[objtype] keyword[in] identifier[objs] . identifier[keys] ():
keyword[for] identifier[name] keyword[in] identifier[objs] [ identifier[objtype] ]. identifier[keys] ():
identifier[obj] = identifier[project] . identifier[_new_object] ( identifier[objtype] = identifier[objtype] , identifier[name] = literal[string] )
identifier[obj] . identifier[_set_name] ( identifier[name] = identifier[name] , identifier[validate] = keyword[False] )
identifier[obj] . identifier[update] ( identifier[objs] [ identifier[objtype] ][ identifier[name] ])
keyword[return] identifier[project] | def from_dict(cls, dct, project=None, delim=' | '):
"""
This method converts a correctly formatted dictionary into OpenPNM
objects, and returns a handle to the *project* containing them.
Parameters
----------
dct : dictionary
The Python dictionary containing the data. The nesting and
labeling of the dictionary is used to create the appropriate
OpenPNM objects.
project : OpenPNM Project Object
The project with which the created objects should be associated.
If not supplied, one will be created.
Returns
-------
An OpenPNM Project containing the objects created to store the given
data.
"""
if project is None:
project = ws.new_project() # depends on [control=['if'], data=['project']]
# Uncategorize pore/throat and labels/properties, if present
fd = FlatDict(dct, delimiter=delim)
# If . is the delimiter, replace with | otherwise things break
if delim == '.':
delim = ' | '
for key in list(fd.keys()):
new_key = key.replace('.', delim)
fd[new_key] = fd.pop(key) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['delim']]
d = FlatDict(delimiter=delim)
for key in list(fd.keys()):
new_key = key.replace('pore' + delim, 'pore.')
new_key = new_key.replace('throat' + delim, 'throat.')
new_key = new_key.replace('labels' + delim, '')
new_key = new_key.replace('properties' + delim, '')
d[new_key] = fd.pop(key) # depends on [control=['for'], data=['key']]
# Plase data into correctly categorized dicts, for later handling
objs = {'network': NestedDict(), 'geometry': NestedDict(), 'physics': NestedDict(), 'phase': NestedDict(), 'algorithm': NestedDict(), 'base': NestedDict()}
for item in d.keys():
path = item.split(delim)
if len(path) > 2:
if path[-3] in objs.keys():
# Item is categorized by type, so note it
objs[path[-3]][path[-2]][path[-1]] = d[item] # depends on [control=['if'], data=[]]
else:
# item is nested, not categorized; make it a base
objs['base'][path[-2]][path[-1]] = d[item] # depends on [control=['if'], data=[]]
else:
# If not categorized by type, make it a base
objs['base'][path[-2]][path[-1]] = d[item] # depends on [control=['for'], data=['item']]
# Convert to OpenPNM Objects, attempting to infer type
for objtype in objs.keys():
for name in objs[objtype].keys():
# Create empty object, using dummy name to avoid error
obj = project._new_object(objtype=objtype, name='')
# Overwrite name
obj._set_name(name=name, validate=False)
# Update new object with data from dict
obj.update(objs[objtype][name]) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['objtype']]
return project |
def _normalize_hosts(hosts):
    """
    Canonicalize the *hosts* parameter.
    >>> _normalize_hosts("host,127.0.0.2:2909")
    [('127.0.0.2', 2909), ('host', 9092)]
    :param hosts:
        A list or comma-separated string of hostnames, each optionally
        carrying a ``:port`` suffix. Bytes and text are both accepted, e.g.
        ``b'host'``, ``u'host:1234,host:2345'``, ``[b'host:1234', u'host2']``.
        Hostnames must be ASCII (IDN is not supported). The default Kafka
        port of 9092 is implied when no port is given.
    :returns: A list of unique (host, port) tuples.
    :rtype: :class:`list` of (:class:`str`, :class:`int`) tuples
    """
    if isinstance(hosts, bytes):
        hosts = hosts.split(b',')
    elif isinstance(hosts, _unicode):
        hosts = hosts.split(u',')
    unique_pairs = set()
    for entry in hosts:
        # FIXME This won't handle IPv6 addresses
        pieces = nativeString(entry).split(':')
        name = pieces[0].strip()
        if len(pieces) > 1:
            port = int(pieces[1].strip())
        else:
            port = DefaultKafkaPort
        unique_pairs.add((name, port))
    return sorted(unique_pairs)
constant[
Canonicalize the *hosts* parameter.
>>> _normalize_hosts("host,127.0.0.2:2909")
[('127.0.0.2', 2909), ('host', 9092)]
:param hosts:
A list or comma-separated string of hostnames which may also include
port numbers. All of the following are valid::
b'host'
u'host'
b'host:1234'
u'host:1234,host:2345'
b'host:1234 , host:2345 '
[u'host1', b'host2']
[b'host:1234', b'host:2345']
Hostnames must be ASCII (IDN is not supported). The default Kafka port
of 9092 is implied when no port is given.
:returns: A list of unique (host, port) tuples.
:rtype: :class:`list` of (:class:`str`, :class:`int`) tuples
]
if call[name[isinstance], parameter[name[hosts], name[bytes]]] begin[:]
variable[hosts] assign[=] call[name[hosts].split, parameter[constant[b',']]]
variable[result] assign[=] call[name[set], parameter[]]
for taget[name[host_port]] in starred[name[hosts]] begin[:]
variable[res] assign[=] call[call[name[nativeString], parameter[name[host_port]]].split, parameter[constant[:]]]
variable[host] assign[=] call[call[name[res]][constant[0]].strip, parameter[]]
variable[port] assign[=] <ast.IfExp object at 0x7da1b04d8d30>
call[name[result].add, parameter[tuple[[<ast.Name object at 0x7da1b04d90c0>, <ast.Name object at 0x7da1b04d90f0>]]]]
return[call[name[sorted], parameter[name[result]]]] | keyword[def] identifier[_normalize_hosts] ( identifier[hosts] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[hosts] , identifier[bytes] ):
identifier[hosts] = identifier[hosts] . identifier[split] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[hosts] , identifier[_unicode] ):
identifier[hosts] = identifier[hosts] . identifier[split] ( literal[string] )
identifier[result] = identifier[set] ()
keyword[for] identifier[host_port] keyword[in] identifier[hosts] :
identifier[res] = identifier[nativeString] ( identifier[host_port] ). identifier[split] ( literal[string] )
identifier[host] = identifier[res] [ literal[int] ]. identifier[strip] ()
identifier[port] = identifier[int] ( identifier[res] [ literal[int] ]. identifier[strip] ()) keyword[if] identifier[len] ( identifier[res] )> literal[int] keyword[else] identifier[DefaultKafkaPort]
identifier[result] . identifier[add] (( identifier[host] , identifier[port] ))
keyword[return] identifier[sorted] ( identifier[result] ) | def _normalize_hosts(hosts):
"""
Canonicalize the *hosts* parameter.
>>> _normalize_hosts("host,127.0.0.2:2909")
[('127.0.0.2', 2909), ('host', 9092)]
:param hosts:
A list or comma-separated string of hostnames which may also include
port numbers. All of the following are valid::
b'host'
u'host'
b'host:1234'
u'host:1234,host:2345'
b'host:1234 , host:2345 '
[u'host1', b'host2']
[b'host:1234', b'host:2345']
Hostnames must be ASCII (IDN is not supported). The default Kafka port
of 9092 is implied when no port is given.
:returns: A list of unique (host, port) tuples.
:rtype: :class:`list` of (:class:`str`, :class:`int`) tuples
"""
if isinstance(hosts, bytes):
hosts = hosts.split(b',') # depends on [control=['if'], data=[]]
elif isinstance(hosts, _unicode):
hosts = hosts.split(u',') # depends on [control=['if'], data=[]]
result = set()
for host_port in hosts:
# FIXME This won't handle IPv6 addresses
res = nativeString(host_port).split(':')
host = res[0].strip()
port = int(res[1].strip()) if len(res) > 1 else DefaultKafkaPort
result.add((host, port)) # depends on [control=['for'], data=['host_port']]
return sorted(result) |
def __update_density(self, compound='', element=''):
        """Re-calculate the density of the element given due to stoichiometric changes as
        well as the compound density (if density is not locked)

        The element density is the isotopic-ratio-weighted sum of the isotope
        densities stored in ``self.stack``.

        Parameters:
        ===========
        compound: string (default is '') name of compound
        element: string (default is '') name of element
        """
        _entry = self.stack[compound][element]
        list_ratio = _entry['isotopes']['isotopic_ratio']
        list_density = _entry['isotopes']['density']['value']
        # Weighted sum of isotope densities. The builtin float() replaces the
        # old np.float() alias, which was deprecated in NumPy 1.20 and removed
        # in NumPy 1.24 (calling it there raises AttributeError).
        _density_element = sum(
            float(_ratio) * float(_density)
            for _ratio, _density in zip(list_ratio, list_density))
        _entry['density']['value'] = _density_element
        _density_lock = self.density_lock
        # Only propagate to the compound/layer density when it is not locked.
        if not _density_lock[compound]:
            self.__update_layer_density()
constant[Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element
]
variable[_density_element] assign[=] constant[0]
variable[list_ratio] assign[=] call[call[call[call[name[self].stack][name[compound]]][name[element]]][constant[isotopes]]][constant[isotopic_ratio]]
variable[list_density] assign[=] call[call[call[call[call[name[self].stack][name[compound]]][name[element]]][constant[isotopes]]][constant[density]]][constant[value]]
variable[ratio_density] assign[=] call[name[zip], parameter[name[list_ratio], name[list_density]]]
for taget[tuple[[<ast.Name object at 0x7da20e9541c0>, <ast.Name object at 0x7da20e9575b0>]]] in starred[name[ratio_density]] begin[:]
<ast.AugAssign object at 0x7da20e954820>
call[call[call[call[name[self].stack][name[compound]]][name[element]]][constant[density]]][constant[value]] assign[=] name[_density_element]
variable[_density_lock] assign[=] name[self].density_lock
if <ast.UnaryOp object at 0x7da18bcc9060> begin[:]
call[name[self].__update_layer_density, parameter[]] | keyword[def] identifier[__update_density] ( identifier[self] , identifier[compound] = literal[string] , identifier[element] = literal[string] ):
literal[string]
identifier[_density_element] = literal[int]
identifier[list_ratio] = identifier[self] . identifier[stack] [ identifier[compound] ][ identifier[element] ][ literal[string] ][ literal[string] ]
identifier[list_density] = identifier[self] . identifier[stack] [ identifier[compound] ][ identifier[element] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[ratio_density] = identifier[zip] ( identifier[list_ratio] , identifier[list_density] )
keyword[for] identifier[_ratio] , identifier[_density] keyword[in] identifier[ratio_density] :
identifier[_density_element] += identifier[np] . identifier[float] ( identifier[_ratio] )* identifier[np] . identifier[float] ( identifier[_density] )
identifier[self] . identifier[stack] [ identifier[compound] ][ identifier[element] ][ literal[string] ][ literal[string] ]= identifier[_density_element]
identifier[_density_lock] = identifier[self] . identifier[density_lock]
keyword[if] keyword[not] identifier[_density_lock] [ identifier[compound] ]:
identifier[self] . identifier[__update_layer_density] () | def __update_density(self, compound='', element=''):
"""Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_density_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_density = self.stack[compound][element]['isotopes']['density']['value']
ratio_density = zip(list_ratio, list_density)
for (_ratio, _density) in ratio_density:
_density_element += np.float(_ratio) * np.float(_density) # depends on [control=['for'], data=[]]
self.stack[compound][element]['density']['value'] = _density_element
_density_lock = self.density_lock
if not _density_lock[compound]:
self.__update_layer_density() # depends on [control=['if'], data=[]] |
def screencap_exec(self, filename: _PATH = 'screencap.png') -> None:
    '''Take a screenshot of the device display and copy it to *filename*
    on the local machine (PNG format).
    '''
    # Runs: adb -s <serial> exec-out screencap -p > <filename>
    # NOTE(review): '>' is passed as a separate argument with shell=True --
    # presumably _execute joins the arguments into one shell command line so
    # the redirection happens on the host; confirm against _execute.
    self._execute('-s', self.device_sn, 'exec-out',
                  'screencap', '-p', '>', filename, shell=True)
constant[Taking a screenshot of a device display, then copy it to your computer.]
call[name[self]._execute, parameter[constant[-s], name[self].device_sn, constant[exec-out], constant[screencap], constant[-p], constant[>], name[filename]]] | keyword[def] identifier[screencap_exec] ( identifier[self] , identifier[filename] : identifier[_PATH] = literal[string] )-> keyword[None] :
literal[string]
identifier[self] . identifier[_execute] ( literal[string] , identifier[self] . identifier[device_sn] , literal[string] ,
literal[string] , literal[string] , literal[string] , identifier[filename] , identifier[shell] = keyword[True] ) | def screencap_exec(self, filename: _PATH='screencap.png') -> None:
"""Taking a screenshot of a device display, then copy it to your computer."""
self._execute('-s', self.device_sn, 'exec-out', 'screencap', '-p', '>', filename, shell=True) |
def find_alliteration(self):
        """
        Find alliterations in the complete verse.

        :return: a ``(verse_alliterations, n_alliterations_lines)`` pair with
            one entry per processed long line: the alliterations found in that
            line and the corresponding count.
        :raise ValueError: if the phonological transcription is empty.
        """
        if len(self.phonological_features_text) == 0:
            logger.error("No phonological transcription found")
            raise ValueError
        else:
            # first_sounds[i][j][k] holds the first sound of word k in
            # short line j of line i of the transcription.
            first_sounds = []
            for i, line in enumerate(self.phonological_features_text):
                first_sounds.append([])
                for j, short_line in enumerate(line):
                    first_sounds[i].append([])
                    for viisuord in short_line:
                        first_sounds[i][j].append(viisuord[0])
            verse_alliterations = []
            n_alliterations_lines = []
            # NOTE(review): first_sound_line is never read below -- the loop
            # only uses the index i into self.long_lines; confirm first_sounds
            # is still needed before simplifying.
            for i, first_sound_line in enumerate(first_sounds):
                if isinstance(self.long_lines[i][0], ShortLine) and isinstance(self.long_lines[i][1], ShortLine):
                    # Long line split into two short lines: alliterations are
                    # searched across the pair.
                    self.long_lines[i][0].get_first_sounds()
                    self.long_lines[i][1].get_first_sounds()
                    alli, counter = self.long_lines[i][0].find_alliterations(self.long_lines[i][1])
                    verse_alliterations.append(alli)
                    n_alliterations_lines.append(counter)
                elif isinstance(self.long_lines[i][0], LongLine):
                    # An unsplit long line is searched on its own.
                    self.long_lines[i][0].get_first_sounds()
                    alli, counter = self.long_lines[i][0].find_alliterations()
                    verse_alliterations.append(alli)
                    n_alliterations_lines.append(counter)
                # Lines of any other shape contribute no entry at all.
            return verse_alliterations, n_alliterations_lines
constant[
Find alliterations in the complete verse.
:return:
]
if compare[call[name[len], parameter[name[self].phonological_features_text]] equal[==] constant[0]] begin[:]
call[name[logger].error, parameter[constant[No phonological transcription found]]]
<ast.Raise object at 0x7da204620520> | keyword[def] identifier[find_alliteration] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[phonological_features_text] )== literal[int] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[ValueError]
keyword[else] :
identifier[first_sounds] =[]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[self] . identifier[phonological_features_text] ):
identifier[first_sounds] . identifier[append] ([])
keyword[for] identifier[j] , identifier[short_line] keyword[in] identifier[enumerate] ( identifier[line] ):
identifier[first_sounds] [ identifier[i] ]. identifier[append] ([])
keyword[for] identifier[viisuord] keyword[in] identifier[short_line] :
identifier[first_sounds] [ identifier[i] ][ identifier[j] ]. identifier[append] ( identifier[viisuord] [ literal[int] ])
identifier[verse_alliterations] =[]
identifier[n_alliterations_lines] =[]
keyword[for] identifier[i] , identifier[first_sound_line] keyword[in] identifier[enumerate] ( identifier[first_sounds] ):
keyword[if] identifier[isinstance] ( identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ], identifier[ShortLine] ) keyword[and] identifier[isinstance] ( identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ], identifier[ShortLine] ):
identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ]. identifier[get_first_sounds] ()
identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ]. identifier[get_first_sounds] ()
identifier[alli] , identifier[counter] = identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ]. identifier[find_alliterations] ( identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ])
identifier[verse_alliterations] . identifier[append] ( identifier[alli] )
identifier[n_alliterations_lines] . identifier[append] ( identifier[counter] )
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ], identifier[LongLine] ):
identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ]. identifier[get_first_sounds] ()
identifier[alli] , identifier[counter] = identifier[self] . identifier[long_lines] [ identifier[i] ][ literal[int] ]. identifier[find_alliterations] ()
identifier[verse_alliterations] . identifier[append] ( identifier[alli] )
identifier[n_alliterations_lines] . identifier[append] ( identifier[counter] )
keyword[return] identifier[verse_alliterations] , identifier[n_alliterations_lines] | def find_alliteration(self):
"""
Find alliterations in the complete verse.
:return:
"""
if len(self.phonological_features_text) == 0:
logger.error('No phonological transcription found')
raise ValueError # depends on [control=['if'], data=[]]
else:
first_sounds = []
for (i, line) in enumerate(self.phonological_features_text):
first_sounds.append([])
for (j, short_line) in enumerate(line):
first_sounds[i].append([])
for viisuord in short_line:
first_sounds[i][j].append(viisuord[0]) # depends on [control=['for'], data=['viisuord']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
verse_alliterations = []
n_alliterations_lines = []
for (i, first_sound_line) in enumerate(first_sounds):
if isinstance(self.long_lines[i][0], ShortLine) and isinstance(self.long_lines[i][1], ShortLine):
self.long_lines[i][0].get_first_sounds()
self.long_lines[i][1].get_first_sounds()
(alli, counter) = self.long_lines[i][0].find_alliterations(self.long_lines[i][1])
verse_alliterations.append(alli)
n_alliterations_lines.append(counter) # depends on [control=['if'], data=[]]
elif isinstance(self.long_lines[i][0], LongLine):
self.long_lines[i][0].get_first_sounds()
(alli, counter) = self.long_lines[i][0].find_alliterations()
verse_alliterations.append(alli)
n_alliterations_lines.append(counter) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (verse_alliterations, n_alliterations_lines) |
def get_year(self):
"""
Return the year from the database in the format expected by the URL.
"""
year = super(BuildableYearArchiveView, self).get_year()
fmt = self.get_year_format()
return date(int(year), 1, 1).strftime(fmt) | def function[get_year, parameter[self]]:
constant[
Return the year from the database in the format expected by the URL.
]
variable[year] assign[=] call[call[name[super], parameter[name[BuildableYearArchiveView], name[self]]].get_year, parameter[]]
variable[fmt] assign[=] call[name[self].get_year_format, parameter[]]
return[call[call[name[date], parameter[call[name[int], parameter[name[year]]], constant[1], constant[1]]].strftime, parameter[name[fmt]]]] | keyword[def] identifier[get_year] ( identifier[self] ):
literal[string]
identifier[year] = identifier[super] ( identifier[BuildableYearArchiveView] , identifier[self] ). identifier[get_year] ()
identifier[fmt] = identifier[self] . identifier[get_year_format] ()
keyword[return] identifier[date] ( identifier[int] ( identifier[year] ), literal[int] , literal[int] ). identifier[strftime] ( identifier[fmt] ) | def get_year(self):
"""
Return the year from the database in the format expected by the URL.
"""
year = super(BuildableYearArchiveView, self).get_year()
fmt = self.get_year_format()
return date(int(year), 1, 1).strftime(fmt) |
def get_ar_count(self):
"""Return the ar_count request paramteter
"""
ar_count = 1
try:
ar_count = int(self.request.form.get("ar_count", 1))
except (TypeError, ValueError):
ar_count = 1
return ar_count | def function[get_ar_count, parameter[self]]:
constant[Return the ar_count request paramteter
]
variable[ar_count] assign[=] constant[1]
<ast.Try object at 0x7da1b1d67190>
return[name[ar_count]] | keyword[def] identifier[get_ar_count] ( identifier[self] ):
literal[string]
identifier[ar_count] = literal[int]
keyword[try] :
identifier[ar_count] = identifier[int] ( identifier[self] . identifier[request] . identifier[form] . identifier[get] ( literal[string] , literal[int] ))
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[ar_count] = literal[int]
keyword[return] identifier[ar_count] | def get_ar_count(self):
"""Return the ar_count request paramteter
"""
ar_count = 1
try:
ar_count = int(self.request.form.get('ar_count', 1)) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
ar_count = 1 # depends on [control=['except'], data=[]]
return ar_count |
def recipe_seurat(adata, log=True, plot=False, copy=False):
"""Normalization and filtering as of Seurat [Satija15]_.
This uses a particular preprocessing.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
"""
if copy: adata = adata.copy()
pp.filter_cells(adata, min_genes=200)
pp.filter_genes(adata, min_cells=3)
pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
filter_result = filter_genes_dispersion(
adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)
if plot:
from ..plotting import _preprocessing as ppp # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=not log)
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
if log: pp.log1p(adata)
pp.scale(adata, max_value=10)
return adata if copy else None | def function[recipe_seurat, parameter[adata, log, plot, copy]]:
constant[Normalization and filtering as of Seurat [Satija15]_.
This uses a particular preprocessing.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
]
if name[copy] begin[:]
variable[adata] assign[=] call[name[adata].copy, parameter[]]
call[name[pp].filter_cells, parameter[name[adata]]]
call[name[pp].filter_genes, parameter[name[adata]]]
call[name[pp].normalize_per_cell, parameter[name[adata]]]
variable[filter_result] assign[=] call[name[filter_genes_dispersion], parameter[name[adata].X]]
if name[plot] begin[:]
from relative_module[plotting] import module[_preprocessing]
call[name[ppp].filter_genes_dispersion, parameter[name[filter_result]]]
call[name[adata]._inplace_subset_var, parameter[name[filter_result].gene_subset]]
if name[log] begin[:]
call[name[pp].log1p, parameter[name[adata]]]
call[name[pp].scale, parameter[name[adata]]]
return[<ast.IfExp object at 0x7da2054a5d20>] | keyword[def] identifier[recipe_seurat] ( identifier[adata] , identifier[log] = keyword[True] , identifier[plot] = keyword[False] , identifier[copy] = keyword[False] ):
literal[string]
keyword[if] identifier[copy] : identifier[adata] = identifier[adata] . identifier[copy] ()
identifier[pp] . identifier[filter_cells] ( identifier[adata] , identifier[min_genes] = literal[int] )
identifier[pp] . identifier[filter_genes] ( identifier[adata] , identifier[min_cells] = literal[int] )
identifier[pp] . identifier[normalize_per_cell] ( identifier[adata] , identifier[counts_per_cell_after] = literal[int] )
identifier[filter_result] = identifier[filter_genes_dispersion] (
identifier[adata] . identifier[X] , identifier[min_mean] = literal[int] , identifier[max_mean] = literal[int] , identifier[min_disp] = literal[int] , identifier[log] = keyword[not] identifier[log] )
keyword[if] identifier[plot] :
keyword[from] .. identifier[plotting] keyword[import] identifier[_preprocessing] keyword[as] identifier[ppp]
identifier[ppp] . identifier[filter_genes_dispersion] ( identifier[filter_result] , identifier[log] = keyword[not] identifier[log] )
identifier[adata] . identifier[_inplace_subset_var] ( identifier[filter_result] . identifier[gene_subset] )
keyword[if] identifier[log] : identifier[pp] . identifier[log1p] ( identifier[adata] )
identifier[pp] . identifier[scale] ( identifier[adata] , identifier[max_value] = literal[int] )
keyword[return] identifier[adata] keyword[if] identifier[copy] keyword[else] keyword[None] | def recipe_seurat(adata, log=True, plot=False, copy=False):
"""Normalization and filtering as of Seurat [Satija15]_.
This uses a particular preprocessing.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
"""
if copy:
adata = adata.copy() # depends on [control=['if'], data=[]]
pp.filter_cells(adata, min_genes=200)
pp.filter_genes(adata, min_cells=3)
pp.normalize_per_cell(adata, counts_per_cell_after=10000.0)
filter_result = filter_genes_dispersion(adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)
if plot:
from ..plotting import _preprocessing as ppp # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=not log) # depends on [control=['if'], data=[]]
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
if log:
pp.log1p(adata) # depends on [control=['if'], data=[]]
pp.scale(adata, max_value=10)
return adata if copy else None |
def register_for_deleted_resource(self, resource_id):
"""Registers for notification of a deleted resource.
``ResourceReceiver.deletedResources()`` is invoked when the
specified resource is deleted or removed from this bin.
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
to monitor
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceNotificationSession.register_for_deleted_resource
if not MONGO_LISTENER.receivers[self._ns][self._receiver]['d']:
MONGO_LISTENER.receivers[self._ns][self._receiver]['d'] = []
if isinstance(MONGO_LISTENER.receivers[self._ns][self._receiver]['d'], list):
MONGO_LISTENER.receivers[self._ns][self._receiver]['d'].append(resource_id.get_identifier()) | def function[register_for_deleted_resource, parameter[self, resource_id]]:
constant[Registers for notification of a deleted resource.
``ResourceReceiver.deletedResources()`` is invoked when the
specified resource is deleted or removed from this bin.
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
to monitor
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if <ast.UnaryOp object at 0x7da20c7ca830> begin[:]
call[call[call[name[MONGO_LISTENER].receivers][name[self]._ns]][name[self]._receiver]][constant[d]] assign[=] list[[]]
if call[name[isinstance], parameter[call[call[call[name[MONGO_LISTENER].receivers][name[self]._ns]][name[self]._receiver]][constant[d]], name[list]]] begin[:]
call[call[call[call[name[MONGO_LISTENER].receivers][name[self]._ns]][name[self]._receiver]][constant[d]].append, parameter[call[name[resource_id].get_identifier, parameter[]]]] | keyword[def] identifier[register_for_deleted_resource] ( identifier[self] , identifier[resource_id] ):
literal[string]
keyword[if] keyword[not] identifier[MONGO_LISTENER] . identifier[receivers] [ identifier[self] . identifier[_ns] ][ identifier[self] . identifier[_receiver] ][ literal[string] ]:
identifier[MONGO_LISTENER] . identifier[receivers] [ identifier[self] . identifier[_ns] ][ identifier[self] . identifier[_receiver] ][ literal[string] ]=[]
keyword[if] identifier[isinstance] ( identifier[MONGO_LISTENER] . identifier[receivers] [ identifier[self] . identifier[_ns] ][ identifier[self] . identifier[_receiver] ][ literal[string] ], identifier[list] ):
identifier[MONGO_LISTENER] . identifier[receivers] [ identifier[self] . identifier[_ns] ][ identifier[self] . identifier[_receiver] ][ literal[string] ]. identifier[append] ( identifier[resource_id] . identifier[get_identifier] ()) | def register_for_deleted_resource(self, resource_id):
"""Registers for notification of a deleted resource.
``ResourceReceiver.deletedResources()`` is invoked when the
specified resource is deleted or removed from this bin.
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
to monitor
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceNotificationSession.register_for_deleted_resource
if not MONGO_LISTENER.receivers[self._ns][self._receiver]['d']:
MONGO_LISTENER.receivers[self._ns][self._receiver]['d'] = [] # depends on [control=['if'], data=[]]
if isinstance(MONGO_LISTENER.receivers[self._ns][self._receiver]['d'], list):
MONGO_LISTENER.receivers[self._ns][self._receiver]['d'].append(resource_id.get_identifier()) # depends on [control=['if'], data=[]] |
def process(self, checksum, revision=None):
"""
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-machable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occured or `None`
"""
revert = None
if checksum in self: # potential revert
reverteds = list(self.up_to(checksum))
if len(reverteds) > 0: # If no reverted revisions, this is a noop
revert = Revert(revision, reverteds, self[checksum])
self.insert(checksum, revision)
return revert | def function[process, parameter[self, checksum, revision]]:
constant[
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-machable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occured or `None`
]
variable[revert] assign[=] constant[None]
if compare[name[checksum] in name[self]] begin[:]
variable[reverteds] assign[=] call[name[list], parameter[call[name[self].up_to, parameter[name[checksum]]]]]
if compare[call[name[len], parameter[name[reverteds]]] greater[>] constant[0]] begin[:]
variable[revert] assign[=] call[name[Revert], parameter[name[revision], name[reverteds], call[name[self]][name[checksum]]]]
call[name[self].insert, parameter[name[checksum], name[revision]]]
return[name[revert]] | keyword[def] identifier[process] ( identifier[self] , identifier[checksum] , identifier[revision] = keyword[None] ):
literal[string]
identifier[revert] = keyword[None]
keyword[if] identifier[checksum] keyword[in] identifier[self] :
identifier[reverteds] = identifier[list] ( identifier[self] . identifier[up_to] ( identifier[checksum] ))
keyword[if] identifier[len] ( identifier[reverteds] )> literal[int] :
identifier[revert] = identifier[Revert] ( identifier[revision] , identifier[reverteds] , identifier[self] [ identifier[checksum] ])
identifier[self] . identifier[insert] ( identifier[checksum] , identifier[revision] )
keyword[return] identifier[revert] | def process(self, checksum, revision=None):
"""
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-machable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occured or `None`
"""
revert = None
if checksum in self: # potential revert
reverteds = list(self.up_to(checksum))
if len(reverteds) > 0: # If no reverted revisions, this is a noop
revert = Revert(revision, reverteds, self[checksum]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['checksum', 'self']]
self.insert(checksum, revision)
return revert |
def split_to_discretized_mix_logistic_params(inputs):
"""Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
"""
batch, height, width, output_dim = shape_list(inputs) # pylint: disable=unbalanced-tuple-unpacking
num_mixtures = output_dim // 10
logits, locs, log_scales, coeffs = tf.split(
inputs,
num_or_size_splits=[
num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3
],
axis=-1)
split_shape = [batch, height, width, num_mixtures, 3]
locs = tf.reshape(locs, split_shape)
log_scales = tf.reshape(log_scales, split_shape)
log_scales = tf.maximum(log_scales, -7.)
coeffs = tf.reshape(coeffs, split_shape)
coeffs = tf.tanh(coeffs)
return logits, locs, log_scales, coeffs | def function[split_to_discretized_mix_logistic_params, parameter[inputs]]:
constant[Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
]
<ast.Tuple object at 0x7da1b2061870> assign[=] call[name[shape_list], parameter[name[inputs]]]
variable[num_mixtures] assign[=] binary_operation[name[output_dim] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]]
<ast.Tuple object at 0x7da1b20605e0> assign[=] call[name[tf].split, parameter[name[inputs]]]
variable[split_shape] assign[=] list[[<ast.Name object at 0x7da1b1e174f0>, <ast.Name object at 0x7da1b1e14400>, <ast.Name object at 0x7da1b1e14670>, <ast.Name object at 0x7da1b1e15ae0>, <ast.Constant object at 0x7da1b1e154b0>]]
variable[locs] assign[=] call[name[tf].reshape, parameter[name[locs], name[split_shape]]]
variable[log_scales] assign[=] call[name[tf].reshape, parameter[name[log_scales], name[split_shape]]]
variable[log_scales] assign[=] call[name[tf].maximum, parameter[name[log_scales], <ast.UnaryOp object at 0x7da1b1e15660>]]
variable[coeffs] assign[=] call[name[tf].reshape, parameter[name[coeffs], name[split_shape]]]
variable[coeffs] assign[=] call[name[tf].tanh, parameter[name[coeffs]]]
return[tuple[[<ast.Name object at 0x7da1b1e17d90>, <ast.Name object at 0x7da1b1e16a40>, <ast.Name object at 0x7da1b1e16260>, <ast.Name object at 0x7da1b1e17a30>]]] | keyword[def] identifier[split_to_discretized_mix_logistic_params] ( identifier[inputs] ):
literal[string]
identifier[batch] , identifier[height] , identifier[width] , identifier[output_dim] = identifier[shape_list] ( identifier[inputs] )
identifier[num_mixtures] = identifier[output_dim] // literal[int]
identifier[logits] , identifier[locs] , identifier[log_scales] , identifier[coeffs] = identifier[tf] . identifier[split] (
identifier[inputs] ,
identifier[num_or_size_splits] =[
identifier[num_mixtures] , identifier[num_mixtures] * literal[int] , identifier[num_mixtures] * literal[int] , identifier[num_mixtures] * literal[int]
],
identifier[axis] =- literal[int] )
identifier[split_shape] =[ identifier[batch] , identifier[height] , identifier[width] , identifier[num_mixtures] , literal[int] ]
identifier[locs] = identifier[tf] . identifier[reshape] ( identifier[locs] , identifier[split_shape] )
identifier[log_scales] = identifier[tf] . identifier[reshape] ( identifier[log_scales] , identifier[split_shape] )
identifier[log_scales] = identifier[tf] . identifier[maximum] ( identifier[log_scales] ,- literal[int] )
identifier[coeffs] = identifier[tf] . identifier[reshape] ( identifier[coeffs] , identifier[split_shape] )
identifier[coeffs] = identifier[tf] . identifier[tanh] ( identifier[coeffs] )
keyword[return] identifier[logits] , identifier[locs] , identifier[log_scales] , identifier[coeffs] | def split_to_discretized_mix_logistic_params(inputs):
"""Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
"""
(batch, height, width, output_dim) = shape_list(inputs) # pylint: disable=unbalanced-tuple-unpacking
num_mixtures = output_dim // 10
(logits, locs, log_scales, coeffs) = tf.split(inputs, num_or_size_splits=[num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3], axis=-1)
split_shape = [batch, height, width, num_mixtures, 3]
locs = tf.reshape(locs, split_shape)
log_scales = tf.reshape(log_scales, split_shape)
log_scales = tf.maximum(log_scales, -7.0)
coeffs = tf.reshape(coeffs, split_shape)
coeffs = tf.tanh(coeffs)
return (logits, locs, log_scales, coeffs) |
def out_format(data, out='nested', opts=None, **kwargs):
'''
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to sent to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
'''
if not opts:
opts = __opts__
return salt.output.out_format(data, out, opts=opts, **kwargs) | def function[out_format, parameter[data, out, opts]]:
constant[
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to sent to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
]
if <ast.UnaryOp object at 0x7da20e9b0100> begin[:]
variable[opts] assign[=] name[__opts__]
return[call[name[salt].output.out_format, parameter[name[data], name[out]]]] | keyword[def] identifier[out_format] ( identifier[data] , identifier[out] = literal[string] , identifier[opts] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[opts] :
identifier[opts] = identifier[__opts__]
keyword[return] identifier[salt] . identifier[output] . identifier[out_format] ( identifier[data] , identifier[out] , identifier[opts] = identifier[opts] ,** identifier[kwargs] ) | def out_format(data, out='nested', opts=None, **kwargs):
"""
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to sent to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
"""
if not opts:
opts = __opts__ # depends on [control=['if'], data=[]]
return salt.output.out_format(data, out, opts=opts, **kwargs) |
async def async_send(self, request, headers=None, content=None, **config):
"""Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
"""
loop = asyncio.get_event_loop()
if self.config.keep_alive and self._session is None:
self._session = requests.Session()
try:
session = self.creds.signed_session(self._session)
except TypeError: # Credentials does not support session injection
session = self.creds.signed_session()
if self._session is not None:
_LOGGER.warning(
"Your credentials class does not support session injection. Performance will not be at the maximum.")
kwargs = self._configure_session(session, **config)
if headers:
request.headers.update(headers)
if not kwargs.get('files'):
request.add_content(content)
if request.data:
kwargs['data'] = request.data
kwargs['headers'].update(request.headers)
response = None
try:
try:
future = loop.run_in_executor(
None,
functools.partial(
session.request,
request.method,
request.url,
**kwargs
)
)
return await future
except (oauth2.rfc6749.errors.InvalidGrantError,
oauth2.rfc6749.errors.TokenExpiredError) as err:
error = "Token expired or is invalid. Attempting to refresh."
_LOGGER.warning(error)
try:
session = self.creds.refresh_session()
kwargs = self._configure_session(session)
if request.data:
kwargs['data'] = request.data
kwargs['headers'].update(request.headers)
future = loop.run_in_executor(
None,
functools.partial(
session.request,
request.method,
request.url,
**kwargs
)
)
return await future
except (oauth2.rfc6749.errors.InvalidGrantError,
oauth2.rfc6749.errors.TokenExpiredError) as err:
msg = "Token expired or is invalid."
raise_with_traceback(TokenExpiredError, msg, err)
except (requests.RequestException,
oauth2.rfc6749.errors.OAuth2Error) as err:
msg = "Error occurred in request."
raise_with_traceback(ClientRequestError, msg, err)
finally:
self._close_local_session_if_necessary(response, session, kwargs['stream']) | <ast.AsyncFunctionDef object at 0x7da1b03e24d0> | keyword[async] keyword[def] identifier[async_send] ( identifier[self] , identifier[request] , identifier[headers] = keyword[None] , identifier[content] = keyword[None] ,** identifier[config] ):
literal[string]
identifier[loop] = identifier[asyncio] . identifier[get_event_loop] ()
keyword[if] identifier[self] . identifier[config] . identifier[keep_alive] keyword[and] identifier[self] . identifier[_session] keyword[is] keyword[None] :
identifier[self] . identifier[_session] = identifier[requests] . identifier[Session] ()
keyword[try] :
identifier[session] = identifier[self] . identifier[creds] . identifier[signed_session] ( identifier[self] . identifier[_session] )
keyword[except] identifier[TypeError] :
identifier[session] = identifier[self] . identifier[creds] . identifier[signed_session] ()
keyword[if] identifier[self] . identifier[_session] keyword[is] keyword[not] keyword[None] :
identifier[_LOGGER] . identifier[warning] (
literal[string] )
identifier[kwargs] = identifier[self] . identifier[_configure_session] ( identifier[session] ,** identifier[config] )
keyword[if] identifier[headers] :
identifier[request] . identifier[headers] . identifier[update] ( identifier[headers] )
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[request] . identifier[add_content] ( identifier[content] )
keyword[if] identifier[request] . identifier[data] :
identifier[kwargs] [ literal[string] ]= identifier[request] . identifier[data]
identifier[kwargs] [ literal[string] ]. identifier[update] ( identifier[request] . identifier[headers] )
identifier[response] = keyword[None]
keyword[try] :
keyword[try] :
identifier[future] = identifier[loop] . identifier[run_in_executor] (
keyword[None] ,
identifier[functools] . identifier[partial] (
identifier[session] . identifier[request] ,
identifier[request] . identifier[method] ,
identifier[request] . identifier[url] ,
** identifier[kwargs]
)
)
keyword[return] keyword[await] identifier[future]
keyword[except] ( identifier[oauth2] . identifier[rfc6749] . identifier[errors] . identifier[InvalidGrantError] ,
identifier[oauth2] . identifier[rfc6749] . identifier[errors] . identifier[TokenExpiredError] ) keyword[as] identifier[err] :
identifier[error] = literal[string]
identifier[_LOGGER] . identifier[warning] ( identifier[error] )
keyword[try] :
identifier[session] = identifier[self] . identifier[creds] . identifier[refresh_session] ()
identifier[kwargs] = identifier[self] . identifier[_configure_session] ( identifier[session] )
keyword[if] identifier[request] . identifier[data] :
identifier[kwargs] [ literal[string] ]= identifier[request] . identifier[data]
identifier[kwargs] [ literal[string] ]. identifier[update] ( identifier[request] . identifier[headers] )
identifier[future] = identifier[loop] . identifier[run_in_executor] (
keyword[None] ,
identifier[functools] . identifier[partial] (
identifier[session] . identifier[request] ,
identifier[request] . identifier[method] ,
identifier[request] . identifier[url] ,
** identifier[kwargs]
)
)
keyword[return] keyword[await] identifier[future]
keyword[except] ( identifier[oauth2] . identifier[rfc6749] . identifier[errors] . identifier[InvalidGrantError] ,
identifier[oauth2] . identifier[rfc6749] . identifier[errors] . identifier[TokenExpiredError] ) keyword[as] identifier[err] :
identifier[msg] = literal[string]
identifier[raise_with_traceback] ( identifier[TokenExpiredError] , identifier[msg] , identifier[err] )
keyword[except] ( identifier[requests] . identifier[RequestException] ,
identifier[oauth2] . identifier[rfc6749] . identifier[errors] . identifier[OAuth2Error] ) keyword[as] identifier[err] :
identifier[msg] = literal[string]
identifier[raise_with_traceback] ( identifier[ClientRequestError] , identifier[msg] , identifier[err] )
keyword[finally] :
identifier[self] . identifier[_close_local_session_if_necessary] ( identifier[response] , identifier[session] , identifier[kwargs] [ literal[string] ]) | async def async_send(self, request, headers=None, content=None, **config):
"""Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
"""
loop = asyncio.get_event_loop()
if self.config.keep_alive and self._session is None:
self._session = requests.Session() # depends on [control=['if'], data=[]]
try:
session = self.creds.signed_session(self._session) # depends on [control=['try'], data=[]]
except TypeError: # Credentials does not support session injection
session = self.creds.signed_session()
if self._session is not None:
_LOGGER.warning('Your credentials class does not support session injection. Performance will not be at the maximum.') # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
kwargs = self._configure_session(session, **config)
if headers:
request.headers.update(headers) # depends on [control=['if'], data=[]]
if not kwargs.get('files'):
request.add_content(content) # depends on [control=['if'], data=[]]
if request.data:
kwargs['data'] = request.data # depends on [control=['if'], data=[]]
kwargs['headers'].update(request.headers)
response = None
try:
try:
future = loop.run_in_executor(None, functools.partial(session.request, request.method, request.url, **kwargs))
return await future # depends on [control=['try'], data=[]]
except (oauth2.rfc6749.errors.InvalidGrantError, oauth2.rfc6749.errors.TokenExpiredError) as err:
error = 'Token expired or is invalid. Attempting to refresh.'
_LOGGER.warning(error) # depends on [control=['except'], data=[]]
try:
session = self.creds.refresh_session()
kwargs = self._configure_session(session)
if request.data:
kwargs['data'] = request.data # depends on [control=['if'], data=[]]
kwargs['headers'].update(request.headers)
future = loop.run_in_executor(None, functools.partial(session.request, request.method, request.url, **kwargs))
return await future # depends on [control=['try'], data=[]]
except (oauth2.rfc6749.errors.InvalidGrantError, oauth2.rfc6749.errors.TokenExpiredError) as err:
msg = 'Token expired or is invalid.'
raise_with_traceback(TokenExpiredError, msg, err) # depends on [control=['except'], data=['err']] # depends on [control=['try'], data=[]]
except (requests.RequestException, oauth2.rfc6749.errors.OAuth2Error) as err:
msg = 'Error occurred in request.'
raise_with_traceback(ClientRequestError, msg, err) # depends on [control=['except'], data=['err']]
finally:
self._close_local_session_if_necessary(response, session, kwargs['stream']) |
def _setup_logging():
"""Setup logging to log to nowhere by default.
For details, see:
http://docs.python.org/3/howto/logging.html#library-config
Internal function.
"""
import logging
logger = logging.getLogger('spotify-connect')
handler = logging.NullHandler()
logger.addHandler(handler) | def function[_setup_logging, parameter[]]:
constant[Setup logging to log to nowhere by default.
For details, see:
http://docs.python.org/3/howto/logging.html#library-config
Internal function.
]
import module[logging]
variable[logger] assign[=] call[name[logging].getLogger, parameter[constant[spotify-connect]]]
variable[handler] assign[=] call[name[logging].NullHandler, parameter[]]
call[name[logger].addHandler, parameter[name[handler]]] | keyword[def] identifier[_setup_logging] ():
literal[string]
keyword[import] identifier[logging]
identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[handler] = identifier[logging] . identifier[NullHandler] ()
identifier[logger] . identifier[addHandler] ( identifier[handler] ) | def _setup_logging():
"""Setup logging to log to nowhere by default.
For details, see:
http://docs.python.org/3/howto/logging.html#library-config
Internal function.
"""
import logging
logger = logging.getLogger('spotify-connect')
handler = logging.NullHandler()
logger.addHandler(handler) |
def execute(self, nc_file_list, index_list, in_weight_table,
            out_nc, grid_type, mp_lock):
    """Compute lateral inflow volumes and write them into *out_nc*.

    For each entry of *nc_file_list* (a runoff NetCDF file, or a list
    of files whose runoff is summed together), subsets the runoff grid
    to the lat/lon window referenced by the weight table, converts
    runoff to a volume per stream reach, and writes the result into
    the ``m3_riv`` variable of the pre-existing output file at the
    time index taken from the matching entry of *index_list*.

    :param nc_file_list: runoff file paths, or lists of paths whose
        runoff is summed per entry
    :param index_list: output time index for each entry of
        *nc_file_list*
    :param in_weight_table: path to the weight table linking grid
        cells to stream reach IDs
    :param out_nc: path to the inflow NetCDF file created beforehand
        by ``generateOutputInflowFile``
    :param grid_type: runoff grid identifier; ``'t255'`` selects the
        ERA Interim low-resolution de-accumulation path
    :param mp_lock: multiprocessing lock serializing writes to
        *out_nc* across worker processes
    :raises Exception: if *out_nc* does not exist, the file and index
        lists differ in length, or weight-table rows for a reach are
        inconsistent
    """
    # The output file must already exist; this method only appends.
    if not os.path.exists(out_nc):
        raise Exception("Outfile has not been created. "
                        "You need to run: generateOutputInflowFile "
                        "function ...")
    if len(nc_file_list) != len(index_list):
        raise Exception("ERROR: Number of runoff files not equal to "
                        "number of indices ...")
    # Normalize the first entry to a list; it serves as a
    # representative sample for validation and unit conversion.
    demo_file_list = nc_file_list[0]
    if not isinstance(nc_file_list[0], list):
        demo_file_list = [demo_file_list]
    self.data_validation(demo_file_list[0])
    self.read_in_weight_table(in_weight_table)
    conversion_factor = self.get_conversion_factor(demo_file_list[0],
                                                   len(demo_file_list))
    # Grid-cell indices (weight-table columns) for every table row.
    lon_ind_all = [int(i) for i in self.dict_list[self.header_wt[2]]]
    lat_ind_all = [int(j) for j in self.dict_list[self.header_wt[3]]]
    # Bounding box of all referenced cells -- only this window of the
    # runoff grid needs to be read from each file.
    min_lon_ind_all = min(lon_ind_all)
    max_lon_ind_all = max(lon_ind_all)
    min_lat_ind_all = min(lat_ind_all)
    max_lat_ind_all = max(lat_ind_all)
    lon_slice = slice(min_lon_ind_all, max_lon_ind_all + 1)
    lat_slice = slice(min_lat_ind_all, max_lat_ind_all + 1)
    index_new = []
    # combine inflow data
    for nc_file_array_index, nc_file_array in enumerate(nc_file_list):
        index = index_list[nc_file_array_index]
        if not isinstance(nc_file_array, list):
            nc_file_array = [nc_file_array]
        data_subset_all = None
        for nc_file in nc_file_array:
            # Validate the netcdf dataset
            self.data_validation(nc_file)
            # Read the netcdf dataset
            data_in_nc = Dataset(nc_file)
            # Runoff may be 2-D (lat, lon) or 3-D (time, lat, lon).
            runoff_dimension_size = \
                len(data_in_nc.variables[self.runoff_vars[0]].dimensions)
            if runoff_dimension_size == 2:
                # obtain subset of surface and subsurface runoff
                data_subset_runoff = \
                    data_in_nc.variables[self.runoff_vars[0]][
                        lat_slice, lon_slice]
                for var_name in self.runoff_vars[1:]:
                    data_subset_runoff += \
                        data_in_nc.variables[var_name][
                            lat_slice, lon_slice]
                # get runoff dims
                len_time_subset = 1
                len_lat_subset = data_subset_runoff.shape[0]
                len_lon_subset = data_subset_runoff.shape[1]
                # Flatten (lat, lon) into a single cell axis.
                data_subset_runoff = data_subset_runoff.reshape(
                    len_lat_subset * len_lon_subset)
            elif runoff_dimension_size == 3:
                # obtain subset of surface and subsurface runoff
                data_subset_runoff = \
                    data_in_nc.variables[self.runoff_vars[0]][
                        :, lat_slice, lon_slice]
                for var_name in self.runoff_vars[1:]:
                    data_subset_runoff += \
                        data_in_nc.variables[var_name][
                            :, lat_slice, lon_slice]
                # get runoff dims
                len_time_subset = data_subset_runoff.shape[0]
                len_lat_subset = data_subset_runoff.shape[1]
                len_lon_subset = data_subset_runoff.shape[2]
                # Flatten (lat, lon) into one cell axis per time step.
                data_subset_runoff = \
                    data_subset_runoff.reshape(
                        len_time_subset,
                        (len_lat_subset * len_lon_subset))
            data_in_nc.close()
            if not index_new:
                # Map each weight-table row to its position in the
                # flattened subset; computed once and reused for all
                # subsequent files (same grid window).
                for r in range(0, self.count):
                    ind_lat_orig = lat_ind_all[r]
                    ind_lon_orig = lon_ind_all[r]
                    index_new.append(
                        (ind_lat_orig - min_lat_ind_all) * len_lon_subset
                        + (ind_lon_orig - min_lon_ind_all))
            # obtain a new subset of data
            if runoff_dimension_size == 2:
                data_subset_new = data_subset_runoff[index_new]
            elif runoff_dimension_size == 3:
                data_subset_new = data_subset_runoff[:, index_new]
            # FILTER DATA
            try:
                # set masked values to zero
                data_subset_new = data_subset_new.filled(fill_value=0)
            except AttributeError:
                # Plain (non-masked) arrays have no ``filled`` method.
                pass
            # set negative values to zero
            data_subset_new[data_subset_new < 0] = 0
            # Sum runoff across the files of this group.
            if data_subset_all is None:
                data_subset_all = data_subset_new
            else:
                data_subset_all = np.add(data_subset_all, data_subset_new)
        if runoff_dimension_size == 3 and len_time_subset > 1:
            inflow_data = np.zeros((len_time_subset, self.size_stream_id))
        else:
            inflow_data = np.zeros(self.size_stream_id)
        pointer = 0
        # NOTE(review): ``xrange`` is Python 2 only.
        for stream_index in xrange(self.size_stream_id):
            npoints = int(self.dict_list[self.header_wt[4]][pointer])
            # Check if all npoints points correspond to the same streamID
            if len(set(self.dict_list[self.header_wt[0]][
                    pointer: (pointer + npoints)])) != 1:
                print("ROW INDEX {0}".format(pointer))
                print("COMID {0}".format(
                    self.dict_list[self.header_wt[0]][pointer]))
                raise Exception(self.error_messages[2])
            area_sqm_npoints = \
                np.array([float(k) for k in
                          self.dict_list[self.header_wt[1]][
                              pointer: (pointer + npoints)]])
            # assume data is incremental
            if runoff_dimension_size == 3:
                data_goal = data_subset_all[:, pointer:(pointer + npoints)]
            else:
                data_goal = data_subset_all[pointer:(pointer + npoints)]
            if grid_type == 't255':
                # A) ERA Interim Low Res (T255) - data is cumulative
                # from time 3/6/9/12
                # (time zero not included, so assumed to be zero)
                ro_first_half = \
                    np.concatenate([data_goal[0:1, ],
                                    np.subtract(data_goal[1:4, ],
                                                data_goal[0:3, ])])
                # from time 15/18/21/24
                # (time restarts at time 12, assumed to be zero)
                ro_second_half = \
                    np.concatenate([data_goal[4:5, ],
                                    np.subtract(data_goal[5:, ],
                                                data_goal[4:7, ])])
                ro_stream = \
                    np.multiply(
                        np.concatenate([ro_first_half, ro_second_half]),
                        area_sqm_npoints)
            else:
                # Incremental runoff: depth * cell area * unit factor.
                ro_stream = data_goal * area_sqm_npoints * \
                    conversion_factor
            # filter nan
            ro_stream[np.isnan(ro_stream)] = 0
            if ro_stream.any():
                if runoff_dimension_size == 3 and len_time_subset > 1:
                    inflow_data[:, stream_index] = ro_stream.sum(axis=1)
                else:
                    inflow_data[stream_index] = ro_stream.sum()
            pointer += npoints
        # only one process is allowed to write at a time to netcdf file
        mp_lock.acquire()
        data_out_nc = Dataset(out_nc, "a", format="NETCDF3_CLASSIC")
        if runoff_dimension_size == 3 and len_time_subset > 1:
            data_out_nc.variables['m3_riv'][
                index*len_time_subset:(index+1)*len_time_subset, :] = \
                inflow_data
        else:
            data_out_nc.variables['m3_riv'][index] = inflow_data
        data_out_nc.close()
        mp_lock.release()
constant[The source code of the tool.]
if <ast.UnaryOp object at 0x7da204347ee0> begin[:]
<ast.Raise object at 0x7da204345c60>
if compare[call[name[len], parameter[name[nc_file_list]]] not_equal[!=] call[name[len], parameter[name[index_list]]]] begin[:]
<ast.Raise object at 0x7da2043444f0>
variable[demo_file_list] assign[=] call[name[nc_file_list]][constant[0]]
if <ast.UnaryOp object at 0x7da204345a20> begin[:]
variable[demo_file_list] assign[=] list[[<ast.Name object at 0x7da204345c90>]]
call[name[self].data_validation, parameter[call[name[demo_file_list]][constant[0]]]]
call[name[self].read_in_weight_table, parameter[name[in_weight_table]]]
variable[conversion_factor] assign[=] call[name[self].get_conversion_factor, parameter[call[name[demo_file_list]][constant[0]], call[name[len], parameter[name[demo_file_list]]]]]
variable[lon_ind_all] assign[=] <ast.ListComp object at 0x7da204344370>
variable[lat_ind_all] assign[=] <ast.ListComp object at 0x7da2043473a0>
variable[min_lon_ind_all] assign[=] call[name[min], parameter[name[lon_ind_all]]]
variable[max_lon_ind_all] assign[=] call[name[max], parameter[name[lon_ind_all]]]
variable[min_lat_ind_all] assign[=] call[name[min], parameter[name[lat_ind_all]]]
variable[max_lat_ind_all] assign[=] call[name[max], parameter[name[lat_ind_all]]]
variable[lon_slice] assign[=] call[name[slice], parameter[name[min_lon_ind_all], binary_operation[name[max_lon_ind_all] + constant[1]]]]
variable[lat_slice] assign[=] call[name[slice], parameter[name[min_lat_ind_all], binary_operation[name[max_lat_ind_all] + constant[1]]]]
variable[index_new] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2043448b0>, <ast.Name object at 0x7da204346020>]]] in starred[call[name[enumerate], parameter[name[nc_file_list]]]] begin[:]
variable[index] assign[=] call[name[index_list]][name[nc_file_array_index]]
if <ast.UnaryOp object at 0x7da2043449d0> begin[:]
variable[nc_file_array] assign[=] list[[<ast.Name object at 0x7da204344dc0>]]
variable[data_subset_all] assign[=] constant[None]
for taget[name[nc_file]] in starred[name[nc_file_array]] begin[:]
call[name[self].data_validation, parameter[name[nc_file]]]
variable[data_in_nc] assign[=] call[name[Dataset], parameter[name[nc_file]]]
variable[runoff_dimension_size] assign[=] call[name[len], parameter[call[name[data_in_nc].variables][call[name[self].runoff_vars][constant[0]]].dimensions]]
if compare[name[runoff_dimension_size] equal[==] constant[2]] begin[:]
variable[data_subset_runoff] assign[=] call[call[name[data_in_nc].variables][call[name[self].runoff_vars][constant[0]]]][tuple[[<ast.Name object at 0x7da204347010>, <ast.Name object at 0x7da2043479d0>]]]
for taget[name[var_name]] in starred[call[name[self].runoff_vars][<ast.Slice object at 0x7da204344340>]] begin[:]
<ast.AugAssign object at 0x7da204347b20>
variable[len_time_subset] assign[=] constant[1]
variable[len_lat_subset] assign[=] call[name[data_subset_runoff].shape][constant[0]]
variable[len_lon_subset] assign[=] call[name[data_subset_runoff].shape][constant[1]]
variable[data_subset_runoff] assign[=] call[name[data_subset_runoff].reshape, parameter[binary_operation[name[len_lat_subset] * name[len_lon_subset]]]]
call[name[data_in_nc].close, parameter[]]
if <ast.UnaryOp object at 0x7da204344f10> begin[:]
for taget[name[r]] in starred[call[name[range], parameter[constant[0], name[self].count]]] begin[:]
variable[ind_lat_orig] assign[=] call[name[lat_ind_all]][name[r]]
variable[ind_lon_orig] assign[=] call[name[lon_ind_all]][name[r]]
call[name[index_new].append, parameter[binary_operation[binary_operation[binary_operation[name[ind_lat_orig] - name[min_lat_ind_all]] * name[len_lon_subset]] + binary_operation[name[ind_lon_orig] - name[min_lon_ind_all]]]]]
if compare[name[runoff_dimension_size] equal[==] constant[2]] begin[:]
variable[data_subset_new] assign[=] call[name[data_subset_runoff]][name[index_new]]
<ast.Try object at 0x7da1b0ba6ad0>
call[name[data_subset_new]][compare[name[data_subset_new] less[<] constant[0]]] assign[=] constant[0]
if compare[name[data_subset_all] is constant[None]] begin[:]
variable[data_subset_all] assign[=] name[data_subset_new]
if <ast.BoolOp object at 0x7da1b0ba5ea0> begin[:]
variable[inflow_data] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0ba7d90>, <ast.Attribute object at 0x7da1b0ba6b30>]]]]
variable[pointer] assign[=] constant[0]
for taget[name[stream_index]] in starred[call[name[xrange], parameter[name[self].size_stream_id]]] begin[:]
variable[npoints] assign[=] call[name[int], parameter[call[call[name[self].dict_list][call[name[self].header_wt][constant[4]]]][name[pointer]]]]
if compare[call[name[len], parameter[call[name[set], parameter[call[call[name[self].dict_list][call[name[self].header_wt][constant[0]]]][<ast.Slice object at 0x7da1b0ba5d50>]]]]] not_equal[!=] constant[1]] begin[:]
call[name[print], parameter[call[constant[ROW INDEX {0}].format, parameter[name[pointer]]]]]
call[name[print], parameter[call[constant[COMID {0}].format, parameter[call[call[name[self].dict_list][call[name[self].header_wt][constant[0]]]][name[pointer]]]]]]
<ast.Raise object at 0x7da1b0ba7be0>
variable[area_sqm_npoints] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b0ba5630>]]
if compare[name[runoff_dimension_size] equal[==] constant[3]] begin[:]
variable[data_goal] assign[=] call[name[data_subset_all]][tuple[[<ast.Slice object at 0x7da1b0ba68f0>, <ast.Slice object at 0x7da1b0ba4a90>]]]
if compare[name[grid_type] equal[==] constant[t255]] begin[:]
variable[ro_first_half] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da1b0ba50c0>, <ast.Call object at 0x7da1b0ba4eb0>]]]]
variable[ro_second_half] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da204567d30>, <ast.Call object at 0x7da2045661d0>]]]]
variable[ro_stream] assign[=] call[name[np].multiply, parameter[call[name[np].concatenate, parameter[list[[<ast.Name object at 0x7da204564af0>, <ast.Name object at 0x7da204566140>]]]], name[area_sqm_npoints]]]
call[name[ro_stream]][call[name[np].isnan, parameter[name[ro_stream]]]] assign[=] constant[0]
if call[name[ro_stream].any, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da204565150> begin[:]
call[name[inflow_data]][tuple[[<ast.Slice object at 0x7da204567760>, <ast.Name object at 0x7da204565b40>]]] assign[=] call[name[ro_stream].sum, parameter[]]
<ast.AugAssign object at 0x7da204567610>
call[name[mp_lock].acquire, parameter[]]
variable[data_out_nc] assign[=] call[name[Dataset], parameter[name[out_nc], constant[a]]]
if <ast.BoolOp object at 0x7da204564520> begin[:]
call[call[name[data_out_nc].variables][constant[m3_riv]]][tuple[[<ast.Slice object at 0x7da204567e50>, <ast.Slice object at 0x7da204567c40>]]] assign[=] name[inflow_data]
call[name[data_out_nc].close, parameter[]]
call[name[mp_lock].release, parameter[]] | keyword[def] identifier[execute] ( identifier[self] , identifier[nc_file_list] , identifier[index_list] , identifier[in_weight_table] ,
identifier[out_nc] , identifier[grid_type] , identifier[mp_lock] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[out_nc] ):
keyword[raise] identifier[Exception] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[len] ( identifier[nc_file_list] )!= identifier[len] ( identifier[index_list] ):
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
identifier[demo_file_list] = identifier[nc_file_list] [ literal[int] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[nc_file_list] [ literal[int] ], identifier[list] ):
identifier[demo_file_list] =[ identifier[demo_file_list] ]
identifier[self] . identifier[data_validation] ( identifier[demo_file_list] [ literal[int] ])
identifier[self] . identifier[read_in_weight_table] ( identifier[in_weight_table] )
identifier[conversion_factor] = identifier[self] . identifier[get_conversion_factor] ( identifier[demo_file_list] [ literal[int] ],
identifier[len] ( identifier[demo_file_list] ))
identifier[lon_ind_all] =[ identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]]]
identifier[lat_ind_all] =[ identifier[int] ( identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]]]
identifier[min_lon_ind_all] = identifier[min] ( identifier[lon_ind_all] )
identifier[max_lon_ind_all] = identifier[max] ( identifier[lon_ind_all] )
identifier[min_lat_ind_all] = identifier[min] ( identifier[lat_ind_all] )
identifier[max_lat_ind_all] = identifier[max] ( identifier[lat_ind_all] )
identifier[lon_slice] = identifier[slice] ( identifier[min_lon_ind_all] , identifier[max_lon_ind_all] + literal[int] )
identifier[lat_slice] = identifier[slice] ( identifier[min_lat_ind_all] , identifier[max_lat_ind_all] + literal[int] )
identifier[index_new] =[]
keyword[for] identifier[nc_file_array_index] , identifier[nc_file_array] keyword[in] identifier[enumerate] ( identifier[nc_file_list] ):
identifier[index] = identifier[index_list] [ identifier[nc_file_array_index] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[nc_file_array] , identifier[list] ):
identifier[nc_file_array] =[ identifier[nc_file_array] ]
identifier[data_subset_all] = keyword[None]
keyword[for] identifier[nc_file] keyword[in] identifier[nc_file_array] :
identifier[self] . identifier[data_validation] ( identifier[nc_file] )
identifier[data_in_nc] = identifier[Dataset] ( identifier[nc_file] )
identifier[runoff_dimension_size] = identifier[len] ( identifier[data_in_nc] . identifier[variables] [ identifier[self] . identifier[runoff_vars] [ literal[int] ]]. identifier[dimensions] )
keyword[if] identifier[runoff_dimension_size] == literal[int] :
identifier[data_subset_runoff] = identifier[data_in_nc] . identifier[variables] [ identifier[self] . identifier[runoff_vars] [ literal[int] ]][
identifier[lat_slice] , identifier[lon_slice] ]
keyword[for] identifier[var_name] keyword[in] identifier[self] . identifier[runoff_vars] [ literal[int] :]:
identifier[data_subset_runoff] += identifier[data_in_nc] . identifier[variables] [ identifier[var_name] ][
identifier[lat_slice] , identifier[lon_slice] ]
identifier[len_time_subset] = literal[int]
identifier[len_lat_subset] = identifier[data_subset_runoff] . identifier[shape] [ literal[int] ]
identifier[len_lon_subset] = identifier[data_subset_runoff] . identifier[shape] [ literal[int] ]
identifier[data_subset_runoff] = identifier[data_subset_runoff] . identifier[reshape] (
identifier[len_lat_subset] * identifier[len_lon_subset] )
keyword[elif] identifier[runoff_dimension_size] == literal[int] :
identifier[data_subset_runoff] = identifier[data_in_nc] . identifier[variables] [ identifier[self] . identifier[runoff_vars] [ literal[int] ]][
:, identifier[lat_slice] , identifier[lon_slice] ]
keyword[for] identifier[var_name] keyword[in] identifier[self] . identifier[runoff_vars] [ literal[int] :]:
identifier[data_subset_runoff] += identifier[data_in_nc] . identifier[variables] [ identifier[var_name] ][
:, identifier[lat_slice] , identifier[lon_slice] ]
identifier[len_time_subset] = identifier[data_subset_runoff] . identifier[shape] [ literal[int] ]
identifier[len_lat_subset] = identifier[data_subset_runoff] . identifier[shape] [ literal[int] ]
identifier[len_lon_subset] = identifier[data_subset_runoff] . identifier[shape] [ literal[int] ]
identifier[data_subset_runoff] = identifier[data_subset_runoff] . identifier[reshape] (
identifier[len_time_subset] ,
( identifier[len_lat_subset] * identifier[len_lon_subset] ))
identifier[data_in_nc] . identifier[close] ()
keyword[if] keyword[not] identifier[index_new] :
keyword[for] identifier[r] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[count] ):
identifier[ind_lat_orig] = identifier[lat_ind_all] [ identifier[r] ]
identifier[ind_lon_orig] = identifier[lon_ind_all] [ identifier[r] ]
identifier[index_new] . identifier[append] (
( identifier[ind_lat_orig] - identifier[min_lat_ind_all] )* identifier[len_lon_subset]
+( identifier[ind_lon_orig] - identifier[min_lon_ind_all] ))
keyword[if] identifier[runoff_dimension_size] == literal[int] :
identifier[data_subset_new] = identifier[data_subset_runoff] [ identifier[index_new] ]
keyword[elif] identifier[runoff_dimension_size] == literal[int] :
identifier[data_subset_new] = identifier[data_subset_runoff] [:, identifier[index_new] ]
keyword[try] :
identifier[data_subset_new] = identifier[data_subset_new] . identifier[filled] ( identifier[fill_value] = literal[int] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[data_subset_new] [ identifier[data_subset_new] < literal[int] ]= literal[int]
keyword[if] identifier[data_subset_all] keyword[is] keyword[None] :
identifier[data_subset_all] = identifier[data_subset_new]
keyword[else] :
identifier[data_subset_all] = identifier[np] . identifier[add] ( identifier[data_subset_all] , identifier[data_subset_new] )
keyword[if] identifier[runoff_dimension_size] == literal[int] keyword[and] identifier[len_time_subset] > literal[int] :
identifier[inflow_data] = identifier[np] . identifier[zeros] (( identifier[len_time_subset] , identifier[self] . identifier[size_stream_id] ))
keyword[else] :
identifier[inflow_data] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[size_stream_id] )
identifier[pointer] = literal[int]
keyword[for] identifier[stream_index] keyword[in] identifier[xrange] ( identifier[self] . identifier[size_stream_id] ):
identifier[npoints] = identifier[int] ( identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]][ identifier[pointer] ])
keyword[if] identifier[len] ( identifier[set] ( identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]][
identifier[pointer] :( identifier[pointer] + identifier[npoints] )]))!= literal[int] :
identifier[print] ( literal[string] . identifier[format] ( identifier[pointer] ))
identifier[print] ( literal[string] . identifier[format] (
identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]][ identifier[pointer] ]))
keyword[raise] identifier[Exception] ( identifier[self] . identifier[error_messages] [ literal[int] ])
identifier[area_sqm_npoints] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[k] ) keyword[for] identifier[k] keyword[in]
identifier[self] . identifier[dict_list] [ identifier[self] . identifier[header_wt] [ literal[int] ]][
identifier[pointer] :( identifier[pointer] + identifier[npoints] )]])
keyword[if] identifier[runoff_dimension_size] == literal[int] :
identifier[data_goal] = identifier[data_subset_all] [:, identifier[pointer] :( identifier[pointer] + identifier[npoints] )]
keyword[else] :
identifier[data_goal] = identifier[data_subset_all] [ identifier[pointer] :( identifier[pointer] + identifier[npoints] )]
keyword[if] identifier[grid_type] == literal[string] :
identifier[ro_first_half] = identifier[np] . identifier[concatenate] ([ identifier[data_goal] [ literal[int] : literal[int] ,],
identifier[np] . identifier[subtract] ( identifier[data_goal] [ literal[int] : literal[int] ,],
identifier[data_goal] [ literal[int] : literal[int] ,])])
identifier[ro_second_half] = identifier[np] . identifier[concatenate] ([ identifier[data_goal] [ literal[int] : literal[int] ,],
identifier[np] . identifier[subtract] ( identifier[data_goal] [ literal[int] :,],
identifier[data_goal] [ literal[int] : literal[int] ,])])
identifier[ro_stream] = identifier[np] . identifier[multiply] (
identifier[np] . identifier[concatenate] ([ identifier[ro_first_half] , identifier[ro_second_half] ]),
identifier[area_sqm_npoints] )
keyword[else] :
identifier[ro_stream] = identifier[data_goal] * identifier[area_sqm_npoints] * identifier[conversion_factor]
identifier[ro_stream] [ identifier[np] . identifier[isnan] ( identifier[ro_stream] )]= literal[int]
keyword[if] identifier[ro_stream] . identifier[any] ():
keyword[if] identifier[runoff_dimension_size] == literal[int] keyword[and] identifier[len_time_subset] > literal[int] :
identifier[inflow_data] [:, identifier[stream_index] ]= identifier[ro_stream] . identifier[sum] ( identifier[axis] = literal[int] )
keyword[else] :
identifier[inflow_data] [ identifier[stream_index] ]= identifier[ro_stream] . identifier[sum] ()
identifier[pointer] += identifier[npoints]
identifier[mp_lock] . identifier[acquire] ()
identifier[data_out_nc] = identifier[Dataset] ( identifier[out_nc] , literal[string] , identifier[format] = literal[string] )
keyword[if] identifier[runoff_dimension_size] == literal[int] keyword[and] identifier[len_time_subset] > literal[int] :
identifier[data_out_nc] . identifier[variables] [ literal[string] ][
identifier[index] * identifier[len_time_subset] :( identifier[index] + literal[int] )* identifier[len_time_subset] ,:]= identifier[inflow_data]
keyword[else] :
identifier[data_out_nc] . identifier[variables] [ literal[string] ][ identifier[index] ]= identifier[inflow_data]
identifier[data_out_nc] . identifier[close] ()
identifier[mp_lock] . identifier[release] () | def execute(self, nc_file_list, index_list, in_weight_table, out_nc, grid_type, mp_lock):
"""The source code of the tool."""
if not os.path.exists(out_nc):
raise Exception('Outfile has not been created. You need to run: generateOutputInflowFile function ...') # depends on [control=['if'], data=[]]
if len(nc_file_list) != len(index_list):
raise Exception('ERROR: Number of runoff files not equal to number of indices ...') # depends on [control=['if'], data=[]]
demo_file_list = nc_file_list[0]
if not isinstance(nc_file_list[0], list):
demo_file_list = [demo_file_list] # depends on [control=['if'], data=[]]
self.data_validation(demo_file_list[0])
self.read_in_weight_table(in_weight_table)
conversion_factor = self.get_conversion_factor(demo_file_list[0], len(demo_file_list)) # get indices of subset of data
lon_ind_all = [int(i) for i in self.dict_list[self.header_wt[2]]]
lat_ind_all = [int(j) for j in self.dict_list[self.header_wt[3]]] # Obtain a subset of runoff data based on the indices in the
# weight table
min_lon_ind_all = min(lon_ind_all)
max_lon_ind_all = max(lon_ind_all)
min_lat_ind_all = min(lat_ind_all)
max_lat_ind_all = max(lat_ind_all)
lon_slice = slice(min_lon_ind_all, max_lon_ind_all + 1)
lat_slice = slice(min_lat_ind_all, max_lat_ind_all + 1)
index_new = [] # combine inflow data
for (nc_file_array_index, nc_file_array) in enumerate(nc_file_list):
index = index_list[nc_file_array_index]
if not isinstance(nc_file_array, list):
nc_file_array = [nc_file_array] # depends on [control=['if'], data=[]]
data_subset_all = None
for nc_file in nc_file_array: # Validate the netcdf dataset
self.data_validation(nc_file) # Read the netcdf dataset
data_in_nc = Dataset(nc_file) # Calculate water inflows
runoff_dimension_size = len(data_in_nc.variables[self.runoff_vars[0]].dimensions)
if runoff_dimension_size == 2: # obtain subset of surface and subsurface runoff
data_subset_runoff = data_in_nc.variables[self.runoff_vars[0]][lat_slice, lon_slice]
for var_name in self.runoff_vars[1:]:
data_subset_runoff += data_in_nc.variables[var_name][lat_slice, lon_slice] # depends on [control=['for'], data=['var_name']] # get runoff dims
len_time_subset = 1
len_lat_subset = data_subset_runoff.shape[0]
len_lon_subset = data_subset_runoff.shape[1] # reshape the runoff
data_subset_runoff = data_subset_runoff.reshape(len_lat_subset * len_lon_subset) # depends on [control=['if'], data=[]]
elif runoff_dimension_size == 3: # obtain subset of surface and subsurface runoff
data_subset_runoff = data_in_nc.variables[self.runoff_vars[0]][:, lat_slice, lon_slice]
for var_name in self.runoff_vars[1:]:
data_subset_runoff += data_in_nc.variables[var_name][:, lat_slice, lon_slice] # depends on [control=['for'], data=['var_name']] # get runoff dims
len_time_subset = data_subset_runoff.shape[0]
len_lat_subset = data_subset_runoff.shape[1]
len_lon_subset = data_subset_runoff.shape[2] # reshape the runoff
data_subset_runoff = data_subset_runoff.reshape(len_time_subset, len_lat_subset * len_lon_subset) # depends on [control=['if'], data=[]]
data_in_nc.close()
if not index_new: # compute new indices based on the data_subset_surface
for r in range(0, self.count):
ind_lat_orig = lat_ind_all[r]
ind_lon_orig = lon_ind_all[r]
index_new.append((ind_lat_orig - min_lat_ind_all) * len_lon_subset + (ind_lon_orig - min_lon_ind_all)) # depends on [control=['for'], data=['r']] # depends on [control=['if'], data=[]] # obtain a new subset of data
if runoff_dimension_size == 2:
data_subset_new = data_subset_runoff[index_new] # depends on [control=['if'], data=[]]
elif runoff_dimension_size == 3:
data_subset_new = data_subset_runoff[:, index_new] # depends on [control=['if'], data=[]] # FILTER DATA
try: # set masked values to zero
data_subset_new = data_subset_new.filled(fill_value=0) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # set negative values to zero
data_subset_new[data_subset_new < 0] = 0 # combine data
if data_subset_all is None:
data_subset_all = data_subset_new # depends on [control=['if'], data=['data_subset_all']]
else:
data_subset_all = np.add(data_subset_all, data_subset_new) # depends on [control=['for'], data=['nc_file']]
if runoff_dimension_size == 3 and len_time_subset > 1:
inflow_data = np.zeros((len_time_subset, self.size_stream_id)) # depends on [control=['if'], data=[]]
else:
inflow_data = np.zeros(self.size_stream_id)
pointer = 0
for stream_index in xrange(self.size_stream_id):
npoints = int(self.dict_list[self.header_wt[4]][pointer]) # Check if all npoints points correspond to the same streamID
if len(set(self.dict_list[self.header_wt[0]][pointer:pointer + npoints])) != 1:
print('ROW INDEX {0}'.format(pointer))
print('COMID {0}'.format(self.dict_list[self.header_wt[0]][pointer]))
raise Exception(self.error_messages[2]) # depends on [control=['if'], data=[]]
area_sqm_npoints = np.array([float(k) for k in self.dict_list[self.header_wt[1]][pointer:pointer + npoints]]) # assume data is incremental
if runoff_dimension_size == 3:
data_goal = data_subset_all[:, pointer:pointer + npoints] # depends on [control=['if'], data=[]]
else:
data_goal = data_subset_all[pointer:pointer + npoints]
if grid_type == 't255': # A) ERA Interim Low Res (T255) - data is cumulative
# from time 3/6/9/12
# (time zero not included, so assumed to be zero)
ro_first_half = np.concatenate([data_goal[0:1,], np.subtract(data_goal[1:4,], data_goal[0:3,])]) # from time 15/18/21/24
# (time restarts at time 12, assumed to be zero)
ro_second_half = np.concatenate([data_goal[4:5,], np.subtract(data_goal[5:,], data_goal[4:7,])])
ro_stream = np.multiply(np.concatenate([ro_first_half, ro_second_half]), area_sqm_npoints) # depends on [control=['if'], data=[]]
else:
ro_stream = data_goal * area_sqm_npoints * conversion_factor # filter nan
ro_stream[np.isnan(ro_stream)] = 0
if ro_stream.any():
if runoff_dimension_size == 3 and len_time_subset > 1:
inflow_data[:, stream_index] = ro_stream.sum(axis=1) # depends on [control=['if'], data=[]]
else:
inflow_data[stream_index] = ro_stream.sum() # depends on [control=['if'], data=[]]
pointer += npoints # depends on [control=['for'], data=['stream_index']] # only one process is allowed to write at a time to netcdf file
mp_lock.acquire()
data_out_nc = Dataset(out_nc, 'a', format='NETCDF3_CLASSIC')
if runoff_dimension_size == 3 and len_time_subset > 1:
data_out_nc.variables['m3_riv'][index * len_time_subset:(index + 1) * len_time_subset, :] = inflow_data # depends on [control=['if'], data=[]]
else:
data_out_nc.variables['m3_riv'][index] = inflow_data
data_out_nc.close()
mp_lock.release() # depends on [control=['for'], data=[]] |
def index(self, element: Element) -> int:
    """
    Return the position of the first array item whose value equals
    *element* (after refraction). An error is raised when no item
    matches.

    >>> element = String('hello')
    >>> array = Array(content=[element])
    >>> array.index(element)
    0
    """
    from refract.refraction import refract
    # Refract the lookup value so plain values and elements compare
    # consistently against the stored content.
    refracted = refract(element)
    return self.content.index(refracted)
constant[
Return the index in the array of the first item whose value is element.
It is an error if there is no such item.
>>> element = String('hello')
>>> array = Array(content=[element])
>>> array.index(element)
0
]
from relative_module[refract.refraction] import module[refract]
return[call[name[self].content.index, parameter[call[name[refract], parameter[name[element]]]]]] | keyword[def] identifier[index] ( identifier[self] , identifier[element] : identifier[Element] )-> identifier[int] :
literal[string]
keyword[from] identifier[refract] . identifier[refraction] keyword[import] identifier[refract]
keyword[return] identifier[self] . identifier[content] . identifier[index] ( identifier[refract] ( identifier[element] )) | def index(self, element: Element) -> int:
"""
Return the index in the array of the first item whose value is element.
It is an error if there is no such item.
>>> element = String('hello')
>>> array = Array(content=[element])
>>> array.index(element)
0
"""
from refract.refraction import refract
return self.content.index(refract(element)) |
def __init_keystone_session_v3(self, check=False):
    """
    Build and return a Keystone API v3 session, or ``None`` when the
    v3 machinery is unavailable or (with *check*) rejected.

    .. note::

      Only password authentication is supported; token or other
      plug-ins are not currently handled.
    """
    try:
        # The v3 identity plug-in is optional and can be absent on
        # old client installations.
        from keystoneauth1.identity import v3 as keystone_v3
    except ImportError:
        log.warning("Cannot load Keystone API v3 library.")
        return None
    password_auth = keystone_v3.Password(
        auth_url=self._os_auth_url,
        username=self._os_username,
        password=self._os_password,
        user_domain_name=self._os_user_domain_name,
        project_domain_name=self._os_project_domain_name,
        project_name=self._os_tenant_name,
    )
    sess = keystoneauth1.session.Session(
        auth=password_auth, verify=self._os_cacert)
    if check:
        log.debug("Checking that Keystone API v3 session works...")
        try:
            # Probe the session: any invalid credentials make this
            # first API call raise.
            nova = nova_client.Client(
                self._compute_api_version, session=sess)
            nova.flavors.list()
        except keystoneauth1.exceptions.NotFound as err:
            log.warning("Creating Keystone v3 session failed: %s", err)
            return None
        except keystoneauth1.exceptions.ClientException as err:
            log.error("OpenStack server rejected request (likely configuration error?): %s", err)
            # NOTE(review): returning None here silently drops the
            # error -- consider raising instead.
            return None
    # Reaching this point means the v3 session is considered valid.
    log.info("Using Keystone API v3 session to authenticate to OpenStack")
    return sess
constant[
Return a new session object, created using Keystone API v3.
.. note::
Note that the only supported authN method is password authentication;
token or other plug-ins are not currently supported.
]
<ast.Try object at 0x7da1b08c96c0>
variable[auth] assign[=] call[name[keystone_v3].Password, parameter[]]
variable[sess] assign[=] call[name[keystoneauth1].session.Session, parameter[]]
if name[check] begin[:]
call[name[log].debug, parameter[constant[Checking that Keystone API v3 session works...]]]
<ast.Try object at 0x7da1b08c8dc0>
call[name[log].info, parameter[constant[Using Keystone API v3 session to authenticate to OpenStack]]]
return[name[sess]] | keyword[def] identifier[__init_keystone_session_v3] ( identifier[self] , identifier[check] = keyword[False] ):
literal[string]
keyword[try] :
keyword[from] identifier[keystoneauth1] . identifier[identity] keyword[import] identifier[v3] keyword[as] identifier[keystone_v3]
keyword[except] identifier[ImportError] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] keyword[None]
identifier[auth] = identifier[keystone_v3] . identifier[Password] (
identifier[auth_url] = identifier[self] . identifier[_os_auth_url] ,
identifier[username] = identifier[self] . identifier[_os_username] ,
identifier[password] = identifier[self] . identifier[_os_password] ,
identifier[user_domain_name] = identifier[self] . identifier[_os_user_domain_name] ,
identifier[project_domain_name] = identifier[self] . identifier[_os_project_domain_name] ,
identifier[project_name] = identifier[self] . identifier[_os_tenant_name] ,
)
identifier[sess] = identifier[keystoneauth1] . identifier[session] . identifier[Session] ( identifier[auth] = identifier[auth] , identifier[verify] = identifier[self] . identifier[_os_cacert] )
keyword[if] identifier[check] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[nova] = identifier[nova_client] . identifier[Client] ( identifier[self] . identifier[_compute_api_version] , identifier[session] = identifier[sess] )
identifier[nova] . identifier[flavors] . identifier[list] ()
keyword[except] identifier[keystoneauth1] . identifier[exceptions] . identifier[NotFound] keyword[as] identifier[err] :
identifier[log] . identifier[warning] ( literal[string] , identifier[err] )
keyword[return] keyword[None]
keyword[except] identifier[keystoneauth1] . identifier[exceptions] . identifier[ClientException] keyword[as] identifier[err] :
identifier[log] . identifier[error] ( literal[string] , identifier[err] )
keyword[return] keyword[None]
identifier[log] . identifier[info] ( literal[string] )
keyword[return] identifier[sess] | def __init_keystone_session_v3(self, check=False):
"""
Return a new session object, created using Keystone API v3.
.. note::
Note that the only supported authN method is password authentication;
token or other plug-ins are not currently supported.
"""
try:
# may fail on Python 2.6?
from keystoneauth1.identity import v3 as keystone_v3 # depends on [control=['try'], data=[]]
except ImportError:
log.warning('Cannot load Keystone API v3 library.')
return None # depends on [control=['except'], data=[]]
auth = keystone_v3.Password(auth_url=self._os_auth_url, username=self._os_username, password=self._os_password, user_domain_name=self._os_user_domain_name, project_domain_name=self._os_project_domain_name, project_name=self._os_tenant_name)
sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
if check:
log.debug('Checking that Keystone API v3 session works...')
try:
# if session is invalid, the following will raise some exception
nova = nova_client.Client(self._compute_api_version, session=sess)
nova.flavors.list() # depends on [control=['try'], data=[]]
except keystoneauth1.exceptions.NotFound as err:
log.warning('Creating Keystone v3 session failed: %s', err)
return None # depends on [control=['except'], data=['err']]
except keystoneauth1.exceptions.ClientException as err:
log.error('OpenStack server rejected request (likely configuration error?): %s', err)
return None # FIXME: should we be raising an error instead? # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]]
# if we got to this point, v3 session is valid
log.info('Using Keystone API v3 session to authenticate to OpenStack')
return sess |
def objIngest(obj_file):
import sys,os,math,re
import pyfits
import MOPfiles
obj=MOPfiles.read(obj_file)
"""
The SQL description of the source table
+-------------+---------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-------------+---------+------+-----+---------+----------------+
| sourceID | int(11) | | PRI | NULL | auto_increment |
| x_pix | float | YES | MUL | NULL | |
| y_pix | float | YES | | NULL | |
| iso_flux | float | YES | | NULL | |
| iso_err | float | YES | | NULL | |
| aper_flux | float | YES | | NULL | |
| aper_err | float | YES | | NULL | |
| iso_area | float | YES | | NULL | |
| kron_radius | float | YES | MUL | NULL | |
| elongation | float | YES | | NULL | |
| cxx | float | YES | | NULL | |
| cyy | float | YES | | NULL | |
| cxy | float | YES | | NULL | |
| max_flux | float | YES | | NULL | |
| max_int | float | YES | | NULL | |
| mag_dao | float | YES | MUL | NULL | |
| merr_dao | float | YES | | NULL | |
| sky_cts | float | YES | | NULL | |
| chi2 | float | YES | | NULL | |
| npix | float | YES | | NULL | |
| sharp | float | YES | | NULL | |
| ra_deg | float | YES | MUL | NULL | |
| dec_deg | float | YES | | NULL | |
+-------------+---------+------+-----+---------+----------------+
"""
"""
Columns in the SOURCE table...
## X Y FLUX_ISO FLUXERR_ISO FLUX_APER FLUXERR_APER ISOAREA_IMAGE KRON_RADIUS ELONGATION CXX_IMAGE CYY_IMAGE CXY_IMAGE FLUX_MAX ID MAX_INT FLUX MERR SKY ELON X^2 N_PIX MAG SHARP SIZE
"""
### The mapping
obj['hdu2sql']={'MAX_INT': 'peak',
'FLUX': 'flux',
'MAG': 'mag',
'MERR': 'merr',
'SKY': 'sky',
'ELON': 'elongation',
'X^2': 'chi2',
'N_PIX': 'npix',
'SHARP': 'sharpness',
'Y': 'yPix',
'X': 'xPix',
'SIZE': 'size',
'RA': 'raDeg',
'DEC': 'decDeg',
}
MOPfiles.store(obj)
return | def function[objIngest, parameter[obj_file]]:
import module[sys], module[os], module[math], module[re]
import module[pyfits]
import module[MOPfiles]
variable[obj] assign[=] call[name[MOPfiles].read, parameter[name[obj_file]]]
constant[
The SQL description of the source table
+-------------+---------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-------------+---------+------+-----+---------+----------------+
| sourceID | int(11) | | PRI | NULL | auto_increment |
| x_pix | float | YES | MUL | NULL | |
| y_pix | float | YES | | NULL | |
| iso_flux | float | YES | | NULL | |
| iso_err | float | YES | | NULL | |
| aper_flux | float | YES | | NULL | |
| aper_err | float | YES | | NULL | |
| iso_area | float | YES | | NULL | |
| kron_radius | float | YES | MUL | NULL | |
| elongation | float | YES | | NULL | |
| cxx | float | YES | | NULL | |
| cyy | float | YES | | NULL | |
| cxy | float | YES | | NULL | |
| max_flux | float | YES | | NULL | |
| max_int | float | YES | | NULL | |
| mag_dao | float | YES | MUL | NULL | |
| merr_dao | float | YES | | NULL | |
| sky_cts | float | YES | | NULL | |
| chi2 | float | YES | | NULL | |
| npix | float | YES | | NULL | |
| sharp | float | YES | | NULL | |
| ra_deg | float | YES | MUL | NULL | |
| dec_deg | float | YES | | NULL | |
+-------------+---------+------+-----+---------+----------------+
]
constant[
Columns in the SOURCE table...
## X Y FLUX_ISO FLUXERR_ISO FLUX_APER FLUXERR_APER ISOAREA_IMAGE KRON_RADIUS ELONGATION CXX_IMAGE CYY_IMAGE CXY_IMAGE FLUX_MAX ID MAX_INT FLUX MERR SKY ELON X^2 N_PIX MAG SHARP SIZE
]
call[name[obj]][constant[hdu2sql]] assign[=] dictionary[[<ast.Constant object at 0x7da1b19319c0>, <ast.Constant object at 0x7da1b19308b0>, <ast.Constant object at 0x7da1b1930880>, <ast.Constant object at 0x7da1b1933580>, <ast.Constant object at 0x7da1b1933d30>, <ast.Constant object at 0x7da1b1930dc0>, <ast.Constant object at 0x7da1b1932b30>, <ast.Constant object at 0x7da1b1933a30>, <ast.Constant object at 0x7da1b1932320>, <ast.Constant object at 0x7da1b1931090>, <ast.Constant object at 0x7da1b19323e0>, <ast.Constant object at 0x7da1b1933700>, <ast.Constant object at 0x7da1b19319f0>, <ast.Constant object at 0x7da1b1932ad0>], [<ast.Constant object at 0x7da1b19311b0>, <ast.Constant object at 0x7da1b1931870>, <ast.Constant object at 0x7da1b1933100>, <ast.Constant object at 0x7da1b1932d10>, <ast.Constant object at 0x7da1b1933910>, <ast.Constant object at 0x7da1b1932dd0>, <ast.Constant object at 0x7da1b1932bf0>, <ast.Constant object at 0x7da1b1933790>, <ast.Constant object at 0x7da1b19314b0>, <ast.Constant object at 0x7da1b19329b0>, <ast.Constant object at 0x7da1b1931ab0>, <ast.Constant object at 0x7da1b19323b0>, <ast.Constant object at 0x7da1b1932a40>, <ast.Constant object at 0x7da1b1933cd0>]]
call[name[MOPfiles].store, parameter[name[obj]]]
return[None] | keyword[def] identifier[objIngest] ( identifier[obj_file] ):
keyword[import] identifier[sys] , identifier[os] , identifier[math] , identifier[re]
keyword[import] identifier[pyfits]
keyword[import] identifier[MOPfiles]
identifier[obj] = identifier[MOPfiles] . identifier[read] ( identifier[obj_file] )
literal[string]
literal[string]
identifier[obj] [ literal[string] ]={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[MOPfiles] . identifier[store] ( identifier[obj] )
keyword[return] | def objIngest(obj_file):
import sys, os, math, re
import pyfits
import MOPfiles
obj = MOPfiles.read(obj_file)
'\n The SQL description of the source table\n +-------------+---------+------+-----+---------+----------------+\n | Field | Type | Null | Key | Default | Extra |\n +-------------+---------+------+-----+---------+----------------+\n | sourceID | int(11) | | PRI | NULL | auto_increment |\n | x_pix | float | YES | MUL | NULL | |\n | y_pix | float | YES | | NULL | |\n | iso_flux | float | YES | | NULL | |\n | iso_err | float | YES | | NULL | |\n | aper_flux | float | YES | | NULL | |\n | aper_err | float | YES | | NULL | |\n | iso_area | float | YES | | NULL | |\n | kron_radius | float | YES | MUL | NULL | |\n | elongation | float | YES | | NULL | |\n | cxx | float | YES | | NULL | |\n | cyy | float | YES | | NULL | |\n | cxy | float | YES | | NULL | |\n | max_flux | float | YES | | NULL | |\n | max_int | float | YES | | NULL | |\n | mag_dao | float | YES | MUL | NULL | |\n | merr_dao | float | YES | | NULL | |\n | sky_cts | float | YES | | NULL | |\n | chi2 | float | YES | | NULL | |\n | npix | float | YES | | NULL | |\n | sharp | float | YES | | NULL | |\n | ra_deg | float | YES | MUL | NULL | |\n | dec_deg | float | YES | | NULL | |\n +-------------+---------+------+-----+---------+----------------+\n '
'\n Columns in the SOURCE table...\n ## X Y FLUX_ISO FLUXERR_ISO FLUX_APER FLUXERR_APER ISOAREA_IMAGE KRON_RADIUS ELONGATION CXX_IMAGE CYY_IMAGE CXY_IMAGE FLUX_MAX ID MAX_INT FLUX MERR SKY ELON X^2 N_PIX MAG SHARP SIZE\n\n ' ### The mapping
obj['hdu2sql'] = {'MAX_INT': 'peak', 'FLUX': 'flux', 'MAG': 'mag', 'MERR': 'merr', 'SKY': 'sky', 'ELON': 'elongation', 'X^2': 'chi2', 'N_PIX': 'npix', 'SHARP': 'sharpness', 'Y': 'yPix', 'X': 'xPix', 'SIZE': 'size', 'RA': 'raDeg', 'DEC': 'decDeg'}
MOPfiles.store(obj)
return |
def create_translate_dictionaries(symbols):
u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols`
create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse).
method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)`
each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key)
DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\uE63E"` for DoCoMo's sun emoji is not expected. expected character
for DoCoMo's sun is decoded character from `"\xF8\x9F"` (actually decoded unicode of `"\xF8\xF9"` is `u"\uE63E"` however not all emoji
can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis`
attribute and unicode formatted is `usjis` attribute.)
"""
unicode_to_text = {}
unicode_to_docomo_img = {}
unicode_to_kddi_img = {}
unicode_to_softbank_img = {}
unicode_to_google = {}
unicode_to_docomo = {}
unicode_to_kddi = {}
unicode_to_softbank = {}
google_to_unicode = {}
docomo_to_unicode = {}
kddi_to_unicode = {}
softbank_to_unicode = {}
for x in symbols:
if x.unicode.keyable:
unicode_to_text[unicode(x.unicode)] = x.unicode.fallback
unicode_to_docomo_img[unicode(x.unicode)] = x.docomo.thumbnail
unicode_to_kddi_img[unicode(x.unicode)] = x.kddi.thumbnail
unicode_to_softbank_img[unicode(x.unicode)] = x.softbank.thumbnail
unicode_to_google[unicode(x.unicode)] = unicode(x.google)
unicode_to_docomo[unicode(x.unicode)] = unicode(x.docomo)
unicode_to_kddi[unicode(x.unicode)] = unicode(x.kddi)
unicode_to_softbank[unicode(x.unicode)] = unicode(x.softbank)
if x.google.keyable: google_to_unicode[unicode(x.google)] = unicode(x.unicode)
if x.docomo.keyable: docomo_to_unicode[unicode(x.docomo)] = unicode(x.unicode)
if x.kddi.keyable: kddi_to_unicode[unicode(x.kddi)] = unicode(x.unicode)
if x.softbank.keyable: softbank_to_unicode[unicode(x.softbank)] = unicode(x.unicode)
return {
# forward reverse
'text': (None, unicode_to_text),
'docomo_img': (None, unicode_to_docomo_img),
'kddi_img': (None, unicode_to_kddi_img),
'softbank_img': (None, unicode_to_softbank_img),
'google': (google_to_unicode, unicode_to_google),
'docomo': (docomo_to_unicode, unicode_to_docomo),
'kddi': (kddi_to_unicode, unicode_to_kddi),
'softbank': (softbank_to_unicode, unicode_to_softbank),
} | def function[create_translate_dictionaries, parameter[symbols]]:
constant[create translate dictionaries for text, google, docomo, kddi and softbank via `symbols`
create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse).
method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)`
each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key)
DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u""` for DoCoMo's sun emoji is not expected. expected character
for DoCoMo's sun is decoded character from `"ø"` (actually decoded unicode of `"øù"` is `u""` however not all emoji
can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis`
attribute and unicode formatted is `usjis` attribute.)
]
variable[unicode_to_text] assign[=] dictionary[[], []]
variable[unicode_to_docomo_img] assign[=] dictionary[[], []]
variable[unicode_to_kddi_img] assign[=] dictionary[[], []]
variable[unicode_to_softbank_img] assign[=] dictionary[[], []]
variable[unicode_to_google] assign[=] dictionary[[], []]
variable[unicode_to_docomo] assign[=] dictionary[[], []]
variable[unicode_to_kddi] assign[=] dictionary[[], []]
variable[unicode_to_softbank] assign[=] dictionary[[], []]
variable[google_to_unicode] assign[=] dictionary[[], []]
variable[docomo_to_unicode] assign[=] dictionary[[], []]
variable[kddi_to_unicode] assign[=] dictionary[[], []]
variable[softbank_to_unicode] assign[=] dictionary[[], []]
for taget[name[x]] in starred[name[symbols]] begin[:]
if name[x].unicode.keyable begin[:]
call[name[unicode_to_text]][call[name[unicode], parameter[name[x].unicode]]] assign[=] name[x].unicode.fallback
call[name[unicode_to_docomo_img]][call[name[unicode], parameter[name[x].unicode]]] assign[=] name[x].docomo.thumbnail
call[name[unicode_to_kddi_img]][call[name[unicode], parameter[name[x].unicode]]] assign[=] name[x].kddi.thumbnail
call[name[unicode_to_softbank_img]][call[name[unicode], parameter[name[x].unicode]]] assign[=] name[x].softbank.thumbnail
call[name[unicode_to_google]][call[name[unicode], parameter[name[x].unicode]]] assign[=] call[name[unicode], parameter[name[x].google]]
call[name[unicode_to_docomo]][call[name[unicode], parameter[name[x].unicode]]] assign[=] call[name[unicode], parameter[name[x].docomo]]
call[name[unicode_to_kddi]][call[name[unicode], parameter[name[x].unicode]]] assign[=] call[name[unicode], parameter[name[x].kddi]]
call[name[unicode_to_softbank]][call[name[unicode], parameter[name[x].unicode]]] assign[=] call[name[unicode], parameter[name[x].softbank]]
if name[x].google.keyable begin[:]
call[name[google_to_unicode]][call[name[unicode], parameter[name[x].google]]] assign[=] call[name[unicode], parameter[name[x].unicode]]
if name[x].docomo.keyable begin[:]
call[name[docomo_to_unicode]][call[name[unicode], parameter[name[x].docomo]]] assign[=] call[name[unicode], parameter[name[x].unicode]]
if name[x].kddi.keyable begin[:]
call[name[kddi_to_unicode]][call[name[unicode], parameter[name[x].kddi]]] assign[=] call[name[unicode], parameter[name[x].unicode]]
if name[x].softbank.keyable begin[:]
call[name[softbank_to_unicode]][call[name[unicode], parameter[name[x].softbank]]] assign[=] call[name[unicode], parameter[name[x].unicode]]
return[dictionary[[<ast.Constant object at 0x7da18f09e4d0>, <ast.Constant object at 0x7da18f09ef20>, <ast.Constant object at 0x7da18f09c8e0>, <ast.Constant object at 0x7da18f09c400>, <ast.Constant object at 0x7da18f09e5c0>, <ast.Constant object at 0x7da18f09cac0>, <ast.Constant object at 0x7da18f09f880>, <ast.Constant object at 0x7da18f09c040>], [<ast.Tuple object at 0x7da18f09fa30>, <ast.Tuple object at 0x7da18f09ef50>, <ast.Tuple object at 0x7da18f09d870>, <ast.Tuple object at 0x7da18f09c0a0>, <ast.Tuple object at 0x7da18f09f3d0>, <ast.Tuple object at 0x7da18f09d900>, <ast.Tuple object at 0x7da18f09d240>, <ast.Tuple object at 0x7da18f09fcd0>]]] | keyword[def] identifier[create_translate_dictionaries] ( identifier[symbols] ):
literal[string]
identifier[unicode_to_text] ={}
identifier[unicode_to_docomo_img] ={}
identifier[unicode_to_kddi_img] ={}
identifier[unicode_to_softbank_img] ={}
identifier[unicode_to_google] ={}
identifier[unicode_to_docomo] ={}
identifier[unicode_to_kddi] ={}
identifier[unicode_to_softbank] ={}
identifier[google_to_unicode] ={}
identifier[docomo_to_unicode] ={}
identifier[kddi_to_unicode] ={}
identifier[softbank_to_unicode] ={}
keyword[for] identifier[x] keyword[in] identifier[symbols] :
keyword[if] identifier[x] . identifier[unicode] . identifier[keyable] :
identifier[unicode_to_text] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[x] . identifier[unicode] . identifier[fallback]
identifier[unicode_to_docomo_img] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[x] . identifier[docomo] . identifier[thumbnail]
identifier[unicode_to_kddi_img] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[x] . identifier[kddi] . identifier[thumbnail]
identifier[unicode_to_softbank_img] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[x] . identifier[softbank] . identifier[thumbnail]
identifier[unicode_to_google] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[unicode] ( identifier[x] . identifier[google] )
identifier[unicode_to_docomo] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[unicode] ( identifier[x] . identifier[docomo] )
identifier[unicode_to_kddi] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[unicode] ( identifier[x] . identifier[kddi] )
identifier[unicode_to_softbank] [ identifier[unicode] ( identifier[x] . identifier[unicode] )]= identifier[unicode] ( identifier[x] . identifier[softbank] )
keyword[if] identifier[x] . identifier[google] . identifier[keyable] : identifier[google_to_unicode] [ identifier[unicode] ( identifier[x] . identifier[google] )]= identifier[unicode] ( identifier[x] . identifier[unicode] )
keyword[if] identifier[x] . identifier[docomo] . identifier[keyable] : identifier[docomo_to_unicode] [ identifier[unicode] ( identifier[x] . identifier[docomo] )]= identifier[unicode] ( identifier[x] . identifier[unicode] )
keyword[if] identifier[x] . identifier[kddi] . identifier[keyable] : identifier[kddi_to_unicode] [ identifier[unicode] ( identifier[x] . identifier[kddi] )]= identifier[unicode] ( identifier[x] . identifier[unicode] )
keyword[if] identifier[x] . identifier[softbank] . identifier[keyable] : identifier[softbank_to_unicode] [ identifier[unicode] ( identifier[x] . identifier[softbank] )]= identifier[unicode] ( identifier[x] . identifier[unicode] )
keyword[return] {
literal[string] :( keyword[None] , identifier[unicode_to_text] ),
literal[string] :( keyword[None] , identifier[unicode_to_docomo_img] ),
literal[string] :( keyword[None] , identifier[unicode_to_kddi_img] ),
literal[string] :( keyword[None] , identifier[unicode_to_softbank_img] ),
literal[string] :( identifier[google_to_unicode] , identifier[unicode_to_google] ),
literal[string] :( identifier[docomo_to_unicode] , identifier[unicode_to_docomo] ),
literal[string] :( identifier[kddi_to_unicode] , identifier[unicode_to_kddi] ),
literal[string] :( identifier[softbank_to_unicode] , identifier[unicode_to_softbank] ),
} | def create_translate_dictionaries(symbols):
u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols`
create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse).
method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)`
each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key)
DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\ue63e"` for DoCoMo's sun emoji is not expected. expected character
for DoCoMo's sun is decoded character from `"ø\x9f"` (actually decoded unicode of `"øù"` is `u"\ue63e"` however not all emoji
can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis`
attribute and unicode formatted is `usjis` attribute.)
"""
unicode_to_text = {}
unicode_to_docomo_img = {}
unicode_to_kddi_img = {}
unicode_to_softbank_img = {}
unicode_to_google = {}
unicode_to_docomo = {}
unicode_to_kddi = {}
unicode_to_softbank = {}
google_to_unicode = {}
docomo_to_unicode = {}
kddi_to_unicode = {}
softbank_to_unicode = {}
for x in symbols:
if x.unicode.keyable:
unicode_to_text[unicode(x.unicode)] = x.unicode.fallback
unicode_to_docomo_img[unicode(x.unicode)] = x.docomo.thumbnail
unicode_to_kddi_img[unicode(x.unicode)] = x.kddi.thumbnail
unicode_to_softbank_img[unicode(x.unicode)] = x.softbank.thumbnail
unicode_to_google[unicode(x.unicode)] = unicode(x.google)
unicode_to_docomo[unicode(x.unicode)] = unicode(x.docomo)
unicode_to_kddi[unicode(x.unicode)] = unicode(x.kddi)
unicode_to_softbank[unicode(x.unicode)] = unicode(x.softbank) # depends on [control=['if'], data=[]]
if x.google.keyable:
google_to_unicode[unicode(x.google)] = unicode(x.unicode) # depends on [control=['if'], data=[]]
if x.docomo.keyable:
docomo_to_unicode[unicode(x.docomo)] = unicode(x.unicode) # depends on [control=['if'], data=[]]
if x.kddi.keyable:
kddi_to_unicode[unicode(x.kddi)] = unicode(x.unicode) # depends on [control=['if'], data=[]]
if x.softbank.keyable:
softbank_to_unicode[unicode(x.softbank)] = unicode(x.unicode) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']]
# forward reverse
return {'text': (None, unicode_to_text), 'docomo_img': (None, unicode_to_docomo_img), 'kddi_img': (None, unicode_to_kddi_img), 'softbank_img': (None, unicode_to_softbank_img), 'google': (google_to_unicode, unicode_to_google), 'docomo': (docomo_to_unicode, unicode_to_docomo), 'kddi': (kddi_to_unicode, unicode_to_kddi), 'softbank': (softbank_to_unicode, unicode_to_softbank)} |
def label(self, request, tag):
"""
Render the label of the wrapped L{Parameter} or L{ChoiceParameter} instance.
"""
if self.parameter.label:
tag[self.parameter.label]
return tag | def function[label, parameter[self, request, tag]]:
constant[
Render the label of the wrapped L{Parameter} or L{ChoiceParameter} instance.
]
if name[self].parameter.label begin[:]
call[name[tag]][name[self].parameter.label]
return[name[tag]] | keyword[def] identifier[label] ( identifier[self] , identifier[request] , identifier[tag] ):
literal[string]
keyword[if] identifier[self] . identifier[parameter] . identifier[label] :
identifier[tag] [ identifier[self] . identifier[parameter] . identifier[label] ]
keyword[return] identifier[tag] | def label(self, request, tag):
"""
Render the label of the wrapped L{Parameter} or L{ChoiceParameter} instance.
"""
if self.parameter.label:
tag[self.parameter.label] # depends on [control=['if'], data=[]]
return tag |
def main(argv=None):
"""General function for converting between ReSpecTh and ChemKED files based on extension.
"""
parser = ArgumentParser(
description='Convert between ReSpecTh XML file and ChemKED YAML file '
'automatically based on file extension.'
)
parser.add_argument('-i', '--input',
type=str,
required=True,
help='Input filename (e.g., "file1.yaml" or "file2.xml")'
)
parser.add_argument('-o', '--output',
type=str,
required=False,
default='',
help='Output filename (e.g., "file1.xml" or "file2.yaml")'
)
parser.add_argument('-fa', '--file-author',
dest='file_author',
type=str,
required=False,
default='',
help='File author name to override original'
)
parser.add_argument('-fo', '--file-author-orcid',
dest='file_author_orcid',
type=str,
required=False,
default='',
help='File author ORCID'
)
args = parser.parse_args(argv)
if os.path.splitext(args.input)[1] == '.xml' and os.path.splitext(args.output)[1] == '.yaml':
respth2ck(['-i', args.input, '-o', args.output, '-fa', args.file_author,
'-fo', args.file_author_orcid])
elif os.path.splitext(args.input)[1] == '.yaml' and os.path.splitext(args.output)[1] == '.xml':
c = chemked.ChemKED(yaml_file=args.input)
c.convert_to_ReSpecTh(args.output)
elif os.path.splitext(args.input)[1] == '.xml' and os.path.splitext(args.output)[1] == '.xml':
raise KeywordError('Cannot convert .xml to .xml')
elif os.path.splitext(args.input)[1] == '.yaml' and os.path.splitext(args.output)[1] == '.yaml':
raise KeywordError('Cannot convert .yaml to .yaml')
else:
raise KeywordError('Input/output args need to be .xml/.yaml') | def function[main, parameter[argv]]:
constant[General function for converting between ReSpecTh and ChemKED files based on extension.
]
variable[parser] assign[=] call[name[ArgumentParser], parameter[]]
call[name[parser].add_argument, parameter[constant[-i], constant[--input]]]
call[name[parser].add_argument, parameter[constant[-o], constant[--output]]]
call[name[parser].add_argument, parameter[constant[-fa], constant[--file-author]]]
call[name[parser].add_argument, parameter[constant[-fo], constant[--file-author-orcid]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[argv]]]
if <ast.BoolOp object at 0x7da1b25643a0> begin[:]
call[name[respth2ck], parameter[list[[<ast.Constant object at 0x7da1b2452110>, <ast.Attribute object at 0x7da1b2451de0>, <ast.Constant object at 0x7da1b2451d80>, <ast.Attribute object at 0x7da1b2451d50>, <ast.Constant object at 0x7da1b2452c50>, <ast.Attribute object at 0x7da1b2452c80>, <ast.Constant object at 0x7da1b2452ce0>, <ast.Attribute object at 0x7da1b2452d10>]]]] | keyword[def] identifier[main] ( identifier[argv] = keyword[None] ):
literal[string]
identifier[parser] = identifier[ArgumentParser] (
identifier[description] = literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[type] = identifier[str] ,
identifier[required] = keyword[True] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[type] = identifier[str] ,
identifier[required] = keyword[False] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[str] ,
identifier[required] = keyword[False] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[str] ,
identifier[required] = keyword[False] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[argv] )
keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[input] )[ literal[int] ]== literal[string] keyword[and] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[output] )[ literal[int] ]== literal[string] :
identifier[respth2ck] ([ literal[string] , identifier[args] . identifier[input] , literal[string] , identifier[args] . identifier[output] , literal[string] , identifier[args] . identifier[file_author] ,
literal[string] , identifier[args] . identifier[file_author_orcid] ])
keyword[elif] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[input] )[ literal[int] ]== literal[string] keyword[and] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[output] )[ literal[int] ]== literal[string] :
identifier[c] = identifier[chemked] . identifier[ChemKED] ( identifier[yaml_file] = identifier[args] . identifier[input] )
identifier[c] . identifier[convert_to_ReSpecTh] ( identifier[args] . identifier[output] )
keyword[elif] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[input] )[ literal[int] ]== literal[string] keyword[and] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[output] )[ literal[int] ]== literal[string] :
keyword[raise] identifier[KeywordError] ( literal[string] )
keyword[elif] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[input] )[ literal[int] ]== literal[string] keyword[and] identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[output] )[ literal[int] ]== literal[string] :
keyword[raise] identifier[KeywordError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[KeywordError] ( literal[string] ) | def main(argv=None):
"""General function for converting between ReSpecTh and ChemKED files based on extension.
"""
parser = ArgumentParser(description='Convert between ReSpecTh XML file and ChemKED YAML file automatically based on file extension.')
parser.add_argument('-i', '--input', type=str, required=True, help='Input filename (e.g., "file1.yaml" or "file2.xml")')
parser.add_argument('-o', '--output', type=str, required=False, default='', help='Output filename (e.g., "file1.xml" or "file2.yaml")')
parser.add_argument('-fa', '--file-author', dest='file_author', type=str, required=False, default='', help='File author name to override original')
parser.add_argument('-fo', '--file-author-orcid', dest='file_author_orcid', type=str, required=False, default='', help='File author ORCID')
args = parser.parse_args(argv)
if os.path.splitext(args.input)[1] == '.xml' and os.path.splitext(args.output)[1] == '.yaml':
respth2ck(['-i', args.input, '-o', args.output, '-fa', args.file_author, '-fo', args.file_author_orcid]) # depends on [control=['if'], data=[]]
elif os.path.splitext(args.input)[1] == '.yaml' and os.path.splitext(args.output)[1] == '.xml':
c = chemked.ChemKED(yaml_file=args.input)
c.convert_to_ReSpecTh(args.output) # depends on [control=['if'], data=[]]
elif os.path.splitext(args.input)[1] == '.xml' and os.path.splitext(args.output)[1] == '.xml':
raise KeywordError('Cannot convert .xml to .xml') # depends on [control=['if'], data=[]]
elif os.path.splitext(args.input)[1] == '.yaml' and os.path.splitext(args.output)[1] == '.yaml':
raise KeywordError('Cannot convert .yaml to .yaml') # depends on [control=['if'], data=[]]
else:
raise KeywordError('Input/output args need to be .xml/.yaml') |
def restore(self, name, ttl, value, replace=False):
    """Recreate key ``name`` from *value*, a serialized payload previously
    obtained with DUMP.

    When *replace* is true the REPLACE option is appended so an existing
    key is overwritten.
    """
    extra = ('REPLACE',) if replace else ()
    return self.execute_command('RESTORE', name, ttl, value, *extra)
constant[
Create a key using the provided serialized value, previously obtained
using DUMP.
]
variable[params] assign[=] list[[<ast.Name object at 0x7da1b1f95ff0>, <ast.Name object at 0x7da1b1f94e80>, <ast.Name object at 0x7da1b1f95840>]]
if name[replace] begin[:]
call[name[params].append, parameter[constant[REPLACE]]]
return[call[name[self].execute_command, parameter[constant[RESTORE], <ast.Starred object at 0x7da1b1f96e90>]]] | keyword[def] identifier[restore] ( identifier[self] , identifier[name] , identifier[ttl] , identifier[value] , identifier[replace] = keyword[False] ):
literal[string]
identifier[params] =[ identifier[name] , identifier[ttl] , identifier[value] ]
keyword[if] identifier[replace] :
identifier[params] . identifier[append] ( literal[string] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] ,* identifier[params] ) | def restore(self, name, ttl, value, replace=False):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
"""
params = [name, ttl, value]
if replace:
params.append('REPLACE') # depends on [control=['if'], data=[]]
return self.execute_command('RESTORE', *params) |
def _mainthread_poll_readable(self):
    """Poll the receive selector for readable client sockets.

    Each socket that reports ``EVENT_READ`` is unregistered from the
    selector and handed to a worker thread running
    ``_subthread_handle_readable``.
    """
    ready = self._recv_selector.select(self.block_time)
    for selector_key, event_mask in ready:
        if event_mask != selectors.EVENT_READ:
            continue
        sock = selector_key.fileobj
        self._recv_selector.unregister(sock)
        self._threads_limiter.start_thread(
            target=self._subthread_handle_readable,
            args=(sock,))
constant[Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
]
variable[events] assign[=] call[name[self]._recv_selector.select, parameter[name[self].block_time]]
for taget[tuple[[<ast.Name object at 0x7da1b241c970>, <ast.Name object at 0x7da1b241c940>]]] in starred[name[events]] begin[:]
if compare[name[mask] equal[==] name[selectors].EVENT_READ] begin[:]
call[name[self]._recv_selector.unregister, parameter[name[key].fileobj]]
call[name[self]._threads_limiter.start_thread, parameter[]] | keyword[def] identifier[_mainthread_poll_readable] ( identifier[self] ):
literal[string]
identifier[events] = identifier[self] . identifier[_recv_selector] . identifier[select] ( identifier[self] . identifier[block_time] )
keyword[for] identifier[key] , identifier[mask] keyword[in] identifier[events] :
keyword[if] identifier[mask] == identifier[selectors] . identifier[EVENT_READ] :
identifier[self] . identifier[_recv_selector] . identifier[unregister] ( identifier[key] . identifier[fileobj] )
identifier[self] . identifier[_threads_limiter] . identifier[start_thread] ( identifier[target] = identifier[self] . identifier[_subthread_handle_readable] ,
identifier[args] =( identifier[key] . identifier[fileobj] ,)) | def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
"""
events = self._recv_selector.select(self.block_time)
for (key, mask) in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable, args=(key.fileobj,)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def download_financialzip():
    """Download TDX financial data archives into the download/ directory.

    Archives already present locally with a matching MD5 checksum are
    skipped; the names of every file actually fetched (or refreshed) are
    returned as a list.
    """
    fetched = []
    for filename, checksum in get_filename():
        local_path = '{}{}{}'.format(download_path, os.sep, filename)
        # Skip files that already exist locally and pass the checksum test.
        if filename in os.listdir(download_path) and checksum == QA_util_file_md5(local_path):
            print('FILE {} is already in {}'.format(filename, download_path))
            continue
        print('CURRENTLY GET/UPDATE {}'.format(filename[0:12]))
        response = requests.get('http://down.tdx.com.cn:8001/fin/{}'.format(filename))
        with open(local_path, "wb") as out:
            out.write(response.content)
        fetched.append(filename)
    return fetched
return res | def function[download_financialzip, parameter[]]:
constant[
会创建一个download/文件夹
]
variable[result] assign[=] call[name[get_filename], parameter[]]
variable[res] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2063550>, <ast.Name object at 0x7da1b2062800>]]] in starred[name[result]] begin[:]
if <ast.BoolOp object at 0x7da1b2063460> begin[:]
call[name[print], parameter[call[constant[FILE {} is already in {}].format, parameter[name[item], name[download_path]]]]]
return[name[res]] | keyword[def] identifier[download_financialzip] ():
literal[string]
identifier[result] = identifier[get_filename] ()
identifier[res] =[]
keyword[for] identifier[item] , identifier[md5] keyword[in] identifier[result] :
keyword[if] identifier[item] keyword[in] identifier[os] . identifier[listdir] ( identifier[download_path] ) keyword[and] identifier[md5] == identifier[QA_util_file_md5] ( literal[string] . identifier[format] ( identifier[download_path] , identifier[os] . identifier[sep] , identifier[item] )):
identifier[print] ( literal[string] . identifier[format] ( identifier[item] , identifier[download_path] ))
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[item] [ literal[int] : literal[int] ]))
identifier[r] = identifier[requests] . identifier[get] ( literal[string] . identifier[format] ( identifier[item] ))
identifier[file] = literal[string] . identifier[format] ( identifier[download_path] , identifier[os] . identifier[sep] , identifier[item] )
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[code] :
identifier[code] . identifier[write] ( identifier[r] . identifier[content] )
identifier[res] . identifier[append] ( identifier[item] )
keyword[return] identifier[res] | def download_financialzip():
"""
会创建一个download/文件夹
"""
result = get_filename()
res = []
for (item, md5) in result:
if item in os.listdir(download_path) and md5 == QA_util_file_md5('{}{}{}'.format(download_path, os.sep, item)):
print('FILE {} is already in {}'.format(item, download_path)) # depends on [control=['if'], data=[]]
else:
print('CURRENTLY GET/UPDATE {}'.format(item[0:12]))
r = requests.get('http://down.tdx.com.cn:8001/fin/{}'.format(item))
file = '{}{}{}'.format(download_path, os.sep, item)
with open(file, 'wb') as code:
code.write(r.content) # depends on [control=['with'], data=['code']]
res.append(item) # depends on [control=['for'], data=[]]
return res |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.