code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
                    half_kpts_first_relax=False, auto_continue=False):
    """
    Returns a list of three jobs to perform an optimization for any
    metaGGA functional. There is an initial calculation of the
    GGA wavefunction which is fed into the initial metaGGA optimization
    to precondition the electronic structure optimizer. The metaGGA
    optimization is performed using the double relaxation scheme.

    Args:
        vasp_cmd: Command to run VASP.
        auto_npar (bool): Automatically tune NPAR for the jobs.
        ediffg (float): Force convergence criterion passed to the
            double relaxation jobs.
        half_kpts_first_relax (bool): Use half the k-points for the
            first relaxation to speed it up.
        auto_continue (bool): Accepted for interface compatibility;
            not used by this method.

    Returns:
        list: VaspJob instances (precondition + double relaxation).
    """
    incar = Incar.from_file("INCAR")
    # Defaults to using the SCAN metaGGA
    metaGGA = incar.get("METAGGA", "SCAN")
    # Pre-optimize WAVECAR and structure using regular GGA:
    # disable the metaGGA, keep the wavefunction, and do no ionic steps.
    pre_opt_settings = [{"dict": "INCAR",
                        "action": {"_set": {"METAGGA": None,
                                            "LWAVE": True,
                                            "NSW": 0}}}]
    jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                    final=False, suffix=".precondition",
                    settings_override=pre_opt_settings)]
    # Finish with regular double relaxation style run using SCAN
    jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,
                                              ediffg=ediffg,
                                              half_kpts_first_relax=half_kpts_first_relax))
    # Ensure the first relaxation doesn't overwrite the original inputs
    jobs[1].backup = False
    # Update double_relaxation job to start from the pre-optimized run:
    # restore the metaGGA/NSW/LWAVE settings and continue from CONTCAR.
    post_opt_settings = [{"dict": "INCAR",
                         "action": {"_set": {"METAGGA": metaGGA, "ISTART": 1,
                                             "NSW": incar.get("NSW", 99),
                                             "LWAVE": incar.get("LWAVE", False)}}},
                        {"file": "CONTCAR",
                         "action": {"_file_copy": {"dest": "POSCAR"}}}]
    if jobs[1].settings_override:
        post_opt_settings = jobs[1].settings_override + post_opt_settings
    jobs[1].settings_override = post_opt_settings
    return jobs
constant[
Returns a list of thres jobs to perform an optimization for any
metaGGA functional. There is an initial calculation of the
GGA wavefunction which is fed into the initial metaGGA optimization
to precondition the electronic structure optimizer. The metaGGA
optimization is performed using the double relaxation scheme
]
variable[incar] assign[=] call[name[Incar].from_file, parameter[constant[INCAR]]]
variable[metaGGA] assign[=] call[name[incar].get, parameter[constant[METAGGA], constant[SCAN]]]
variable[pre_opt_setings] assign[=] list[[<ast.Dict object at 0x7da1b26af8b0>]]
variable[jobs] assign[=] list[[<ast.Call object at 0x7da1b26ac490>]]
call[name[jobs].extend, parameter[call[name[VaspJob].double_relaxation_run, parameter[name[vasp_cmd]]]]]
call[name[jobs]][constant[1]].backup assign[=] constant[False]
variable[post_opt_settings] assign[=] list[[<ast.Dict object at 0x7da1b26ae7a0>, <ast.Dict object at 0x7da1b04144c0>]]
if call[name[jobs]][constant[1]].settings_override begin[:]
variable[post_opt_settings] assign[=] binary_operation[call[name[jobs]][constant[1]].settings_override + name[post_opt_settings]]
call[name[jobs]][constant[1]].settings_override assign[=] name[post_opt_settings]
return[name[jobs]] | keyword[def] identifier[metagga_opt_run] ( identifier[cls] , identifier[vasp_cmd] , identifier[auto_npar] = keyword[True] , identifier[ediffg] =- literal[int] ,
identifier[half_kpts_first_relax] = keyword[False] , identifier[auto_continue] = keyword[False] ):
literal[string]
identifier[incar] = identifier[Incar] . identifier[from_file] ( literal[string] )
identifier[metaGGA] = identifier[incar] . identifier[get] ( literal[string] , literal[string] )
identifier[pre_opt_setings] =[{ literal[string] : literal[string] ,
literal[string] :{ literal[string] :{ literal[string] : keyword[None] ,
literal[string] : keyword[True] ,
literal[string] : literal[int] }}}]
identifier[jobs] =[ identifier[VaspJob] ( identifier[vasp_cmd] , identifier[auto_npar] = identifier[auto_npar] ,
identifier[final] = keyword[False] , identifier[suffix] = literal[string] ,
identifier[settings_override] = identifier[pre_opt_setings] )]
identifier[jobs] . identifier[extend] ( identifier[VaspJob] . identifier[double_relaxation_run] ( identifier[vasp_cmd] , identifier[auto_npar] = identifier[auto_npar] ,
identifier[ediffg] = identifier[ediffg] ,
identifier[half_kpts_first_relax] = identifier[half_kpts_first_relax] ))
identifier[jobs] [ literal[int] ]. identifier[backup] = keyword[False]
identifier[post_opt_settings] =[{ literal[string] : literal[string] ,
literal[string] :{ literal[string] :{ literal[string] : identifier[metaGGA] , literal[string] : literal[int] ,
literal[string] : identifier[incar] . identifier[get] ( literal[string] , literal[int] ),
literal[string] : identifier[incar] . identifier[get] ( literal[string] , keyword[False] )}}},
{ literal[string] : literal[string] ,
literal[string] :{ literal[string] :{ literal[string] : literal[string] }}}]
keyword[if] identifier[jobs] [ literal[int] ]. identifier[settings_override] :
identifier[post_opt_settings] = identifier[jobs] [ literal[int] ]. identifier[settings_override] + identifier[post_opt_settings]
identifier[jobs] [ literal[int] ]. identifier[settings_override] = identifier[post_opt_settings]
keyword[return] identifier[jobs] | def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False):
"""
Returns a list of thres jobs to perform an optimization for any
metaGGA functional. There is an initial calculation of the
GGA wavefunction which is fed into the initial metaGGA optimization
to precondition the electronic structure optimizer. The metaGGA
optimization is performed using the double relaxation scheme
"""
incar = Incar.from_file('INCAR')
# Defaults to using the SCAN metaGGA
metaGGA = incar.get('METAGGA', 'SCAN')
# Pre optimze WAVECAR and structure using regular GGA
pre_opt_setings = [{'dict': 'INCAR', 'action': {'_set': {'METAGGA': None, 'LWAVE': True, 'NSW': 0}}}]
jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, final=False, suffix='.precondition', settings_override=pre_opt_setings)]
# Finish with regular double relaxation style run using SCAN
jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=ediffg, half_kpts_first_relax=half_kpts_first_relax))
# Ensure the first relaxation doesn't overwrite the original inputs
jobs[1].backup = False
# Update double_relaxation job to start from pre-optimized run
post_opt_settings = [{'dict': 'INCAR', 'action': {'_set': {'METAGGA': metaGGA, 'ISTART': 1, 'NSW': incar.get('NSW', 99), 'LWAVE': incar.get('LWAVE', False)}}}, {'file': 'CONTCAR', 'action': {'_file_copy': {'dest': 'POSCAR'}}}]
if jobs[1].settings_override:
post_opt_settings = jobs[1].settings_override + post_opt_settings # depends on [control=['if'], data=[]]
jobs[1].settings_override = post_opt_settings
return jobs |
def arg_types(**kwargs):
    """
    Mark the expected types of certain arguments. Arguments for which
    no types are provided default to strings. To specify an argument
    type, give this decorator a keyword argument, where the argument
    name is the name of the function argument and the value is a
    callable taking one argument, which will convert a string to a
    value of that type.
    Note that the 'bool' type is treated specially.
    """
    def decorator(func):
        # Reuse an existing registry (from a prior decoration) or
        # attach a fresh one, then record the declared converters.
        registry = getattr(func, '_bark_types', None)
        if registry is None:
            registry = {}
            func._bark_types = registry
        registry.update(kwargs)
        return func
    return decorator
constant[
Mark the expected types of certain arguments. Arguments for which
no types are provided default to strings. To specify an argument
type, give this decorator a keyword argument, where the argument
name is the name of the function argument and the value is a
callable taking one argument, which will convert a string to a
value of that type.
Note that the 'bool' type is treated specially.
]
def function[decorator, parameter[func]]:
if <ast.UnaryOp object at 0x7da20c9927a0> begin[:]
name[func]._bark_types assign[=] dictionary[[], []]
call[name[func]._bark_types.update, parameter[name[kwargs]]]
return[name[func]]
return[name[decorator]] | keyword[def] identifier[arg_types] (** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[func] , literal[string] ):
identifier[func] . identifier[_bark_types] ={}
identifier[func] . identifier[_bark_types] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[func]
keyword[return] identifier[decorator] | def arg_types(**kwargs):
"""
Mark the expected types of certain arguments. Arguments for which
no types are provided default to strings. To specify an argument
type, give this decorator a keyword argument, where the argument
name is the name of the function argument and the value is a
callable taking one argument, which will convert a string to a
value of that type.
Note that the 'bool' type is treated specially.
"""
def decorator(func):
if not hasattr(func, '_bark_types'):
func._bark_types = {} # depends on [control=['if'], data=[]]
func._bark_types.update(kwargs)
return func
return decorator |
def process(specs):
    """
    Executes the passed in list of specs
    """
    # Build the processing chain; the first element consumes the second.
    sink, source = chain_specs(specs)
    LOG.info("Processing")
    timer = StopWatch().start()
    outcome = sink.process(source)
    if outcome:
        print(outcome)
    LOG.info("Finished in %s", timer.read())
LOG.info("Finished in %s", sw.read()) | def function[process, parameter[specs]]:
constant[
Executes the passed in list of specs
]
<ast.Tuple object at 0x7da1b1417a90> assign[=] call[name[chain_specs], parameter[name[specs]]]
call[name[LOG].info, parameter[constant[Processing]]]
variable[sw] assign[=] call[call[name[StopWatch], parameter[]].start, parameter[]]
variable[r] assign[=] call[name[pout].process, parameter[name[pin]]]
if name[r] begin[:]
call[name[print], parameter[name[r]]]
call[name[LOG].info, parameter[constant[Finished in %s], call[name[sw].read, parameter[]]]] | keyword[def] identifier[process] ( identifier[specs] ):
literal[string]
identifier[pout] , identifier[pin] = identifier[chain_specs] ( identifier[specs] )
identifier[LOG] . identifier[info] ( literal[string] )
identifier[sw] = identifier[StopWatch] (). identifier[start] ()
identifier[r] = identifier[pout] . identifier[process] ( identifier[pin] )
keyword[if] identifier[r] :
identifier[print] ( identifier[r] )
identifier[LOG] . identifier[info] ( literal[string] , identifier[sw] . identifier[read] ()) | def process(specs):
"""
Executes the passed in list of specs
"""
(pout, pin) = chain_specs(specs)
LOG.info('Processing')
sw = StopWatch().start()
r = pout.process(pin)
if r:
print(r) # depends on [control=['if'], data=[]]
LOG.info('Finished in %s', sw.read()) |
def get_bin_form(self, *args, **kwargs):
    """Pass through to provider BinAdminSession.get_bin_form_for_update"""
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.get_bin_form_for_update_template
    # This method might be a bit sketchy. Time will tell.
    # A trailing list argument or an explicit record-types kwarg signals
    # that the caller wants a creation form rather than an update form.
    wants_create = isinstance(args[-1], list) or 'bin_record_types' in kwargs
    handler = (self.get_bin_form_for_create if wants_create
               else self.get_bin_form_for_update)
    return handler(*args, **kwargs)
constant[Pass through to provider BinAdminSession.get_bin_form_for_update]
if <ast.BoolOp object at 0x7da1b0a666e0> begin[:]
return[call[name[self].get_bin_form_for_create, parameter[<ast.Starred object at 0x7da1b26aca90>]]] | keyword[def] identifier[get_bin_form] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[args] [- literal[int] ], identifier[list] ) keyword[or] literal[string] keyword[in] identifier[kwargs] :
keyword[return] identifier[self] . identifier[get_bin_form_for_create] (* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[self] . identifier[get_bin_form_for_update] (* identifier[args] ,** identifier[kwargs] ) | def get_bin_form(self, *args, **kwargs):
"""Pass through to provider BinAdminSession.get_bin_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'bin_record_types' in kwargs:
return self.get_bin_form_for_create(*args, **kwargs) # depends on [control=['if'], data=[]]
else:
return self.get_bin_form_for_update(*args, **kwargs) |
def read(self, payloadType, elsClient):
    """Fetches the latest data for this entity from api.elsevier.com.
    Returns True if successful; else, False."""
    # Rebind to the supplied client if given; otherwise an existing
    # binding is required.
    if elsClient:
        self._client = elsClient
    elif not self.client:
        raise ValueError('''Entity object not currently bound to elsClient instance. Call .read() with elsClient argument or set .client attribute.''')
    try:
        api_response = self.client.exec_request(self.uri)
        payload = api_response[payloadType]
        # Some endpoints wrap the payload in a single-element list.
        self._data = payload[0] if isinstance(payload, list) else payload
        ## TODO: check if URI is the same, if necessary update and log warning.
        logger.info("Data loaded for " + self.uri)
        return True
    except (requests.HTTPError, requests.RequestException) as e:
        for elm in e.args:
            logger.warning(elm)
        return False
return False | def function[read, parameter[self, payloadType, elsClient]]:
constant[Fetches the latest data for this entity from api.elsevier.com.
Returns True if successful; else, False.]
if name[elsClient] begin[:]
name[self]._client assign[=] name[elsClient]
<ast.Try object at 0x7da2046236a0> | keyword[def] identifier[read] ( identifier[self] , identifier[payloadType] , identifier[elsClient] ):
literal[string]
keyword[if] identifier[elsClient] :
identifier[self] . identifier[_client] = identifier[elsClient] ;
keyword[elif] keyword[not] identifier[self] . identifier[client] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[api_response] = identifier[self] . identifier[client] . identifier[exec_request] ( identifier[self] . identifier[uri] )
keyword[if] identifier[isinstance] ( identifier[api_response] [ identifier[payloadType] ], identifier[list] ):
identifier[self] . identifier[_data] = identifier[api_response] [ identifier[payloadType] ][ literal[int] ]
keyword[else] :
identifier[self] . identifier[_data] = identifier[api_response] [ identifier[payloadType] ]
identifier[logger] . identifier[info] ( literal[string] + identifier[self] . identifier[uri] )
keyword[return] keyword[True]
keyword[except] ( identifier[requests] . identifier[HTTPError] , identifier[requests] . identifier[RequestException] ) keyword[as] identifier[e] :
keyword[for] identifier[elm] keyword[in] identifier[e] . identifier[args] :
identifier[logger] . identifier[warning] ( identifier[elm] )
keyword[return] keyword[False] | def read(self, payloadType, elsClient):
"""Fetches the latest data for this entity from api.elsevier.com.
Returns True if successful; else, False."""
if elsClient:
self._client = elsClient # depends on [control=['if'], data=[]]
elif not self.client:
raise ValueError('Entity object not currently bound to elsClient instance. Call .read() with elsClient argument or set .client attribute.') # depends on [control=['if'], data=[]]
try:
api_response = self.client.exec_request(self.uri)
if isinstance(api_response[payloadType], list):
self._data = api_response[payloadType][0] # depends on [control=['if'], data=[]]
else:
self._data = api_response[payloadType]
## TODO: check if URI is the same, if necessary update and log warning.
logger.info('Data loaded for ' + self.uri)
return True # depends on [control=['try'], data=[]]
except (requests.HTTPError, requests.RequestException) as e:
for elm in e.args:
logger.warning(elm) # depends on [control=['for'], data=['elm']]
return False # depends on [control=['except'], data=['e']] |
def show_key(kwargs=None, call=None):
    '''
    List the keys available
    '''
    # This is a salt-cloud "function" (invoked with -f/--function), not
    # an instance action; enforce the correct invocation style.
    # Fixed: the error message previously named "list_keys".
    if call != 'function':
        log.error(
            'The show_key function must be called with -f or --function.'
        )
        return False
    if not kwargs:
        kwargs = {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    # The response code is unused; only the key payload is returned.
    rcode, data = query(
        command='my/keys/{0}'.format(kwargs['keyname']),
        method='GET',
    )
    return {'keys': {data['name']: data['key']}}
return {'keys': {data['name']: data['key']}} | def function[show_key, parameter[kwargs, call]]:
constant[
List the keys available
]
if compare[name[call] not_equal[!=] constant[function]] begin[:]
call[name[log].error, parameter[constant[The list_keys function must be called with -f or --function.]]]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b1c48670> begin[:]
variable[kwargs] assign[=] dictionary[[], []]
if compare[constant[keyname] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[log].error, parameter[constant[A keyname is required.]]]
return[constant[False]]
<ast.Tuple object at 0x7da1b1c49e10> assign[=] call[name[query], parameter[]]
return[dictionary[[<ast.Constant object at 0x7da1b1c4a9e0>], [<ast.Dict object at 0x7da1b1c4aa40>]]] | keyword[def] identifier[show_key] ( identifier[kwargs] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
identifier[log] . identifier[error] (
literal[string]
)
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[kwargs] :
identifier[kwargs] ={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[rcode] , identifier[data] = identifier[query] (
identifier[command] = literal[string] . identifier[format] ( identifier[kwargs] [ literal[string] ]),
identifier[method] = literal[string] ,
)
keyword[return] { literal[string] :{ identifier[data] [ literal[string] ]: identifier[data] [ literal[string] ]}} | def show_key(kwargs=None, call=None):
"""
List the keys available
"""
if call != 'function':
log.error('The list_keys function must be called with -f or --function.')
return False # depends on [control=['if'], data=[]]
if not kwargs:
kwargs = {} # depends on [control=['if'], data=[]]
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False # depends on [control=['if'], data=[]]
(rcode, data) = query(command='my/keys/{0}'.format(kwargs['keyname']), method='GET')
return {'keys': {data['name']: data['key']}} |
def merge_read_pair_seq(self, r1, r2):
    """
    Merge the sequence of two reads into one continuous read either
    by inserting the missing DNA, or joining on the common sequence.
    Parameters
    ----------
    r1 : libsam.Read
        Read 1
    r2 : libsam.Read
        Read 2
    """
    # Inclusive genomic span of each read.
    start1 = r1.pos  # + 1
    end1 = start1 + r1.length - 1
    start2 = r2.pos  # + 1
    end2 = start2 + r2.length - 1
    # Bases between the reads; negative when the reads overlap.
    gap = start2 - end1 - 1
    if gap < 0:
        # Overlapping: keep all of read 1 plus only the non-overlapping
        # tail of read 2 (gap is negative, so -gap bases overlap).
        return r1.seq + r2.seq[-gap:]
    # Disjoint (or adjacent): fill the span from the reference DNA.
    return self.dna((r1.chr, start1, end2))
return seq | def function[merge_read_pair_seq, parameter[self, r1, r2]]:
constant[
Merge the sequence of two reads into one continuous read either
by inserting the missing DNA, or joining on the common sequence.
Parameters
----------
r1 : libsam.Read
Read 1
r2 : libsam.Read
Read 2
]
variable[s1] assign[=] name[r1].pos
variable[e1] assign[=] binary_operation[binary_operation[name[s1] + name[r1].length] - constant[1]]
variable[s2] assign[=] name[r2].pos
variable[e2] assign[=] binary_operation[binary_operation[name[s2] + name[r2].length] - constant[1]]
variable[inner] assign[=] binary_operation[binary_operation[name[s2] - name[e1]] - constant[1]]
if compare[name[inner] greater_or_equal[>=] constant[0]] begin[:]
variable[seq] assign[=] call[name[self].dna, parameter[tuple[[<ast.Attribute object at 0x7da20c6c6560>, <ast.Name object at 0x7da20c6c4f40>, <ast.Name object at 0x7da20c6c77c0>]]]]
return[name[seq]] | keyword[def] identifier[merge_read_pair_seq] ( identifier[self] , identifier[r1] , identifier[r2] ):
literal[string]
identifier[s1] = identifier[r1] . identifier[pos]
identifier[e1] = identifier[s1] + identifier[r1] . identifier[length] - literal[int]
identifier[s2] = identifier[r2] . identifier[pos]
identifier[e2] = identifier[s2] + identifier[r2] . identifier[length] - literal[int]
identifier[inner] = identifier[s2] - identifier[e1] - literal[int]
keyword[if] identifier[inner] >= literal[int] :
identifier[seq] = identifier[self] . identifier[dna] (( identifier[r1] . identifier[chr] , identifier[s1] , identifier[e2] ))
keyword[else] :
identifier[seq] = identifier[r1] . identifier[seq] + identifier[r2] . identifier[seq] [- identifier[inner] :]
keyword[return] identifier[seq] | def merge_read_pair_seq(self, r1, r2):
"""
Merge the sequence of two reads into one continuous read either
by inserting the missing DNA, or joining on the common sequence.
Parameters
----------
r1 : libsam.Read
Read 1
r2 : libsam.Read
Read 2
"""
s1 = r1.pos # + 1
# end of first read
e1 = s1 + r1.length - 1
# start of second read
s2 = r2.pos # + 1
e2 = s2 + r2.length - 1
inner = s2 - e1 - 1
if inner >= 0:
seq = self.dna((r1.chr, s1, e2)) # depends on [control=['if'], data=[]]
else:
# Reads overlap so concatenate the first read with the
# portion of the second read that is not overlapping
# (inner is negative so flip sign for array indexing)
seq = r1.seq + r2.seq[-inner:]
return seq |
def parametric_line(x, y):
    """
    Build matrices whose i-th row holds the first i+1 points of the
    line and NaN elsewhere (one row per animation frame).
    Parameters
    ----------
    x : 1D numpy array
    y : 1D numpy array
    """
    if len(x) != len(y):
        raise ValueError("Arrays must be the same length")
    n = len(x)
    X = np.full((n, n), np.nan)
    Y = np.full((n, n), np.nan)
    for row in range(n):
        stop = row + 1
        X[row, :stop] = x[:stop]
        Y[row, :stop] = y[:stop]
    return X, Y
return X, Y | def function[parametric_line, parameter[x, y]]:
constant[
Parameters
----------
x : 1D numpy array
y : 1D numpy array
]
if compare[call[name[len], parameter[name[x]]] not_equal[!=] call[name[len], parameter[name[y]]]] begin[:]
<ast.Raise object at 0x7da1b12b5510>
variable[X] assign[=] binary_operation[call[name[np].ones, parameter[tuple[[<ast.Call object at 0x7da1b12b5900>, <ast.Call object at 0x7da1b12b5570>]]]] * name[np].nan]
variable[Y] assign[=] call[name[X].copy, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[x]]]]]] begin[:]
call[name[X]][tuple[[<ast.Name object at 0x7da1b12b4f10>, <ast.Slice object at 0x7da1b12b4340>]]] assign[=] call[name[x]][<ast.Slice object at 0x7da1b12b48b0>]
call[name[Y]][tuple[[<ast.Name object at 0x7da1b12b6080>, <ast.Slice object at 0x7da1b12b52d0>]]] assign[=] call[name[y]][<ast.Slice object at 0x7da1b1243ac0>]
return[tuple[[<ast.Name object at 0x7da1b11aba90>, <ast.Name object at 0x7da1b11abcd0>]]] | keyword[def] identifier[parametric_line] ( identifier[x] , identifier[y] ):
literal[string]
keyword[if] identifier[len] ( identifier[x] )!= identifier[len] ( identifier[y] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[X] = identifier[np] . identifier[ones] (( identifier[len] ( identifier[x] ), identifier[len] ( identifier[x] )))* identifier[np] . identifier[nan]
identifier[Y] = identifier[X] . identifier[copy] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[x] )):
identifier[X] [ identifier[i] ,:( identifier[i] + literal[int] )]= identifier[x] [:( identifier[i] + literal[int] )]
identifier[Y] [ identifier[i] ,:( identifier[i] + literal[int] )]= identifier[y] [:( identifier[i] + literal[int] )]
keyword[return] identifier[X] , identifier[Y] | def parametric_line(x, y):
"""
Parameters
----------
x : 1D numpy array
y : 1D numpy array
"""
if len(x) != len(y):
raise ValueError('Arrays must be the same length') # depends on [control=['if'], data=[]]
X = np.ones((len(x), len(x))) * np.nan
Y = X.copy()
for i in range(len(x)):
X[i, :i + 1] = x[:i + 1]
Y[i, :i + 1] = y[:i + 1] # depends on [control=['for'], data=['i']]
return (X, Y) |
def add_sample(self, sample: Sample) -> None:
    """Add a :class:`Sample` to this :class:`SampleSheet`.
    All samples are validated against the first sample added to the sample
    sheet to ensure there are no ID collisions or incompatible read
    structures (if supplied). All samples are also validated against the
    ``"[Reads]"`` section of the sample sheet if it has been defined.
    The following validation is performed when adding a sample:
        - :class:`Read_Structure` is identical in all samples, if supplied
        - :class:`Read_Structure` is compatible with ``"[Reads]"``, if \
          supplied
        - Samples on the same ``"Lane"`` cannot have the same \
          ``"Sample_ID"`` and ``"Library_ID"``.
        - Samples cannot have the same ``"Sample_ID"`` if no ``"Lane"`` \
          has been defined.
        - The same ``"index"`` or ``"index2"`` combination cannot exist \
          per flowcell or per lane if lanes have been defined.
        - All samples have the same index design (``"index"``, \
          ``"index2"``) per flowcell or per lane if lanes have been \
          defined.
    Args:
        sample: :class:`Sample` to add to this :class:`SampleSheet`.
    Raises:
        ValueError: if the sample has no ``Sample_ID``, its read
            structure conflicts with the sheet, an index attribute is
            missing relative to earlier samples, or its index
            combination collides with an existing sample.
    Note:
        It is unclear if the Illumina specification truly allows for
        equivalent samples to exist on the same sample sheet. To mitigate
        the warnings in this library when you encounter such a case, use
        a code pattern like the following:
        >>> import warnings
        >>> warnings.simplefilter("ignore")
        >>> from sample_sheet import SampleSheet
        >>> SampleSheet('tests/resources/single-end-colliding-sample-ids.csv');
        SampleSheet('tests/resources/single-end-colliding-sample-ids.csv')
    """
    # Do not allow samples without Sample_ID defined.
    if sample.Sample_ID is None:
        raise ValueError('Sample must have "Sample_ID" defined.')
    # Set whether the samples will have ``index`` or ``index2``.
    # The first sample added fixes the index design for the sheet.
    if len(self.samples) == 0:
        self.samples_have_index = sample.index is not None
        self.samples_have_index2 = sample.index2 is not None
    if (
        len(self.samples) == 0
        and sample.Read_Structure is not None
        and self.Read_Structure is None
    ):
        # If this is the first sample added to the sample sheet then
        # assume the ``SampleSheet.Read_Structure`` inherits the
        # ``sample.Read_Structure`` only if ``SampleSheet.Read_Structure``
        # has not already been defined. If ``SampleSheet.reads`` has been
        # defined then validate the new read_structure against it.
        # NOTE: ``and`` binds tighter than ``or`` here, so this reads as
        # (paired-end sheet with non-paired sample) OR
        # (single-end sheet with non-single sample).
        if (
            self.is_paired_end
            and not sample.Read_Structure.is_paired_end
            or self.is_single_end  # noqa
            and not sample.Read_Structure.is_single_end
        ):
            raise ValueError(
                f'Sample sheet pairing has been set with '
                f'Reads:"{self.Reads}" and is not compatible with sample '
                f'read structure: {sample.Read_Structure}'
            )
        # Make a copy of this samples read_structure for the sample sheet.
        self.Read_Structure = sample.Read_Structure.copy()
    # Validate this sample against the ``SampleSheet.Read_Structure``
    # attribute, which can be None, to ensure they are the same.
    if self.Read_Structure != sample.Read_Structure:
        raise ValueError(
            f'Sample read structure ({sample.Read_Structure}) different '
            f'than read structure in samplesheet ({self.Read_Structure}).'
        )
    # Compare this sample against all those already defined to ensure none
    # have equal ``Sample_ID``, ``Library_ID``, and ``Lane`` attributes.
    # Ensure that all samples have attributes ``index``, ``index2``, or
    # both if they have been defined.
    for other in self.samples:
        # Equivalent samples only warn (not raise); see the docstring Note.
        if sample == other:
            message = (
                f'Two equivalent samples added:'
                f'\n\n1): {sample.__repr__()}\n2): {other.__repr__()}\n'
            )
            # TODO: Look into if this is truly illegal or not.
            warnings.warn(UserWarning(message))
        if sample.index is None and self.samples_have_index:
            raise ValueError(
                f'Cannot add a sample without attribute `index` if a '
                f'previous sample has `index` set: {sample})'
            )
        if sample.index2 is None and self.samples_have_index2:
            raise ValueError(
                f'Cannot add a sample without attribute `index2` if a '
                f'previous sample has `index2` set: {sample})'
            )
        # Prevent index collisions when samples are dual-indexed
        if (
            self.samples_have_index
            and self.samples_have_index2
            and sample.index == other.index
            and sample.index2 == other.index2
            and sample.Lane == other.Lane
        ):
            raise ValueError(
                f'Sample index combination for {sample} has already been '
                f'added on this lane or flowcell: {other}'
            )
        # Prevent index collisions when samples are single-indexed (index)
        if (
            self.samples_have_index
            and not self.samples_have_index2
            and sample.index == other.index
            and sample.Lane == other.Lane
        ):
            raise ValueError(
                f'First sample index for {sample} has already been '
                f'added on this lane or flowcell: {other}'
            )
        # Prevent index collisions when samples are single-indexed (index2)
        if (
            not self.samples_have_index
            and self.samples_have_index2
            and sample.index2 == other.index2
            and sample.Lane == other.Lane
        ):
            raise ValueError(
                f'Second sample index for {sample} has already been '
                f'added on this lane or flowcell: {other}'
            )
    # All validation passed: bind the sample to this sheet and store it.
    sample.sample_sheet = self
    self._samples.append(sample)
constant[Add a :class:`Sample` to this :class:`SampleSheet`.
All samples are validated against the first sample added to the sample
sheet to ensure there are no ID collisions or incompatible read
structures (if supplied). All samples are also validated against the
``"[Reads]"`` section of the sample sheet if it has been defined.
The following validation is performed when adding a sample:
- :class:`Read_Structure` is identical in all samples, if supplied
- :class:`Read_Structure` is compatible with ``"[Reads]"``, if supplied
- Samples on the same ``"Lane"`` cannot have the same ``"Sample_ID"`` and ``"Library_ID"``.
- Samples cannot have the same ``"Sample_ID"`` if no ``"Lane"`` has been defined.
- The same ``"index"`` or ``"index2"`` combination cannot exist per flowcell or per lane if lanes have been defined.
- All samples have the same index design (``"index"``, ``"index2"``) per flowcell or per lane if lanes have been defined.
Args:
sample: :class:`Sample` to add to this :class:`SampleSheet`.
Note:
It is unclear if the Illumina specification truly allows for
equivalent samples to exist on the same sample sheet. To mitigate
the warnings in this library when you encounter such a case, use
a code pattern like the following:
>>> import warnings
>>> warnings.simplefilter("ignore")
>>> from sample_sheet import SampleSheet
>>> SampleSheet('tests/resources/single-end-colliding-sample-ids.csv');
SampleSheet('tests/resources/single-end-colliding-sample-ids.csv')
]
if compare[name[sample].Sample_ID is constant[None]] begin[:]
<ast.Raise object at 0x7da2047eac20>
if compare[call[name[len], parameter[name[self].samples]] equal[==] constant[0]] begin[:]
name[self].samples_have_index assign[=] compare[name[sample].index is_not constant[None]]
name[self].samples_have_index2 assign[=] compare[name[sample].index2 is_not constant[None]]
if <ast.BoolOp object at 0x7da2047e8730> begin[:]
if <ast.BoolOp object at 0x7da2054a70a0> begin[:]
<ast.Raise object at 0x7da2054a5e40>
name[self].Read_Structure assign[=] call[name[sample].Read_Structure.copy, parameter[]]
if compare[name[self].Read_Structure not_equal[!=] name[sample].Read_Structure] begin[:]
<ast.Raise object at 0x7da2054a5a20>
for taget[name[other]] in starred[name[self].samples] begin[:]
if compare[name[sample] equal[==] name[other]] begin[:]
variable[message] assign[=] <ast.JoinedStr object at 0x7da2054a5db0>
call[name[warnings].warn, parameter[call[name[UserWarning], parameter[name[message]]]]]
if <ast.BoolOp object at 0x7da2054a6680> begin[:]
<ast.Raise object at 0x7da2054a62f0>
if <ast.BoolOp object at 0x7da2054a6fe0> begin[:]
<ast.Raise object at 0x7da2054a46a0>
if <ast.BoolOp object at 0x7da2054a6d40> begin[:]
<ast.Raise object at 0x7da2054a7400>
if <ast.BoolOp object at 0x7da2054a7430> begin[:]
<ast.Raise object at 0x7da2054a5cc0>
if <ast.BoolOp object at 0x7da2054a7910> begin[:]
<ast.Raise object at 0x7da2054a45b0>
name[sample].sample_sheet assign[=] name[self]
call[name[self]._samples.append, parameter[name[sample]]] | keyword[def] identifier[add_sample] ( identifier[self] , identifier[sample] : identifier[Sample] )-> keyword[None] :
literal[string]
keyword[if] identifier[sample] . identifier[Sample_ID] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[self] . identifier[samples] )== literal[int] :
identifier[self] . identifier[samples_have_index] = identifier[sample] . identifier[index] keyword[is] keyword[not] keyword[None]
identifier[self] . identifier[samples_have_index2] = identifier[sample] . identifier[index2] keyword[is] keyword[not] keyword[None]
keyword[if] (
identifier[len] ( identifier[self] . identifier[samples] )== literal[int]
keyword[and] identifier[sample] . identifier[Read_Structure] keyword[is] keyword[not] keyword[None]
keyword[and] identifier[self] . identifier[Read_Structure] keyword[is] keyword[None]
):
keyword[if] (
identifier[self] . identifier[is_paired_end]
keyword[and] keyword[not] identifier[sample] . identifier[Read_Structure] . identifier[is_paired_end]
keyword[or] identifier[self] . identifier[is_single_end]
keyword[and] keyword[not] identifier[sample] . identifier[Read_Structure] . identifier[is_single_end]
):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string]
)
identifier[self] . identifier[Read_Structure] = identifier[sample] . identifier[Read_Structure] . identifier[copy] ()
keyword[if] identifier[self] . identifier[Read_Structure] != identifier[sample] . identifier[Read_Structure] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[for] identifier[other] keyword[in] identifier[self] . identifier[samples] :
keyword[if] identifier[sample] == identifier[other] :
identifier[message] =(
literal[string]
literal[string]
)
identifier[warnings] . identifier[warn] ( identifier[UserWarning] ( identifier[message] ))
keyword[if] identifier[sample] . identifier[index] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[samples_have_index] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] identifier[sample] . identifier[index2] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[samples_have_index2] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] (
identifier[self] . identifier[samples_have_index]
keyword[and] identifier[self] . identifier[samples_have_index2]
keyword[and] identifier[sample] . identifier[index] == identifier[other] . identifier[index]
keyword[and] identifier[sample] . identifier[index2] == identifier[other] . identifier[index2]
keyword[and] identifier[sample] . identifier[Lane] == identifier[other] . identifier[Lane]
):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] (
identifier[self] . identifier[samples_have_index]
keyword[and] keyword[not] identifier[self] . identifier[samples_have_index2]
keyword[and] identifier[sample] . identifier[index] == identifier[other] . identifier[index]
keyword[and] identifier[sample] . identifier[Lane] == identifier[other] . identifier[Lane]
):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] (
keyword[not] identifier[self] . identifier[samples_have_index]
keyword[and] identifier[self] . identifier[samples_have_index2]
keyword[and] identifier[sample] . identifier[index2] == identifier[other] . identifier[index2]
keyword[and] identifier[sample] . identifier[Lane] == identifier[other] . identifier[Lane]
):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
identifier[sample] . identifier[sample_sheet] = identifier[self]
identifier[self] . identifier[_samples] . identifier[append] ( identifier[sample] ) | def add_sample(self, sample: Sample) -> None:
"""Add a :class:`Sample` to this :class:`SampleSheet`.
All samples are validated against the first sample added to the sample
sheet to ensure there are no ID collisions or incompatible read
structures (if supplied). All samples are also validated against the
``"[Reads]"`` section of the sample sheet if it has been defined.
The following validation is performed when adding a sample:
- :class:`Read_Structure` is identical in all samples, if supplied
- :class:`Read_Structure` is compatible with ``"[Reads]"``, if supplied
- Samples on the same ``"Lane"`` cannot have the same ``"Sample_ID"`` and ``"Library_ID"``.
- Samples cannot have the same ``"Sample_ID"`` if no ``"Lane"`` has been defined.
- The same ``"index"`` or ``"index2"`` combination cannot exist per flowcell or per lane if lanes have been defined.
- All samples have the same index design (``"index"``, ``"index2"``) per flowcell or per lane if lanes have been defined.
Args:
sample: :class:`Sample` to add to this :class:`SampleSheet`.
Note:
It is unclear if the Illumina specification truly allows for
equivalent samples to exist on the same sample sheet. To mitigate
the warnings in this library when you encounter such a case, use
a code pattern like the following:
>>> import warnings
>>> warnings.simplefilter("ignore")
>>> from sample_sheet import SampleSheet
>>> SampleSheet('tests/resources/single-end-colliding-sample-ids.csv');
SampleSheet('tests/resources/single-end-colliding-sample-ids.csv')
"""
# Do not allow samples without Sample_ID defined.
if sample.Sample_ID is None:
raise ValueError('Sample must have "Sample_ID" defined.') # depends on [control=['if'], data=[]]
# Set whether the samples will have ``index`` or ``index2``.
if len(self.samples) == 0:
self.samples_have_index = sample.index is not None
self.samples_have_index2 = sample.index2 is not None # depends on [control=['if'], data=[]]
if len(self.samples) == 0 and sample.Read_Structure is not None and (self.Read_Structure is None):
# If this is the first sample added to the sample sheet then
# assume the ``SampleSheet.Read_Structure`` inherits the
# ``sample.Read_Structure`` only if ``SampleSheet.Read_Structure``
# has not already been defined. If ``SampleSheet.reads`` has been
# defined then validate the new read_structure against it.
if self.is_paired_end and (not sample.Read_Structure.is_paired_end) or (self.is_single_end and (not sample.Read_Structure.is_single_end)): # noqa
raise ValueError(f'Sample sheet pairing has been set with Reads:"{self.Reads}" and is not compatible with sample read structure: {sample.Read_Structure}') # depends on [control=['if'], data=[]]
# Make a copy of this samples read_structure for the sample sheet.
self.Read_Structure = sample.Read_Structure.copy() # depends on [control=['if'], data=[]]
# Validate this sample against the ``SampleSheet.Read_Structure``
# attribute, which can be None, to ensure they are the same.
if self.Read_Structure != sample.Read_Structure:
raise ValueError(f'Sample read structure ({sample.Read_Structure}) different than read structure in samplesheet ({self.Read_Structure}).') # depends on [control=['if'], data=[]]
# Compare this sample against all those already defined to ensure none
# have equal ``Sample_ID``, ``Library_ID``, and ``Lane`` attributes.
# Ensure that all samples have attributes ``index``, ``index2``, or
# both if they have been defined.
for other in self.samples:
if sample == other:
message = f'Two equivalent samples added:\n\n1): {sample.__repr__()}\n2): {other.__repr__()}\n'
# TODO: Look into if this is truly illegal or not.
warnings.warn(UserWarning(message)) # depends on [control=['if'], data=['sample', 'other']]
if sample.index is None and self.samples_have_index:
raise ValueError(f'Cannot add a sample without attribute `index` if a previous sample has `index` set: {sample})') # depends on [control=['if'], data=[]]
if sample.index2 is None and self.samples_have_index2:
raise ValueError(f'Cannot add a sample without attribute `index2` if a previous sample has `index2` set: {sample})') # depends on [control=['if'], data=[]]
# Prevent index collisions when samples are dual-indexed
if self.samples_have_index and self.samples_have_index2 and (sample.index == other.index) and (sample.index2 == other.index2) and (sample.Lane == other.Lane):
raise ValueError(f'Sample index combination for {sample} has already been added on this lane or flowcell: {other}') # depends on [control=['if'], data=[]]
# Prevent index collisions when samples are single-indexed (index)
if self.samples_have_index and (not self.samples_have_index2) and (sample.index == other.index) and (sample.Lane == other.Lane):
raise ValueError(f'First sample index for {sample} has already been added on this lane or flowcell: {other}') # depends on [control=['if'], data=[]]
# Prevent index collisions when samples are single-indexed (index2)
if not self.samples_have_index and self.samples_have_index2 and (sample.index2 == other.index2) and (sample.Lane == other.Lane):
raise ValueError(f'Second sample index for {sample} has already been added on this lane or flowcell: {other}') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['other']]
sample.sample_sheet = self
self._samples.append(sample) |
def btc_tx_output_parse_script( scriptpubkey ):
    """
    Given the hex representation of a script,
    turn it into a nice, easy-to-read dict.
    The dict will have:
    * asm: the disassembled script as a string
    * hex: the raw hex (given as an argument)
    * type: the type of script
    * addresses: list of addresses the script represents (empty if none apply)
    Optionally, it will have:
    * reqSigs: the number of required signatures (if applicable)

    Raises ValueError if an address-bearing script's address cannot be parsed.
    """
    reqSigs = None
    addresses = []

    script_type = btc_script_classify(scriptpubkey)

    # Deserialize for its validation side effect: raises on a malformed script.
    # (The token list itself is not needed here.)
    btc_script_deserialize(scriptpubkey)

    def _required_address():
        # Extract the script's address, failing loudly if it cannot be parsed.
        addr = btc_script_hex_to_address(scriptpubkey)
        if not addr:
            raise ValueError("Failed to parse scriptpubkey address")
        return [addr]

    if script_type == 'p2pkh':
        script_type = "pubkeyhash"
        reqSigs = 1
        addresses = _required_address()

    elif script_type in ('p2sh', 'p2sh-p2wpkh', 'p2sh-p2wsh'):
        script_type = "scripthash"
        reqSigs = 1
        addresses = _required_address()

    elif script_type == 'p2pk':
        script_type = "pubkey"
        reqSigs = 1

    elif script_type is None:
        script_type = "nonstandard"

    ret = {
        "asm": btc_tx_script_to_asm(scriptpubkey),
        "hex": scriptpubkey,
        "type": script_type,
        # addresses is always a list (possibly empty), so it is always included;
        # the original `if addresses is not None` check was dead code.
        "addresses": addresses,
    }

    if reqSigs is not None:
        ret['reqSigs'] = reqSigs

    return ret
constant[
Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable)
]
variable[script_type] assign[=] constant[None]
variable[reqSigs] assign[=] constant[None]
variable[addresses] assign[=] list[[]]
variable[script_type] assign[=] call[name[btc_script_classify], parameter[name[scriptpubkey]]]
variable[script_tokens] assign[=] call[name[btc_script_deserialize], parameter[name[scriptpubkey]]]
if compare[name[script_type] in list[[<ast.Constant object at 0x7da1b28f3820>]]] begin[:]
variable[script_type] assign[=] constant[pubkeyhash]
variable[reqSigs] assign[=] constant[1]
variable[addr] assign[=] call[name[btc_script_hex_to_address], parameter[name[scriptpubkey]]]
if <ast.UnaryOp object at 0x7da1b28f3ac0> begin[:]
<ast.Raise object at 0x7da1b28f3b50>
variable[addresses] assign[=] list[[<ast.Name object at 0x7da1b28f3550>]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b28f1120>, <ast.Constant object at 0x7da1b28f0460>, <ast.Constant object at 0x7da1b28f1f90>], [<ast.Call object at 0x7da1b28f1600>, <ast.Name object at 0x7da1b28f1f60>, <ast.Name object at 0x7da1b28f2680>]]
if compare[name[addresses] is_not constant[None]] begin[:]
call[name[ret]][constant[addresses]] assign[=] name[addresses]
if compare[name[reqSigs] is_not constant[None]] begin[:]
call[name[ret]][constant[reqSigs]] assign[=] name[reqSigs]
return[name[ret]] | keyword[def] identifier[btc_tx_output_parse_script] ( identifier[scriptpubkey] ):
literal[string]
identifier[script_type] = keyword[None]
identifier[reqSigs] = keyword[None]
identifier[addresses] =[]
identifier[script_type] = identifier[btc_script_classify] ( identifier[scriptpubkey] )
identifier[script_tokens] = identifier[btc_script_deserialize] ( identifier[scriptpubkey] )
keyword[if] identifier[script_type] keyword[in] [ literal[string] ]:
identifier[script_type] = literal[string]
identifier[reqSigs] = literal[int]
identifier[addr] = identifier[btc_script_hex_to_address] ( identifier[scriptpubkey] )
keyword[if] keyword[not] identifier[addr] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[addresses] =[ identifier[addr] ]
keyword[elif] identifier[script_type] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[script_type] = literal[string]
identifier[reqSigs] = literal[int]
identifier[addr] = identifier[btc_script_hex_to_address] ( identifier[scriptpubkey] )
keyword[if] keyword[not] identifier[addr] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[addresses] =[ identifier[addr] ]
keyword[elif] identifier[script_type] == literal[string] :
identifier[script_type] = literal[string]
identifier[reqSigs] = literal[int]
keyword[elif] identifier[script_type] keyword[is] keyword[None] :
identifier[script_type] = literal[string]
identifier[ret] ={
literal[string] : identifier[btc_tx_script_to_asm] ( identifier[scriptpubkey] ),
literal[string] : identifier[scriptpubkey] ,
literal[string] : identifier[script_type]
}
keyword[if] identifier[addresses] keyword[is] keyword[not] keyword[None] :
identifier[ret] [ literal[string] ]= identifier[addresses]
keyword[if] identifier[reqSigs] keyword[is] keyword[not] keyword[None] :
identifier[ret] [ literal[string] ]= identifier[reqSigs]
keyword[return] identifier[ret] | def btc_tx_output_parse_script(scriptpubkey):
"""
Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable)
"""
script_type = None
reqSigs = None
addresses = []
script_type = btc_script_classify(scriptpubkey)
script_tokens = btc_script_deserialize(scriptpubkey)
if script_type in ['p2pkh']:
script_type = 'pubkeyhash'
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError('Failed to parse scriptpubkey address') # depends on [control=['if'], data=[]]
addresses = [addr] # depends on [control=['if'], data=['script_type']]
elif script_type in ['p2sh', 'p2sh-p2wpkh', 'p2sh-p2wsh']:
script_type = 'scripthash'
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError('Failed to parse scriptpubkey address') # depends on [control=['if'], data=[]]
addresses = [addr] # depends on [control=['if'], data=['script_type']]
elif script_type == 'p2pk':
script_type = 'pubkey'
reqSigs = 1 # depends on [control=['if'], data=['script_type']]
elif script_type is None:
script_type = 'nonstandard' # depends on [control=['if'], data=['script_type']]
ret = {'asm': btc_tx_script_to_asm(scriptpubkey), 'hex': scriptpubkey, 'type': script_type}
if addresses is not None:
ret['addresses'] = addresses # depends on [control=['if'], data=['addresses']]
if reqSigs is not None:
ret['reqSigs'] = reqSigs # depends on [control=['if'], data=['reqSigs']]
# print 'parse script {}: {}'.format(scriptpubkey, ret)
return ret |
def closed(self) -> bool:
    '''Return whether the connection is closed.

    The connection counts as closed when either stream is missing, or
    when the reader has reached end-of-file.
    '''
    if self.writer and self.reader:
        return self.reader.at_eof()
    return True
constant[Return whether the connection is closed.]
return[<ast.BoolOp object at 0x7da204347700>] | keyword[def] identifier[closed] ( identifier[self] )-> identifier[bool] :
literal[string]
keyword[return] keyword[not] identifier[self] . identifier[writer] keyword[or] keyword[not] identifier[self] . identifier[reader] keyword[or] identifier[self] . identifier[reader] . identifier[at_eof] () | def closed(self) -> bool:
"""Return whether the connection is closed."""
return not self.writer or not self.reader or self.reader.at_eof() |
def align(self, scale, center, angle, height, width):
    """ Create a thumbnail from the original image: rescale by the given
    factor, recenter on the given pixel, rotate to the grasp angle, and
    crop to the requested output size.

    Parameters
    ----------
    scale : float
        scale factor to apply
    center : 2D array
        array containing the row and column index of the pixel to center on
    angle : float
        angle to align the image to
    height : int
        height of the final image
    width : int
        width of the final image
    """
    # Step 1: rescale the image.
    resized = self.resize(scale)

    # Step 2: translate so the requested pixel lands on the image center,
    # then rotate by the requested angle.
    # NOTE(review): the docstring says center = [row, col], yet the column
    # shift uses center[0] and the row shift uses center[1] -- confirm this
    # matches the convention expected by transform().
    col_shift = resized.center[1] - center[0] * scale
    row_shift = resized.center[0] - center[1] * scale
    transformed = resized.transform(np.array([row_shift, col_shift]), angle)

    # Step 3: crop to the requested output size.
    return transformed.crop(height, width)
constant[ Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
width of the final image
]
variable[scaled_im] assign[=] call[name[self].resize, parameter[name[scale]]]
variable[cx] assign[=] call[name[scaled_im].center][constant[1]]
variable[cy] assign[=] call[name[scaled_im].center][constant[0]]
variable[dx] assign[=] binary_operation[name[cx] - binary_operation[call[name[center]][constant[0]] * name[scale]]]
variable[dy] assign[=] binary_operation[name[cy] - binary_operation[call[name[center]][constant[1]] * name[scale]]]
variable[translation] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b0499e40>, <ast.Name object at 0x7da1b0499a20>]]]]
variable[tf_im] assign[=] call[name[scaled_im].transform, parameter[name[translation], name[angle]]]
variable[aligned_im] assign[=] call[name[tf_im].crop, parameter[name[height], name[width]]]
return[name[aligned_im]] | keyword[def] identifier[align] ( identifier[self] , identifier[scale] , identifier[center] , identifier[angle] , identifier[height] , identifier[width] ):
literal[string]
identifier[scaled_im] = identifier[self] . identifier[resize] ( identifier[scale] )
identifier[cx] = identifier[scaled_im] . identifier[center] [ literal[int] ]
identifier[cy] = identifier[scaled_im] . identifier[center] [ literal[int] ]
identifier[dx] = identifier[cx] - identifier[center] [ literal[int] ]* identifier[scale]
identifier[dy] = identifier[cy] - identifier[center] [ literal[int] ]* identifier[scale]
identifier[translation] = identifier[np] . identifier[array] ([ identifier[dy] , identifier[dx] ])
identifier[tf_im] = identifier[scaled_im] . identifier[transform] ( identifier[translation] , identifier[angle] )
identifier[aligned_im] = identifier[tf_im] . identifier[crop] ( identifier[height] , identifier[width] )
keyword[return] identifier[aligned_im] | def align(self, scale, center, angle, height, width):
""" Create a thumbnail from the original image that
is scaled by the given factor, centered on the center pixel, oriented along the grasp angle, and cropped to the desired height and width.
Parameters
----------
scale : float
scale factor to apply
center : 2D array
array containing the row and column index of the pixel to center on
angle : float
angle to align the image to
height : int
height of the final image
width : int
width of the final image
"""
# rescale
scaled_im = self.resize(scale)
# transform
cx = scaled_im.center[1]
cy = scaled_im.center[0]
dx = cx - center[0] * scale
dy = cy - center[1] * scale
translation = np.array([dy, dx])
tf_im = scaled_im.transform(translation, angle)
# crop
aligned_im = tf_im.crop(height, width)
return aligned_im |
def get_block_hash(block_height, coin_symbol='btc', api_key=None):
    '''
    Takes a block_height and returns the block_hash
    '''
    # Fetch a minimal overview (one transaction) and pull out the hash field.
    overview = get_block_overview(
        block_representation=block_height,
        coin_symbol=coin_symbol,
        txn_limit=1,
        api_key=api_key,
    )
    return overview['hash']
constant[
Takes a block_height and returns the block_hash
]
return[call[call[name[get_block_overview], parameter[]]][constant[hash]]] | keyword[def] identifier[get_block_hash] ( identifier[block_height] , identifier[coin_symbol] = literal[string] , identifier[api_key] = keyword[None] ):
literal[string]
keyword[return] identifier[get_block_overview] ( identifier[block_representation] = identifier[block_height] ,
identifier[coin_symbol] = identifier[coin_symbol] , identifier[txn_limit] = literal[int] , identifier[api_key] = identifier[api_key] )[ literal[string] ] | def get_block_hash(block_height, coin_symbol='btc', api_key=None):
"""
Takes a block_height and returns the block_hash
"""
return get_block_overview(block_representation=block_height, coin_symbol=coin_symbol, txn_limit=1, api_key=api_key)['hash'] |
def nl_list_for_each_entry(pos, head, member):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L79.

    Walk a circular nl_list, yielding the containing entry of each node.

    Positional arguments:
    pos -- class instance holding an nl_list_head instance.
    head -- nl_list_head class instance.
    member -- attribute (string).

    Returns:
    Generator yielding a class instances.
    """
    # Start at the entry embedding the first node after the list head.
    entry = nl_list_entry(head.next_, type(pos), member)
    while True:
        yield entry
        link = getattr(entry, member)
        if link == head:
            # Walked all the way around the circular list; stop iterating.
            return
        entry = nl_list_entry(link.next_, type(entry), member)
constant[https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L79.
Positional arguments:
pos -- class instance holding an nl_list_head instance.
head -- nl_list_head class instance.
member -- attribute (string).
Returns:
Generator yielding a class instances.
]
variable[pos] assign[=] call[name[nl_list_entry], parameter[name[head].next_, call[name[type], parameter[name[pos]]], name[member]]]
while constant[True] begin[:]
<ast.Yield object at 0x7da1b264a980>
if compare[call[name[getattr], parameter[name[pos], name[member]]] not_equal[!=] name[head]] begin[:]
variable[pos] assign[=] call[name[nl_list_entry], parameter[call[name[getattr], parameter[name[pos], name[member]]].next_, call[name[type], parameter[name[pos]]], name[member]]]
continue
break | keyword[def] identifier[nl_list_for_each_entry] ( identifier[pos] , identifier[head] , identifier[member] ):
literal[string]
identifier[pos] = identifier[nl_list_entry] ( identifier[head] . identifier[next_] , identifier[type] ( identifier[pos] ), identifier[member] )
keyword[while] keyword[True] :
keyword[yield] identifier[pos]
keyword[if] identifier[getattr] ( identifier[pos] , identifier[member] )!= identifier[head] :
identifier[pos] = identifier[nl_list_entry] ( identifier[getattr] ( identifier[pos] , identifier[member] ). identifier[next_] , identifier[type] ( identifier[pos] ), identifier[member] )
keyword[continue]
keyword[break] | def nl_list_for_each_entry(pos, head, member):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L79.
Positional arguments:
pos -- class instance holding an nl_list_head instance.
head -- nl_list_head class instance.
member -- attribute (string).
Returns:
Generator yielding a class instances.
"""
pos = nl_list_entry(head.next_, type(pos), member)
while True:
yield pos
if getattr(pos, member) != head:
pos = nl_list_entry(getattr(pos, member).next_, type(pos), member)
continue # depends on [control=['if'], data=[]]
break # depends on [control=['while'], data=[]] |
def difference(self, *others):
    r"""Return a new multiset with all elements from the others removed.

    >>> ms = Multiset('aab')
    >>> sorted(ms.difference('bc'))
    ['a', 'a']

    The ``-`` operator performs the same operation, but only accepts another
    set (not an arbitrary iterable) to avoid accidental misuse.

    >>> ms = Multiset('aabbbc')
    >>> sorted(ms - Multiset('abd'))
    ['a', 'b', 'b', 'c']

    See :meth:`difference_update` for the variant that modifies the multiset
    in place.

    Args:
        others: The other sets to remove from the multiset. Can also be any
            :class:`~typing.Iterable`\[~T] or
            :class:`~typing.Mapping`\[~T, :class:`int`] which are then
            converted to :class:`Multiset`\[~T].

    Returns:
        The resulting difference multiset.
    """
    new_multiset = self.__copy__()
    counts = new_multiset._elements
    remaining_total = new_multiset._total
    for other_set in map(self._as_multiset, others):
        for elem, count in other_set.items():
            if elem not in counts:
                continue
            current = counts[elem]
            if current > count:
                # Some copies survive: decrease the multiplicity.
                counts[elem] = current - count
                remaining_total -= count
            else:
                # Removing at least as many as present: drop the element.
                del counts[elem]
                remaining_total -= current
    new_multiset._total = remaining_total
    return new_multiset
constant[Return a new multiset with all elements from the others removed.
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`difference_update`.
Args:
others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting difference multiset.
]
variable[result] assign[=] call[name[self].__copy__, parameter[]]
variable[_elements] assign[=] name[result]._elements
variable[_total] assign[=] name[result]._total
for taget[name[other]] in starred[call[name[map], parameter[name[self]._as_multiset, name[others]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b25d2020>, <ast.Name object at 0x7da1b25d02e0>]]] in starred[call[name[other].items, parameter[]]] begin[:]
if compare[name[element] in name[_elements]] begin[:]
variable[old_multiplicity] assign[=] call[name[_elements]][name[element]]
variable[new_multiplicity] assign[=] binary_operation[name[old_multiplicity] - name[multiplicity]]
if compare[name[new_multiplicity] greater[>] constant[0]] begin[:]
call[name[_elements]][name[element]] assign[=] name[new_multiplicity]
<ast.AugAssign object at 0x7da1b25d3550>
name[result]._total assign[=] name[_total]
return[name[result]] | keyword[def] identifier[difference] ( identifier[self] ,* identifier[others] ):
literal[string]
identifier[result] = identifier[self] . identifier[__copy__] ()
identifier[_elements] = identifier[result] . identifier[_elements]
identifier[_total] = identifier[result] . identifier[_total]
keyword[for] identifier[other] keyword[in] identifier[map] ( identifier[self] . identifier[_as_multiset] , identifier[others] ):
keyword[for] identifier[element] , identifier[multiplicity] keyword[in] identifier[other] . identifier[items] ():
keyword[if] identifier[element] keyword[in] identifier[_elements] :
identifier[old_multiplicity] = identifier[_elements] [ identifier[element] ]
identifier[new_multiplicity] = identifier[old_multiplicity] - identifier[multiplicity]
keyword[if] identifier[new_multiplicity] > literal[int] :
identifier[_elements] [ identifier[element] ]= identifier[new_multiplicity]
identifier[_total] -= identifier[multiplicity]
keyword[else] :
keyword[del] identifier[_elements] [ identifier[element] ]
identifier[_total] -= identifier[old_multiplicity]
identifier[result] . identifier[_total] = identifier[_total]
keyword[return] identifier[result] | def difference(self, *others):
"""Return a new multiset with all elements from the others removed.
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`difference_update`.
Args:
others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\\[~T]
or :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].
Returns:
The resulting difference multiset.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_multiset, others):
for (element, multiplicity) in other.items():
if element in _elements:
old_multiplicity = _elements[element]
new_multiplicity = old_multiplicity - multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity # depends on [control=['if'], data=['new_multiplicity']]
else:
del _elements[element]
_total -= old_multiplicity # depends on [control=['if'], data=['element', '_elements']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['other']]
result._total = _total
return result |
def _new_temp_file(self, hint='warcrecsess'):
    '''Return a new temporary file created in this session's temp directory.'''
    return wpull.body.new_temp_file(directory=self._temp_dir, hint=hint)
constant[Return new temp file.]
return[call[name[wpull].body.new_temp_file, parameter[]]] | keyword[def] identifier[_new_temp_file] ( identifier[self] , identifier[hint] = literal[string] ):
literal[string]
keyword[return] identifier[wpull] . identifier[body] . identifier[new_temp_file] (
identifier[directory] = identifier[self] . identifier[_temp_dir] , identifier[hint] = identifier[hint]
) | def _new_temp_file(self, hint='warcrecsess'):
"""Return new temp file."""
return wpull.body.new_temp_file(directory=self._temp_dir, hint=hint) |
def profile_function(self):
    """Calculates heatmap for function.

    Runs the wrapped callable under the code-heatmap profiler and returns a
    dict containing the object's name, its return value, the total run time,
    a timestamp, and per-line heatmap data for the function's source file.
    """
    with _CodeHeatmapCalculator() as prof:
        result = self._run_object(*self._run_args, **self._run_kwargs)

    # Collect the function's source, tagging each line with its absolute
    # line number so heatmap entries can be aligned with the source.
    code_lines, start_line = inspect.getsourcelines(self._run_object)
    source_lines = []
    for line in code_lines:
        source_lines.append(('line', start_line, line))
        start_line += 1

    filename = os.path.abspath(inspect.getsourcefile(self._run_object))
    heatmap = prof.heatmap[filename]
    # Total run time is the sum of the per-line times. (The original
    # genexp `sum(time for time in ...)` shadowed the `time` module.)
    run_time = sum(heatmap.values())
    return {
        'objectName': self._object_name,
        'runTime': run_time,
        'result': result,
        'timestamp': int(time.time()),
        'heatmaps': [{
            'name': self._object_name,
            'heatmap': heatmap,
            'executionCount': prof.execution_count[filename],
            'srcCode': source_lines,
            'runTime': run_time
        }]
    }
constant[Calculates heatmap for function.]
with call[name[_CodeHeatmapCalculator], parameter[]] begin[:]
variable[result] assign[=] call[name[self]._run_object, parameter[<ast.Starred object at 0x7da18f09d840>]]
<ast.Tuple object at 0x7da18f09e3e0> assign[=] call[name[inspect].getsourcelines, parameter[name[self]._run_object]]
variable[source_lines] assign[=] list[[]]
for taget[name[line]] in starred[name[code_lines]] begin[:]
call[name[source_lines].append, parameter[tuple[[<ast.Constant object at 0x7da18f09cd60>, <ast.Name object at 0x7da18f09c3d0>, <ast.Name object at 0x7da18f09d180>]]]]
<ast.AugAssign object at 0x7da18f09d000>
variable[filename] assign[=] call[name[os].path.abspath, parameter[call[name[inspect].getsourcefile, parameter[name[self]._run_object]]]]
variable[heatmap] assign[=] call[name[prof].heatmap][name[filename]]
variable[run_time] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da20c6c4520>]]
return[dictionary[[<ast.Constant object at 0x7da20c6c4eb0>, <ast.Constant object at 0x7da20c6c4940>, <ast.Constant object at 0x7da20c6c6710>, <ast.Constant object at 0x7da20c6c42b0>, <ast.Constant object at 0x7da20c6c4340>], [<ast.Attribute object at 0x7da20c6c5de0>, <ast.Name object at 0x7da20c6c52d0>, <ast.Name object at 0x7da20c6c6c50>, <ast.Call object at 0x7da20c6c5420>, <ast.List object at 0x7da20c6c50f0>]]] | keyword[def] identifier[profile_function] ( identifier[self] ):
literal[string]
keyword[with] identifier[_CodeHeatmapCalculator] () keyword[as] identifier[prof] :
identifier[result] = identifier[self] . identifier[_run_object] (* identifier[self] . identifier[_run_args] ,** identifier[self] . identifier[_run_kwargs] )
identifier[code_lines] , identifier[start_line] = identifier[inspect] . identifier[getsourcelines] ( identifier[self] . identifier[_run_object] )
identifier[source_lines] =[]
keyword[for] identifier[line] keyword[in] identifier[code_lines] :
identifier[source_lines] . identifier[append] (( literal[string] , identifier[start_line] , identifier[line] ))
identifier[start_line] += literal[int]
identifier[filename] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[inspect] . identifier[getsourcefile] ( identifier[self] . identifier[_run_object] ))
identifier[heatmap] = identifier[prof] . identifier[heatmap] [ identifier[filename] ]
identifier[run_time] = identifier[sum] ( identifier[time] keyword[for] identifier[time] keyword[in] identifier[heatmap] . identifier[values] ())
keyword[return] {
literal[string] : identifier[self] . identifier[_object_name] ,
literal[string] : identifier[run_time] ,
literal[string] : identifier[result] ,
literal[string] : identifier[int] ( identifier[time] . identifier[time] ()),
literal[string] :[{
literal[string] : identifier[self] . identifier[_object_name] ,
literal[string] : identifier[heatmap] ,
literal[string] : identifier[prof] . identifier[execution_count] [ identifier[filename] ],
literal[string] : identifier[source_lines] ,
literal[string] : identifier[run_time]
}]
} | def profile_function(self):
"""Calculates heatmap for function."""
with _CodeHeatmapCalculator() as prof:
result = self._run_object(*self._run_args, **self._run_kwargs) # depends on [control=['with'], data=[]]
(code_lines, start_line) = inspect.getsourcelines(self._run_object)
source_lines = []
for line in code_lines:
source_lines.append(('line', start_line, line))
start_line += 1 # depends on [control=['for'], data=['line']]
filename = os.path.abspath(inspect.getsourcefile(self._run_object))
heatmap = prof.heatmap[filename]
run_time = sum((time for time in heatmap.values()))
return {'objectName': self._object_name, 'runTime': run_time, 'result': result, 'timestamp': int(time.time()), 'heatmaps': [{'name': self._object_name, 'heatmap': heatmap, 'executionCount': prof.execution_count[filename], 'srcCode': source_lines, 'runTime': run_time}]} |
def createEditor(self, parent, option, index):
    """Return the widget used to edit the item specified by *index*.

    The parent widget and style option are used to control how the editor
    widget appears.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how editor widget appears.
        index (QModelIndex): model data index.

    Returns:
        BigIntSpinbox: editor configured with this delegate's ``minimum``,
        ``maximum`` and ``singleStep`` when those values are usable,
        otherwise left at the widget's defaults.
    """
    editor = BigIntSpinbox(parent)
    try:
        editor.setMinimum(self.minimum)
        editor.setMaximum(self.maximum)
        editor.setSingleStep(self.singleStep)
    except TypeError:
        # Delegate limits are not valid numbers: deliberately fall back to
        # the editor's default values instead of failing item editing.
        pass
    return editor
constant[Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
]
variable[editor] assign[=] call[name[BigIntSpinbox], parameter[name[parent]]]
<ast.Try object at 0x7da2047ea4d0>
return[name[editor]] | keyword[def] identifier[createEditor] ( identifier[self] , identifier[parent] , identifier[option] , identifier[index] ):
literal[string]
identifier[editor] = identifier[BigIntSpinbox] ( identifier[parent] )
keyword[try] :
identifier[editor] . identifier[setMinimum] ( identifier[self] . identifier[minimum] )
identifier[editor] . identifier[setMaximum] ( identifier[self] . identifier[maximum] )
identifier[editor] . identifier[setSingleStep] ( identifier[self] . identifier[singleStep] )
keyword[except] identifier[TypeError] keyword[as] identifier[err] :
keyword[pass]
keyword[return] identifier[editor] | def createEditor(self, parent, option, index):
"""Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
"""
editor = BigIntSpinbox(parent)
try:
editor.setMinimum(self.minimum)
editor.setMaximum(self.maximum)
editor.setSingleStep(self.singleStep) # depends on [control=['try'], data=[]]
except TypeError as err:
# initiate the editor with default values
pass # depends on [control=['except'], data=[]]
return editor |
def value_counts(arg, metric_name='count'):
    """
    Compute a frequency table for this value expression

    Parameters
    ----------
    metric_name : str, optional
        Name to give the count column.

    Returns
    -------
    counts : TableExpr
        Aggregated table
    """
    table = ir.find_base_table(arg)
    count_metric = table.count().name(metric_name)
    try:
        arg.get_name()
    except com.ExpressionError:
        # Grouping requires a named expression; fall back to a placeholder.
        arg = arg.name('unnamed')
    return table.group_by(arg).aggregate(count_metric)
constant[
Compute a frequency table for this value expression
Parameters
----------
Returns
-------
counts : TableExpr
Aggregated table
]
variable[base] assign[=] call[name[ir].find_base_table, parameter[name[arg]]]
variable[metric] assign[=] call[call[name[base].count, parameter[]].name, parameter[name[metric_name]]]
<ast.Try object at 0x7da18bc72800>
return[call[call[name[base].group_by, parameter[name[arg]]].aggregate, parameter[name[metric]]]] | keyword[def] identifier[value_counts] ( identifier[arg] , identifier[metric_name] = literal[string] ):
literal[string]
identifier[base] = identifier[ir] . identifier[find_base_table] ( identifier[arg] )
identifier[metric] = identifier[base] . identifier[count] (). identifier[name] ( identifier[metric_name] )
keyword[try] :
identifier[arg] . identifier[get_name] ()
keyword[except] identifier[com] . identifier[ExpressionError] :
identifier[arg] = identifier[arg] . identifier[name] ( literal[string] )
keyword[return] identifier[base] . identifier[group_by] ( identifier[arg] ). identifier[aggregate] ( identifier[metric] ) | def value_counts(arg, metric_name='count'):
"""
Compute a frequency table for this value expression
Parameters
----------
Returns
-------
counts : TableExpr
Aggregated table
"""
base = ir.find_base_table(arg)
metric = base.count().name(metric_name)
try:
arg.get_name() # depends on [control=['try'], data=[]]
except com.ExpressionError:
arg = arg.name('unnamed') # depends on [control=['except'], data=[]]
return base.group_by(arg).aggregate(metric) |
def log_callback(wrapped_function):
    """Decorator that produces DEBUG level log messages before and after
    calling a parser method.

    If a callback raises an IgnoredMatchException the log will show 'IGNORED'
    instead to indicate that the parser will not create any objects from
    the matched string.

    Example:
        DEBUG:poyo.parser:parse_simple <- 123: 456.789
        DEBUG:poyo.parser:parse_int <- 123
        DEBUG:poyo.parser:parse_int -> 123
        DEBUG:poyo.parser:parse_float <- 456.789
        DEBUG:poyo.parser:parse_float -> 456.789
        DEBUG:poyo.parser:parse_simple -> <Simple name: 123, value: 456.789>
    """
    def _log_escaped(text):
        """Log *text* to DEBUG with special characters escaped."""
        logger.debug(text.encode('unicode_escape').decode())

    @functools.wraps(wrapped_function)
    def _wrapper(parser, match, **kwargs):
        name = wrapped_function.__name__
        _log_escaped(u'{func_name} <- {matched_string}'.format(
            func_name=name,
            matched_string=match.group(),
        ))
        try:
            result = wrapped_function(parser, match, **kwargs)
        except IgnoredMatchException:
            # Re-raise so the parser can skip this match; just record it.
            _log_escaped(u'{func_name} -> IGNORED'.format(func_name=name))
            raise
        _log_escaped(u'{func_name} -> {result}'.format(
            func_name=name,
            result=result,
        ))
        return result

    return _wrapper
constant[Decorator that produces DEBUG level log messages before and after
calling a parser method.
If a callback raises an IgnoredMatchException the log will show 'IGNORED'
instead to indicate that the parser will not create any objects from
the matched string.
Example:
DEBUG:poyo.parser:parse_simple <- 123: 456.789
DEBUG:poyo.parser:parse_int <- 123
DEBUG:poyo.parser:parse_int -> 123
DEBUG:poyo.parser:parse_float <- 456.789
DEBUG:poyo.parser:parse_float -> 456.789
DEBUG:poyo.parser:parse_simple -> <Simple name: 123, value: 456.789>
]
def function[debug_log, parameter[message]]:
constant[Helper to log an escaped version of the given message to DEBUG]
call[name[logger].debug, parameter[call[call[name[message].encode, parameter[constant[unicode_escape]]].decode, parameter[]]]]
def function[_wrapper, parameter[parser, match]]:
variable[func_name] assign[=] name[wrapped_function].__name__
call[name[debug_log], parameter[call[constant[{func_name} <- {matched_string}].format, parameter[]]]]
<ast.Try object at 0x7da1b00f6e90>
call[name[debug_log], parameter[call[constant[{func_name} -> {result}].format, parameter[]]]]
return[name[result]]
return[name[_wrapper]] | keyword[def] identifier[log_callback] ( identifier[wrapped_function] ):
literal[string]
keyword[def] identifier[debug_log] ( identifier[message] ):
literal[string]
identifier[logger] . identifier[debug] ( identifier[message] . identifier[encode] ( literal[string] ). identifier[decode] ())
@ identifier[functools] . identifier[wraps] ( identifier[wrapped_function] )
keyword[def] identifier[_wrapper] ( identifier[parser] , identifier[match] ,** identifier[kwargs] ):
identifier[func_name] = identifier[wrapped_function] . identifier[__name__]
identifier[debug_log] ( literal[string] . identifier[format] (
identifier[func_name] = identifier[func_name] ,
identifier[matched_string] = identifier[match] . identifier[group] (),
))
keyword[try] :
identifier[result] = identifier[wrapped_function] ( identifier[parser] , identifier[match] ,** identifier[kwargs] )
keyword[except] identifier[IgnoredMatchException] :
identifier[debug_log] ( literal[string] . identifier[format] ( identifier[func_name] = identifier[func_name] ))
keyword[raise]
identifier[debug_log] ( literal[string] . identifier[format] (
identifier[func_name] = identifier[func_name] ,
identifier[result] = identifier[result] ,
))
keyword[return] identifier[result]
keyword[return] identifier[_wrapper] | def log_callback(wrapped_function):
"""Decorator that produces DEBUG level log messages before and after
calling a parser method.
If a callback raises an IgnoredMatchException the log will show 'IGNORED'
instead to indicate that the parser will not create any objects from
the matched string.
Example:
DEBUG:poyo.parser:parse_simple <- 123: 456.789
DEBUG:poyo.parser:parse_int <- 123
DEBUG:poyo.parser:parse_int -> 123
DEBUG:poyo.parser:parse_float <- 456.789
DEBUG:poyo.parser:parse_float -> 456.789
DEBUG:poyo.parser:parse_simple -> <Simple name: 123, value: 456.789>
"""
def debug_log(message):
"""Helper to log an escaped version of the given message to DEBUG"""
logger.debug(message.encode('unicode_escape').decode())
@functools.wraps(wrapped_function)
def _wrapper(parser, match, **kwargs):
func_name = wrapped_function.__name__
debug_log(u'{func_name} <- {matched_string}'.format(func_name=func_name, matched_string=match.group()))
try:
result = wrapped_function(parser, match, **kwargs) # depends on [control=['try'], data=[]]
except IgnoredMatchException:
debug_log(u'{func_name} -> IGNORED'.format(func_name=func_name))
raise # depends on [control=['except'], data=[]]
debug_log(u'{func_name} -> {result}'.format(func_name=func_name, result=result))
return result
return _wrapper |
def mean_value_difference(data, ground_truth, mask=None, normalized=False,
                          force_lower_is_better=True):
    r"""Return difference in mean value between ``data`` and ``ground_truth``.

    Parameters
    ----------
    data : `Tensor` or `array-like`
        Input data to compare to the ground truth. If not a `Tensor`, an
        unweighted tensor space will be assumed.
    ground_truth : `array-like`
        Reference to which ``data`` should be compared.
    mask : `array-like`, optional
        If given, ``data * mask`` is compared to ``ground_truth * mask``.
    normalized : bool, optional
        Boolean flag to switch between unormalized and normalized FOM.
    force_lower_is_better : bool, optional
        If ``True``, it is ensured that lower values correspond to better
        matches. For the mean value difference, this is already the case, and
        the flag is only present for compatibility to other figures of merit.

    Returns
    -------
    mvd : float
        FOM value, where a lower value means a better match.

    Notes
    -----
    The FOM evaluates

    .. math::
        \mathrm{MVD}(f, g) =
        \Big| \overline{f} - \overline{g} \Big|,

    or, in normalized form

    .. math::
        \mathrm{MVD_N}(f, g) =
        \frac{\Big| \overline{f} - \overline{g} \Big|}
        {|\overline{f}| + |\overline{g}|}

    where :math:`\overline{f}` is the mean value of :math:`f`,

    .. math::
        \overline{f} = \frac{\langle f, 1\rangle}{\|1|_1}.

    The normalized variant takes values in :math:`[0, 1]`.
    """
    if not hasattr(data, 'space'):
        data = odl.vector(data)
    space = data.space
    ground_truth = space.element(ground_truth)
    l1_norm = odl.solvers.L1Norm(space)

    if mask is not None:
        data = data * mask
        ground_truth = ground_truth * mask

    # Mean of f is <f, 1> divided by the volume of the space, ||1||_1.
    volume = l1_norm(space.one())
    mean_data = data.inner(space.one()) / volume
    mean_truth = ground_truth.inner(space.one()) / volume

    result = np.abs(mean_data - mean_truth)
    if normalized:
        result = result / (np.abs(mean_data) + np.abs(mean_truth))

    # `force_lower_is_better` is intentionally ignored: lower already wins.
    return result
constant[Return difference in mean value between ``data`` and ``ground_truth``.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
Boolean flag to switch between unormalized and normalized FOM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the mean value difference, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
mvd : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{MVD}(f, g) =
\Big| \overline{f} - \overline{g} \Big|,
or, in normalized form
.. math::
\mathrm{MVD_N}(f, g) =
\frac{\Big| \overline{f} - \overline{g} \Big|}
{|\overline{f}| + |\overline{g}|}
where :math:`\overline{f}` is the mean value of :math:`f`,
.. math::
\overline{f} = \frac{\langle f, 1\rangle}{\|1|_1}.
The normalized variant takes values in :math:`[0, 1]`.
]
if <ast.UnaryOp object at 0x7da1b1d566e0> begin[:]
variable[data] assign[=] call[name[odl].vector, parameter[name[data]]]
variable[space] assign[=] name[data].space
variable[ground_truth] assign[=] call[name[space].element, parameter[name[ground_truth]]]
variable[l1_norm] assign[=] call[name[odl].solvers.L1Norm, parameter[name[space]]]
if compare[name[mask] is_not constant[None]] begin[:]
variable[data] assign[=] binary_operation[name[data] * name[mask]]
variable[ground_truth] assign[=] binary_operation[name[ground_truth] * name[mask]]
variable[vol] assign[=] call[name[l1_norm], parameter[call[name[space].one, parameter[]]]]
variable[data_mean] assign[=] binary_operation[call[name[data].inner, parameter[call[name[space].one, parameter[]]]] / name[vol]]
variable[ground_truth_mean] assign[=] binary_operation[call[name[ground_truth].inner, parameter[call[name[space].one, parameter[]]]] / name[vol]]
variable[fom] assign[=] call[name[np].abs, parameter[binary_operation[name[data_mean] - name[ground_truth_mean]]]]
if name[normalized] begin[:]
<ast.AugAssign object at 0x7da1b1eedba0>
return[name[fom]] | keyword[def] identifier[mean_value_difference] ( identifier[data] , identifier[ground_truth] , identifier[mask] = keyword[None] , identifier[normalized] = keyword[False] ,
identifier[force_lower_is_better] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] = identifier[odl] . identifier[vector] ( identifier[data] )
identifier[space] = identifier[data] . identifier[space]
identifier[ground_truth] = identifier[space] . identifier[element] ( identifier[ground_truth] )
identifier[l1_norm] = identifier[odl] . identifier[solvers] . identifier[L1Norm] ( identifier[space] )
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[data] = identifier[data] * identifier[mask]
identifier[ground_truth] = identifier[ground_truth] * identifier[mask]
identifier[vol] = identifier[l1_norm] ( identifier[space] . identifier[one] ())
identifier[data_mean] = identifier[data] . identifier[inner] ( identifier[space] . identifier[one] ())/ identifier[vol]
identifier[ground_truth_mean] = identifier[ground_truth] . identifier[inner] ( identifier[space] . identifier[one] ())/ identifier[vol]
identifier[fom] = identifier[np] . identifier[abs] ( identifier[data_mean] - identifier[ground_truth_mean] )
keyword[if] identifier[normalized] :
identifier[fom] /=( identifier[np] . identifier[abs] ( identifier[data_mean] )+ identifier[np] . identifier[abs] ( identifier[ground_truth_mean] ))
keyword[return] identifier[fom] | def mean_value_difference(data, ground_truth, mask=None, normalized=False, force_lower_is_better=True):
"""Return difference in mean value between ``data`` and ``ground_truth``.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
Boolean flag to switch between unormalized and normalized FOM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the mean value difference, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
mvd : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\\mathrm{MVD}(f, g) =
\\Big| \\overline{f} - \\overline{g} \\Big|,
or, in normalized form
.. math::
\\mathrm{MVD_N}(f, g) =
\\frac{\\Big| \\overline{f} - \\overline{g} \\Big|}
{|\\overline{f}| + |\\overline{g}|}
where :math:`\\overline{f}` is the mean value of :math:`f`,
.. math::
\\overline{f} = \\frac{\\langle f, 1\\rangle}{\\|1|_1}.
The normalized variant takes values in :math:`[0, 1]`.
"""
if not hasattr(data, 'space'):
data = odl.vector(data) # depends on [control=['if'], data=[]]
space = data.space
ground_truth = space.element(ground_truth)
l1_norm = odl.solvers.L1Norm(space)
if mask is not None:
data = data * mask
ground_truth = ground_truth * mask # depends on [control=['if'], data=['mask']]
# Volume of space
vol = l1_norm(space.one())
data_mean = data.inner(space.one()) / vol
ground_truth_mean = ground_truth.inner(space.one()) / vol
fom = np.abs(data_mean - ground_truth_mean)
if normalized:
fom /= np.abs(data_mean) + np.abs(ground_truth_mean) # depends on [control=['if'], data=[]]
# Ignore `force_lower_is_better` since that's already the case
return fom |
def _get_connection(self, handle, expect_state=None):
"""Get a connection object, logging an error if its in an unexpected state
"""
conndata = self._connections.get(handle)
if conndata and expect_state is not None and conndata['state'] != expect_state:
self._logger.error("Connection in unexpected state, wanted=%s, got=%s", expect_state,
conndata['state'])
return conndata | def function[_get_connection, parameter[self, handle, expect_state]]:
constant[Get a connection object, logging an error if its in an unexpected state
]
variable[conndata] assign[=] call[name[self]._connections.get, parameter[name[handle]]]
if <ast.BoolOp object at 0x7da20c6aa260> begin[:]
call[name[self]._logger.error, parameter[constant[Connection in unexpected state, wanted=%s, got=%s], name[expect_state], call[name[conndata]][constant[state]]]]
return[name[conndata]] | keyword[def] identifier[_get_connection] ( identifier[self] , identifier[handle] , identifier[expect_state] = keyword[None] ):
literal[string]
identifier[conndata] = identifier[self] . identifier[_connections] . identifier[get] ( identifier[handle] )
keyword[if] identifier[conndata] keyword[and] identifier[expect_state] keyword[is] keyword[not] keyword[None] keyword[and] identifier[conndata] [ literal[string] ]!= identifier[expect_state] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] , identifier[expect_state] ,
identifier[conndata] [ literal[string] ])
keyword[return] identifier[conndata] | def _get_connection(self, handle, expect_state=None):
"""Get a connection object, logging an error if its in an unexpected state
"""
conndata = self._connections.get(handle)
if conndata and expect_state is not None and (conndata['state'] != expect_state):
self._logger.error('Connection in unexpected state, wanted=%s, got=%s', expect_state, conndata['state']) # depends on [control=['if'], data=[]]
return conndata |
def get_value(self, query):
    """Converts a dimension/category list of dicts into a data value.

    Resolution happens in three steps: the query is mapped to dimension
    indices, those are flattened into a single value index, and that
    index is used to fetch the value.

    Args:
        query(list): list of dicts with the desired query.

    Returns:
        value(float): numeric data value.
    """
    dim_indices = self.get_dimension_indices(query)
    flat_index = self.get_value_index(dim_indices)
    return self.get_value_by_index(flat_index)
constant[Converts a dimension/category list of dicts into a data value in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
]
variable[indices] assign[=] call[name[self].get_dimension_indices, parameter[name[query]]]
variable[index] assign[=] call[name[self].get_value_index, parameter[name[indices]]]
variable[value] assign[=] call[name[self].get_value_by_index, parameter[name[index]]]
return[name[value]] | keyword[def] identifier[get_value] ( identifier[self] , identifier[query] ):
literal[string]
identifier[indices] = identifier[self] . identifier[get_dimension_indices] ( identifier[query] )
identifier[index] = identifier[self] . identifier[get_value_index] ( identifier[indices] )
identifier[value] = identifier[self] . identifier[get_value_by_index] ( identifier[index] )
keyword[return] identifier[value] | def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value |
def parse_field_path(api_repr):
    """Parse a **field path** from into a list of nested field names.

    See :func:`field_path` for more on **field paths**.

    Args:
        api_repr (str):
            The unique Firestore api representation which consists of
            either simple or UTF-8 field names. It cannot exceed
            1500 bytes, and cannot be empty. Simple field names match
            ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
            escaped by surrounding them with backticks.

    Returns:
        List[str, ...]: The list of field names in the field path.
    """
    # code dredged back up from
    # https://github.com/googleapis/google-cloud-python/pull/5109/files
    names = []
    for part in split_field_path(api_repr):
        if part[0] == "`" and part[-1] == "`":
            # Backtick-quoted (non-simple) name: strip quotes and unescape.
            part = part[1:-1]
            part = part.replace(_ESCAPED_BACKTICK, _BACKTICK)
            part = part.replace(_ESCAPED_BACKSLASH, _BACKSLASH)
        names.append(part)
    return names
constant[Parse a **field path** from into a list of nested field names.
See :func:`field_path` for more on **field paths**.
Args:
api_repr (str):
The unique Firestore api representation which consists of
either simple or UTF-8 field names. It cannot exceed
1500 bytes, and cannot be empty. Simple field names match
``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
escaped by surrounding them with backticks.
Returns:
List[str, ...]: The list of field names in the field path.
]
variable[field_names] assign[=] list[[]]
for taget[name[field_name]] in starred[call[name[split_field_path], parameter[name[api_repr]]]] begin[:]
if <ast.BoolOp object at 0x7da20e9575b0> begin[:]
variable[field_name] assign[=] call[name[field_name]][<ast.Slice object at 0x7da20e956650>]
variable[field_name] assign[=] call[name[field_name].replace, parameter[name[_ESCAPED_BACKTICK], name[_BACKTICK]]]
variable[field_name] assign[=] call[name[field_name].replace, parameter[name[_ESCAPED_BACKSLASH], name[_BACKSLASH]]]
call[name[field_names].append, parameter[name[field_name]]]
return[name[field_names]] | keyword[def] identifier[parse_field_path] ( identifier[api_repr] ):
literal[string]
identifier[field_names] =[]
keyword[for] identifier[field_name] keyword[in] identifier[split_field_path] ( identifier[api_repr] ):
keyword[if] identifier[field_name] [ literal[int] ]== literal[string] keyword[and] identifier[field_name] [- literal[int] ]== literal[string] :
identifier[field_name] = identifier[field_name] [ literal[int] :- literal[int] ]
identifier[field_name] = identifier[field_name] . identifier[replace] ( identifier[_ESCAPED_BACKTICK] , identifier[_BACKTICK] )
identifier[field_name] = identifier[field_name] . identifier[replace] ( identifier[_ESCAPED_BACKSLASH] , identifier[_BACKSLASH] )
identifier[field_names] . identifier[append] ( identifier[field_name] )
keyword[return] identifier[field_names] | def parse_field_path(api_repr):
"""Parse a **field path** from into a list of nested field names.
See :func:`field_path` for more on **field paths**.
Args:
api_repr (str):
The unique Firestore api representation which consists of
either simple or UTF-8 field names. It cannot exceed
1500 bytes, and cannot be empty. Simple field names match
``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
escaped by surrounding them with backticks.
Returns:
List[str, ...]: The list of field names in the field path.
"""
# code dredged back up from
# https://github.com/googleapis/google-cloud-python/pull/5109/files
field_names = []
for field_name in split_field_path(api_repr):
# non-simple field name
if field_name[0] == '`' and field_name[-1] == '`':
field_name = field_name[1:-1]
field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK)
field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH) # depends on [control=['if'], data=[]]
field_names.append(field_name) # depends on [control=['for'], data=['field_name']]
return field_names |
def get_cpu_info(self) -> str:
    """Show device CPU information.

    Returns:
        str: raw contents of ``/proc/cpuinfo`` on the device.
    """
    command = ('-s', self.device_sn, 'shell', 'cat', '/proc/cpuinfo')
    output, _ = self._execute(*command)
    return output
constant[Show device CPU information.]
<ast.Tuple object at 0x7da207f9b190> assign[=] call[name[self]._execute, parameter[constant[-s], name[self].device_sn, constant[shell], constant[cat], constant[/proc/cpuinfo]]]
return[name[output]] | keyword[def] identifier[get_cpu_info] ( identifier[self] )-> identifier[str] :
literal[string]
identifier[output] , identifier[_] = identifier[self] . identifier[_execute] (
literal[string] , identifier[self] . identifier[device_sn] , literal[string] , literal[string] , literal[string] )
keyword[return] identifier[output] | def get_cpu_info(self) -> str:
"""Show device CPU information."""
(output, _) = self._execute('-s', self.device_sn, 'shell', 'cat', '/proc/cpuinfo')
return output |
def show_pan_mark(viewer, tf, color='red'):
    """Show a mark in the pan position (center of window).

    Parameters
    ----------
    viewer : an ImageView subclass instance
        Viewer whose private canvas the mark is drawn on.
    tf : bool
        If True, show the mark; else remove it if present.
    color : str
        Color of the mark; default is 'red'.
    """
    tag = '_$pan_mark'
    radius = 10
    canvas = viewer.get_private_canvas()

    try:
        existing = canvas.get_object_by_tag(tag)
    except KeyError:
        existing = None

    if existing is not None:
        if tf:
            # Mark already present: just refresh its color.
            existing.color = color
        else:
            canvas.delete_object_by_tag(tag)
    elif tf:
        Point = canvas.get_draw_class('point')
        canvas.add(
            Point(0, 0, radius, style='plus', color=color, coord='cartesian'),
            tag=tag, redraw=False)

    canvas.update_canvas(whence=3)
canvas.update_canvas(whence=3) | def function[show_pan_mark, parameter[viewer, tf, color]]:
constant[Show a mark in the pan position (center of window).
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
color : str
Color of the mark; default is 'red'.
]
variable[tag] assign[=] constant[_$pan_mark]
variable[radius] assign[=] constant[10]
variable[canvas] assign[=] call[name[viewer].get_private_canvas, parameter[]]
<ast.Try object at 0x7da20e954250>
call[name[canvas].update_canvas, parameter[]] | keyword[def] identifier[show_pan_mark] ( identifier[viewer] , identifier[tf] , identifier[color] = literal[string] ):
literal[string]
identifier[tag] = literal[string]
identifier[radius] = literal[int]
identifier[canvas] = identifier[viewer] . identifier[get_private_canvas] ()
keyword[try] :
identifier[mark] = identifier[canvas] . identifier[get_object_by_tag] ( identifier[tag] )
keyword[if] keyword[not] identifier[tf] :
identifier[canvas] . identifier[delete_object_by_tag] ( identifier[tag] )
keyword[else] :
identifier[mark] . identifier[color] = identifier[color]
keyword[except] identifier[KeyError] :
keyword[if] identifier[tf] :
identifier[Point] = identifier[canvas] . identifier[get_draw_class] ( literal[string] )
identifier[canvas] . identifier[add] ( identifier[Point] ( literal[int] , literal[int] , identifier[radius] , identifier[style] = literal[string] , identifier[color] = identifier[color] ,
identifier[coord] = literal[string] ),
identifier[tag] = identifier[tag] , identifier[redraw] = keyword[False] )
identifier[canvas] . identifier[update_canvas] ( identifier[whence] = literal[int] ) | def show_pan_mark(viewer, tf, color='red'):
"""Show a mark in the pan position (center of window).
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
color : str
Color of the mark; default is 'red'.
"""
tag = '_$pan_mark'
radius = 10
canvas = viewer.get_private_canvas()
try:
mark = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag) # depends on [control=['if'], data=[]]
else:
mark.color = color # depends on [control=['try'], data=[]]
except KeyError:
if tf:
Point = canvas.get_draw_class('point')
canvas.add(Point(0, 0, radius, style='plus', color=color, coord='cartesian'), tag=tag, redraw=False) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
canvas.update_canvas(whence=3) |
def main():
    """
    NAME
        common_mean.py
    DESCRIPTION
       calculates bootstrap statistics to test for common mean
    INPUT FORMAT
       takes dec/inc as first two columns in two space delimited files
    SYNTAX
       common_mean.py [command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE, input file
        -f2 FILE, optional second file to compare with first file
        -dir D I, optional direction to compare with input file
        -fmt [svg,jpg,pnd,pdf] set figure format [default is svg]
    NOTES
       must have either F2 OR dir but not both
    """
    # d/i hold an optional fixed comparison direction (-dir); file2 an
    # optional second data file (-f2).  Per NOTES, exactly one of the two
    # comparison modes is expected, but that is not enforced here.
    d,i,file2="","",""
    fmt,plot='svg',0  # figure format; plot=1 means save without displaying
    if '-h' in sys.argv: # check if help is needed
        print(main.__doc__)
        sys.exit() # graceful quit
    if '-sav' in sys.argv: plot=1
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file1=sys.argv[ind+1]
    # NOTE(review): file1 is never bound when -f is omitted, so the
    # loadtxt call below would raise NameError — -f is effectively required.
    if '-f2' in sys.argv:
        ind=sys.argv.index('-f2')
        file2=sys.argv[ind+1]
    if '-dir' in sys.argv:
        ind=sys.argv.index('-dir')
        d=float(sys.argv[ind+1])
        i=float(sys.argv[ind+2])
    D1=numpy.loadtxt(file1,dtype=numpy.float)  # dec/inc columns, first data set
    if file2!="": D2=numpy.loadtxt(file2,dtype=numpy.float)  # second data set, when given
    #
    counter,NumSims=0,1000  # NOTE(review): both appear unused below
    #
    # get bootstrapped means for first data set
    #
    print("Doing first set of directions, please be patient..")
    BDI1=pmag.di_boot(D1)
    #
    # convert to cartesian coordinates X1,X2, Y1,Y2 and Z1, Z2
    #
    if d=="": # repeat for second data set
        print("Doing second set of directions, please be patient..")
        BDI2=pmag.di_boot(D2)
    else:
        BDI2=[]  # comparing against a fixed direction: no second bootstrap
    # set up plots
    CDF={'X':1,'Y':2,'Z':3}  # one CDF figure per cartesian component
    pmagplotlib.plot_init(CDF['X'],4,4)
    pmagplotlib.plot_init(CDF['Y'],4,4)
    pmagplotlib.plot_init(CDF['Z'],4,4)
    # draw the cdfs
    pmagplotlib.plot_com(CDF,BDI1,BDI2,[d,i])
    files={}
    files['X']='CD_X.'+fmt
    files['Y']='CD_Y.'+fmt
    files['Z']='CD_Z.'+fmt
    if plot==0:
        # interactive mode: display the figures, save only on request
        pmagplotlib.draw_figs(CDF)
        ans=input("S[a]ve plots, <Return> to quit ")
        if ans=="a":
            pmagplotlib.save_plots(CDF,files)
        else:
            sys.exit()
    else:
        # -sav given: write the figures straight to disk and exit
        pmagplotlib.save_plots(CDF,files)
        sys.exit()
constant[
NAME
common_mean.py
DESCRIPTION
calculates bootstrap statistics to test for common mean
INPUT FORMAT
takes dec/inc as first two columns in two space delimited files
SYNTAX
common_mean.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE, input file
-f2 FILE, optional second file to compare with first file
-dir D I, optional direction to compare with input file
-fmt [svg,jpg,pnd,pdf] set figure format [default is svg]
NOTES
must have either F2 OR dir but not both
]
<ast.Tuple object at 0x7da1b0449390> assign[=] tuple[[<ast.Constant object at 0x7da1b04487f0>, <ast.Constant object at 0x7da1b0448a60>, <ast.Constant object at 0x7da1b0448dc0>]]
<ast.Tuple object at 0x7da1b0448700> assign[=] tuple[[<ast.Constant object at 0x7da1b0448d00>, <ast.Constant object at 0x7da1b0448df0>]]
if compare[constant[-h] in name[sys].argv] begin[:]
call[name[print], parameter[name[main].__doc__]]
call[name[sys].exit, parameter[]]
if compare[constant[-sav] in name[sys].argv] begin[:]
variable[plot] assign[=] constant[1]
if compare[constant[-fmt] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-fmt]]]
variable[fmt] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
if compare[constant[-f] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-f]]]
variable[file1] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
if compare[constant[-f2] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-f2]]]
variable[file2] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
if compare[constant[-dir] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-dir]]]
variable[d] assign[=] call[name[float], parameter[call[name[sys].argv][binary_operation[name[ind] + constant[1]]]]]
variable[i] assign[=] call[name[float], parameter[call[name[sys].argv][binary_operation[name[ind] + constant[2]]]]]
variable[D1] assign[=] call[name[numpy].loadtxt, parameter[name[file1]]]
if compare[name[file2] not_equal[!=] constant[]] begin[:]
variable[D2] assign[=] call[name[numpy].loadtxt, parameter[name[file2]]]
<ast.Tuple object at 0x7da1b047dae0> assign[=] tuple[[<ast.Constant object at 0x7da1b047db40>, <ast.Constant object at 0x7da1b047dab0>]]
call[name[print], parameter[constant[Doing first set of directions, please be patient..]]]
variable[BDI1] assign[=] call[name[pmag].di_boot, parameter[name[D1]]]
if compare[name[d] equal[==] constant[]] begin[:]
call[name[print], parameter[constant[Doing second set of directions, please be patient..]]]
variable[BDI2] assign[=] call[name[pmag].di_boot, parameter[name[D2]]]
variable[CDF] assign[=] dictionary[[<ast.Constant object at 0x7da1b047ded0>, <ast.Constant object at 0x7da1b047c2e0>, <ast.Constant object at 0x7da1b047e650>], [<ast.Constant object at 0x7da1b047f4f0>, <ast.Constant object at 0x7da1b047f4c0>, <ast.Constant object at 0x7da1b047ea70>]]
call[name[pmagplotlib].plot_init, parameter[call[name[CDF]][constant[X]], constant[4], constant[4]]]
call[name[pmagplotlib].plot_init, parameter[call[name[CDF]][constant[Y]], constant[4], constant[4]]]
call[name[pmagplotlib].plot_init, parameter[call[name[CDF]][constant[Z]], constant[4], constant[4]]]
call[name[pmagplotlib].plot_com, parameter[name[CDF], name[BDI1], name[BDI2], list[[<ast.Name object at 0x7da1b047c8e0>, <ast.Name object at 0x7da1b047c7f0>]]]]
variable[files] assign[=] dictionary[[], []]
call[name[files]][constant[X]] assign[=] binary_operation[constant[CD_X.] + name[fmt]]
call[name[files]][constant[Y]] assign[=] binary_operation[constant[CD_Y.] + name[fmt]]
call[name[files]][constant[Z]] assign[=] binary_operation[constant[CD_Z.] + name[fmt]]
if compare[name[plot] equal[==] constant[0]] begin[:]
call[name[pmagplotlib].draw_figs, parameter[name[CDF]]]
variable[ans] assign[=] call[name[input], parameter[constant[S[a]ve plots, <Return> to quit ]]]
if compare[name[ans] equal[==] constant[a]] begin[:]
call[name[pmagplotlib].save_plots, parameter[name[CDF], name[files]]] | keyword[def] identifier[main] ():
literal[string]
identifier[d] , identifier[i] , identifier[file2] = literal[string] , literal[string] , literal[string]
identifier[fmt] , identifier[plot] = literal[string] , literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[sys] . identifier[exit] ()
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] : identifier[plot] = literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[fmt] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[file1] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[file2] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[d] = identifier[float] ( identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ])
identifier[i] = identifier[float] ( identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ])
identifier[D1] = identifier[numpy] . identifier[loadtxt] ( identifier[file1] , identifier[dtype] = identifier[numpy] . identifier[float] )
keyword[if] identifier[file2] != literal[string] : identifier[D2] = identifier[numpy] . identifier[loadtxt] ( identifier[file2] , identifier[dtype] = identifier[numpy] . identifier[float] )
identifier[counter] , identifier[NumSims] = literal[int] , literal[int]
identifier[print] ( literal[string] )
identifier[BDI1] = identifier[pmag] . identifier[di_boot] ( identifier[D1] )
keyword[if] identifier[d] == literal[string] :
identifier[print] ( literal[string] )
identifier[BDI2] = identifier[pmag] . identifier[di_boot] ( identifier[D2] )
keyword[else] :
identifier[BDI2] =[]
identifier[CDF] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[pmagplotlib] . identifier[plot_init] ( identifier[CDF] [ literal[string] ], literal[int] , literal[int] )
identifier[pmagplotlib] . identifier[plot_init] ( identifier[CDF] [ literal[string] ], literal[int] , literal[int] )
identifier[pmagplotlib] . identifier[plot_init] ( identifier[CDF] [ literal[string] ], literal[int] , literal[int] )
identifier[pmagplotlib] . identifier[plot_com] ( identifier[CDF] , identifier[BDI1] , identifier[BDI2] ,[ identifier[d] , identifier[i] ])
identifier[files] ={}
identifier[files] [ literal[string] ]= literal[string] + identifier[fmt]
identifier[files] [ literal[string] ]= literal[string] + identifier[fmt]
identifier[files] [ literal[string] ]= literal[string] + identifier[fmt]
keyword[if] identifier[plot] == literal[int] :
identifier[pmagplotlib] . identifier[draw_figs] ( identifier[CDF] )
identifier[ans] = identifier[input] ( literal[string] )
keyword[if] identifier[ans] == literal[string] :
identifier[pmagplotlib] . identifier[save_plots] ( identifier[CDF] , identifier[files] )
keyword[else] :
identifier[sys] . identifier[exit] ()
keyword[else] :
identifier[pmagplotlib] . identifier[save_plots] ( identifier[CDF] , identifier[files] )
identifier[sys] . identifier[exit] () | def main():
"""
NAME
common_mean.py
DESCRIPTION
calculates bootstrap statistics to test for common mean
INPUT FORMAT
takes dec/inc as first two columns in two space delimited files
SYNTAX
common_mean.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE, input file
-f2 FILE, optional second file to compare with first file
-dir D I, optional direction to compare with input file
-fmt [svg,jpg,pnd,pdf] set figure format [default is svg]
NOTES
must have either F2 OR dir but not both
"""
(d, i, file2) = ('', '', '')
(fmt, plot) = ('svg', 0)
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit # depends on [control=['if'], data=[]]
if '-sav' in sys.argv:
plot = 1 # depends on [control=['if'], data=[]]
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
file1 = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
if '-f2' in sys.argv:
ind = sys.argv.index('-f2')
file2 = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
if '-dir' in sys.argv:
ind = sys.argv.index('-dir')
d = float(sys.argv[ind + 1])
i = float(sys.argv[ind + 2]) # depends on [control=['if'], data=[]]
D1 = numpy.loadtxt(file1, dtype=numpy.float)
if file2 != '':
D2 = numpy.loadtxt(file2, dtype=numpy.float) # depends on [control=['if'], data=['file2']]
#
(counter, NumSims) = (0, 1000)
#
# get bootstrapped means for first data set
#
print('Doing first set of directions, please be patient..')
BDI1 = pmag.di_boot(D1)
#
# convert to cartesian coordinates X1,X2, Y1,Y2 and Z1, Z2
#
if d == '': # repeat for second data set
print('Doing second set of directions, please be patient..')
BDI2 = pmag.di_boot(D2) # depends on [control=['if'], data=[]]
else:
BDI2 = []
# set up plots
CDF = {'X': 1, 'Y': 2, 'Z': 3}
pmagplotlib.plot_init(CDF['X'], 4, 4)
pmagplotlib.plot_init(CDF['Y'], 4, 4)
pmagplotlib.plot_init(CDF['Z'], 4, 4)
# draw the cdfs
pmagplotlib.plot_com(CDF, BDI1, BDI2, [d, i])
files = {}
files['X'] = 'CD_X.' + fmt
files['Y'] = 'CD_Y.' + fmt
files['Z'] = 'CD_Z.' + fmt
if plot == 0:
pmagplotlib.draw_figs(CDF)
ans = input('S[a]ve plots, <Return> to quit ')
if ans == 'a':
pmagplotlib.save_plots(CDF, files) # depends on [control=['if'], data=[]]
else:
sys.exit() # depends on [control=['if'], data=[]]
else:
pmagplotlib.save_plots(CDF, files)
sys.exit() |
def delete(ctx, family_id, individual_id, root):
    """
    Delete a case or individual from the database.

    If no database was found run puzzle init first.

    Args:
        ctx: click context; provides configuration via ``ctx.obj`` and
            ``abort()`` for error exits.
        family_id: id of the case (family) to delete, or falsy.
        individual_id: id of the individual to delete, or falsy.
        root: puzzle root directory; falls back to ``ctx.obj['root']``,
            then ``~/.puzzle``.
    """
    root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
    if os.path.isfile(root):
        logger.error("'root' can't be a file")
        ctx.abort()

    logger.info("Root directory is: {}".format(root))

    db_path = os.path.join(root, 'puzzle_db.sqlite3')
    logger.info("db path is: {}".format(db_path))

    if not os.path.exists(db_path):
        # Logger.warn is a deprecated alias of Logger.warning
        logger.warning("database not initialized, run 'puzzle init'")
        ctx.abort()

    store = SqlStore(db_path)

    if family_id:
        case_obj = store.case(case_id=family_id)
        if case_obj is None:
            logger.warning("Family {0} does not exist in database"
                           .format(family_id))
            ctx.abort()
        store.delete_case(case_obj)

    elif individual_id:
        ind_obj = store.individual(ind_id=individual_id)
        # Guard against a missing individual before touching its attributes
        # (mirrors the None check in the family branch above; previously a
        # missing individual would raise AttributeError on .ind_id —
        # assumes store.individual returns None when not found, TODO confirm).
        if ind_obj is None or ind_obj.ind_id != individual_id:
            logger.warning("Individual {0} does not exist in database"
                           .format(individual_id))
            ctx.abort()
        store.delete_individual(ind_obj)

    else:
        logger.warning("Please provide a family or individual id")
        ctx.abort()
constant[
Delete a case or individual from the database.
If no database was found run puzzle init first.
]
variable[root] assign[=] <ast.BoolOp object at 0x7da20c6e4940>
if call[name[os].path.isfile, parameter[name[root]]] begin[:]
call[name[logger].error, parameter[constant['root' can't be a file]]]
call[name[ctx].abort, parameter[]]
call[name[logger].info, parameter[call[constant[Root directory is: {}].format, parameter[name[root]]]]]
variable[db_path] assign[=] call[name[os].path.join, parameter[name[root], constant[puzzle_db.sqlite3]]]
call[name[logger].info, parameter[call[constant[db path is: {}].format, parameter[name[db_path]]]]]
if <ast.UnaryOp object at 0x7da20c6e6fb0> begin[:]
call[name[logger].warn, parameter[constant[database not initialized, run 'puzzle init']]]
call[name[ctx].abort, parameter[]]
variable[store] assign[=] call[name[SqlStore], parameter[name[db_path]]]
if name[family_id] begin[:]
variable[case_obj] assign[=] call[name[store].case, parameter[]]
if compare[name[case_obj] is constant[None]] begin[:]
call[name[logger].warning, parameter[call[constant[Family {0} does not exist in database].format, parameter[name[family_id]]]]]
call[name[ctx].abort, parameter[]]
call[name[store].delete_case, parameter[name[case_obj]]] | keyword[def] identifier[delete] ( identifier[ctx] , identifier[family_id] , identifier[individual_id] , identifier[root] ):
literal[string]
identifier[root] = identifier[root] keyword[or] identifier[ctx] . identifier[obj] . identifier[get] ( literal[string] ) keyword[or] identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[root] ):
identifier[logger] . identifier[error] ( literal[string] )
identifier[ctx] . identifier[abort] ()
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[root] ))
identifier[db_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[db_path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[db_path] ):
identifier[logger] . identifier[warn] ( literal[string] )
identifier[ctx] . identifier[abort] ()
identifier[store] = identifier[SqlStore] ( identifier[db_path] )
keyword[if] identifier[family_id] :
identifier[case_obj] = identifier[store] . identifier[case] ( identifier[case_id] = identifier[family_id] )
keyword[if] identifier[case_obj] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string]
. identifier[format] ( identifier[family_id] ))
identifier[ctx] . identifier[abort] ()
identifier[store] . identifier[delete_case] ( identifier[case_obj] )
keyword[elif] identifier[individual_id] :
identifier[ind_obj] = identifier[store] . identifier[individual] ( identifier[ind_id] = identifier[individual_id] )
keyword[if] identifier[ind_obj] . identifier[ind_id] != identifier[individual_id] :
identifier[logger] . identifier[warning] ( literal[string]
. identifier[format] ( identifier[individual_id] ))
identifier[ctx] . identifier[abort] ()
identifier[store] . identifier[delete_individual] ( identifier[ind_obj] )
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[ctx] . identifier[abort] () | def delete(ctx, family_id, individual_id, root):
"""
Delete a case or individual from the database.
If no database was found run puzzle init first.
"""
root = root or ctx.obj.get('root') or os.path.expanduser('~/.puzzle')
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort() # depends on [control=['if'], data=[]]
logger.info('Root directory is: {}'.format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info('db path is: {}'.format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort() # depends on [control=['if'], data=[]]
store = SqlStore(db_path)
if family_id:
case_obj = store.case(case_id=family_id)
if case_obj is None:
logger.warning('Family {0} does not exist in database'.format(family_id))
ctx.abort() # depends on [control=['if'], data=[]]
store.delete_case(case_obj) # depends on [control=['if'], data=[]]
elif individual_id:
ind_obj = store.individual(ind_id=individual_id)
if ind_obj.ind_id != individual_id:
logger.warning('Individual {0} does not exist in database'.format(individual_id))
ctx.abort() # depends on [control=['if'], data=['individual_id']]
store.delete_individual(ind_obj) # depends on [control=['if'], data=[]]
else:
logger.warning('Please provide a family or individual id')
ctx.abort() |
def handle_exception(logger, **kw_decorator):
    """
    Build a decorator that logs — and, when mail settings are supplied via
    keyword arguments, e-mails — any exception raised by the wrapped
    function.

    :param logger: logging, a logging object
    :return: decorator, wraps exception loggers
    """
    def decorator(query_func):
        @functools.wraps(query_func)
        def wrapper(*args, **kwargs):
            try:
                return query_func(*args, **kwargs)
            except Exception as e:
                # The exception is swallowed: after logging (and optionally
                # mailing) the wrapper falls through and returns None.
                detail = 'Exception in function {0} -- {1}'.format(
                    query_func.__name__, e)
                logger.info(detail)
                mail_keys = ['subject', 'sender', 'username', 'password',
                             'host', 'receiver']
                if valid_dict(kw_decorator, mail_keys):
                    logger.info('Now is sending the email with exception message')
                    stamp = time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time()))
                    _send(subject=kw_decorator['subject'],
                          text=stamp + ' ' + detail,
                          sender=kw_decorator['sender'],
                          username=kw_decorator['username'],
                          password=kw_decorator['password'],
                          host=kw_decorator['host'],
                          receiver=kw_decorator['receiver'])
                    logger.info('Email is sent')
        return wrapper
    return decorator
return decorator | def function[handle_exception, parameter[logger]]:
constant[
:param logger: logging, a logging object
:return: decorator, wraps exception loggers
]
def function[decorator, parameter[query_func]]:
def function[wrapper, parameter[]]:
<ast.Try object at 0x7da18bcc9c00>
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[handle_exception] ( identifier[logger] ,** identifier[kw_decorator] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[query_func] ):
@ identifier[functools] . identifier[wraps] ( identifier[query_func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
keyword[return] identifier[query_func] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[query_func] . identifier[__name__] , identifier[e] ))
keyword[if] identifier[valid_dict] ( identifier[kw_decorator] ,[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]):
identifier[logger] . identifier[info] ( literal[string] )
identifier[t] = identifier[time] . identifier[strftime] ( literal[string] , identifier[time] . identifier[localtime] ( identifier[time] . identifier[time] ()))
identifier[msg] = identifier[t] + literal[string] + literal[string] . identifier[format] ( identifier[query_func] . identifier[__name__] , identifier[e] )
identifier[_send] ( identifier[subject] = identifier[kw_decorator] [ literal[string] ],
identifier[text] = identifier[msg] ,
identifier[sender] = identifier[kw_decorator] [ literal[string] ],
identifier[username] = identifier[kw_decorator] [ literal[string] ],
identifier[password] = identifier[kw_decorator] [ literal[string] ],
identifier[host] = identifier[kw_decorator] [ literal[string] ],
identifier[receiver] = identifier[kw_decorator] [ literal[string] ])
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def handle_exception(logger, **kw_decorator):
"""
:param logger: logging, a logging object
:return: decorator, wraps exception loggers
"""
def decorator(query_func):
@functools.wraps(query_func)
def wrapper(*args, **kwargs):
try:
return query_func(*args, **kwargs) # depends on [control=['try'], data=[]]
except Exception as e:
logger.info('Exception in function {0} -- {1}'.format(query_func.__name__, e))
if valid_dict(kw_decorator, ['subject', 'sender', 'username', 'password', 'host', 'receiver']):
logger.info('Now is sending the email with exception message')
t = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
msg = t + ' ' + 'Exception in function {0} -- {1}'.format(query_func.__name__, e)
_send(subject=kw_decorator['subject'], text=msg, sender=kw_decorator['sender'], username=kw_decorator['username'], password=kw_decorator['password'], host=kw_decorator['host'], receiver=kw_decorator['receiver'])
logger.info('Email is sent') # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
return wrapper
return decorator |
def register(self, mimetype):
    """Return a decorator that records a function as the *mimetype* handler.

    The decorated function is stored in ``self._reg`` under ``mimetype``
    and handed back unchanged, so the decorator is transparent.
    """
    def _decorator(handler):
        # Map the mimetype to its handler, then return the function as-is.
        self._reg[mimetype] = handler
        return handler
    return _decorator
constant[Register a function to handle a particular mimetype.]
def function[dec, parameter[func]]:
call[name[self]._reg][name[mimetype]] assign[=] name[func]
return[name[func]]
return[name[dec]] | keyword[def] identifier[register] ( identifier[self] , identifier[mimetype] ):
literal[string]
keyword[def] identifier[dec] ( identifier[func] ):
identifier[self] . identifier[_reg] [ identifier[mimetype] ]= identifier[func]
keyword[return] identifier[func]
keyword[return] identifier[dec] | def register(self, mimetype):
"""Register a function to handle a particular mimetype."""
def dec(func):
self._reg[mimetype] = func
return func
return dec |
def create_from_json(cls, json_data):
    """Deserialize msa json data into a Msa object

    Args:
        json_data (dict): The json data for this msa
    Returns:
        Msa object
    """
    result = Msa()
    result.msa = json_data["msa_info"]["msa"]
    # dict.get already yields None when the key is absent.
    result.meta = json_data.get("meta")
    result.component_results = _create_component_results(json_data, "msa_info")
    return result
constant[Deserialize msa json data into a Msa object
Args:
json_data (dict): The json data for this msa
Returns:
Msa object
]
variable[msa] assign[=] call[name[Msa], parameter[]]
name[msa].msa assign[=] call[call[name[json_data]][constant[msa_info]]][constant[msa]]
name[msa].meta assign[=] <ast.IfExp object at 0x7da20c76f2b0>
name[msa].component_results assign[=] call[name[_create_component_results], parameter[name[json_data], constant[msa_info]]]
return[name[msa]] | keyword[def] identifier[create_from_json] ( identifier[cls] , identifier[json_data] ):
literal[string]
identifier[msa] = identifier[Msa] ()
identifier[msa] . identifier[msa] = identifier[json_data] [ literal[string] ][ literal[string] ]
identifier[msa] . identifier[meta] = identifier[json_data] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[json_data] keyword[else] keyword[None]
identifier[msa] . identifier[component_results] = identifier[_create_component_results] ( identifier[json_data] , literal[string] )
keyword[return] identifier[msa] | def create_from_json(cls, json_data):
"""Deserialize msa json data into a Msa object
Args:
json_data (dict): The json data for this msa
Returns:
Msa object
"""
msa = Msa()
msa.msa = json_data['msa_info']['msa']
msa.meta = json_data['meta'] if 'meta' in json_data else None
msa.component_results = _create_component_results(json_data, 'msa_info')
return msa |
def getSensors(self):
    """ Returns the currently visible state of the world as a numpy array
        of doubles.
    """
    # The observable state is the active power demand at every PQ bus.
    demands = [bus.p_demand for bus in self.case.buses if bus.type == PQ]
    Pd = array(demands)
    logger.info("State: %s" % list(Pd))
    return Pd
constant[ Returns the currently visible state of the world as a numpy array
of doubles.
]
variable[Pd] assign[=] call[name[array], parameter[<ast.ListComp object at 0x7da1b254c910>]]
call[name[logger].info, parameter[binary_operation[constant[State: %s] <ast.Mod object at 0x7da2590d6920> call[name[list], parameter[name[Pd]]]]]]
return[name[Pd]] | keyword[def] identifier[getSensors] ( identifier[self] ):
literal[string]
identifier[Pd] = identifier[array] ([ identifier[b] . identifier[p_demand] keyword[for] identifier[b] keyword[in] identifier[self] . identifier[case] . identifier[buses] keyword[if] identifier[b] . identifier[type] == identifier[PQ] ])
identifier[logger] . identifier[info] ( literal[string] % identifier[list] ( identifier[Pd] ))
keyword[return] identifier[Pd] | def getSensors(self):
""" Returns the currently visible state of the world as a numpy array
of doubles.
"""
Pd = array([b.p_demand for b in self.case.buses if b.type == PQ])
logger.info('State: %s' % list(Pd))
return Pd |
def process(self):
    """
    Process the warnings.

    For every file in ``self.warnings``, record the total warning count and
    a per-category breakdown in ``self.fileCounts[filename]``, and fold each
    file's warnings into the running ``self.warningCounts`` totals.
    """
    # dict.items() iterates identically on Python 2 and 3; the previous
    # iteritems() call is Python-2-only and raises AttributeError on 3.
    for filename, warnings in self.warnings.items():
        self.fileCounts[filename] = {}
        fc = self.fileCounts[filename]
        fc["warning_count"] = len(warnings)
        fc["warning_breakdown"] = self._warnCount(warnings)
        self.warningCounts = self._warnCount(warnings,
                                             warningCount=self.warningCounts)
constant[
Process the warnings.
]
for taget[tuple[[<ast.Name object at 0x7da1b14c5840>, <ast.Name object at 0x7da1b14c5ed0>]]] in starred[call[name[self].warnings.iteritems, parameter[]]] begin[:]
call[name[self].fileCounts][name[filename]] assign[=] dictionary[[], []]
variable[fc] assign[=] call[name[self].fileCounts][name[filename]]
call[name[fc]][constant[warning_count]] assign[=] call[name[len], parameter[name[warnings]]]
call[name[fc]][constant[warning_breakdown]] assign[=] call[name[self]._warnCount, parameter[name[warnings]]]
name[self].warningCounts assign[=] call[name[self]._warnCount, parameter[name[warnings]]] | keyword[def] identifier[process] ( identifier[self] ):
literal[string]
keyword[for] identifier[filename] , identifier[warnings] keyword[in] identifier[self] . identifier[warnings] . identifier[iteritems] ():
identifier[self] . identifier[fileCounts] [ identifier[filename] ]={}
identifier[fc] = identifier[self] . identifier[fileCounts] [ identifier[filename] ]
identifier[fc] [ literal[string] ]= identifier[len] ( identifier[warnings] )
identifier[fc] [ literal[string] ]= identifier[self] . identifier[_warnCount] ( identifier[warnings] )
identifier[self] . identifier[warningCounts] = identifier[self] . identifier[_warnCount] ( identifier[warnings] ,
identifier[warningCount] = identifier[self] . identifier[warningCounts] ) | def process(self):
"""
Process the warnings.
"""
for (filename, warnings) in self.warnings.iteritems():
self.fileCounts[filename] = {}
fc = self.fileCounts[filename]
fc['warning_count'] = len(warnings)
fc['warning_breakdown'] = self._warnCount(warnings)
self.warningCounts = self._warnCount(warnings, warningCount=self.warningCounts) # depends on [control=['for'], data=[]] |
def formvalue (form, key):
    """Get value with given key from WSGI form.

    WSGI forms may map a key to a list of values; in that case only the
    first entry is returned.  A missing key yields the form's default
    (None for a plain dict).
    """
    value = form.get(key)
    return value[0] if isinstance(value, list) else value
constant[Get value with given key from WSGI form.]
variable[field] assign[=] call[name[form].get, parameter[name[key]]]
if call[name[isinstance], parameter[name[field], name[list]]] begin[:]
variable[field] assign[=] call[name[field]][constant[0]]
return[name[field]] | keyword[def] identifier[formvalue] ( identifier[form] , identifier[key] ):
literal[string]
identifier[field] = identifier[form] . identifier[get] ( identifier[key] )
keyword[if] identifier[isinstance] ( identifier[field] , identifier[list] ):
identifier[field] = identifier[field] [ literal[int] ]
keyword[return] identifier[field] | def formvalue(form, key):
"""Get value with given key from WSGI form."""
field = form.get(key)
if isinstance(field, list):
field = field[0] # depends on [control=['if'], data=[]]
return field |
def camel_case_to_snake_case(name):
    """Convert a CamelCase identifier to snake_case.

    HelloWorld -> hello_world
    """
    # Two regex passes: first break before a capital that starts a new
    # word, then break the remaining capital runs, and lowercase the lot.
    return _ALL_CAP_RE.sub(r'\1_\2', _FIRST_CAP_RE.sub(r'\1_\2', name)).lower()
constant[
HelloWorld -> hello_world
]
variable[s1] assign[=] call[name[_FIRST_CAP_RE].sub, parameter[constant[\1_\2], name[name]]]
return[call[call[name[_ALL_CAP_RE].sub, parameter[constant[\1_\2], name[s1]]].lower, parameter[]]] | keyword[def] identifier[camel_case_to_snake_case] ( identifier[name] ):
literal[string]
identifier[s1] = identifier[_FIRST_CAP_RE] . identifier[sub] ( literal[string] , identifier[name] )
keyword[return] identifier[_ALL_CAP_RE] . identifier[sub] ( literal[string] , identifier[s1] ). identifier[lower] () | def camel_case_to_snake_case(name):
"""
HelloWorld -> hello_world
"""
s1 = _FIRST_CAP_RE.sub('\\1_\\2', name)
return _ALL_CAP_RE.sub('\\1_\\2', s1).lower() |
def _cartesian_to_keplerian(cls, coord, center):
"""Conversion from cartesian (position and velocity) to keplerian
The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
"""
r, v = coord[:3], coord[3:]
h = np.cross(r, v) # angular momentum vector
h_norm = np.linalg.norm(h)
r_norm = np.linalg.norm(r)
v_norm = np.linalg.norm(v)
K = v_norm ** 2 / 2 - center.µ / r_norm # specific energy
a = - center.µ / (2 * K) # semi-major axis
e = sqrt(1 - h_norm ** 2 / (a * center.µ)) # eccentricity
p = a * (1 - e ** 2)
i = arccos(h[2] / h_norm) # inclination
Ω = arctan2(h[0], -h[1]) % (2 * np.pi) # right ascension of the ascending node
ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
ν = arctan2(sqrt(p / center.µ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
ω = (ω_ν - ν) % (2 * np.pi) # argument of the perigee
return np.array([a, e, i, Ω, ω, ν], dtype=float) | def function[_cartesian_to_keplerian, parameter[cls, coord, center]]:
constant[Conversion from cartesian (position and velocity) to keplerian
The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
]
<ast.Tuple object at 0x7da1b0b7c9a0> assign[=] tuple[[<ast.Subscript object at 0x7da1b0b7d1b0>, <ast.Subscript object at 0x7da1b0b7f9a0>]]
variable[h] assign[=] call[name[np].cross, parameter[name[r], name[v]]]
variable[h_norm] assign[=] call[name[np].linalg.norm, parameter[name[h]]]
variable[r_norm] assign[=] call[name[np].linalg.norm, parameter[name[r]]]
variable[v_norm] assign[=] call[name[np].linalg.norm, parameter[name[v]]]
variable[K] assign[=] binary_operation[binary_operation[binary_operation[name[v_norm] ** constant[2]] / constant[2]] - binary_operation[name[center].μ / name[r_norm]]]
variable[a] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0b7ff10> / binary_operation[constant[2] * name[K]]]
variable[e] assign[=] call[name[sqrt], parameter[binary_operation[constant[1] - binary_operation[binary_operation[name[h_norm] ** constant[2]] / binary_operation[name[a] * name[center].μ]]]]]
variable[p] assign[=] binary_operation[name[a] * binary_operation[constant[1] - binary_operation[name[e] ** constant[2]]]]
variable[i] assign[=] call[name[arccos], parameter[binary_operation[call[name[h]][constant[2]] / name[h_norm]]]]
variable[Ω] assign[=] binary_operation[call[name[arctan2], parameter[call[name[h]][constant[0]], <ast.UnaryOp object at 0x7da1b0ccb730>]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] * name[np].pi]]
variable[ω_ν] assign[=] call[name[arctan2], parameter[binary_operation[call[name[r]][constant[2]] / call[name[sin], parameter[name[i]]]], binary_operation[binary_operation[call[name[r]][constant[0]] * call[name[cos], parameter[name[Ω]]]] + binary_operation[call[name[r]][constant[1]] * call[name[sin], parameter[name[Ω]]]]]]]
variable[ν] assign[=] binary_operation[call[name[arctan2], parameter[binary_operation[call[name[sqrt], parameter[binary_operation[name[p] / name[center].μ]]] * call[name[np].dot, parameter[name[v], name[r]]]], binary_operation[name[p] - name[r_norm]]]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] * name[np].pi]]
variable[ω] assign[=] binary_operation[binary_operation[name[ω_ν] - name[ν]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] * name[np].pi]]
return[call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b0cca950>, <ast.Name object at 0x7da1b0cca920>, <ast.Name object at 0x7da1b0cca8f0>, <ast.Name object at 0x7da1b0cca8c0>, <ast.Name object at 0x7da1b0cca890>, <ast.Name object at 0x7da1b0cca860>]]]]] | keyword[def] identifier[_cartesian_to_keplerian] ( identifier[cls] , identifier[coord] , identifier[center] ):
literal[string]
identifier[r] , identifier[v] = identifier[coord] [: literal[int] ], identifier[coord] [ literal[int] :]
identifier[h] = identifier[np] . identifier[cross] ( identifier[r] , identifier[v] )
identifier[h_norm] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[h] )
identifier[r_norm] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[r] )
identifier[v_norm] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[v] )
identifier[K] = identifier[v_norm] ** literal[int] / literal[int] - identifier[center] . identifier[µ] / identifier[r_norm]
identifier[a] =- identifier[center] . identifier[µ] /( literal[int] * identifier[K] )
identifier[e] = identifier[sqrt] ( literal[int] - identifier[h_norm] ** literal[int] /( identifier[a] * identifier[center] . identifier[µ] ))
identifier[p] = identifier[a] *( literal[int] - identifier[e] ** literal[int] )
identifier[i] = identifier[arccos] ( identifier[h] [ literal[int] ]/ identifier[h_norm] )
identifier[Ω] = identifier[arctan2] ( identifier[h] [ literal[int] ],- identifier[h] [ literal[int] ])%( literal[int] * identifier[np] . identifier[pi] )
identifier[ω_ν] = identifier[arctan2] ( identifier[r] [ literal[int] ]/ identifier[sin] ( identifier[i] ), identifier[r] [ literal[int] ]* identifier[cos] ( identifier[Ω] )+ identifier[r] [ literal[int] ]* identifier[sin] ( identifier[Ω] ))
identifier[ν] = identifier[arctan2] ( identifier[sqrt] ( identifier[p] / identifier[center] . identifier[µ] )* identifier[np] . identifier[dot] ( identifier[v] , identifier[r] ), identifier[p] - identifier[r_norm] )%( literal[int] * identifier[np] . identifier[pi] )
identifier[ω] =( identifier[ω_ν] - identifier[ν] )%( literal[int] * identifier[np] . identifier[pi] )
keyword[return] identifier[np] . identifier[array] ([ identifier[a] , identifier[e] , identifier[i] , identifier[Ω] , identifier[ω] , identifier[ν] ], identifier[dtype] = identifier[float] ) | def _cartesian_to_keplerian(cls, coord, center):
"""Conversion from cartesian (position and velocity) to keplerian
The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
"""
(r, v) = (coord[:3], coord[3:])
h = np.cross(r, v) # angular momentum vector
h_norm = np.linalg.norm(h)
r_norm = np.linalg.norm(r)
v_norm = np.linalg.norm(v)
K = v_norm ** 2 / 2 - center.μ / r_norm # specific energy
a = -center.μ / (2 * K) # semi-major axis
e = sqrt(1 - h_norm ** 2 / (a * center.μ)) # eccentricity
p = a * (1 - e ** 2)
i = arccos(h[2] / h_norm) # inclination
Ω = arctan2(h[0], -h[1]) % (2 * np.pi) # right ascension of the ascending node
ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
ν = arctan2(sqrt(p / center.μ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
ω = (ω_ν - ν) % (2 * np.pi) # argument of the perigee
return np.array([a, e, i, Ω, ω, ν], dtype=float) |
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key
    '''
    if 'id' not in load:
        return False
    preserve = load.get('preserve_minion_cache', False)
    keymgr = salt.key.Key(self.opts)
    keymgr.delete_key(load['id'], preserve_minions=preserve)
    return True
constant[
Allow a minion to request revocation of its own key
]
if compare[constant[id] <ast.NotIn object at 0x7da2590d7190> name[load]] begin[:]
return[constant[False]]
variable[keyapi] assign[=] call[name[salt].key.Key, parameter[name[self].opts]]
call[name[keyapi].delete_key, parameter[call[name[load]][constant[id]]]]
return[constant[True]] | keyword[def] identifier[revoke_auth] ( identifier[self] , identifier[load] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[load] :
keyword[return] keyword[False]
identifier[keyapi] = identifier[salt] . identifier[key] . identifier[Key] ( identifier[self] . identifier[opts] )
identifier[keyapi] . identifier[delete_key] ( identifier[load] [ literal[string] ],
identifier[preserve_minions] = identifier[load] . identifier[get] ( literal[string] ,
keyword[False] ))
keyword[return] keyword[True] | def revoke_auth(self, load):
"""
Allow a minion to request revocation of its own key
"""
if 'id' not in load:
return False # depends on [control=['if'], data=[]]
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False))
return True |
def get_git_hash():
    """Return the git revision hash of the source tree at ``basedir``.

    Runs ``git rev-parse HEAD`` inside ``basedir`` and returns the
    commit hash as an ASCII string.  Returns an empty string when
    ``basedir`` is not a git checkout, when git is not installed, or
    when the command fails.
    """
    githash = ""
    # Only attempt the lookup for an actual git checkout (not e.g. an
    # sdist/tarball install).
    if os.path.isdir(os.path.join(basedir, '.git')):
        try:
            proc = subprocess.Popen(
                ['git', '-C', basedir, 'rev-parse', 'HEAD'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            rev, _err = proc.communicate()
            # A non-zero return code (detached/broken repo) leaves the
            # empty-string fallback in place.
            if proc.returncode == 0:
                githash = rev.strip().decode('ascii')
        except OSError:
            # git executable missing or not runnable -> no hash.
            pass
    return githash
constant[
Get version from asv/__init__.py and generate asv/_version.py
]
variable[githash] assign[=] constant[]
if call[name[os].path.isdir, parameter[call[name[os].path.join, parameter[name[basedir], constant[.git]]]]] begin[:]
<ast.Try object at 0x7da1b1d530d0>
return[name[githash]] | keyword[def] identifier[get_git_hash] ():
literal[string]
identifier[githash] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , literal[string] )):
keyword[try] :
identifier[proc] = identifier[subprocess] . identifier[Popen] (
[ literal[string] , literal[string] , identifier[basedir] , literal[string] , literal[string] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
identifier[rev] , identifier[err] = identifier[proc] . identifier[communicate] ()
keyword[if] identifier[proc] . identifier[returncode] == literal[int] :
identifier[githash] = identifier[rev] . identifier[strip] (). identifier[decode] ( literal[string] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[return] identifier[githash] | def get_git_hash():
"""
Get version from asv/__init__.py and generate asv/_version.py
"""
# Obtain git revision
githash = ''
if os.path.isdir(os.path.join(basedir, '.git')):
try:
proc = subprocess.Popen(['git', '-C', basedir, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(rev, err) = proc.communicate()
if proc.returncode == 0:
githash = rev.strip().decode('ascii') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return githash |
def cli(env):
    """Virtual server order options.

    Pulls every virtual-server ordering option from the SoftLayer API
    (datacenters, flavors, CPU counts, memory sizes, operating systems,
    disks and NIC speeds) and prints them as a two-column key/value
    table.  Where the API distinguishes them, options are reported
    separately for standard, dedicated, and dedicated-host provisioning.
    """
    vsi = SoftLayer.VSManager(env.client)
    result = vsi.get_create_options()
    # Two-column output: right-align option names, left-align values.
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'
    # Datacenters
    datacenters = [dc['template']['datacenter']['name']
                   for dc in result['datacenters']]
    datacenters = sorted(datacenters)
    table.add_row(['datacenter',
                   formatting.listing(datacenters, separator='\n')])
    def _add_flavor_rows(flavor_key, flavor_label, flavor_options):
        # Emit one table row listing every flavor whose keyName starts
        # with ``flavor_key``; the row is omitted when nothing matches.
        flavors = []
        for flavor_option in flavor_options:
            flavor_key_name = utils.lookup(flavor_option, 'flavor', 'keyName')
            if not flavor_key_name.startswith(flavor_key):
                continue
            flavors.append(flavor_key_name)
        if len(flavors) > 0:
            table.add_row(['flavors (%s)' % flavor_label,
                           formatting.listing(flavors, separator='\n')])
    # One row per flavor family, only when the account has flavors.
    if result.get('flavors', None):
        _add_flavor_rows('B1', 'balanced', result['flavors'])
        _add_flavor_rows('BL1', 'balanced local - hdd', result['flavors'])
        _add_flavor_rows('BL2', 'balanced local - ssd', result['flavors'])
        _add_flavor_rows('C1', 'compute', result['flavors'])
        _add_flavor_rows('M1', 'memory', result['flavors'])
        _add_flavor_rows('AC', 'GPU', result['flavors'])
    # CPUs — split standard / dedicated / dedicated-host offerings by
    # the flags on each processor template.
    standard_cpus = [int(x['template']['startCpus']) for x in result['processors']
                     if not x['template'].get('dedicatedAccountHostOnlyFlag',
                                              False)
                     and not x['template'].get('dedicatedHost', None)]
    ded_cpus = [int(x['template']['startCpus']) for x in result['processors']
                if x['template'].get('dedicatedAccountHostOnlyFlag', False)]
    ded_host_cpus = [int(x['template']['startCpus']) for x in result['processors']
                     if x['template'].get('dedicatedHost', None)]
    standard_cpus = sorted(standard_cpus)
    table.add_row(['cpus (standard)', formatting.listing(standard_cpus, separator=',')])
    ded_cpus = sorted(ded_cpus)
    table.add_row(['cpus (dedicated)', formatting.listing(ded_cpus, separator=',')])
    ded_host_cpus = sorted(ded_host_cpus)
    table.add_row(['cpus (dedicated host)', formatting.listing(ded_host_cpus, separator=',')])
    # Memory — dedicated-host sizes are flagged on the item price.
    memory = [int(m['template']['maxMemory']) for m in result['memory']
              if not m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
    ded_host_memory = [int(m['template']['maxMemory']) for m in result['memory']
                       if m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
    memory = sorted(memory)
    table.add_row(['memory',
                   formatting.listing(memory, separator=',')])
    ded_host_memory = sorted(ded_host_memory)
    table.add_row(['memory (dedicated host)',
                   formatting.listing(ded_host_memory, separator=',')])
    # Operating Systems
    op_sys = [o['template']['operatingSystemReferenceCode'] for o in
              result['operatingSystems']]
    op_sys = sorted(op_sys)
    os_summary = set()
    # Group OS reference codes by the prefix before the first '_'
    # so each family gets its own table row.
    for operating_system in op_sys:
        os_summary.add(operating_system[0:operating_system.find('_')])
    for summary in sorted(os_summary):
        table.add_row([
            'os (%s)' % summary,
            os.linesep.join(sorted([x for x in op_sys
                                    if x[0:len(summary)] == summary]))
        ])
    # Disk — separate SAN, local, and dedicated-host-local offerings.
    local_disks = [x for x in result['blockDevices']
                   if x['template'].get('localDiskFlag', False)
                   and not x['itemPrice'].get('dedicatedHostInstanceFlag',
                                              False)]
    ded_host_local_disks = [x for x in result['blockDevices']
                            if x['template'].get('localDiskFlag', False)
                            and x['itemPrice'].get('dedicatedHostInstanceFlag',
                                                   False)]
    san_disks = [x for x in result['blockDevices']
                 if not x['template'].get('localDiskFlag', False)]
    def add_block_rows(disks, name):
        """Add block rows to the table."""
        # Map device id -> list of capacity strings so each device slot
        # becomes one row of comma-separated sizes.
        simple = {}
        for disk in disks:
            block = disk['template']['blockDevices'][0]
            bid = block['device']
            if bid not in simple:
                simple[bid] = []
            simple[bid].append(str(block['diskImage']['capacity']))
        for label in sorted(simple):
            table.add_row(['%s disk(%s)' % (name, label),
                           formatting.listing(simple[label],
                                              separator=',')])
    add_block_rows(san_disks, 'san')
    add_block_rows(local_disks, 'local')
    add_block_rows(ded_host_local_disks, 'local (dedicated host)')
    # Network
    speeds = []
    ded_host_speeds = []
    for option in result['networkComponents']:
        template = option.get('template', None)
        price = option.get('itemPrice', None)
        # Skip malformed entries missing template / price / NIC data.
        if not template or not price \
                or not template.get('networkComponents', None):
            continue
        if not template['networkComponents'][0] \
                or not template['networkComponents'][0].get('maxSpeed', None):
            continue
        max_speed = str(template['networkComponents'][0]['maxSpeed'])
        # De-duplicate speeds per provisioning type.
        if price.get('dedicatedHostInstanceFlag', False) \
                and max_speed not in ded_host_speeds:
            ded_host_speeds.append(max_speed)
        elif max_speed not in speeds:
            speeds.append(max_speed)
    speeds = sorted(speeds)
    table.add_row(['nic', formatting.listing(speeds, separator=',')])
    ded_host_speeds = sorted(ded_host_speeds)
    table.add_row(['nic (dedicated host)',
                   formatting.listing(ded_host_speeds, separator=',')])
    env.fout(table)
constant[Virtual server order options.]
variable[vsi] assign[=] call[name[SoftLayer].VSManager, parameter[name[env].client]]
variable[result] assign[=] call[name[vsi].get_create_options, parameter[]]
variable[table] assign[=] call[name[formatting].KeyValueTable, parameter[list[[<ast.Constant object at 0x7da20c6c4040>, <ast.Constant object at 0x7da20c6c4100>]]]]
call[name[table].align][constant[name]] assign[=] constant[r]
call[name[table].align][constant[value]] assign[=] constant[l]
variable[datacenters] assign[=] <ast.ListComp object at 0x7da20c6c48e0>
variable[datacenters] assign[=] call[name[sorted], parameter[name[datacenters]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da20c6c4af0>, <ast.Call object at 0x7da20c6c6680>]]]]
def function[_add_flavor_rows, parameter[flavor_key, flavor_label, flavor_options]]:
variable[flavors] assign[=] list[[]]
for taget[name[flavor_option]] in starred[name[flavor_options]] begin[:]
variable[flavor_key_name] assign[=] call[name[utils].lookup, parameter[name[flavor_option], constant[flavor], constant[keyName]]]
if <ast.UnaryOp object at 0x7da20c6c59f0> begin[:]
continue
call[name[flavors].append, parameter[name[flavor_key_name]]]
if compare[call[name[len], parameter[name[flavors]]] greater[>] constant[0]] begin[:]
call[name[table].add_row, parameter[list[[<ast.BinOp object at 0x7da20c6c4f40>, <ast.Call object at 0x7da20c6c61d0>]]]]
if call[name[result].get, parameter[constant[flavors], constant[None]]] begin[:]
call[name[_add_flavor_rows], parameter[constant[B1], constant[balanced], call[name[result]][constant[flavors]]]]
call[name[_add_flavor_rows], parameter[constant[BL1], constant[balanced local - hdd], call[name[result]][constant[flavors]]]]
call[name[_add_flavor_rows], parameter[constant[BL2], constant[balanced local - ssd], call[name[result]][constant[flavors]]]]
call[name[_add_flavor_rows], parameter[constant[C1], constant[compute], call[name[result]][constant[flavors]]]]
call[name[_add_flavor_rows], parameter[constant[M1], constant[memory], call[name[result]][constant[flavors]]]]
call[name[_add_flavor_rows], parameter[constant[AC], constant[GPU], call[name[result]][constant[flavors]]]]
variable[standard_cpus] assign[=] <ast.ListComp object at 0x7da20c6c5210>
variable[ded_cpus] assign[=] <ast.ListComp object at 0x7da20c6c7fd0>
variable[ded_host_cpus] assign[=] <ast.ListComp object at 0x7da20c6c7430>
variable[standard_cpus] assign[=] call[name[sorted], parameter[name[standard_cpus]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da20c6c5060>, <ast.Call object at 0x7da20c6c66b0>]]]]
variable[ded_cpus] assign[=] call[name[sorted], parameter[name[ded_cpus]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da20c6c4430>, <ast.Call object at 0x7da20c6c6200>]]]]
variable[ded_host_cpus] assign[=] call[name[sorted], parameter[name[ded_host_cpus]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da20c6c7af0>, <ast.Call object at 0x7da20c6c7400>]]]]
variable[memory] assign[=] <ast.ListComp object at 0x7da20c6c4f10>
variable[ded_host_memory] assign[=] <ast.ListComp object at 0x7da204566650>
variable[memory] assign[=] call[name[sorted], parameter[name[memory]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da204564250>, <ast.Call object at 0x7da204565210>]]]]
variable[ded_host_memory] assign[=] call[name[sorted], parameter[name[ded_host_memory]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da2045678b0>, <ast.Call object at 0x7da204567670>]]]]
variable[op_sys] assign[=] <ast.ListComp object at 0x7da204565b40>
variable[op_sys] assign[=] call[name[sorted], parameter[name[op_sys]]]
variable[os_summary] assign[=] call[name[set], parameter[]]
for taget[name[operating_system]] in starred[name[op_sys]] begin[:]
call[name[os_summary].add, parameter[call[name[operating_system]][<ast.Slice object at 0x7da204565ae0>]]]
for taget[name[summary]] in starred[call[name[sorted], parameter[name[os_summary]]]] begin[:]
call[name[table].add_row, parameter[list[[<ast.BinOp object at 0x7da2045673a0>, <ast.Call object at 0x7da2045661d0>]]]]
variable[local_disks] assign[=] <ast.ListComp object at 0x7da204564fa0>
variable[ded_host_local_disks] assign[=] <ast.ListComp object at 0x7da18f810e80>
variable[san_disks] assign[=] <ast.ListComp object at 0x7da18f813970>
def function[add_block_rows, parameter[disks, name]]:
constant[Add block rows to the table.]
variable[simple] assign[=] dictionary[[], []]
for taget[name[disk]] in starred[name[disks]] begin[:]
variable[block] assign[=] call[call[call[name[disk]][constant[template]]][constant[blockDevices]]][constant[0]]
variable[bid] assign[=] call[name[block]][constant[device]]
if compare[name[bid] <ast.NotIn object at 0x7da2590d7190> name[simple]] begin[:]
call[name[simple]][name[bid]] assign[=] list[[]]
call[call[name[simple]][name[bid]].append, parameter[call[name[str], parameter[call[call[name[block]][constant[diskImage]]][constant[capacity]]]]]]
for taget[name[label]] in starred[call[name[sorted], parameter[name[simple]]]] begin[:]
call[name[table].add_row, parameter[list[[<ast.BinOp object at 0x7da18f810280>, <ast.Call object at 0x7da18f812e30>]]]]
call[name[add_block_rows], parameter[name[san_disks], constant[san]]]
call[name[add_block_rows], parameter[name[local_disks], constant[local]]]
call[name[add_block_rows], parameter[name[ded_host_local_disks], constant[local (dedicated host)]]]
variable[speeds] assign[=] list[[]]
variable[ded_host_speeds] assign[=] list[[]]
for taget[name[option]] in starred[call[name[result]][constant[networkComponents]]] begin[:]
variable[template] assign[=] call[name[option].get, parameter[constant[template], constant[None]]]
variable[price] assign[=] call[name[option].get, parameter[constant[itemPrice], constant[None]]]
if <ast.BoolOp object at 0x7da18f810070> begin[:]
continue
if <ast.BoolOp object at 0x7da18f810160> begin[:]
continue
variable[max_speed] assign[=] call[name[str], parameter[call[call[call[name[template]][constant[networkComponents]]][constant[0]]][constant[maxSpeed]]]]
if <ast.BoolOp object at 0x7da18f811b10> begin[:]
call[name[ded_host_speeds].append, parameter[name[max_speed]]]
variable[speeds] assign[=] call[name[sorted], parameter[name[speeds]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da18f811720>, <ast.Call object at 0x7da18f810ac0>]]]]
variable[ded_host_speeds] assign[=] call[name[sorted], parameter[name[ded_host_speeds]]]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da18f813a90>, <ast.Call object at 0x7da18f811e10>]]]]
call[name[env].fout, parameter[name[table]]] | keyword[def] identifier[cli] ( identifier[env] ):
literal[string]
identifier[vsi] = identifier[SoftLayer] . identifier[VSManager] ( identifier[env] . identifier[client] )
identifier[result] = identifier[vsi] . identifier[get_create_options] ()
identifier[table] = identifier[formatting] . identifier[KeyValueTable] ([ literal[string] , literal[string] ])
identifier[table] . identifier[align] [ literal[string] ]= literal[string]
identifier[table] . identifier[align] [ literal[string] ]= literal[string]
identifier[datacenters] =[ identifier[dc] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[for] identifier[dc] keyword[in] identifier[result] [ literal[string] ]]
identifier[datacenters] = identifier[sorted] ( identifier[datacenters] )
identifier[table] . identifier[add_row] ([ literal[string] ,
identifier[formatting] . identifier[listing] ( identifier[datacenters] , identifier[separator] = literal[string] )])
keyword[def] identifier[_add_flavor_rows] ( identifier[flavor_key] , identifier[flavor_label] , identifier[flavor_options] ):
identifier[flavors] =[]
keyword[for] identifier[flavor_option] keyword[in] identifier[flavor_options] :
identifier[flavor_key_name] = identifier[utils] . identifier[lookup] ( identifier[flavor_option] , literal[string] , literal[string] )
keyword[if] keyword[not] identifier[flavor_key_name] . identifier[startswith] ( identifier[flavor_key] ):
keyword[continue]
identifier[flavors] . identifier[append] ( identifier[flavor_key_name] )
keyword[if] identifier[len] ( identifier[flavors] )> literal[int] :
identifier[table] . identifier[add_row] ([ literal[string] % identifier[flavor_label] ,
identifier[formatting] . identifier[listing] ( identifier[flavors] , identifier[separator] = literal[string] )])
keyword[if] identifier[result] . identifier[get] ( literal[string] , keyword[None] ):
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[_add_flavor_rows] ( literal[string] , literal[string] , identifier[result] [ literal[string] ])
identifier[standard_cpus] =[ identifier[int] ( identifier[x] [ literal[string] ][ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] keyword[not] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] ,
keyword[False] )
keyword[and] keyword[not] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )]
identifier[ded_cpus] =[ identifier[int] ( identifier[x] [ literal[string] ][ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )]
identifier[ded_host_cpus] =[ identifier[int] ( identifier[x] [ literal[string] ][ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )]
identifier[standard_cpus] = identifier[sorted] ( identifier[standard_cpus] )
identifier[table] . identifier[add_row] ([ literal[string] , identifier[formatting] . identifier[listing] ( identifier[standard_cpus] , identifier[separator] = literal[string] )])
identifier[ded_cpus] = identifier[sorted] ( identifier[ded_cpus] )
identifier[table] . identifier[add_row] ([ literal[string] , identifier[formatting] . identifier[listing] ( identifier[ded_cpus] , identifier[separator] = literal[string] )])
identifier[ded_host_cpus] = identifier[sorted] ( identifier[ded_host_cpus] )
identifier[table] . identifier[add_row] ([ literal[string] , identifier[formatting] . identifier[listing] ( identifier[ded_host_cpus] , identifier[separator] = literal[string] )])
identifier[memory] =[ identifier[int] ( identifier[m] [ literal[string] ][ literal[string] ]) keyword[for] identifier[m] keyword[in] identifier[result] [ literal[string] ]
keyword[if] keyword[not] identifier[m] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )]
identifier[ded_host_memory] =[ identifier[int] ( identifier[m] [ literal[string] ][ literal[string] ]) keyword[for] identifier[m] keyword[in] identifier[result] [ literal[string] ]
keyword[if] identifier[m] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )]
identifier[memory] = identifier[sorted] ( identifier[memory] )
identifier[table] . identifier[add_row] ([ literal[string] ,
identifier[formatting] . identifier[listing] ( identifier[memory] , identifier[separator] = literal[string] )])
identifier[ded_host_memory] = identifier[sorted] ( identifier[ded_host_memory] )
identifier[table] . identifier[add_row] ([ literal[string] ,
identifier[formatting] . identifier[listing] ( identifier[ded_host_memory] , identifier[separator] = literal[string] )])
identifier[op_sys] =[ identifier[o] [ literal[string] ][ literal[string] ] keyword[for] identifier[o] keyword[in]
identifier[result] [ literal[string] ]]
identifier[op_sys] = identifier[sorted] ( identifier[op_sys] )
identifier[os_summary] = identifier[set] ()
keyword[for] identifier[operating_system] keyword[in] identifier[op_sys] :
identifier[os_summary] . identifier[add] ( identifier[operating_system] [ literal[int] : identifier[operating_system] . identifier[find] ( literal[string] )])
keyword[for] identifier[summary] keyword[in] identifier[sorted] ( identifier[os_summary] ):
identifier[table] . identifier[add_row] ([
literal[string] % identifier[summary] ,
identifier[os] . identifier[linesep] . identifier[join] ( identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[op_sys]
keyword[if] identifier[x] [ literal[int] : identifier[len] ( identifier[summary] )]== identifier[summary] ]))
])
identifier[local_disks] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )
keyword[and] keyword[not] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] ,
keyword[False] )]
identifier[ded_host_local_disks] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )
keyword[and] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] ,
keyword[False] )]
identifier[san_disks] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] keyword[not] identifier[x] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )]
keyword[def] identifier[add_block_rows] ( identifier[disks] , identifier[name] ):
literal[string]
identifier[simple] ={}
keyword[for] identifier[disk] keyword[in] identifier[disks] :
identifier[block] = identifier[disk] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[bid] = identifier[block] [ literal[string] ]
keyword[if] identifier[bid] keyword[not] keyword[in] identifier[simple] :
identifier[simple] [ identifier[bid] ]=[]
identifier[simple] [ identifier[bid] ]. identifier[append] ( identifier[str] ( identifier[block] [ literal[string] ][ literal[string] ]))
keyword[for] identifier[label] keyword[in] identifier[sorted] ( identifier[simple] ):
identifier[table] . identifier[add_row] ([ literal[string] %( identifier[name] , identifier[label] ),
identifier[formatting] . identifier[listing] ( identifier[simple] [ identifier[label] ],
identifier[separator] = literal[string] )])
identifier[add_block_rows] ( identifier[san_disks] , literal[string] )
identifier[add_block_rows] ( identifier[local_disks] , literal[string] )
identifier[add_block_rows] ( identifier[ded_host_local_disks] , literal[string] )
identifier[speeds] =[]
identifier[ded_host_speeds] =[]
keyword[for] identifier[option] keyword[in] identifier[result] [ literal[string] ]:
identifier[template] = identifier[option] . identifier[get] ( literal[string] , keyword[None] )
identifier[price] = identifier[option] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[template] keyword[or] keyword[not] identifier[price] keyword[or] keyword[not] identifier[template] . identifier[get] ( literal[string] , keyword[None] ):
keyword[continue]
keyword[if] keyword[not] identifier[template] [ literal[string] ][ literal[int] ] keyword[or] keyword[not] identifier[template] [ literal[string] ][ literal[int] ]. identifier[get] ( literal[string] , keyword[None] ):
keyword[continue]
identifier[max_speed] = identifier[str] ( identifier[template] [ literal[string] ][ literal[int] ][ literal[string] ])
keyword[if] identifier[price] . identifier[get] ( literal[string] , keyword[False] ) keyword[and] identifier[max_speed] keyword[not] keyword[in] identifier[ded_host_speeds] :
identifier[ded_host_speeds] . identifier[append] ( identifier[max_speed] )
keyword[elif] identifier[max_speed] keyword[not] keyword[in] identifier[speeds] :
identifier[speeds] . identifier[append] ( identifier[max_speed] )
identifier[speeds] = identifier[sorted] ( identifier[speeds] )
identifier[table] . identifier[add_row] ([ literal[string] , identifier[formatting] . identifier[listing] ( identifier[speeds] , identifier[separator] = literal[string] )])
identifier[ded_host_speeds] = identifier[sorted] ( identifier[ded_host_speeds] )
identifier[table] . identifier[add_row] ([ literal[string] ,
identifier[formatting] . identifier[listing] ( identifier[ded_host_speeds] , identifier[separator] = literal[string] )])
identifier[env] . identifier[fout] ( identifier[table] ) | def cli(env):
"""Virtual server order options."""
vsi = SoftLayer.VSManager(env.client)
result = vsi.get_create_options()
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
# Datacenters
datacenters = [dc['template']['datacenter']['name'] for dc in result['datacenters']]
datacenters = sorted(datacenters)
table.add_row(['datacenter', formatting.listing(datacenters, separator='\n')])
def _add_flavor_rows(flavor_key, flavor_label, flavor_options):
flavors = []
for flavor_option in flavor_options:
flavor_key_name = utils.lookup(flavor_option, 'flavor', 'keyName')
if not flavor_key_name.startswith(flavor_key):
continue # depends on [control=['if'], data=[]]
flavors.append(flavor_key_name) # depends on [control=['for'], data=['flavor_option']]
if len(flavors) > 0:
table.add_row(['flavors (%s)' % flavor_label, formatting.listing(flavors, separator='\n')]) # depends on [control=['if'], data=[]]
if result.get('flavors', None):
_add_flavor_rows('B1', 'balanced', result['flavors'])
_add_flavor_rows('BL1', 'balanced local - hdd', result['flavors'])
_add_flavor_rows('BL2', 'balanced local - ssd', result['flavors'])
_add_flavor_rows('C1', 'compute', result['flavors'])
_add_flavor_rows('M1', 'memory', result['flavors'])
_add_flavor_rows('AC', 'GPU', result['flavors']) # depends on [control=['if'], data=[]]
# CPUs
standard_cpus = [int(x['template']['startCpus']) for x in result['processors'] if not x['template'].get('dedicatedAccountHostOnlyFlag', False) and (not x['template'].get('dedicatedHost', None))]
ded_cpus = [int(x['template']['startCpus']) for x in result['processors'] if x['template'].get('dedicatedAccountHostOnlyFlag', False)]
ded_host_cpus = [int(x['template']['startCpus']) for x in result['processors'] if x['template'].get('dedicatedHost', None)]
standard_cpus = sorted(standard_cpus)
table.add_row(['cpus (standard)', formatting.listing(standard_cpus, separator=',')])
ded_cpus = sorted(ded_cpus)
table.add_row(['cpus (dedicated)', formatting.listing(ded_cpus, separator=',')])
ded_host_cpus = sorted(ded_host_cpus)
table.add_row(['cpus (dedicated host)', formatting.listing(ded_host_cpus, separator=',')])
# Memory
memory = [int(m['template']['maxMemory']) for m in result['memory'] if not m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
ded_host_memory = [int(m['template']['maxMemory']) for m in result['memory'] if m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
memory = sorted(memory)
table.add_row(['memory', formatting.listing(memory, separator=',')])
ded_host_memory = sorted(ded_host_memory)
table.add_row(['memory (dedicated host)', formatting.listing(ded_host_memory, separator=',')])
# Operating Systems
op_sys = [o['template']['operatingSystemReferenceCode'] for o in result['operatingSystems']]
op_sys = sorted(op_sys)
os_summary = set()
for operating_system in op_sys:
os_summary.add(operating_system[0:operating_system.find('_')]) # depends on [control=['for'], data=['operating_system']]
for summary in sorted(os_summary):
table.add_row(['os (%s)' % summary, os.linesep.join(sorted([x for x in op_sys if x[0:len(summary)] == summary]))]) # depends on [control=['for'], data=['summary']]
# Disk
local_disks = [x for x in result['blockDevices'] if x['template'].get('localDiskFlag', False) and (not x['itemPrice'].get('dedicatedHostInstanceFlag', False))]
ded_host_local_disks = [x for x in result['blockDevices'] if x['template'].get('localDiskFlag', False) and x['itemPrice'].get('dedicatedHostInstanceFlag', False)]
san_disks = [x for x in result['blockDevices'] if not x['template'].get('localDiskFlag', False)]
def add_block_rows(disks, name):
"""Add block rows to the table."""
simple = {}
for disk in disks:
block = disk['template']['blockDevices'][0]
bid = block['device']
if bid not in simple:
simple[bid] = [] # depends on [control=['if'], data=['bid', 'simple']]
simple[bid].append(str(block['diskImage']['capacity'])) # depends on [control=['for'], data=['disk']]
for label in sorted(simple):
table.add_row(['%s disk(%s)' % (name, label), formatting.listing(simple[label], separator=',')]) # depends on [control=['for'], data=['label']]
add_block_rows(san_disks, 'san')
add_block_rows(local_disks, 'local')
add_block_rows(ded_host_local_disks, 'local (dedicated host)')
# Network
speeds = []
ded_host_speeds = []
for option in result['networkComponents']:
template = option.get('template', None)
price = option.get('itemPrice', None)
if not template or not price or (not template.get('networkComponents', None)):
continue # depends on [control=['if'], data=[]]
if not template['networkComponents'][0] or not template['networkComponents'][0].get('maxSpeed', None):
continue # depends on [control=['if'], data=[]]
max_speed = str(template['networkComponents'][0]['maxSpeed'])
if price.get('dedicatedHostInstanceFlag', False) and max_speed not in ded_host_speeds:
ded_host_speeds.append(max_speed) # depends on [control=['if'], data=[]]
elif max_speed not in speeds:
speeds.append(max_speed) # depends on [control=['if'], data=['max_speed', 'speeds']] # depends on [control=['for'], data=['option']]
speeds = sorted(speeds)
table.add_row(['nic', formatting.listing(speeds, separator=',')])
ded_host_speeds = sorted(ded_host_speeds)
table.add_row(['nic (dedicated host)', formatting.listing(ded_host_speeds, separator=',')])
env.fout(table) |
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root | def function[get_root, parameter[]]:
constant[Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
]
variable[root] assign[=] call[name[os].path.realpath, parameter[call[name[os].path.abspath, parameter[call[name[os].getcwd, parameter[]]]]]]
variable[setup_py] assign[=] call[name[os].path.join, parameter[name[root], constant[setup.py]]]
variable[versioneer_py] assign[=] call[name[os].path.join, parameter[name[root], constant[versioneer.py]]]
if <ast.UnaryOp object at 0x7da1b1a3dc00> begin[:]
variable[root] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[call[name[os].path.abspath, parameter[call[name[sys].argv][constant[0]]]]]]]]
variable[setup_py] assign[=] call[name[os].path.join, parameter[name[root], constant[setup.py]]]
variable[versioneer_py] assign[=] call[name[os].path.join, parameter[name[root], constant[versioneer.py]]]
if <ast.UnaryOp object at 0x7da1b1a3c370> begin[:]
variable[err] assign[=] constant[Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND').]
<ast.Raise object at 0x7da1b1a3dc60>
<ast.Try object at 0x7da1b1a3d540>
return[name[root]] | keyword[def] identifier[get_root] ():
literal[string]
identifier[root] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[getcwd] ()))
identifier[setup_py] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
identifier[versioneer_py] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
keyword[if] keyword[not] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[setup_py] ) keyword[or] identifier[os] . identifier[path] . identifier[exists] ( identifier[versioneer_py] )):
identifier[root] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[sys] . identifier[argv] [ literal[int] ])))
identifier[setup_py] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
identifier[versioneer_py] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
keyword[if] keyword[not] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[setup_py] ) keyword[or] identifier[os] . identifier[path] . identifier[exists] ( identifier[versioneer_py] )):
identifier[err] =( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[raise] identifier[VersioneerBadRootError] ( identifier[err] )
keyword[try] :
identifier[me] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] ))
keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[me] )[ literal[int] ]!= identifier[os] . identifier[path] . identifier[splitext] ( identifier[versioneer_py] )[ literal[int] ]:
identifier[print] ( literal[string]
%( identifier[os] . identifier[path] . identifier[dirname] ( identifier[me] ), identifier[versioneer_py] ))
keyword[except] identifier[NameError] :
keyword[pass]
keyword[return] identifier[root] | def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py') # depends on [control=['if'], data=[]]
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err) # depends on [control=['if'], data=[]]
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except NameError:
pass # depends on [control=['except'], data=[]]
return root |
def execute(self, sql, parameters=None):
"""
Execute an SQL command or query
:param sql: A (unicode) string that contains the SQL command or query. If you would like to
use parameters, please use a question mark ``?`` at the location where the
parameter shall be inserted.
:param parameters: An iterable of parameter values. The number of values must match
the number of parameters in the SQL string.
:return: The ``Cursor`` object to allow chaining of operations.
"""
self.rowcount = -1
self._assert_valid()
self.impl.prepare(sql)
if parameters:
buffer = make_parameter_set(self.impl)
buffer.add_set(parameters)
buffer.flush()
return self._execute() | def function[execute, parameter[self, sql, parameters]]:
constant[
Execute an SQL command or query
:param sql: A (unicode) string that contains the SQL command or query. If you would like to
use parameters, please use a question mark ``?`` at the location where the
parameter shall be inserted.
:param parameters: An iterable of parameter values. The number of values must match
the number of parameters in the SQL string.
:return: The ``Cursor`` object to allow chaining of operations.
]
name[self].rowcount assign[=] <ast.UnaryOp object at 0x7da20e956050>
call[name[self]._assert_valid, parameter[]]
call[name[self].impl.prepare, parameter[name[sql]]]
if name[parameters] begin[:]
variable[buffer] assign[=] call[name[make_parameter_set], parameter[name[self].impl]]
call[name[buffer].add_set, parameter[name[parameters]]]
call[name[buffer].flush, parameter[]]
return[call[name[self]._execute, parameter[]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[sql] , identifier[parameters] = keyword[None] ):
literal[string]
identifier[self] . identifier[rowcount] =- literal[int]
identifier[self] . identifier[_assert_valid] ()
identifier[self] . identifier[impl] . identifier[prepare] ( identifier[sql] )
keyword[if] identifier[parameters] :
identifier[buffer] = identifier[make_parameter_set] ( identifier[self] . identifier[impl] )
identifier[buffer] . identifier[add_set] ( identifier[parameters] )
identifier[buffer] . identifier[flush] ()
keyword[return] identifier[self] . identifier[_execute] () | def execute(self, sql, parameters=None):
"""
Execute an SQL command or query
:param sql: A (unicode) string that contains the SQL command or query. If you would like to
use parameters, please use a question mark ``?`` at the location where the
parameter shall be inserted.
:param parameters: An iterable of parameter values. The number of values must match
the number of parameters in the SQL string.
:return: The ``Cursor`` object to allow chaining of operations.
"""
self.rowcount = -1
self._assert_valid()
self.impl.prepare(sql)
if parameters:
buffer = make_parameter_set(self.impl)
buffer.add_set(parameters)
buffer.flush() # depends on [control=['if'], data=[]]
return self._execute() |
def printableType(val, name=None, parent=None):
"""
Tries to make a nice type string for a value.
Can also pass in a Printable parent object
"""
import numpy as np
if parent is not None and hasattr(parent, 'customPrintableType'):
# Hack for non - trivial preference types
_typestr = parent.customPrintableType(name)
if _typestr is not None:
return _typestr
if isinstance(val, np.ndarray):
info = npArrInfo(val)
_typestr = info.dtypestr
elif isinstance(val, object):
_typestr = val.__class__.__name__
else:
_typestr = str(type(val))
_typestr = _typestr.replace('type', '')
_typestr = re.sub('[\'><]', '', _typestr)
_typestr = re.sub(' *', ' ', _typestr)
_typestr = _typestr.strip()
return _typestr | def function[printableType, parameter[val, name, parent]]:
constant[
Tries to make a nice type string for a value.
Can also pass in a Printable parent object
]
import module[numpy] as alias[np]
if <ast.BoolOp object at 0x7da1b2461cc0> begin[:]
variable[_typestr] assign[=] call[name[parent].customPrintableType, parameter[name[name]]]
if compare[name[_typestr] is_not constant[None]] begin[:]
return[name[_typestr]]
if call[name[isinstance], parameter[name[val], name[np].ndarray]] begin[:]
variable[info] assign[=] call[name[npArrInfo], parameter[name[val]]]
variable[_typestr] assign[=] name[info].dtypestr
return[name[_typestr]] | keyword[def] identifier[printableType] ( identifier[val] , identifier[name] = keyword[None] , identifier[parent] = keyword[None] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[if] identifier[parent] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[parent] , literal[string] ):
identifier[_typestr] = identifier[parent] . identifier[customPrintableType] ( identifier[name] )
keyword[if] identifier[_typestr] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_typestr]
keyword[if] identifier[isinstance] ( identifier[val] , identifier[np] . identifier[ndarray] ):
identifier[info] = identifier[npArrInfo] ( identifier[val] )
identifier[_typestr] = identifier[info] . identifier[dtypestr]
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[object] ):
identifier[_typestr] = identifier[val] . identifier[__class__] . identifier[__name__]
keyword[else] :
identifier[_typestr] = identifier[str] ( identifier[type] ( identifier[val] ))
identifier[_typestr] = identifier[_typestr] . identifier[replace] ( literal[string] , literal[string] )
identifier[_typestr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[_typestr] )
identifier[_typestr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[_typestr] )
identifier[_typestr] = identifier[_typestr] . identifier[strip] ()
keyword[return] identifier[_typestr] | def printableType(val, name=None, parent=None):
"""
Tries to make a nice type string for a value.
Can also pass in a Printable parent object
"""
import numpy as np
if parent is not None and hasattr(parent, 'customPrintableType'):
# Hack for non - trivial preference types
_typestr = parent.customPrintableType(name)
if _typestr is not None:
return _typestr # depends on [control=['if'], data=['_typestr']] # depends on [control=['if'], data=[]]
if isinstance(val, np.ndarray):
info = npArrInfo(val)
_typestr = info.dtypestr # depends on [control=['if'], data=[]]
elif isinstance(val, object):
_typestr = val.__class__.__name__ # depends on [control=['if'], data=[]]
else:
_typestr = str(type(val))
_typestr = _typestr.replace('type', '')
_typestr = re.sub("['><]", '', _typestr)
_typestr = re.sub(' *', ' ', _typestr)
_typestr = _typestr.strip()
return _typestr |
def _create_dict_with_nested_keys_and_val(cls, keys, value):
"""Recursively constructs a nested dictionary with the keys pointing to the value.
For example:
Given the list of keys ['a', 'b', 'c', 'd'] and a primitive
value 'hello world', the method will produce the nested dictionary
{'a': {'b': {'c': {'d': 'hello world'}}}}. The number of keys in the list
defines the depth of the nested dict. If the list of keys is ['a'] and
the value is 'hello world', then the result would be {'a': 'hello world'}.
:param list of string keys: A list of keys to be nested as a dictionary.
:param primitive value: The value of the information being stored.
:return: dict of nested keys leading to the value.
"""
if len(keys) > 1:
new_keys = keys[:-1]
new_val = {keys[-1]: value}
return cls._create_dict_with_nested_keys_and_val(new_keys, new_val)
elif len(keys) == 1:
return {keys[0]: value}
else:
raise ValueError('Keys must contain at least one key.') | def function[_create_dict_with_nested_keys_and_val, parameter[cls, keys, value]]:
constant[Recursively constructs a nested dictionary with the keys pointing to the value.
For example:
Given the list of keys ['a', 'b', 'c', 'd'] and a primitive
value 'hello world', the method will produce the nested dictionary
{'a': {'b': {'c': {'d': 'hello world'}}}}. The number of keys in the list
defines the depth of the nested dict. If the list of keys is ['a'] and
the value is 'hello world', then the result would be {'a': 'hello world'}.
:param list of string keys: A list of keys to be nested as a dictionary.
:param primitive value: The value of the information being stored.
:return: dict of nested keys leading to the value.
]
if compare[call[name[len], parameter[name[keys]]] greater[>] constant[1]] begin[:]
variable[new_keys] assign[=] call[name[keys]][<ast.Slice object at 0x7da1b22a7340>]
variable[new_val] assign[=] dictionary[[<ast.Subscript object at 0x7da1b22a4a00>], [<ast.Name object at 0x7da1b22a55d0>]]
return[call[name[cls]._create_dict_with_nested_keys_and_val, parameter[name[new_keys], name[new_val]]]] | keyword[def] identifier[_create_dict_with_nested_keys_and_val] ( identifier[cls] , identifier[keys] , identifier[value] ):
literal[string]
keyword[if] identifier[len] ( identifier[keys] )> literal[int] :
identifier[new_keys] = identifier[keys] [:- literal[int] ]
identifier[new_val] ={ identifier[keys] [- literal[int] ]: identifier[value] }
keyword[return] identifier[cls] . identifier[_create_dict_with_nested_keys_and_val] ( identifier[new_keys] , identifier[new_val] )
keyword[elif] identifier[len] ( identifier[keys] )== literal[int] :
keyword[return] { identifier[keys] [ literal[int] ]: identifier[value] }
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def _create_dict_with_nested_keys_and_val(cls, keys, value):
"""Recursively constructs a nested dictionary with the keys pointing to the value.
For example:
Given the list of keys ['a', 'b', 'c', 'd'] and a primitive
value 'hello world', the method will produce the nested dictionary
{'a': {'b': {'c': {'d': 'hello world'}}}}. The number of keys in the list
defines the depth of the nested dict. If the list of keys is ['a'] and
the value is 'hello world', then the result would be {'a': 'hello world'}.
:param list of string keys: A list of keys to be nested as a dictionary.
:param primitive value: The value of the information being stored.
:return: dict of nested keys leading to the value.
"""
if len(keys) > 1:
new_keys = keys[:-1]
new_val = {keys[-1]: value}
return cls._create_dict_with_nested_keys_and_val(new_keys, new_val) # depends on [control=['if'], data=[]]
elif len(keys) == 1:
return {keys[0]: value} # depends on [control=['if'], data=[]]
else:
raise ValueError('Keys must contain at least one key.') |
def gene_panels(self, panel_id=None, institute_id=None, version=None):
"""Return all gene panels
If panel_id return all versions of panels by that panel name
Args:
panel_id(str)
Returns:
cursor(pymongo.cursor)
"""
query = {}
if panel_id:
query['panel_name'] = panel_id
if version:
query['version'] = version
if institute_id:
query['institute'] = institute_id
return self.panel_collection.find(query) | def function[gene_panels, parameter[self, panel_id, institute_id, version]]:
constant[Return all gene panels
If panel_id return all versions of panels by that panel name
Args:
panel_id(str)
Returns:
cursor(pymongo.cursor)
]
variable[query] assign[=] dictionary[[], []]
if name[panel_id] begin[:]
call[name[query]][constant[panel_name]] assign[=] name[panel_id]
if name[version] begin[:]
call[name[query]][constant[version]] assign[=] name[version]
if name[institute_id] begin[:]
call[name[query]][constant[institute]] assign[=] name[institute_id]
return[call[name[self].panel_collection.find, parameter[name[query]]]] | keyword[def] identifier[gene_panels] ( identifier[self] , identifier[panel_id] = keyword[None] , identifier[institute_id] = keyword[None] , identifier[version] = keyword[None] ):
literal[string]
identifier[query] ={}
keyword[if] identifier[panel_id] :
identifier[query] [ literal[string] ]= identifier[panel_id]
keyword[if] identifier[version] :
identifier[query] [ literal[string] ]= identifier[version]
keyword[if] identifier[institute_id] :
identifier[query] [ literal[string] ]= identifier[institute_id]
keyword[return] identifier[self] . identifier[panel_collection] . identifier[find] ( identifier[query] ) | def gene_panels(self, panel_id=None, institute_id=None, version=None):
"""Return all gene panels
If panel_id return all versions of panels by that panel name
Args:
panel_id(str)
Returns:
cursor(pymongo.cursor)
"""
query = {}
if panel_id:
query['panel_name'] = panel_id
if version:
query['version'] = version # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if institute_id:
query['institute'] = institute_id # depends on [control=['if'], data=[]]
return self.panel_collection.find(query) |
def on_click_plot_widget(self, event):
""" When the plot control is toggled, set visibility and hand down source"""
self.plot.source = self.sources
self.plot.visible = event.new
if self.plot.visible:
self.plot.watchers.append(
self.select.widget.link(self.plot, value='source')) | def function[on_click_plot_widget, parameter[self, event]]:
constant[ When the plot control is toggled, set visibility and hand down source]
name[self].plot.source assign[=] name[self].sources
name[self].plot.visible assign[=] name[event].new
if name[self].plot.visible begin[:]
call[name[self].plot.watchers.append, parameter[call[name[self].select.widget.link, parameter[name[self].plot]]]] | keyword[def] identifier[on_click_plot_widget] ( identifier[self] , identifier[event] ):
literal[string]
identifier[self] . identifier[plot] . identifier[source] = identifier[self] . identifier[sources]
identifier[self] . identifier[plot] . identifier[visible] = identifier[event] . identifier[new]
keyword[if] identifier[self] . identifier[plot] . identifier[visible] :
identifier[self] . identifier[plot] . identifier[watchers] . identifier[append] (
identifier[self] . identifier[select] . identifier[widget] . identifier[link] ( identifier[self] . identifier[plot] , identifier[value] = literal[string] )) | def on_click_plot_widget(self, event):
""" When the plot control is toggled, set visibility and hand down source"""
self.plot.source = self.sources
self.plot.visible = event.new
if self.plot.visible:
self.plot.watchers.append(self.select.widget.link(self.plot, value='source')) # depends on [control=['if'], data=[]] |
def save_fits(self, data, name):
"""
This method simply saves the model components and the residual.
INPUTS:
data (no default) Data which is to be saved.
name (no default) File name for new .fits file. Will overwrite.
"""
data = data.reshape(1, 1, data.shape[0], data.shape[0])
new_file = pyfits.PrimaryHDU(data,self.img_hdu_list[0].header)
new_file.writeto("{}".format(name), overwrite=True) | def function[save_fits, parameter[self, data, name]]:
constant[
This method simply saves the model components and the residual.
INPUTS:
data (no default) Data which is to be saved.
name (no default) File name for new .fits file. Will overwrite.
]
variable[data] assign[=] call[name[data].reshape, parameter[constant[1], constant[1], call[name[data].shape][constant[0]], call[name[data].shape][constant[0]]]]
variable[new_file] assign[=] call[name[pyfits].PrimaryHDU, parameter[name[data], call[name[self].img_hdu_list][constant[0]].header]]
call[name[new_file].writeto, parameter[call[constant[{}].format, parameter[name[name]]]]] | keyword[def] identifier[save_fits] ( identifier[self] , identifier[data] , identifier[name] ):
literal[string]
identifier[data] = identifier[data] . identifier[reshape] ( literal[int] , literal[int] , identifier[data] . identifier[shape] [ literal[int] ], identifier[data] . identifier[shape] [ literal[int] ])
identifier[new_file] = identifier[pyfits] . identifier[PrimaryHDU] ( identifier[data] , identifier[self] . identifier[img_hdu_list] [ literal[int] ]. identifier[header] )
identifier[new_file] . identifier[writeto] ( literal[string] . identifier[format] ( identifier[name] ), identifier[overwrite] = keyword[True] ) | def save_fits(self, data, name):
"""
This method simply saves the model components and the residual.
INPUTS:
data (no default) Data which is to be saved.
name (no default) File name for new .fits file. Will overwrite.
"""
data = data.reshape(1, 1, data.shape[0], data.shape[0])
new_file = pyfits.PrimaryHDU(data, self.img_hdu_list[0].header)
new_file.writeto('{}'.format(name), overwrite=True) |
def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None | def function[loop_stop, parameter[self, force]]:
constant[This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
]
if compare[name[self]._thread is constant[None]] begin[:]
return[name[MQTT_ERR_INVAL]]
name[self]._thread_terminate assign[=] constant[True]
call[name[self]._thread.join, parameter[]]
name[self]._thread assign[=] constant[None] | keyword[def] identifier[loop_stop] ( identifier[self] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[_thread] keyword[is] keyword[None] :
keyword[return] identifier[MQTT_ERR_INVAL]
identifier[self] . identifier[_thread_terminate] = keyword[True]
identifier[self] . identifier[_thread] . identifier[join] ()
identifier[self] . identifier[_thread] = keyword[None] | def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL # depends on [control=['if'], data=[]]
self._thread_terminate = True
self._thread.join()
self._thread = None |
def pipe(p1, p2):
"""Joins two pipes"""
if isinstance(p1, Pipeable) or isinstance(p2, Pipeable):
return p1 | p2
return Pipe([p1, p2]) | def function[pipe, parameter[p1, p2]]:
constant[Joins two pipes]
if <ast.BoolOp object at 0x7da1b0965150> begin[:]
return[binary_operation[name[p1] <ast.BitOr object at 0x7da2590d6aa0> name[p2]]]
return[call[name[Pipe], parameter[list[[<ast.Name object at 0x7da1b09d3af0>, <ast.Name object at 0x7da1b09d3eb0>]]]]] | keyword[def] identifier[pipe] ( identifier[p1] , identifier[p2] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[p1] , identifier[Pipeable] ) keyword[or] identifier[isinstance] ( identifier[p2] , identifier[Pipeable] ):
keyword[return] identifier[p1] | identifier[p2]
keyword[return] identifier[Pipe] ([ identifier[p1] , identifier[p2] ]) | def pipe(p1, p2):
"""Joins two pipes"""
if isinstance(p1, Pipeable) or isinstance(p2, Pipeable):
return p1 | p2 # depends on [control=['if'], data=[]]
return Pipe([p1, p2]) |
def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
    """Step the environment ``number_of_steps`` times using ``model``'s policy.

    Every transition is appended to the experience replay buffer AND
    accumulated into the returned trajectory object.

    :param batch_info: batch bookkeeping object (unused in this roller;
        kept for the roller interface)
    :param model: policy model used to select actions and estimate values
    :param number_of_steps: number of environment steps to perform
    :return: a ``Trajectories`` instance (presumably a ``Rollout`` subtype --
        TODO confirm against the framework's class hierarchy)
    """
    # Recurrent models would need hidden state threaded through the steps;
    # this roller does not track it.
    assert not model.is_recurrent, "Replay env roller does not support recurrent models"
    accumulator = TensorAccumulator()
    episode_information = []  # List of dictionaries with episode information
    for step_idx in range(number_of_steps):
        # Evaluate the policy on the device-resident observation.
        step = model.step(self.last_observation)
        replay_extra_information = {}
        # Record the observation *before* acting so rows line up with actions.
        accumulator.add('observations', self.last_observation_cpu)
        # Add every model output to the tensor accumulator (CPU copies).
        for name, tensor in step.items():
            tensor_cpu = tensor.cpu()
            accumulator.add(name, tensor_cpu)
            if name != 'actions':
                # Actions are stored separately below; all other model
                # outputs go into the replay buffer as numpy extras.
                replay_extra_information[name] = tensor_cpu.numpy()
        actions_numpy = step['actions'].detach().cpu().numpy()
        new_obs, new_rewards, new_dones, new_infos = self.environment.step(actions_numpy)
        # Store this transition (pre-step frame, action, outcome) in the
        # experience replay buffer.
        self.replay_buffer.store_transition(
            frame=self.last_observation_cpu.numpy(),
            action=actions_numpy,
            reward=new_rewards,
            done=new_dones,
            extra_info=replay_extra_information
        )
        # Done is flagged true when the episode has ended AND the frame we
        # see is already a first frame from the next episode.
        dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone()
        accumulator.add('dones', dones_tensor)
        # Keep a CPU copy for storage and a device copy for the next step.
        self.last_observation_cpu = torch.from_numpy(new_obs).clone()
        self.last_observation = self.last_observation_cpu.to(self.device)
        accumulator.add('rewards', torch.from_numpy(new_rewards.astype(np.float32)).clone())
        episode_information.append(new_infos)
    accumulated_tensors = accumulator.result()
    return Trajectories(
        num_steps=accumulated_tensors['observations'].size(0),
        num_envs=accumulated_tensors['observations'].size(1),
        environment_information=episode_information,
        transition_tensors=accumulated_tensors,
        rollout_tensors={
            # Bootstrap value estimates for the state after the final step.
            'final_values': model.value(self.last_observation).cpu()
        }
    )
) | def function[rollout, parameter[self, batch_info, model, number_of_steps]]:
constant[ Calculate env rollout ]
assert[<ast.UnaryOp object at 0x7da1b1791c90>]
variable[accumulator] assign[=] call[name[TensorAccumulator], parameter[]]
variable[episode_information] assign[=] list[[]]
for taget[name[step_idx]] in starred[call[name[range], parameter[name[number_of_steps]]]] begin[:]
variable[step] assign[=] call[name[model].step, parameter[name[self].last_observation]]
variable[replay_extra_information] assign[=] dictionary[[], []]
call[name[accumulator].add, parameter[constant[observations], name[self].last_observation_cpu]]
for taget[tuple[[<ast.Name object at 0x7da2041d94b0>, <ast.Name object at 0x7da2041d9180>]]] in starred[call[name[step].items, parameter[]]] begin[:]
variable[tensor_cpu] assign[=] call[name[tensor].cpu, parameter[]]
call[name[accumulator].add, parameter[name[name], name[tensor_cpu]]]
if compare[name[name] not_equal[!=] constant[actions]] begin[:]
call[name[replay_extra_information]][name[name]] assign[=] call[name[tensor_cpu].numpy, parameter[]]
variable[actions_numpy] assign[=] call[call[call[call[name[step]][constant[actions]].detach, parameter[]].cpu, parameter[]].numpy, parameter[]]
<ast.Tuple object at 0x7da1b140bb20> assign[=] call[name[self].environment.step, parameter[name[actions_numpy]]]
call[name[self].replay_buffer.store_transition, parameter[]]
variable[dones_tensor] assign[=] call[call[name[torch].from_numpy, parameter[call[name[new_dones].astype, parameter[name[np].float32]]]].clone, parameter[]]
call[name[accumulator].add, parameter[constant[dones], name[dones_tensor]]]
name[self].last_observation_cpu assign[=] call[call[name[torch].from_numpy, parameter[name[new_obs]]].clone, parameter[]]
name[self].last_observation assign[=] call[name[self].last_observation_cpu.to, parameter[name[self].device]]
call[name[accumulator].add, parameter[constant[rewards], call[call[name[torch].from_numpy, parameter[call[name[new_rewards].astype, parameter[name[np].float32]]]].clone, parameter[]]]]
call[name[episode_information].append, parameter[name[new_infos]]]
variable[accumulated_tensors] assign[=] call[name[accumulator].result, parameter[]]
return[call[name[Trajectories], parameter[]]] | keyword[def] identifier[rollout] ( identifier[self] , identifier[batch_info] : identifier[BatchInfo] , identifier[model] : identifier[RlModel] , identifier[number_of_steps] : identifier[int] )-> identifier[Rollout] :
literal[string]
keyword[assert] keyword[not] identifier[model] . identifier[is_recurrent] , literal[string]
identifier[accumulator] = identifier[TensorAccumulator] ()
identifier[episode_information] =[]
keyword[for] identifier[step_idx] keyword[in] identifier[range] ( identifier[number_of_steps] ):
identifier[step] = identifier[model] . identifier[step] ( identifier[self] . identifier[last_observation] )
identifier[replay_extra_information] ={}
identifier[accumulator] . identifier[add] ( literal[string] , identifier[self] . identifier[last_observation_cpu] )
keyword[for] identifier[name] , identifier[tensor] keyword[in] identifier[step] . identifier[items] ():
identifier[tensor_cpu] = identifier[tensor] . identifier[cpu] ()
identifier[accumulator] . identifier[add] ( identifier[name] , identifier[tensor_cpu] )
keyword[if] identifier[name] != literal[string] :
identifier[replay_extra_information] [ identifier[name] ]= identifier[tensor_cpu] . identifier[numpy] ()
identifier[actions_numpy] = identifier[step] [ literal[string] ]. identifier[detach] (). identifier[cpu] (). identifier[numpy] ()
identifier[new_obs] , identifier[new_rewards] , identifier[new_dones] , identifier[new_infos] = identifier[self] . identifier[environment] . identifier[step] ( identifier[actions_numpy] )
identifier[self] . identifier[replay_buffer] . identifier[store_transition] (
identifier[frame] = identifier[self] . identifier[last_observation_cpu] . identifier[numpy] (),
identifier[action] = identifier[actions_numpy] ,
identifier[reward] = identifier[new_rewards] ,
identifier[done] = identifier[new_dones] ,
identifier[extra_info] = identifier[replay_extra_information]
)
identifier[dones_tensor] = identifier[torch] . identifier[from_numpy] ( identifier[new_dones] . identifier[astype] ( identifier[np] . identifier[float32] )). identifier[clone] ()
identifier[accumulator] . identifier[add] ( literal[string] , identifier[dones_tensor] )
identifier[self] . identifier[last_observation_cpu] = identifier[torch] . identifier[from_numpy] ( identifier[new_obs] ). identifier[clone] ()
identifier[self] . identifier[last_observation] = identifier[self] . identifier[last_observation_cpu] . identifier[to] ( identifier[self] . identifier[device] )
identifier[accumulator] . identifier[add] ( literal[string] , identifier[torch] . identifier[from_numpy] ( identifier[new_rewards] . identifier[astype] ( identifier[np] . identifier[float32] )). identifier[clone] ())
identifier[episode_information] . identifier[append] ( identifier[new_infos] )
identifier[accumulated_tensors] = identifier[accumulator] . identifier[result] ()
keyword[return] identifier[Trajectories] (
identifier[num_steps] = identifier[accumulated_tensors] [ literal[string] ]. identifier[size] ( literal[int] ),
identifier[num_envs] = identifier[accumulated_tensors] [ literal[string] ]. identifier[size] ( literal[int] ),
identifier[environment_information] = identifier[episode_information] ,
identifier[transition_tensors] = identifier[accumulated_tensors] ,
identifier[rollout_tensors] ={
literal[string] : identifier[model] . identifier[value] ( identifier[self] . identifier[last_observation] ). identifier[cpu] ()
}
) | def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
""" Calculate env rollout """
assert not model.is_recurrent, 'Replay env roller does not support recurrent models'
accumulator = TensorAccumulator()
episode_information = [] # List of dictionaries with episode information
for step_idx in range(number_of_steps):
step = model.step(self.last_observation)
replay_extra_information = {}
accumulator.add('observations', self.last_observation_cpu)
# Add step to the tensor accumulator
for (name, tensor) in step.items():
tensor_cpu = tensor.cpu()
accumulator.add(name, tensor_cpu)
if name != 'actions':
replay_extra_information[name] = tensor_cpu.numpy() # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]]
actions_numpy = step['actions'].detach().cpu().numpy()
(new_obs, new_rewards, new_dones, new_infos) = self.environment.step(actions_numpy)
# Store rollout in the experience replay buffer
self.replay_buffer.store_transition(frame=self.last_observation_cpu.numpy(), action=actions_numpy, reward=new_rewards, done=new_dones, extra_info=replay_extra_information)
# Done is flagged true when the episode has ended AND the frame we see is already a first frame from the
# next episode
dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone()
accumulator.add('dones', dones_tensor)
self.last_observation_cpu = torch.from_numpy(new_obs).clone()
self.last_observation = self.last_observation_cpu.to(self.device)
accumulator.add('rewards', torch.from_numpy(new_rewards.astype(np.float32)).clone())
episode_information.append(new_infos) # depends on [control=['for'], data=[]]
accumulated_tensors = accumulator.result()
return Trajectories(num_steps=accumulated_tensors['observations'].size(0), num_envs=accumulated_tensors['observations'].size(1), environment_information=episode_information, transition_tensors=accumulated_tensors, rollout_tensors={'final_values': model.value(self.last_observation).cpu()}) |
async def async_get_api_key(session, host, port, username=None, password=None, **kwargs):
    """Get a new API key for devicetype."""
    url = 'http://{host}:{port}/api'.format(host=host, port=str(port))
    # Only send HTTP basic auth when both credentials are supplied.
    if username and password:
        auth = aiohttp.BasicAuth(username, password=password)
    else:
        auth = None
    data = b'{"devicetype": "pydeconz"}'
    response = await async_request(session.post, url, auth=auth, data=data)
    # The gateway answers with a list of result objects; the key lives in
    # the first entry under success/username.
    api_key = response[0]['success']['username']
    _LOGGER.info("API key: %s", api_key)
    return api_key
return api_key | <ast.AsyncFunctionDef object at 0x7da1b0383400> | keyword[async] keyword[def] identifier[async_get_api_key] ( identifier[session] , identifier[host] , identifier[port] , identifier[username] = keyword[None] , identifier[password] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[host] = identifier[host] , identifier[port] = identifier[str] ( identifier[port] ))
identifier[auth] = keyword[None]
keyword[if] identifier[username] keyword[and] identifier[password] :
identifier[auth] = identifier[aiohttp] . identifier[BasicAuth] ( identifier[username] , identifier[password] = identifier[password] )
identifier[data] = literal[string]
identifier[response] = keyword[await] identifier[async_request] ( identifier[session] . identifier[post] , identifier[url] , identifier[auth] = identifier[auth] , identifier[data] = identifier[data] )
identifier[api_key] = identifier[response] [ literal[int] ][ literal[string] ][ literal[string] ]
identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[api_key] )
keyword[return] identifier[api_key] | async def async_get_api_key(session, host, port, username=None, password=None, **kwargs):
"""Get a new API key for devicetype."""
url = 'http://{host}:{port}/api'.format(host=host, port=str(port))
auth = None
if username and password:
auth = aiohttp.BasicAuth(username, password=password) # depends on [control=['if'], data=[]]
data = b'{"devicetype": "pydeconz"}'
response = await async_request(session.post, url, auth=auth, data=data)
api_key = response[0]['success']['username']
_LOGGER.info('API key: %s', api_key)
return api_key |
def init_associations(self, fin_anno, taxids=None):
    """Read annotation file. Store annotation data in a list of namedtuples.

    :param fin_anno: path to a tab-separated annotation file (NCBI
        gene2go-style columns, presumably -- TODO confirm), or None
    :param taxids: pass ``True`` to keep annotations for *all* taxids;
        any other value means filter by ``self.taxids``
    :return: list of ``ntanno`` namedtuples (fields from ``self.flds``)

    Any parsing error is fatal: a traceback plus the offending line are
    printed and the process exits with status 1.
    """
    nts = []
    if fin_anno is None:
        return nts
    tic = timeit.default_timer()
    # Pre-seed lnum/line so the error report below has sane values even if
    # open() itself fails before the loop runs.
    lnum = -1
    line = "\t"*len(self.flds)
    try:
        with open(fin_anno) as ifstrm:
            # Map the file's Category column to a GO namespace code.
            category2ns = {'Process':'BP', 'Function':'MF', 'Component':'CC'}
            ntobj = cx.namedtuple('ntanno', self.flds)
            # Get: 1) Specified taxids, default taxid(human), or all taxids
            get_all = taxids is True
            taxids = self.taxids
            for lnum, line in enumerate(ifstrm, 1):
                # Read data
                if line[0] != '#':
                    vals = line.split('\t')
                    taxid = int(vals[0])
                    if get_all or taxid in taxids:
                        # assert len(vals) == 8
                        ntd = ntobj(
                            tax_id=taxid,
                            DB_ID=int(vals[1]),
                            GO_ID=vals[2],
                            Evidence_Code=vals[3],
                            Qualifier=self._get_qualifiers(vals[4]),
                            GO_term=vals[5],
                            DB_Reference=self._get_pmids(vals[6]),
                            NS=category2ns[vals[7].rstrip()])
                        #self._chk_qualifiers(qualifiers, lnum, ntd)
                        nts.append(ntd)
                # Read header: verify the commented first line matches the
                # expected column names.
                elif line[0] == '#':
                    assert line[1:-1].split('\t') == self.hdrs
    # pylint: disable=broad-except
    except Exception as inst:
        import traceback
        traceback.print_exc()
        sys.stderr.write("\n **FATAL: {MSG}\n\n".format(MSG=str(inst)))
        sys.stderr.write("**FATAL: {FIN}[{LNUM}]:\n{L}".format(FIN=fin_anno, L=line, LNUM=lnum))
        self._prt_line_detail(sys.stdout, line, lnum)
        sys.exit(1)
    # Report elapsed wall time and number of annotations read.
    print('HMS:{HMS} {N:7,} annotations READ: {ANNO}'.format(
        N=len(nts), ANNO=fin_anno,
        HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic)))))
    return nts
return nts | def function[init_associations, parameter[self, fin_anno, taxids]]:
constant[Read annotation file. Store annotation data in a list of namedtuples.]
variable[nts] assign[=] list[[]]
if compare[name[fin_anno] is constant[None]] begin[:]
return[name[nts]]
variable[tic] assign[=] call[name[timeit].default_timer, parameter[]]
variable[lnum] assign[=] <ast.UnaryOp object at 0x7da1b26ae110>
variable[line] assign[=] binary_operation[constant[ ] * call[name[len], parameter[name[self].flds]]]
<ast.Try object at 0x7da1b26ae650>
call[name[print], parameter[call[constant[HMS:{HMS} {N:7,} annotations READ: {ANNO}].format, parameter[]]]]
return[name[nts]] | keyword[def] identifier[init_associations] ( identifier[self] , identifier[fin_anno] , identifier[taxids] = keyword[None] ):
literal[string]
identifier[nts] =[]
keyword[if] identifier[fin_anno] keyword[is] keyword[None] :
keyword[return] identifier[nts]
identifier[tic] = identifier[timeit] . identifier[default_timer] ()
identifier[lnum] =- literal[int]
identifier[line] = literal[string] * identifier[len] ( identifier[self] . identifier[flds] )
keyword[try] :
keyword[with] identifier[open] ( identifier[fin_anno] ) keyword[as] identifier[ifstrm] :
identifier[category2ns] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[ntobj] = identifier[cx] . identifier[namedtuple] ( literal[string] , identifier[self] . identifier[flds] )
identifier[get_all] = identifier[taxids] keyword[is] keyword[True]
identifier[taxids] = identifier[self] . identifier[taxids]
keyword[for] identifier[lnum] , identifier[line] keyword[in] identifier[enumerate] ( identifier[ifstrm] , literal[int] ):
keyword[if] identifier[line] [ literal[int] ]!= literal[string] :
identifier[vals] = identifier[line] . identifier[split] ( literal[string] )
identifier[taxid] = identifier[int] ( identifier[vals] [ literal[int] ])
keyword[if] identifier[get_all] keyword[or] identifier[taxid] keyword[in] identifier[taxids] :
identifier[ntd] = identifier[ntobj] (
identifier[tax_id] = identifier[taxid] ,
identifier[DB_ID] = identifier[int] ( identifier[vals] [ literal[int] ]),
identifier[GO_ID] = identifier[vals] [ literal[int] ],
identifier[Evidence_Code] = identifier[vals] [ literal[int] ],
identifier[Qualifier] = identifier[self] . identifier[_get_qualifiers] ( identifier[vals] [ literal[int] ]),
identifier[GO_term] = identifier[vals] [ literal[int] ],
identifier[DB_Reference] = identifier[self] . identifier[_get_pmids] ( identifier[vals] [ literal[int] ]),
identifier[NS] = identifier[category2ns] [ identifier[vals] [ literal[int] ]. identifier[rstrip] ()])
identifier[nts] . identifier[append] ( identifier[ntd] )
keyword[elif] identifier[line] [ literal[int] ]== literal[string] :
keyword[assert] identifier[line] [ literal[int] :- literal[int] ]. identifier[split] ( literal[string] )== identifier[self] . identifier[hdrs]
keyword[except] identifier[Exception] keyword[as] identifier[inst] :
keyword[import] identifier[traceback]
identifier[traceback] . identifier[print_exc] ()
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] . identifier[format] ( identifier[MSG] = identifier[str] ( identifier[inst] )))
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] . identifier[format] ( identifier[FIN] = identifier[fin_anno] , identifier[L] = identifier[line] , identifier[LNUM] = identifier[lnum] ))
identifier[self] . identifier[_prt_line_detail] ( identifier[sys] . identifier[stdout] , identifier[line] , identifier[lnum] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[print] ( literal[string] . identifier[format] (
identifier[N] = identifier[len] ( identifier[nts] ), identifier[ANNO] = identifier[fin_anno] ,
identifier[HMS] = identifier[str] ( identifier[datetime] . identifier[timedelta] ( identifier[seconds] =( identifier[timeit] . identifier[default_timer] ()- identifier[tic] )))))
keyword[return] identifier[nts] | def init_associations(self, fin_anno, taxids=None):
"""Read annotation file. Store annotation data in a list of namedtuples."""
nts = []
if fin_anno is None:
return nts # depends on [control=['if'], data=[]]
tic = timeit.default_timer()
lnum = -1
line = '\t' * len(self.flds)
try:
with open(fin_anno) as ifstrm:
category2ns = {'Process': 'BP', 'Function': 'MF', 'Component': 'CC'}
ntobj = cx.namedtuple('ntanno', self.flds)
# Get: 1) Specified taxids, default taxid(human), or all taxids
get_all = taxids is True
taxids = self.taxids
for (lnum, line) in enumerate(ifstrm, 1):
# Read data
if line[0] != '#':
vals = line.split('\t')
taxid = int(vals[0])
if get_all or taxid in taxids:
# assert len(vals) == 8
ntd = ntobj(tax_id=taxid, DB_ID=int(vals[1]), GO_ID=vals[2], Evidence_Code=vals[3], Qualifier=self._get_qualifiers(vals[4]), GO_term=vals[5], DB_Reference=self._get_pmids(vals[6]), NS=category2ns[vals[7].rstrip()])
#self._chk_qualifiers(qualifiers, lnum, ntd)
nts.append(ntd) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Read header
elif line[0] == '#':
assert line[1:-1].split('\t') == self.hdrs # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['ifstrm']] # depends on [control=['try'], data=[]]
# pylint: disable=broad-except
except Exception as inst:
import traceback
traceback.print_exc()
sys.stderr.write('\n **FATAL: {MSG}\n\n'.format(MSG=str(inst)))
sys.stderr.write('**FATAL: {FIN}[{LNUM}]:\n{L}'.format(FIN=fin_anno, L=line, LNUM=lnum))
self._prt_line_detail(sys.stdout, line, lnum)
sys.exit(1) # depends on [control=['except'], data=['inst']]
print('HMS:{HMS} {N:7,} annotations READ: {ANNO}'.format(N=len(nts), ANNO=fin_anno, HMS=str(datetime.timedelta(seconds=timeit.default_timer() - tic))))
return nts |
def _expand_balancer(lb):
    '''
    Convert the libcloud load-balancer object into something more serializable.

    Returns a plain dict mirroring ``lb.__dict__`` in which the nested
    healthcheck / forwarding-rule / targetpool driver objects are replaced
    by dicts and names.

    Fix: the original wrote its expanded values straight into ``lb.extra``
    (``ret.update(lb.__dict__)`` copies only the outer dict, so
    ``ret['extra']`` *was* the same dict as ``lb.extra``), mutating the
    input balancer.  The 'extra' dict is now copied before being rewritten.
    '''
    ret = {}
    ret.update(lb.__dict__)
    # Shallow-copy 'extra' so the rewrites below do not leak into lb.extra.
    extra = dict(ret['extra'])
    ret['extra'] = extra

    extra['healthchecks'] = [_expand_item(item) for item in extra['healthchecks']]

    fwr = extra['forwarding_rule']
    fwr_dict = dict(fwr.__dict__)
    # Replace nested driver objects with their names.
    fwr_dict['targetpool'] = fwr.targetpool.name
    fwr_dict['region'] = fwr.region.name
    extra['forwarding_rule'] = fwr_dict

    tp = extra['targetpool']
    tp_dict = dict(tp.__dict__)
    tp_dict['region'] = _expand_item(tp.region)
    tp_dict['nodes'] = [_expand_node(n) for n in tp.nodes]
    tp_dict['healthchecks'] = [hci.name for hci in tp.healthchecks]
    # NOTE(review): _expand_item may return a dict that still shares state
    # with region; writing 'zones' could then touch the region object -- confirm.
    tp_dict['region']['zones'] = [z.name for z in tp.region.zones]
    extra['targetpool'] = tp_dict
    return ret
return ret | def function[_expand_balancer, parameter[lb]]:
constant[
Convert the libcloud load-balancer object into something more serializable.
]
variable[ret] assign[=] dictionary[[], []]
call[name[ret].update, parameter[name[lb].__dict__]]
variable[hc] assign[=] call[call[name[ret]][constant[extra]]][constant[healthchecks]]
call[call[name[ret]][constant[extra]]][constant[healthchecks]] assign[=] list[[]]
for taget[name[item]] in starred[name[hc]] begin[:]
call[call[call[name[ret]][constant[extra]]][constant[healthchecks]].append, parameter[call[name[_expand_item], parameter[name[item]]]]]
variable[fwr] assign[=] call[call[name[ret]][constant[extra]]][constant[forwarding_rule]]
variable[tp] assign[=] call[call[name[ret]][constant[extra]]][constant[forwarding_rule]].targetpool
variable[reg] assign[=] call[call[name[ret]][constant[extra]]][constant[forwarding_rule]].region
call[call[name[ret]][constant[extra]]][constant[forwarding_rule]] assign[=] dictionary[[], []]
call[call[call[name[ret]][constant[extra]]][constant[forwarding_rule]].update, parameter[name[fwr].__dict__]]
call[call[call[name[ret]][constant[extra]]][constant[forwarding_rule]]][constant[targetpool]] assign[=] name[tp].name
call[call[call[name[ret]][constant[extra]]][constant[forwarding_rule]]][constant[region]] assign[=] name[reg].name
variable[tp] assign[=] call[call[name[ret]][constant[extra]]][constant[targetpool]]
variable[hc] assign[=] call[call[name[ret]][constant[extra]]][constant[targetpool]].healthchecks
variable[nodes] assign[=] call[call[name[ret]][constant[extra]]][constant[targetpool]].nodes
variable[region] assign[=] call[call[name[ret]][constant[extra]]][constant[targetpool]].region
variable[zones] assign[=] call[call[name[ret]][constant[extra]]][constant[targetpool]].region.zones
call[call[name[ret]][constant[extra]]][constant[targetpool]] assign[=] dictionary[[], []]
call[call[call[name[ret]][constant[extra]]][constant[targetpool]].update, parameter[name[tp].__dict__]]
call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[region]] assign[=] call[name[_expand_item], parameter[name[region]]]
call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[nodes]] assign[=] list[[]]
for taget[name[n]] in starred[name[nodes]] begin[:]
call[call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[nodes]].append, parameter[call[name[_expand_node], parameter[name[n]]]]]
call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[healthchecks]] assign[=] list[[]]
for taget[name[hci]] in starred[name[hc]] begin[:]
call[call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[healthchecks]].append, parameter[name[hci].name]]
call[call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[region]]][constant[zones]] assign[=] list[[]]
for taget[name[z]] in starred[name[zones]] begin[:]
call[call[call[call[call[name[ret]][constant[extra]]][constant[targetpool]]][constant[region]]][constant[zones]].append, parameter[name[z].name]]
return[name[ret]] | keyword[def] identifier[_expand_balancer] ( identifier[lb] ):
literal[string]
identifier[ret] ={}
identifier[ret] . identifier[update] ( identifier[lb] . identifier[__dict__] )
identifier[hc] = identifier[ret] [ literal[string] ][ literal[string] ]
identifier[ret] [ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[item] keyword[in] identifier[hc] :
identifier[ret] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[_expand_item] ( identifier[item] ))
identifier[fwr] = identifier[ret] [ literal[string] ][ literal[string] ]
identifier[tp] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[targetpool]
identifier[reg] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[region]
identifier[ret] [ literal[string] ][ literal[string] ]={}
identifier[ret] [ literal[string] ][ literal[string] ]. identifier[update] ( identifier[fwr] . identifier[__dict__] )
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[tp] . identifier[name]
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[reg] . identifier[name]
identifier[tp] = identifier[ret] [ literal[string] ][ literal[string] ]
identifier[hc] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[healthchecks]
identifier[nodes] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[nodes]
identifier[region] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[region]
identifier[zones] = identifier[ret] [ literal[string] ][ literal[string] ]. identifier[region] . identifier[zones]
identifier[ret] [ literal[string] ][ literal[string] ]={}
identifier[ret] [ literal[string] ][ literal[string] ]. identifier[update] ( identifier[tp] . identifier[__dict__] )
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[_expand_item] ( identifier[region] )
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[n] keyword[in] identifier[nodes] :
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]. identifier[append] ( identifier[_expand_node] ( identifier[n] ))
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[hci] keyword[in] identifier[hc] :
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ]. identifier[append] ( identifier[hci] . identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]=[]
keyword[for] identifier[z] keyword[in] identifier[zones] :
identifier[ret] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]. identifier[append] ( identifier[z] . identifier[name] )
keyword[return] identifier[ret] | def _expand_balancer(lb):
"""
Convert the libcloud load-balancer object into something more serializable.
"""
ret = {}
ret.update(lb.__dict__)
hc = ret['extra']['healthchecks']
ret['extra']['healthchecks'] = []
for item in hc:
ret['extra']['healthchecks'].append(_expand_item(item)) # depends on [control=['for'], data=['item']]
fwr = ret['extra']['forwarding_rule']
tp = ret['extra']['forwarding_rule'].targetpool
reg = ret['extra']['forwarding_rule'].region
ret['extra']['forwarding_rule'] = {}
ret['extra']['forwarding_rule'].update(fwr.__dict__)
ret['extra']['forwarding_rule']['targetpool'] = tp.name
ret['extra']['forwarding_rule']['region'] = reg.name
tp = ret['extra']['targetpool']
hc = ret['extra']['targetpool'].healthchecks
nodes = ret['extra']['targetpool'].nodes
region = ret['extra']['targetpool'].region
zones = ret['extra']['targetpool'].region.zones
ret['extra']['targetpool'] = {}
ret['extra']['targetpool'].update(tp.__dict__)
ret['extra']['targetpool']['region'] = _expand_item(region)
ret['extra']['targetpool']['nodes'] = []
for n in nodes:
ret['extra']['targetpool']['nodes'].append(_expand_node(n)) # depends on [control=['for'], data=['n']]
ret['extra']['targetpool']['healthchecks'] = []
for hci in hc:
ret['extra']['targetpool']['healthchecks'].append(hci.name) # depends on [control=['for'], data=['hci']]
ret['extra']['targetpool']['region']['zones'] = []
for z in zones:
ret['extra']['targetpool']['region']['zones'].append(z.name) # depends on [control=['for'], data=['z']]
return ret |
def force_orthotropic(self):
r"""Force an orthotropic laminate
The terms
`A_{13}`, `A_{23}`, `A_{31}`, `A_{32}`,
`B_{13}`, `B_{23}`, `B_{31}`, `B_{32}`,
`D_{13}`, `D_{23}`, `D_{31}`, `D_{32}` are set to zero to force an
orthotropic laminate.
"""
if self.offset != 0.:
raise RuntimeError(
'Laminates with offset cannot be forced orthotropic!')
self.A[0, 2] = 0.
self.A[1, 2] = 0.
self.A[2, 0] = 0.
self.A[2, 1] = 0.
self.B[0, 2] = 0.
self.B[1, 2] = 0.
self.B[2, 0] = 0.
self.B[2, 1] = 0.
self.D[0, 2] = 0.
self.D[1, 2] = 0.
self.D[2, 0] = 0.
self.D[2, 1] = 0.
self.ABD[0, 2] = 0. # A16
self.ABD[1, 2] = 0. # A26
self.ABD[2, 0] = 0. # A61
self.ABD[2, 1] = 0. # A62
self.ABD[0, 5] = 0. # B16
self.ABD[5, 0] = 0. # B61
self.ABD[1, 5] = 0. # B26
self.ABD[5, 1] = 0. # B62
self.ABD[3, 2] = 0. # B16
self.ABD[2, 3] = 0. # B61
self.ABD[4, 2] = 0. # B26
self.ABD[2, 4] = 0. # B62
self.ABD[3, 5] = 0. # D16
self.ABD[4, 5] = 0. # D26
self.ABD[5, 3] = 0. # D61
self.ABD[5, 4] = 0. # D62
self.ABDE[0, 2] = 0. # A16
self.ABDE[1, 2] = 0. # A26
self.ABDE[2, 0] = 0. # A61
self.ABDE[2, 1] = 0. # A62
self.ABDE[0, 5] = 0. # B16
self.ABDE[5, 0] = 0. # B61
self.ABDE[1, 5] = 0. # B26
self.ABDE[5, 1] = 0. # B62
self.ABDE[3, 2] = 0. # B16
self.ABDE[2, 3] = 0. # B61
self.ABDE[4, 2] = 0. # B26
self.ABDE[2, 4] = 0. # B62
self.ABDE[3, 5] = 0. # D16
self.ABDE[4, 5] = 0. # D26
self.ABDE[5, 3] = 0. # D61
self.ABDE[5, 4] = 0. | def function[force_orthotropic, parameter[self]]:
constant[Force an orthotropic laminate
The terms
`A_{13}`, `A_{23}`, `A_{31}`, `A_{32}`,
`B_{13}`, `B_{23}`, `B_{31}`, `B_{32}`,
`D_{13}`, `D_{23}`, `D_{31}`, `D_{32}` are set to zero to force an
orthotropic laminate.
]
if compare[name[self].offset not_equal[!=] constant[0.0]] begin[:]
<ast.Raise object at 0x7da1b092e9e0>
call[name[self].A][tuple[[<ast.Constant object at 0x7da1b092fb20>, <ast.Constant object at 0x7da1b092d990>]]] assign[=] constant[0.0]
call[name[self].A][tuple[[<ast.Constant object at 0x7da1b092f910>, <ast.Constant object at 0x7da1b092ef50>]]] assign[=] constant[0.0]
call[name[self].A][tuple[[<ast.Constant object at 0x7da1b092e020>, <ast.Constant object at 0x7da1b092d4e0>]]] assign[=] constant[0.0]
call[name[self].A][tuple[[<ast.Constant object at 0x7da20c6aa110>, <ast.Constant object at 0x7da20c6a8760>]]] assign[=] constant[0.0]
call[name[self].B][tuple[[<ast.Constant object at 0x7da20c6a9c90>, <ast.Constant object at 0x7da20c6ab640>]]] assign[=] constant[0.0]
call[name[self].B][tuple[[<ast.Constant object at 0x7da20c6a9330>, <ast.Constant object at 0x7da20c6aab60>]]] assign[=] constant[0.0]
call[name[self].B][tuple[[<ast.Constant object at 0x7da20c6a9b10>, <ast.Constant object at 0x7da20c6ab220>]]] assign[=] constant[0.0]
call[name[self].B][tuple[[<ast.Constant object at 0x7da20c6abdf0>, <ast.Constant object at 0x7da20c6aac20>]]] assign[=] constant[0.0]
call[name[self].D][tuple[[<ast.Constant object at 0x7da20c6ab880>, <ast.Constant object at 0x7da20c6aa9b0>]]] assign[=] constant[0.0]
call[name[self].D][tuple[[<ast.Constant object at 0x7da20c6ab970>, <ast.Constant object at 0x7da20c6a85b0>]]] assign[=] constant[0.0]
call[name[self].D][tuple[[<ast.Constant object at 0x7da1b0a9d930>, <ast.Constant object at 0x7da1b0a9da80>]]] assign[=] constant[0.0]
call[name[self].D][tuple[[<ast.Constant object at 0x7da1b0a9c970>, <ast.Constant object at 0x7da1b0a9fa60>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9c250>, <ast.Constant object at 0x7da1b0a9c940>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9d750>, <ast.Constant object at 0x7da1b0a9d8d0>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9c6a0>, <ast.Constant object at 0x7da1b0a9ec50>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9e980>, <ast.Constant object at 0x7da1b0a9d360>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9dd20>, <ast.Constant object at 0x7da1b0a9cd60>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9e230>, <ast.Constant object at 0x7da1b0a9e5f0>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9fc70>, <ast.Constant object at 0x7da1b0a9ff70>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9cee0>, <ast.Constant object at 0x7da1b0a9f190>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9e560>, <ast.Constant object at 0x7da1b0a9f460>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9faf0>, <ast.Constant object at 0x7da1b0a9db10>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9f700>, <ast.Constant object at 0x7da1b0a9c3a0>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9fca0>, <ast.Constant object at 0x7da1b0a9c280>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9f220>, <ast.Constant object at 0x7da1b0a9f340>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9f850>, <ast.Constant object at 0x7da1b0a9d960>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9eda0>, <ast.Constant object at 0x7da1b0a9c520>]]] assign[=] constant[0.0]
call[name[self].ABD][tuple[[<ast.Constant object at 0x7da1b0a9c820>, <ast.Constant object at 0x7da1b0a9cc70>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9ece0>, <ast.Constant object at 0x7da1b0a9f640>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9fa90>, <ast.Constant object at 0x7da1b0a9e8f0>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9d1b0>, <ast.Constant object at 0x7da1b0a9cd30>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9d450>, <ast.Constant object at 0x7da1b0a9e050>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9feb0>, <ast.Constant object at 0x7da1b0a9e140>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9e0b0>, <ast.Constant object at 0x7da1b0a9e680>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9e1d0>, <ast.Constant object at 0x7da1b0a9d900>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9cf40>, <ast.Constant object at 0x7da1b0a9e200>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9d390>, <ast.Constant object at 0x7da1b0a9fc40>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9eb30>, <ast.Constant object at 0x7da1b0a9c9d0>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9e290>, <ast.Constant object at 0x7da1b0a9fa30>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9c550>, <ast.Constant object at 0x7da1b0a9c7c0>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9ed40>, <ast.Constant object at 0x7da1b0a9eef0>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9f4c0>, <ast.Constant object at 0x7da1b0a9c1c0>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9fb80>, <ast.Constant object at 0x7da1b0a9c910>]]] assign[=] constant[0.0]
call[name[self].ABDE][tuple[[<ast.Constant object at 0x7da1b0a9c580>, <ast.Constant object at 0x7da1b0a9e6e0>]]] assign[=] constant[0.0] | keyword[def] identifier[force_orthotropic] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[offset] != literal[int] :
keyword[raise] identifier[RuntimeError] (
literal[string] )
identifier[self] . identifier[A] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[A] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[A] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[A] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[B] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[B] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[B] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[B] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[D] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[D] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[D] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[D] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABD] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int]
identifier[self] . identifier[ABDE] [ literal[int] , literal[int] ]= literal[int] | def force_orthotropic(self):
"""Force an orthotropic laminate
The terms
`A_{13}`, `A_{23}`, `A_{31}`, `A_{32}`,
`B_{13}`, `B_{23}`, `B_{31}`, `B_{32}`,
`D_{13}`, `D_{23}`, `D_{31}`, `D_{32}` are set to zero to force an
orthotropic laminate.
"""
if self.offset != 0.0:
raise RuntimeError('Laminates with offset cannot be forced orthotropic!') # depends on [control=['if'], data=[]]
self.A[0, 2] = 0.0
self.A[1, 2] = 0.0
self.A[2, 0] = 0.0
self.A[2, 1] = 0.0
self.B[0, 2] = 0.0
self.B[1, 2] = 0.0
self.B[2, 0] = 0.0
self.B[2, 1] = 0.0
self.D[0, 2] = 0.0
self.D[1, 2] = 0.0
self.D[2, 0] = 0.0
self.D[2, 1] = 0.0
self.ABD[0, 2] = 0.0 # A16
self.ABD[1, 2] = 0.0 # A26
self.ABD[2, 0] = 0.0 # A61
self.ABD[2, 1] = 0.0 # A62
self.ABD[0, 5] = 0.0 # B16
self.ABD[5, 0] = 0.0 # B61
self.ABD[1, 5] = 0.0 # B26
self.ABD[5, 1] = 0.0 # B62
self.ABD[3, 2] = 0.0 # B16
self.ABD[2, 3] = 0.0 # B61
self.ABD[4, 2] = 0.0 # B26
self.ABD[2, 4] = 0.0 # B62
self.ABD[3, 5] = 0.0 # D16
self.ABD[4, 5] = 0.0 # D26
self.ABD[5, 3] = 0.0 # D61
self.ABD[5, 4] = 0.0 # D62
self.ABDE[0, 2] = 0.0 # A16
self.ABDE[1, 2] = 0.0 # A26
self.ABDE[2, 0] = 0.0 # A61
self.ABDE[2, 1] = 0.0 # A62
self.ABDE[0, 5] = 0.0 # B16
self.ABDE[5, 0] = 0.0 # B61
self.ABDE[1, 5] = 0.0 # B26
self.ABDE[5, 1] = 0.0 # B62
self.ABDE[3, 2] = 0.0 # B16
self.ABDE[2, 3] = 0.0 # B61
self.ABDE[4, 2] = 0.0 # B26
self.ABDE[2, 4] = 0.0 # B62
self.ABDE[3, 5] = 0.0 # D16
self.ABDE[4, 5] = 0.0 # D26
self.ABDE[5, 3] = 0.0 # D61
self.ABDE[5, 4] = 0.0 |
def search_alert_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data | def function[search_alert_deleted_entities, parameter[self]]:
constant[Search over a customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].search_alert_deleted_entities_with_http_info, parameter[]]] | keyword[def] identifier[search_alert_deleted_entities] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[search_alert_deleted_entities_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[search_alert_deleted_entities_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def search_alert_deleted_entities(self, **kwargs): # noqa: E501
"Search over a customer's deleted alerts # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.search_alert_deleted_entities(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param SortableSearchRequest body:\n :return: ResponseContainerPagedAlert\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data |
def _make_agent(self, entrez_id, text_id):
"""Make an Agent object, appropriately grounded.
Parameters
----------
entrez_id : str
Entrez id number
text_id : str
A plain text systematic name, or None if not listed.
Returns
-------
agent : indra.statements.Agent
A grounded agent object.
"""
hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id)
if hgnc_name is not None:
name = hgnc_name
elif text_id is not None:
name = text_id
# Handle case where the name is None
else:
return None
return Agent(name, db_refs=db_refs) | def function[_make_agent, parameter[self, entrez_id, text_id]]:
constant[Make an Agent object, appropriately grounded.
Parameters
----------
entrez_id : str
Entrez id number
text_id : str
A plain text systematic name, or None if not listed.
Returns
-------
agent : indra.statements.Agent
A grounded agent object.
]
<ast.Tuple object at 0x7da207f014b0> assign[=] call[name[self]._make_db_refs, parameter[name[entrez_id], name[text_id]]]
if compare[name[hgnc_name] is_not constant[None]] begin[:]
variable[name] assign[=] name[hgnc_name]
return[call[name[Agent], parameter[name[name]]]] | keyword[def] identifier[_make_agent] ( identifier[self] , identifier[entrez_id] , identifier[text_id] ):
literal[string]
identifier[hgnc_name] , identifier[db_refs] = identifier[self] . identifier[_make_db_refs] ( identifier[entrez_id] , identifier[text_id] )
keyword[if] identifier[hgnc_name] keyword[is] keyword[not] keyword[None] :
identifier[name] = identifier[hgnc_name]
keyword[elif] identifier[text_id] keyword[is] keyword[not] keyword[None] :
identifier[name] = identifier[text_id]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[Agent] ( identifier[name] , identifier[db_refs] = identifier[db_refs] ) | def _make_agent(self, entrez_id, text_id):
"""Make an Agent object, appropriately grounded.
Parameters
----------
entrez_id : str
Entrez id number
text_id : str
A plain text systematic name, or None if not listed.
Returns
-------
agent : indra.statements.Agent
A grounded agent object.
"""
(hgnc_name, db_refs) = self._make_db_refs(entrez_id, text_id)
if hgnc_name is not None:
name = hgnc_name # depends on [control=['if'], data=['hgnc_name']]
elif text_id is not None:
name = text_id # depends on [control=['if'], data=['text_id']]
else:
# Handle case where the name is None
return None
return Agent(name, db_refs=db_refs) |
def get(self, number):
"""
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
"""
if not number in self._patterns:
raise IndexError("Invalid number")
return self._patterns[number] | def function[get, parameter[self, number]]:
constant[
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
]
if <ast.UnaryOp object at 0x7da20c992530> begin[:]
<ast.Raise object at 0x7da20c992650>
return[call[name[self]._patterns][name[number]]] | keyword[def] identifier[get] ( identifier[self] , identifier[number] ):
literal[string]
keyword[if] keyword[not] identifier[number] keyword[in] identifier[self] . identifier[_patterns] :
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[return] identifier[self] . identifier[_patterns] [ identifier[number] ] | def get(self, number):
"""
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
"""
if not number in self._patterns:
raise IndexError('Invalid number') # depends on [control=['if'], data=[]]
return self._patterns[number] |
def wrap(cls, value):
''' Some property types need to wrap their values in special containers, etc.
'''
if isinstance(value, dict):
if isinstance(value, PropertyValueColumnData):
return value
else:
return PropertyValueColumnData(value)
else:
return value | def function[wrap, parameter[cls, value]]:
constant[ Some property types need to wrap their values in special containers, etc.
]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
if call[name[isinstance], parameter[name[value], name[PropertyValueColumnData]]] begin[:]
return[name[value]] | keyword[def] identifier[wrap] ( identifier[cls] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[PropertyValueColumnData] ):
keyword[return] identifier[value]
keyword[else] :
keyword[return] identifier[PropertyValueColumnData] ( identifier[value] )
keyword[else] :
keyword[return] identifier[value] | def wrap(cls, value):
""" Some property types need to wrap their values in special containers, etc.
"""
if isinstance(value, dict):
if isinstance(value, PropertyValueColumnData):
return value # depends on [control=['if'], data=[]]
else:
return PropertyValueColumnData(value) # depends on [control=['if'], data=[]]
else:
return value |
def summary(self, stdout=True, plot=False):
'''
Displays diagnostics to the user
Args:
stdout (bool): print results to the console
plot (bool): use Seaborn to plot results
'''
if stdout:
print('Collinearity summary:')
print(pd.concat([self.results['Eigenvalues'],
self.results['ConditionIndices'],
self.results['VIFs'],
self.results['CorrelationMatrix']],
axis=1))
print('Outlier summary:')
print(self.results['RowMahalanobisDistances'])
print(self.results['ColumnMahalanobisDistances'])
print('Validity summary:')
print(self.results['Variances'])
if plot:
verify_dependencies('seaborn')
for key, result in self.results.items():
if key == 'CorrelationMatrix':
ax = plt.axes()
sns.heatmap(result, cmap='Blues', ax=ax)
ax.set_title(key)
sns.plt.show()
else:
result.plot(kind='bar', title=key)
plt.show() | def function[summary, parameter[self, stdout, plot]]:
constant[
Displays diagnostics to the user
Args:
stdout (bool): print results to the console
plot (bool): use Seaborn to plot results
]
if name[stdout] begin[:]
call[name[print], parameter[constant[Collinearity summary:]]]
call[name[print], parameter[call[name[pd].concat, parameter[list[[<ast.Subscript object at 0x7da20c6e7e50>, <ast.Subscript object at 0x7da20c6e6f80>, <ast.Subscript object at 0x7da20c6e5840>, <ast.Subscript object at 0x7da20c6e75b0>]]]]]]
call[name[print], parameter[constant[Outlier summary:]]]
call[name[print], parameter[call[name[self].results][constant[RowMahalanobisDistances]]]]
call[name[print], parameter[call[name[self].results][constant[ColumnMahalanobisDistances]]]]
call[name[print], parameter[constant[Validity summary:]]]
call[name[print], parameter[call[name[self].results][constant[Variances]]]]
if name[plot] begin[:]
call[name[verify_dependencies], parameter[constant[seaborn]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e7400>, <ast.Name object at 0x7da20c6e7670>]]] in starred[call[name[self].results.items, parameter[]]] begin[:]
if compare[name[key] equal[==] constant[CorrelationMatrix]] begin[:]
variable[ax] assign[=] call[name[plt].axes, parameter[]]
call[name[sns].heatmap, parameter[name[result]]]
call[name[ax].set_title, parameter[name[key]]]
call[name[sns].plt.show, parameter[]] | keyword[def] identifier[summary] ( identifier[self] , identifier[stdout] = keyword[True] , identifier[plot] = keyword[False] ):
literal[string]
keyword[if] identifier[stdout] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[pd] . identifier[concat] ([ identifier[self] . identifier[results] [ literal[string] ],
identifier[self] . identifier[results] [ literal[string] ],
identifier[self] . identifier[results] [ literal[string] ],
identifier[self] . identifier[results] [ literal[string] ]],
identifier[axis] = literal[int] ))
identifier[print] ( literal[string] )
identifier[print] ( identifier[self] . identifier[results] [ literal[string] ])
identifier[print] ( identifier[self] . identifier[results] [ literal[string] ])
identifier[print] ( literal[string] )
identifier[print] ( identifier[self] . identifier[results] [ literal[string] ])
keyword[if] identifier[plot] :
identifier[verify_dependencies] ( literal[string] )
keyword[for] identifier[key] , identifier[result] keyword[in] identifier[self] . identifier[results] . identifier[items] ():
keyword[if] identifier[key] == literal[string] :
identifier[ax] = identifier[plt] . identifier[axes] ()
identifier[sns] . identifier[heatmap] ( identifier[result] , identifier[cmap] = literal[string] , identifier[ax] = identifier[ax] )
identifier[ax] . identifier[set_title] ( identifier[key] )
identifier[sns] . identifier[plt] . identifier[show] ()
keyword[else] :
identifier[result] . identifier[plot] ( identifier[kind] = literal[string] , identifier[title] = identifier[key] )
identifier[plt] . identifier[show] () | def summary(self, stdout=True, plot=False):
"""
Displays diagnostics to the user
Args:
stdout (bool): print results to the console
plot (bool): use Seaborn to plot results
"""
if stdout:
print('Collinearity summary:')
print(pd.concat([self.results['Eigenvalues'], self.results['ConditionIndices'], self.results['VIFs'], self.results['CorrelationMatrix']], axis=1))
print('Outlier summary:')
print(self.results['RowMahalanobisDistances'])
print(self.results['ColumnMahalanobisDistances'])
print('Validity summary:')
print(self.results['Variances']) # depends on [control=['if'], data=[]]
if plot:
verify_dependencies('seaborn')
for (key, result) in self.results.items():
if key == 'CorrelationMatrix':
ax = plt.axes()
sns.heatmap(result, cmap='Blues', ax=ax)
ax.set_title(key)
sns.plt.show() # depends on [control=['if'], data=['key']]
else:
result.plot(kind='bar', title=key)
plt.show() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def shelf_from_config(config, **default_init):
"""Get a `Shelf` instance dynamically based on config.
`config` is a dictionary containing ``shelf_*`` keys as defined in
:mod:`birding.config`.
"""
shelf_cls = import_name(config['shelf_class'], default_ns='birding.shelf')
init = {}
init.update(default_init)
init.update(config['shelf_init'])
shelf = shelf_cls(**init)
if hasattr(shelf, 'set_expiration') and 'shelf_expiration' in config:
shelf.set_expiration(config['shelf_expiration'])
return shelf | def function[shelf_from_config, parameter[config]]:
constant[Get a `Shelf` instance dynamically based on config.
`config` is a dictionary containing ``shelf_*`` keys as defined in
:mod:`birding.config`.
]
variable[shelf_cls] assign[=] call[name[import_name], parameter[call[name[config]][constant[shelf_class]]]]
variable[init] assign[=] dictionary[[], []]
call[name[init].update, parameter[name[default_init]]]
call[name[init].update, parameter[call[name[config]][constant[shelf_init]]]]
variable[shelf] assign[=] call[name[shelf_cls], parameter[]]
if <ast.BoolOp object at 0x7da1b1a776a0> begin[:]
call[name[shelf].set_expiration, parameter[call[name[config]][constant[shelf_expiration]]]]
return[name[shelf]] | keyword[def] identifier[shelf_from_config] ( identifier[config] ,** identifier[default_init] ):
literal[string]
identifier[shelf_cls] = identifier[import_name] ( identifier[config] [ literal[string] ], identifier[default_ns] = literal[string] )
identifier[init] ={}
identifier[init] . identifier[update] ( identifier[default_init] )
identifier[init] . identifier[update] ( identifier[config] [ literal[string] ])
identifier[shelf] = identifier[shelf_cls] (** identifier[init] )
keyword[if] identifier[hasattr] ( identifier[shelf] , literal[string] ) keyword[and] literal[string] keyword[in] identifier[config] :
identifier[shelf] . identifier[set_expiration] ( identifier[config] [ literal[string] ])
keyword[return] identifier[shelf] | def shelf_from_config(config, **default_init):
"""Get a `Shelf` instance dynamically based on config.
`config` is a dictionary containing ``shelf_*`` keys as defined in
:mod:`birding.config`.
"""
shelf_cls = import_name(config['shelf_class'], default_ns='birding.shelf')
init = {}
init.update(default_init)
init.update(config['shelf_init'])
shelf = shelf_cls(**init)
if hasattr(shelf, 'set_expiration') and 'shelf_expiration' in config:
shelf.set_expiration(config['shelf_expiration']) # depends on [control=['if'], data=[]]
return shelf |
def _compute_magnitude_terms(self, rup, coeffs):
"""
First three terms of equation (8) on p. 203:
``c1 + c2*(M - 6) + c3*(M - 6)**2``
"""
adj_mag = rup.mag - self.CONSTS['ref_mag']
return coeffs['c1'] + coeffs['c2']*adj_mag + coeffs['c3']*adj_mag**2 | def function[_compute_magnitude_terms, parameter[self, rup, coeffs]]:
constant[
First three terms of equation (8) on p. 203:
``c1 + c2*(M - 6) + c3*(M - 6)**2``
]
variable[adj_mag] assign[=] binary_operation[name[rup].mag - call[name[self].CONSTS][constant[ref_mag]]]
return[binary_operation[binary_operation[call[name[coeffs]][constant[c1]] + binary_operation[call[name[coeffs]][constant[c2]] * name[adj_mag]]] + binary_operation[call[name[coeffs]][constant[c3]] * binary_operation[name[adj_mag] ** constant[2]]]]] | keyword[def] identifier[_compute_magnitude_terms] ( identifier[self] , identifier[rup] , identifier[coeffs] ):
literal[string]
identifier[adj_mag] = identifier[rup] . identifier[mag] - identifier[self] . identifier[CONSTS] [ literal[string] ]
keyword[return] identifier[coeffs] [ literal[string] ]+ identifier[coeffs] [ literal[string] ]* identifier[adj_mag] + identifier[coeffs] [ literal[string] ]* identifier[adj_mag] ** literal[int] | def _compute_magnitude_terms(self, rup, coeffs):
"""
First three terms of equation (8) on p. 203:
``c1 + c2*(M - 6) + c3*(M - 6)**2``
"""
adj_mag = rup.mag - self.CONSTS['ref_mag']
return coeffs['c1'] + coeffs['c2'] * adj_mag + coeffs['c3'] * adj_mag ** 2 |
def _get_result(self, idx, timeout=None):
"""Called by the CollectorIterator object to retrieve the
result's values one after another, in the order the results have
become available.
\param idx The index of the result we want, wrt collector's order
\param timeout integer telling how long to wait (in seconds)
for the result at index idx to be available, or None (wait
forever)
"""
self._cond.acquire()
try:
if idx >= self._expected:
raise IndexError
elif idx < len(self._collection):
return self._collection[idx]
elif idx != len(self._collection):
# Violation of the sequence protocol
raise IndexError()
else:
self._cond.wait(timeout=timeout)
try:
return self._collection[idx]
except IndexError:
# Still not added !
raise TimeoutError("Timeout while waiting for results")
finally:
self._cond.release() | def function[_get_result, parameter[self, idx, timeout]]:
constant[Called by the CollectorIterator object to retrieve the
result's values one after another, in the order the results have
become available.
\param idx The index of the result we want, wrt collector's order
\param timeout integer telling how long to wait (in seconds)
for the result at index idx to be available, or None (wait
forever)
]
call[name[self]._cond.acquire, parameter[]]
<ast.Try object at 0x7da204567cd0> | keyword[def] identifier[_get_result] ( identifier[self] , identifier[idx] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[self] . identifier[_cond] . identifier[acquire] ()
keyword[try] :
keyword[if] identifier[idx] >= identifier[self] . identifier[_expected] :
keyword[raise] identifier[IndexError]
keyword[elif] identifier[idx] < identifier[len] ( identifier[self] . identifier[_collection] ):
keyword[return] identifier[self] . identifier[_collection] [ identifier[idx] ]
keyword[elif] identifier[idx] != identifier[len] ( identifier[self] . identifier[_collection] ):
keyword[raise] identifier[IndexError] ()
keyword[else] :
identifier[self] . identifier[_cond] . identifier[wait] ( identifier[timeout] = identifier[timeout] )
keyword[try] :
keyword[return] identifier[self] . identifier[_collection] [ identifier[idx] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[TimeoutError] ( literal[string] )
keyword[finally] :
identifier[self] . identifier[_cond] . identifier[release] () | def _get_result(self, idx, timeout=None):
"""Called by the CollectorIterator object to retrieve the
result's values one after another, in the order the results have
become available.
\\param idx The index of the result we want, wrt collector's order
\\param timeout integer telling how long to wait (in seconds)
for the result at index idx to be available, or None (wait
forever)
"""
self._cond.acquire()
try:
if idx >= self._expected:
raise IndexError # depends on [control=['if'], data=[]]
elif idx < len(self._collection):
return self._collection[idx] # depends on [control=['if'], data=['idx']]
elif idx != len(self._collection):
# Violation of the sequence protocol
raise IndexError() # depends on [control=['if'], data=[]]
else:
self._cond.wait(timeout=timeout)
try:
return self._collection[idx] # depends on [control=['try'], data=[]]
except IndexError:
# Still not added !
raise TimeoutError('Timeout while waiting for results') # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
finally:
self._cond.release() |
def make_model(self, grounding_ontology='UN', grounding_threshold=None):
    """Return a networkx MultiDiGraph representing a causal analysis graph.

    Parameters
    ----------
    grounding_ontology : Optional[str]
        The ontology from which the grounding should be taken
        (e.g. UN, FAO)
    grounding_threshold : Optional[float]
        Minimum threshold score for Eidos grounding.

    Returns
    -------
    nx.MultiDiGraph
        The assembled CAG.
    """
    if grounding_threshold is not None:
        self.grounding_threshold = grounding_threshold
    self.grounding_ontology = grounding_ontology
    # Only Influence statements are currently supported as CAG edges.
    influences = [st for st in self.statements if isinstance(st, Influence)]
    # Start from a fresh graph.
    self.CAG = nx.MultiDiGraph()
    for stmt in influences:
        subj_delta = stmt.subj.delta
        obj_delta = stmt.obj.delta
        # A statement is simulable only when both endpoints carry a polarity.
        simulable = (subj_delta['polarity'] is not None
                     and obj_delta['polarity'] is not None)
        # Register both endpoint nodes with their adjectival modifiers.
        for concept, delta in ((stmt.subj.concept, subj_delta),
                               (stmt.obj.concept, obj_delta)):
            self.CAG.add_node(self._node_name(concept),
                              simulable=simulable,
                              mods=delta['adjectives'])
        # Solid edges for fully-polarized statements, dotted otherwise;
        # arrow shape/color encode whether the polarities agree.
        if simulable:
            linestyle = 'solid'
            if subj_delta['polarity'] == obj_delta['polarity']:
                target_arrow_shape, linecolor = ('circle', 'green')
            else:
                target_arrow_shape, linecolor = ('tee', 'maroon')
        else:
            linestyle = 'dotted'
            target_arrow_shape, linecolor = ('triangle', 'maroon')
        # Carry over provenance from the first evidence, tagging it with
        # the evidence text.
        provenance = []
        if stmt.evidence:
            provenance = stmt.evidence[0].annotations.get('provenance', [])
            if provenance:
                provenance[0]['text'] = stmt.evidence[0].text
        self.CAG.add_edge(
            self._node_name(stmt.subj.concept),
            self._node_name(stmt.obj.concept),
            subj_polarity=subj_delta['polarity'],
            subj_adjectives=subj_delta['adjectives'],
            obj_polarity=obj_delta['polarity'],
            obj_adjectives=obj_delta['adjectives'],
            linestyle=linestyle,
            linecolor=linecolor,
            targetArrowShape=target_arrow_shape,
            provenance=provenance,
        )
    return self.CAG
constant[Return a networkx MultiDiGraph representing a causal analysis graph.
Parameters
----------
grounding_ontology : Optional[str]
The ontology from which the grounding should be taken
(e.g. UN, FAO)
grounding_threshold : Optional[float]
Minimum threshold score for Eidos grounding.
Returns
-------
nx.MultiDiGraph
The assembled CAG.
]
if compare[name[grounding_threshold] is_not constant[None]] begin[:]
name[self].grounding_threshold assign[=] name[grounding_threshold]
name[self].grounding_ontology assign[=] name[grounding_ontology]
variable[statements] assign[=] <ast.ListComp object at 0x7da2054a6fb0>
name[self].CAG assign[=] call[name[nx].MultiDiGraph, parameter[]]
for taget[name[s]] in starred[name[statements]] begin[:]
variable[has_both_polarity] assign[=] <ast.BoolOp object at 0x7da1b2347e80>
for taget[tuple[[<ast.Name object at 0x7da1b2344670>, <ast.Name object at 0x7da1b2346bf0>]]] in starred[call[name[zip], parameter[tuple[[<ast.Attribute object at 0x7da1b23442b0>, <ast.Attribute object at 0x7da1b2346500>]], tuple[[<ast.Attribute object at 0x7da1b23469e0>, <ast.Attribute object at 0x7da1b23440d0>]]]]] begin[:]
call[name[self].CAG.add_node, parameter[call[name[self]._node_name, parameter[name[node]]]]]
variable[linestyle] assign[=] <ast.IfExp object at 0x7da1b2344a30>
if name[has_both_polarity] begin[:]
variable[same_polarity] assign[=] compare[call[name[s].subj.delta][constant[polarity]] equal[==] call[name[s].obj.delta][constant[polarity]]]
if name[same_polarity] begin[:]
<ast.Tuple object at 0x7da1b2346110> assign[=] tuple[[<ast.Constant object at 0x7da1b2347640>, <ast.Constant object at 0x7da1b2345ed0>]]
variable[provenance] assign[=] list[[]]
if name[s].evidence begin[:]
variable[provenance] assign[=] call[call[name[s].evidence][constant[0]].annotations.get, parameter[constant[provenance], list[[]]]]
if name[provenance] begin[:]
call[call[name[provenance]][constant[0]]][constant[text]] assign[=] call[name[s].evidence][constant[0]].text
call[name[self].CAG.add_edge, parameter[call[name[self]._node_name, parameter[name[s].subj.concept]], call[name[self]._node_name, parameter[name[s].obj.concept]]]]
return[name[self].CAG] | keyword[def] identifier[make_model] ( identifier[self] , identifier[grounding_ontology] = literal[string] , identifier[grounding_threshold] = keyword[None] ):
literal[string]
keyword[if] identifier[grounding_threshold] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[grounding_threshold] = identifier[grounding_threshold]
identifier[self] . identifier[grounding_ontology] = identifier[grounding_ontology]
identifier[statements] =[ identifier[stmt] keyword[for] identifier[stmt] keyword[in] identifier[self] . identifier[statements] keyword[if]
identifier[isinstance] ( identifier[stmt] , identifier[Influence] )]
identifier[self] . identifier[CAG] = identifier[nx] . identifier[MultiDiGraph] ()
keyword[for] identifier[s] keyword[in] identifier[statements] :
identifier[has_both_polarity] =( identifier[s] . identifier[subj] . identifier[delta] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[s] . identifier[obj] . identifier[delta] [ literal[string] ] keyword[is] keyword[not] keyword[None] )
keyword[for] identifier[node] , identifier[delta] keyword[in] identifier[zip] (( identifier[s] . identifier[subj] . identifier[concept] , identifier[s] . identifier[obj] . identifier[concept] ),
( identifier[s] . identifier[subj] . identifier[delta] , identifier[s] . identifier[obj] . identifier[delta] )):
identifier[self] . identifier[CAG] . identifier[add_node] ( identifier[self] . identifier[_node_name] ( identifier[node] ),
identifier[simulable] = identifier[has_both_polarity] ,
identifier[mods] = identifier[delta] [ literal[string] ])
identifier[linestyle] = literal[string] keyword[if] identifier[has_both_polarity] keyword[else] literal[string]
keyword[if] identifier[has_both_polarity] :
identifier[same_polarity] =( identifier[s] . identifier[subj] . identifier[delta] [ literal[string] ]==
identifier[s] . identifier[obj] . identifier[delta] [ literal[string] ])
keyword[if] identifier[same_polarity] :
identifier[target_arrow_shape] , identifier[linecolor] =( literal[string] , literal[string] )
keyword[else] :
identifier[target_arrow_shape] , identifier[linecolor] =( literal[string] , literal[string] )
keyword[else] :
identifier[target_arrow_shape] , identifier[linecolor] =( literal[string] , literal[string] )
identifier[provenance] =[]
keyword[if] identifier[s] . identifier[evidence] :
identifier[provenance] = identifier[s] . identifier[evidence] [ literal[int] ]. identifier[annotations] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[provenance] :
identifier[provenance] [ literal[int] ][ literal[string] ]= identifier[s] . identifier[evidence] [ literal[int] ]. identifier[text]
identifier[self] . identifier[CAG] . identifier[add_edge] (
identifier[self] . identifier[_node_name] ( identifier[s] . identifier[subj] . identifier[concept] ),
identifier[self] . identifier[_node_name] ( identifier[s] . identifier[obj] . identifier[concept] ),
identifier[subj_polarity] = identifier[s] . identifier[subj] . identifier[delta] [ literal[string] ],
identifier[subj_adjectives] = identifier[s] . identifier[subj] . identifier[delta] [ literal[string] ],
identifier[obj_polarity] = identifier[s] . identifier[obj] . identifier[delta] [ literal[string] ],
identifier[obj_adjectives] = identifier[s] . identifier[obj] . identifier[delta] [ literal[string] ],
identifier[linestyle] = identifier[linestyle] ,
identifier[linecolor] = identifier[linecolor] ,
identifier[targetArrowShape] = identifier[target_arrow_shape] ,
identifier[provenance] = identifier[provenance] ,
)
keyword[return] identifier[self] . identifier[CAG] | def make_model(self, grounding_ontology='UN', grounding_threshold=None):
"""Return a networkx MultiDiGraph representing a causal analysis graph.
Parameters
----------
grounding_ontology : Optional[str]
The ontology from which the grounding should be taken
(e.g. UN, FAO)
grounding_threshold : Optional[float]
Minimum threshold score for Eidos grounding.
Returns
-------
nx.MultiDiGraph
The assembled CAG.
"""
if grounding_threshold is not None:
self.grounding_threshold = grounding_threshold # depends on [control=['if'], data=['grounding_threshold']]
self.grounding_ontology = grounding_ontology
# Filter to Influence Statements which are currently supported
statements = [stmt for stmt in self.statements if isinstance(stmt, Influence)]
# Initialize graph
self.CAG = nx.MultiDiGraph()
# Add nodes and edges to the graph
for s in statements:
# Get standardized name of subject and object
# subj, obj = (self._node_name(s.subj), self._node_name(s.obj))
# See if both subject and object have polarities given
has_both_polarity = s.subj.delta['polarity'] is not None and s.obj.delta['polarity'] is not None
# Add the nodes to the graph
for (node, delta) in zip((s.subj.concept, s.obj.concept), (s.subj.delta, s.obj.delta)):
self.CAG.add_node(self._node_name(node), simulable=has_both_polarity, mods=delta['adjectives']) # depends on [control=['for'], data=[]]
# Edge is solid if both nodes have polarity given
linestyle = 'solid' if has_both_polarity else 'dotted'
if has_both_polarity:
same_polarity = s.subj.delta['polarity'] == s.obj.delta['polarity']
if same_polarity:
(target_arrow_shape, linecolor) = ('circle', 'green') # depends on [control=['if'], data=[]]
else:
(target_arrow_shape, linecolor) = ('tee', 'maroon') # depends on [control=['if'], data=[]]
else:
(target_arrow_shape, linecolor) = ('triangle', 'maroon')
# Add edge to the graph with metadata from statement
provenance = []
if s.evidence:
provenance = s.evidence[0].annotations.get('provenance', [])
if provenance:
provenance[0]['text'] = s.evidence[0].text # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.CAG.add_edge(self._node_name(s.subj.concept), self._node_name(s.obj.concept), subj_polarity=s.subj.delta['polarity'], subj_adjectives=s.subj.delta['adjectives'], obj_polarity=s.obj.delta['polarity'], obj_adjectives=s.obj.delta['adjectives'], linestyle=linestyle, linecolor=linecolor, targetArrowShape=target_arrow_shape, provenance=provenance) # depends on [control=['for'], data=['s']]
return self.CAG |
def transform_folder(args):
    """
    Transform all the files in the source dataset for the given command and
    save the results as a single pickle file in the destination dataset.

    :param args: tuple with the following arguments:
        - the command name: 'zero', 'one', 'two', ...
        - transforms to apply to wav file
        - full path of the source dataset
        - full path of the destination dataset
    """
    command, (transform, src, dest) = args
    try:
        print(progress.value, "remaining")

        # Apply transformations to all files for this command.
        data = []
        data_dir = os.path.join(src, command)
        for filename in os.listdir(data_dir):
            path = os.path.join(data_dir, filename)
            data.append(transform({'path': path}))

        # Save results. GC is disabled to speed up pickling of a large
        # object graph; re-enable it even if the dump fails, otherwise the
        # worker process would keep running with GC off.
        pickleFile = os.path.join(dest, "{}.pkl".format(command))
        gc.disable()
        try:
            with open(pickleFile, "wb") as f:
                pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        finally:
            gc.enable()

        # Update shared progress counter (multiprocessing-safe).
        with progress.get_lock():
            progress.value -= 1
    except Exception as e:
        # Report failures on stderr instead of crashing the worker pool.
        print(command, e, file=sys.stderr)
        traceback.print_exc()
constant[
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
]
<ast.Tuple object at 0x7da1b0980d30> assign[=] name[args]
<ast.Try object at 0x7da1b0845f00> | keyword[def] identifier[transform_folder] ( identifier[args] ):
literal[string]
identifier[command] ,( identifier[transform] , identifier[src] , identifier[dest] )= identifier[args]
keyword[try] :
identifier[print] ( identifier[progress] . identifier[value] , literal[string] )
identifier[data] =[]
identifier[data_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[src] , identifier[command] )
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[data_dir] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[filename] )
identifier[data] . identifier[append] ( identifier[transform] ({ literal[string] : identifier[path] }))
identifier[pickleFile] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest] , literal[string] . identifier[format] ( identifier[command] ))
identifier[gc] . identifier[disable] ()
keyword[with] identifier[open] ( identifier[pickleFile] , literal[string] ) keyword[as] identifier[f] :
identifier[pickle] . identifier[dump] ( identifier[data] , identifier[f] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
identifier[gc] . identifier[enable] ()
keyword[with] identifier[progress] . identifier[get_lock] ():
identifier[progress] . identifier[value] -= literal[int]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( identifier[command] , identifier[e] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[traceback] . identifier[print_exc] () | def transform_folder(args):
"""
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
"""
(command, (transform, src, dest)) = args
try:
print(progress.value, 'remaining')
# Apply transformations to all files
data = []
data_dir = os.path.join(src, command)
for filename in os.listdir(data_dir):
path = os.path.join(data_dir, filename)
data.append(transform({'path': path})) # depends on [control=['for'], data=['filename']]
# Save results
pickleFile = os.path.join(dest, '{}.pkl'.format(command))
gc.disable()
with open(pickleFile, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['f']]
gc.enable()
# Update progress
with progress.get_lock():
progress.value -= 1 # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
print(command, e, file=sys.stderr)
traceback.print_exc() # depends on [control=['except'], data=['e']] |
def set_data(self, data, coll_filter=None):
    """Set model data.

    Stores *data* on the model, derives the displayed collection
    (optionally filtered through *coll_filter* for local data), then
    updates headers, keys, title and lazy row-loading state before
    resetting the model. Handles tuples, lists, sets and dicts, and
    falls back to displaying the attributes of arbitrary objects.
    """
    self._data = data
    data_type = get_type_string(data)
    # Apply the collection filter only for local (non-remote) data of a
    # supported collection type.
    if coll_filter is not None and not self.remote and \
       isinstance(data, (tuple, list, dict, set)):
        data = coll_filter(data)
    self.showndata = data
    # First-column header: "Name" when names are displayed; otherwise
    # "Index" (overridden below with "Key"/"Attribute" where applicable).
    self.header0 = _("Index")
    if self.names:
        self.header0 = _("Name")
    if isinstance(data, tuple):
        self.keys = list(range(len(data)))
        self.title += _("Tuple")
    elif isinstance(data, list):
        self.keys = list(range(len(data)))
        self.title += _("List")
    elif isinstance(data, set):
        self.keys = list(range(len(data)))
        self.title += _("Set")
        # Sets are not indexable; keep an ordered snapshot for row access.
        self._data = list(data)
    elif isinstance(data, dict):
        self.keys = list(data.keys())
        self.title += _("Dictionary")
        if not self.names:
            self.header0 = _("Key")
    else:
        # Arbitrary object: display its attributes through a proxy.
        self.keys = get_object_attrs(data)
        self._data = data = self.showndata = ProxyObject(data)
        if not self.names:
            self.header0 = _("Attribute")
    # Append the element count for real collections; for proxied objects
    # show the type string computed from the original data instead.
    if not isinstance(self._data, ProxyObject):
        self.title += (' (' + str(len(self.keys)) + ' ' +
                       _("elements") + ')')
    else:
        self.title += data_type
    # Load rows lazily when the collection is very large.
    self.total_rows = len(self.keys)
    if self.total_rows > LARGE_NROWS:
        self.rows_loaded = ROWS_TO_LOAD
    else:
        self.rows_loaded = self.total_rows
    self.sig_setting_data.emit()
    self.set_size_and_type()
    self.reset()
constant[Set model data]
name[self]._data assign[=] name[data]
variable[data_type] assign[=] call[name[get_type_string], parameter[name[data]]]
if <ast.BoolOp object at 0x7da18c4ce110> begin[:]
variable[data] assign[=] call[name[coll_filter], parameter[name[data]]]
name[self].showndata assign[=] name[data]
name[self].header0 assign[=] call[name[_], parameter[constant[Index]]]
if name[self].names begin[:]
name[self].header0 assign[=] call[name[_], parameter[constant[Name]]]
if call[name[isinstance], parameter[name[data], name[tuple]]] begin[:]
name[self].keys assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[data]]]]]]]
<ast.AugAssign object at 0x7da18c4cc070>
if <ast.UnaryOp object at 0x7da20c7c92d0> begin[:]
<ast.AugAssign object at 0x7da18f7218d0>
name[self].total_rows assign[=] call[name[len], parameter[name[self].keys]]
if compare[name[self].total_rows greater[>] name[LARGE_NROWS]] begin[:]
name[self].rows_loaded assign[=] name[ROWS_TO_LOAD]
call[name[self].sig_setting_data.emit, parameter[]]
call[name[self].set_size_and_type, parameter[]]
call[name[self].reset, parameter[]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[data] , identifier[coll_filter] = keyword[None] ):
literal[string]
identifier[self] . identifier[_data] = identifier[data]
identifier[data_type] = identifier[get_type_string] ( identifier[data] )
keyword[if] identifier[coll_filter] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[remote] keyword[and] identifier[isinstance] ( identifier[data] ,( identifier[tuple] , identifier[list] , identifier[dict] , identifier[set] )):
identifier[data] = identifier[coll_filter] ( identifier[data] )
identifier[self] . identifier[showndata] = identifier[data]
identifier[self] . identifier[header0] = identifier[_] ( literal[string] )
keyword[if] identifier[self] . identifier[names] :
identifier[self] . identifier[header0] = identifier[_] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[tuple] ):
identifier[self] . identifier[keys] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[data] )))
identifier[self] . identifier[title] += identifier[_] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ):
identifier[self] . identifier[keys] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[data] )))
identifier[self] . identifier[title] += identifier[_] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[set] ):
identifier[self] . identifier[keys] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[data] )))
identifier[self] . identifier[title] += identifier[_] ( literal[string] )
identifier[self] . identifier[_data] = identifier[list] ( identifier[data] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[self] . identifier[keys] = identifier[list] ( identifier[data] . identifier[keys] ())
identifier[self] . identifier[title] += identifier[_] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[names] :
identifier[self] . identifier[header0] = identifier[_] ( literal[string] )
keyword[else] :
identifier[self] . identifier[keys] = identifier[get_object_attrs] ( identifier[data] )
identifier[self] . identifier[_data] = identifier[data] = identifier[self] . identifier[showndata] = identifier[ProxyObject] ( identifier[data] )
keyword[if] keyword[not] identifier[self] . identifier[names] :
identifier[self] . identifier[header0] = identifier[_] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[_data] , identifier[ProxyObject] ):
identifier[self] . identifier[title] +=( literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[keys] ))+ literal[string] +
identifier[_] ( literal[string] )+ literal[string] )
keyword[else] :
identifier[self] . identifier[title] += identifier[data_type]
identifier[self] . identifier[total_rows] = identifier[len] ( identifier[self] . identifier[keys] )
keyword[if] identifier[self] . identifier[total_rows] > identifier[LARGE_NROWS] :
identifier[self] . identifier[rows_loaded] = identifier[ROWS_TO_LOAD]
keyword[else] :
identifier[self] . identifier[rows_loaded] = identifier[self] . identifier[total_rows]
identifier[self] . identifier[sig_setting_data] . identifier[emit] ()
identifier[self] . identifier[set_size_and_type] ()
identifier[self] . identifier[reset] () | def set_data(self, data, coll_filter=None):
"""Set model data"""
self._data = data
data_type = get_type_string(data)
if coll_filter is not None and (not self.remote) and isinstance(data, (tuple, list, dict, set)):
data = coll_filter(data) # depends on [control=['if'], data=[]]
self.showndata = data
self.header0 = _('Index')
if self.names:
self.header0 = _('Name') # depends on [control=['if'], data=[]]
if isinstance(data, tuple):
self.keys = list(range(len(data)))
self.title += _('Tuple') # depends on [control=['if'], data=[]]
elif isinstance(data, list):
self.keys = list(range(len(data)))
self.title += _('List') # depends on [control=['if'], data=[]]
elif isinstance(data, set):
self.keys = list(range(len(data)))
self.title += _('Set')
self._data = list(data) # depends on [control=['if'], data=[]]
elif isinstance(data, dict):
self.keys = list(data.keys())
self.title += _('Dictionary')
if not self.names:
self.header0 = _('Key') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self.keys = get_object_attrs(data)
self._data = data = self.showndata = ProxyObject(data)
if not self.names:
self.header0 = _('Attribute') # depends on [control=['if'], data=[]]
if not isinstance(self._data, ProxyObject):
self.title += ' (' + str(len(self.keys)) + ' ' + _('elements') + ')' # depends on [control=['if'], data=[]]
else:
self.title += data_type
self.total_rows = len(self.keys)
if self.total_rows > LARGE_NROWS:
self.rows_loaded = ROWS_TO_LOAD # depends on [control=['if'], data=[]]
else:
self.rows_loaded = self.total_rows
self.sig_setting_data.emit()
self.set_size_and_type()
self.reset() |
def createDdbTable(region=None, table="credential-store", **kwargs):
    '''
    create the secret store table in DDB in the specified region
    '''
    session = get_session(**kwargs)
    dynamodb = session.resource("dynamodb", region_name=region)

    # Nothing to do when the table is already present.
    if table in (t.name for t in dynamodb.tables.all()):
        print("Credential Store table already exists")
        return

    print("Creating table...")
    # Composite primary key: secret name (partition) + version (sort).
    key_schema = [
        {"AttributeName": "name", "KeyType": "HASH"},
        {"AttributeName": "version", "KeyType": "RANGE"},
    ]
    attribute_definitions = [
        {"AttributeName": "name", "AttributeType": "S"},
        {"AttributeName": "version", "AttributeType": "S"},
    ]
    dynamodb.create_table(
        TableName=table,
        KeySchema=key_schema,
        AttributeDefinitions=attribute_definitions,
        ProvisionedThroughput={"ReadCapacityUnits": 1,
                               "WriteCapacityUnits": 1},
    )

    print("Waiting for table to be created...")
    client = session.client("dynamodb", region_name=region)
    response = client.describe_table(TableName=table)
    client.get_waiter("table_exists").wait(TableName=table)

    print("Adding tag...")
    client.tag_resource(
        ResourceArn=response["Table"]["TableArn"],
        Tags=[{'Key': "Name", 'Value': "credstash"}],
    )
    print("Table has been created. "
          "Go read the README about how to create your KMS key")
constant[
create the secret store table in DDB in the specified region
]
variable[session] assign[=] call[name[get_session], parameter[]]
variable[dynamodb] assign[=] call[name[session].resource, parameter[constant[dynamodb]]]
if compare[name[table] in <ast.GeneratorExp object at 0x7da18bc72500>] begin[:]
call[name[print], parameter[constant[Credential Store table already exists]]]
return[None]
call[name[print], parameter[constant[Creating table...]]]
call[name[dynamodb].create_table, parameter[]]
call[name[print], parameter[constant[Waiting for table to be created...]]]
variable[client] assign[=] call[name[session].client, parameter[constant[dynamodb]]]
variable[response] assign[=] call[name[client].describe_table, parameter[]]
call[call[name[client].get_waiter, parameter[constant[table_exists]]].wait, parameter[]]
call[name[print], parameter[constant[Adding tag...]]]
call[name[client].tag_resource, parameter[]]
call[name[print], parameter[constant[Table has been created. Go read the README about how to create your KMS key]]] | keyword[def] identifier[createDdbTable] ( identifier[region] = keyword[None] , identifier[table] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[session] = identifier[get_session] (** identifier[kwargs] )
identifier[dynamodb] = identifier[session] . identifier[resource] ( literal[string] , identifier[region_name] = identifier[region] )
keyword[if] identifier[table] keyword[in] ( identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[dynamodb] . identifier[tables] . identifier[all] ()):
identifier[print] ( literal[string] )
keyword[return]
identifier[print] ( literal[string] )
identifier[dynamodb] . identifier[create_table] (
identifier[TableName] = identifier[table] ,
identifier[KeySchema] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
},
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
],
identifier[AttributeDefinitions] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
},
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
},
],
identifier[ProvisionedThroughput] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
)
identifier[print] ( literal[string] )
identifier[client] = identifier[session] . identifier[client] ( literal[string] , identifier[region_name] = identifier[region] )
identifier[response] = identifier[client] . identifier[describe_table] ( identifier[TableName] = identifier[table] )
identifier[client] . identifier[get_waiter] ( literal[string] ). identifier[wait] ( identifier[TableName] = identifier[table] )
identifier[print] ( literal[string] )
identifier[client] . identifier[tag_resource] (
identifier[ResourceArn] = identifier[response] [ literal[string] ][ literal[string] ],
identifier[Tags] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string]
},
]
)
identifier[print] ( literal[string]
literal[string] ) | def createDdbTable(region=None, table='credential-store', **kwargs):
"""
create the secret store table in DDB in the specified region
"""
session = get_session(**kwargs)
dynamodb = session.resource('dynamodb', region_name=region)
if table in (t.name for t in dynamodb.tables.all()):
print('Credential Store table already exists')
return # depends on [control=['if'], data=[]]
print('Creating table...')
dynamodb.create_table(TableName=table, KeySchema=[{'AttributeName': 'name', 'KeyType': 'HASH'}, {'AttributeName': 'version', 'KeyType': 'RANGE'}], AttributeDefinitions=[{'AttributeName': 'name', 'AttributeType': 'S'}, {'AttributeName': 'version', 'AttributeType': 'S'}], ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})
print('Waiting for table to be created...')
client = session.client('dynamodb', region_name=region)
response = client.describe_table(TableName=table)
client.get_waiter('table_exists').wait(TableName=table)
print('Adding tag...')
client.tag_resource(ResourceArn=response['Table']['TableArn'], Tags=[{'Key': 'Name', 'Value': 'credstash'}])
print('Table has been created. Go read the README about how to create your KMS key') |
def previous_sibling(self):
    """Return the sibling immediately preceding this node, or ``None``.

    For a child node the sibling is looked up among the parent's
    children; for a top-level node it is the closest preceding node of
    the same level in the tree's node list (which should be sorted by
    tree ID).
    """
    if not self.parent:
        # Top-level node: scan backwards through the tree's node list
        # for the nearest node at the same depth.
        ordered = self.tree.nodes
        pos = ordered.index(self)
        if pos == 0:
            return None
        for candidate in reversed(ordered[:pos]):
            if candidate.level == self.level:
                return candidate
        return None
    # Child node: the previous entry among the parent's children.
    children = self.parent.children
    pos = children.index(self)
    return children[pos - 1] if pos > 0 else None
constant[ Returns the previous sibling of the current node.
The previous sibling is searched in the parent node if we are not considering a top-level
node. Otherwise it is searched inside the list of nodes (which should be sorted by tree ID)
that is associated with the considered tree instance.
]
if name[self].parent begin[:]
variable[nodes] assign[=] name[self].parent.children
variable[index] assign[=] call[name[nodes].index, parameter[name[self]]]
variable[sibling] assign[=] <ast.IfExp object at 0x7da18f812f50>
return[name[sibling]] | keyword[def] identifier[previous_sibling] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] :
identifier[nodes] = identifier[self] . identifier[parent] . identifier[children]
identifier[index] = identifier[nodes] . identifier[index] ( identifier[self] )
identifier[sibling] = identifier[nodes] [ identifier[index] - literal[int] ] keyword[if] identifier[index] > literal[int] keyword[else] keyword[None]
keyword[else] :
identifier[nodes] = identifier[self] . identifier[tree] . identifier[nodes]
identifier[index] = identifier[nodes] . identifier[index] ( identifier[self] )
identifier[sibling] =(
identifier[next] (( identifier[n] keyword[for] identifier[n] keyword[in] identifier[reversed] ( identifier[nodes] [: identifier[index] ]) keyword[if] identifier[n] . identifier[level] == identifier[self] . identifier[level] ), keyword[None] )
keyword[if] identifier[index] > literal[int] keyword[else] keyword[None]
)
keyword[return] identifier[sibling] | def previous_sibling(self):
""" Returns the previous sibling of the current node.
The previous sibling is searched in the parent node if we are not considering a top-level
node. Otherwise it is searched inside the list of nodes (which should be sorted by tree ID)
that is associated with the considered tree instance.
"""
if self.parent:
nodes = self.parent.children
index = nodes.index(self)
sibling = nodes[index - 1] if index > 0 else None # depends on [control=['if'], data=[]]
else:
nodes = self.tree.nodes
index = nodes.index(self)
sibling = next((n for n in reversed(nodes[:index]) if n.level == self.level), None) if index > 0 else None
return sibling |
def mkAutoGui(self):
    """
    :summary: automatically generate simple gui in TCL
    """
    builder = GuiBuilder()
    main_page = builder.page("Main")
    # Collect the manipulator handlers for every parameter while
    # registering each parameter on the main page.
    manipulators = []
    for param in self.iterParams(self.top):
        phys_name = self.getParamPhysicalName(param)
        main_page.param(phys_name)
        manipulators.extend(paramManipulatorFns(phys_name))
    # Emit the TCL gui followed by the handler definitions.
    with open(self.guiFile, "w") as out:
        out.write(builder.asTcl())
        for handler in manipulators:
            out.write('\n\n')
            out.write(str(handler))
constant[
:summary: automatically generate simple gui in TCL
]
variable[gui] assign[=] call[name[GuiBuilder], parameter[]]
variable[p0] assign[=] call[name[gui].page, parameter[constant[Main]]]
variable[handlers] assign[=] list[[]]
for taget[name[p]] in starred[call[name[self].iterParams, parameter[name[self].top]]] begin[:]
variable[name] assign[=] call[name[self].getParamPhysicalName, parameter[name[p]]]
call[name[p0].param, parameter[name[name]]]
for taget[name[fn]] in starred[call[name[paramManipulatorFns], parameter[name[name]]]] begin[:]
call[name[handlers].append, parameter[name[fn]]]
with call[name[open], parameter[name[self].guiFile, constant[w]]] begin[:]
call[name[f].write, parameter[call[name[gui].asTcl, parameter[]]]]
for taget[name[h]] in starred[name[handlers]] begin[:]
call[name[f].write, parameter[constant[
]]]
call[name[f].write, parameter[call[name[str], parameter[name[h]]]]] | keyword[def] identifier[mkAutoGui] ( identifier[self] ):
literal[string]
identifier[gui] = identifier[GuiBuilder] ()
identifier[p0] = identifier[gui] . identifier[page] ( literal[string] )
identifier[handlers] =[]
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[iterParams] ( identifier[self] . identifier[top] ):
identifier[name] = identifier[self] . identifier[getParamPhysicalName] ( identifier[p] )
identifier[p0] . identifier[param] ( identifier[name] )
keyword[for] identifier[fn] keyword[in] identifier[paramManipulatorFns] ( identifier[name] ):
identifier[handlers] . identifier[append] ( identifier[fn] )
keyword[with] identifier[open] ( identifier[self] . identifier[guiFile] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[gui] . identifier[asTcl] ())
keyword[for] identifier[h] keyword[in] identifier[handlers] :
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( identifier[str] ( identifier[h] )) | def mkAutoGui(self):
"""
:summary: automatically generate simple gui in TCL
"""
gui = GuiBuilder()
p0 = gui.page('Main')
handlers = []
for p in self.iterParams(self.top):
name = self.getParamPhysicalName(p)
p0.param(name)
for fn in paramManipulatorFns(name):
handlers.append(fn) # depends on [control=['for'], data=['fn']] # depends on [control=['for'], data=['p']]
with open(self.guiFile, 'w') as f:
f.write(gui.asTcl())
for h in handlers:
f.write('\n\n')
f.write(str(h)) # depends on [control=['for'], data=['h']] # depends on [control=['with'], data=['f']] |
def get_config_parameter_boolean(config: ConfigParser,
                                 section: str,
                                 param: str,
                                 default: bool) -> bool:
    """
    Get Boolean parameter from ``configparser`` ``.INI`` file.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        default: default value

    Returns:
        parameter value, or ``default`` when the section or the parameter
        is missing, or when the stored value cannot be parsed as a Boolean
    """
    # Local import so the fix does not depend on module-level imports.
    from configparser import NoSectionError

    try:
        value = config.getboolean(section, param)
    except (TypeError, ValueError, NoSectionError, NoOptionError):
        # NoSectionError: the whole [section] is absent -- previously this
        # propagated to the caller instead of falling back to the default.
        # NoOptionError: the section exists but the key does not.
        # ValueError: the stored text is not a recognized Boolean.
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        value = default
    return value
return value | def function[get_config_parameter_boolean, parameter[config, section, param, default]]:
constant[
Get Boolean parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
]
<ast.Try object at 0x7da1b184baf0>
return[name[value]] | keyword[def] identifier[get_config_parameter_boolean] ( identifier[config] : identifier[ConfigParser] ,
identifier[section] : identifier[str] ,
identifier[param] : identifier[str] ,
identifier[default] : identifier[bool] )-> identifier[bool] :
literal[string]
keyword[try] :
identifier[value] = identifier[config] . identifier[getboolean] ( identifier[section] , identifier[param] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[NoOptionError] ):
identifier[log] . identifier[warning] (
literal[string]
literal[string] , identifier[param] , identifier[section] , identifier[default] )
identifier[value] = identifier[default]
keyword[return] identifier[value] | def get_config_parameter_boolean(config: ConfigParser, section: str, param: str, default: bool) -> bool:
"""
Get Boolean parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
"""
try:
value = config.getboolean(section, param) # depends on [control=['try'], data=[]]
except (TypeError, ValueError, NoOptionError):
log.warning('Configuration variable {} not found or improper in section [{}]; using default of {!r}', param, section, default)
value = default # depends on [control=['except'], data=[]]
return value |
def _build_cache(source_file, skip_cache=False):
    """Build the cached taxonomy data.

    The source is parsed either as an RDF/SKOS taxonomy (via rdflib) or,
    when RDF parsing fails, as a plain-text controlled vocabulary with
    one keyword per line.

    :param source_file: source file of the taxonomy, RDF file
    :param skip_cache: if True, build cache will not be
        saved (pickled) - it is saved as <source_file.db>
    :return: tuple ``(single_keywords, composite_keywords)``: two dicts
        mapping keyword short ids to ``KeywordToken`` objects
    :raises TaxonomyError: if the cache directory is unusable or the
        vocabulary file yields no keywords
    """
    store = rdflib.ConjunctiveGraph()
    if skip_cache:
        current_app.logger.info("You requested not to save the cache to disk.")
    else:
        # Prepare and sanity-check the cache directory up front, so we
        # fail early instead of after the expensive parse.
        cache_path = _get_cache_path(source_file)
        cache_dir = os.path.dirname(cache_path)
        # Make sure we have a cache_dir readable and writable.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        if os.access(cache_dir, os.R_OK):
            if not os.access(cache_dir, os.W_OK):
                raise TaxonomyError("Cache directory exists but is not"
                                    " writable. Check your permissions"
                                    " for: %s" % cache_dir)
        else:
            raise TaxonomyError("Cache directory does not exist"
                                " (and could not be created): %s" % cache_dir)
    # NOTE(review): time.clock() was removed in Python 3.8 -- presumably
    # this project targets an older interpreter; confirm before upgrading.
    timer_start = time.clock()
    namespace = None
    single_keywords, composite_keywords = {}, {}
    try:
        current_app.logger.info(
            "Building RDFLib's conjunctive graph from: %s" % source_file)
        try:
            store.parse(source_file)
        except urllib_error.URLError:
            # rdflib could not resolve the path as a URL; retry with an
            # explicit file:// scheme (absolute vs. relative path).
            if source_file[0] == '/':
                store.parse("file://" + source_file)
            else:
                store.parse("file:///" + source_file)
    except rdflib.exceptions.Error as e:
        current_app.logger.exception("Serious error reading RDF file")
        raise
    except (xml.sax.SAXParseException, ImportError) as e:
        # File is not a RDF file. We assume it is a controlled vocabulary.
        current_app.logger.error(e)
        current_app.logger.warning("The ontology file is probably not a valid RDF file. \
Assuming it is a controlled vocabulary file.")
        filestream = open(source_file, "r")
        for line in filestream:
            # One keyword per line; surrounding whitespace is ignored.
            keyword = line.strip()
            kt = KeywordToken(keyword)
            single_keywords[kt.short_id] = kt
        if not len(single_keywords):
            raise TaxonomyError('The ontology file is not well formated')
    else:  # ok, no exception happened
        current_app.logger.info("Now building cache of keywords")
        # File is a RDF file.
        namespace = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
        single_count = 0
        composite_count = 0
        # Every concept carrying a skos:prefLabel becomes a KeywordToken;
        # composite keywords are stored separately from single ones.
        subject_objects = store.subject_objects(namespace["prefLabel"])
        for subject, pref_label in subject_objects:
            kt = KeywordToken(subject, store=store, namespace=namespace)
            if kt.isComposite():
                composite_count += 1
                composite_keywords[kt.short_id] = kt
            else:
                single_keywords[kt.short_id] = kt
                single_count += 1
    # Bundle everything that will be pickled to disk; creation time and
    # the rdflib version allow later cache invalidation checks.
    cached_data = {}
    cached_data["single"] = single_keywords
    cached_data["composite"] = composite_keywords
    cached_data["creation_time"] = time.gmtime()
    cached_data["version_info"] = {'rdflib': rdflib.__version__}
    current_app.logger.debug(
        "Building taxonomy... %d terms built in %.1f sec." %
        (len(single_keywords) + len(composite_keywords),
         time.clock() - timer_start))
    current_app.logger.info(
        "Total count of single keywords: %d "
        % len(single_keywords)
    )
    current_app.logger.info(
        "Total count of composite keywords: %d "
        % len(composite_keywords)
    )
    if not skip_cache:
        cache_path = _get_cache_path(source_file)
        cache_dir = os.path.dirname(cache_path)
        current_app.logger.debug("Writing the cache into: %s" % cache_path)
        # test again, it could have changed
        if os.access(cache_dir, os.R_OK):
            if os.access(cache_dir, os.W_OK):
                # Serialize.
                filestream = None
                try:
                    filestream = open(cache_path, "wb")
                except IOError as msg:
                    # Impossible to write the cache: log and continue --
                    # the in-memory result is still returned to the caller.
                    current_app.logger.error(
                        "Impossible to write cache to '%s'."
                        % cache_path)
                    current_app.logger.error(msg)
                else:
                    current_app.logger.debug(
                        "Writing cache to file %s" % cache_path)
                    cPickle.dump(cached_data, filestream, 1)
                if filestream:
                    filestream.close()
            else:
                raise TaxonomyError("Cache directory exists but is not "
                                    "writable. Check your permissions "
                                    "for: %s" % cache_dir)
        else:
            raise TaxonomyError("Cache directory does not exist"
                                " (and could not be created): %s" % cache_dir)
    # now when the whole taxonomy was parsed,
    # find sub-components of the composite kws
    # it is important to keep this call after the taxonomy was saved,
    # because we don't want to pickle regexes multiple times
    # (as they are must be re-compiled at load time)
    for kt in composite_keywords.values():
        kt.refreshCompositeOf(single_keywords, composite_keywords,
                              store=store, namespace=namespace)
    # house-cleaning
    if store:
        store.close()
    return (single_keywords, composite_keywords)
return (single_keywords, composite_keywords) | def function[_build_cache, parameter[source_file, skip_cache]]:
constant[Build the cached data.
Either by parsing the RDF taxonomy file or a vocabulary file.
:param source_file: source file of the taxonomy, RDF file
:param skip_cache: if True, build cache will not be
saved (pickled) - it is saved as <source_file.db>
]
variable[store] assign[=] call[name[rdflib].ConjunctiveGraph, parameter[]]
if name[skip_cache] begin[:]
call[name[current_app].logger.info, parameter[constant[You requested not to save the cache to disk.]]]
variable[timer_start] assign[=] call[name[time].clock, parameter[]]
variable[namespace] assign[=] constant[None]
<ast.Tuple object at 0x7da2044c3370> assign[=] tuple[[<ast.Dict object at 0x7da2044c1cf0>, <ast.Dict object at 0x7da2044c3f70>]]
<ast.Try object at 0x7da2044c0fa0>
variable[cached_data] assign[=] dictionary[[], []]
call[name[cached_data]][constant[single]] assign[=] name[single_keywords]
call[name[cached_data]][constant[composite]] assign[=] name[composite_keywords]
call[name[cached_data]][constant[creation_time]] assign[=] call[name[time].gmtime, parameter[]]
call[name[cached_data]][constant[version_info]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90b50>], [<ast.Attribute object at 0x7da18fe91c60>]]
call[name[current_app].logger.debug, parameter[binary_operation[constant[Building taxonomy... %d terms built in %.1f sec.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18fe91c90>, <ast.BinOp object at 0x7da18fe934c0>]]]]]
call[name[current_app].logger.info, parameter[binary_operation[constant[Total count of single keywords: %d ] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[single_keywords]]]]]]
call[name[current_app].logger.info, parameter[binary_operation[constant[Total count of composite keywords: %d ] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[composite_keywords]]]]]]
if <ast.UnaryOp object at 0x7da18fe927a0> begin[:]
variable[cache_path] assign[=] call[name[_get_cache_path], parameter[name[source_file]]]
variable[cache_dir] assign[=] call[name[os].path.dirname, parameter[name[cache_path]]]
call[name[current_app].logger.debug, parameter[binary_operation[constant[Writing the cache into: %s] <ast.Mod object at 0x7da2590d6920> name[cache_path]]]]
if call[name[os].access, parameter[name[cache_dir], name[os].R_OK]] begin[:]
if call[name[os].access, parameter[name[cache_dir], name[os].W_OK]] begin[:]
variable[filestream] assign[=] constant[None]
<ast.Try object at 0x7da207f998d0>
if name[filestream] begin[:]
call[name[filestream].close, parameter[]]
for taget[name[kt]] in starred[call[name[composite_keywords].values, parameter[]]] begin[:]
call[name[kt].refreshCompositeOf, parameter[name[single_keywords], name[composite_keywords]]]
if name[store] begin[:]
call[name[store].close, parameter[]]
return[tuple[[<ast.Name object at 0x7da207f9ab60>, <ast.Name object at 0x7da207f99a20>]]] | keyword[def] identifier[_build_cache] ( identifier[source_file] , identifier[skip_cache] = keyword[False] ):
literal[string]
identifier[store] = identifier[rdflib] . identifier[ConjunctiveGraph] ()
keyword[if] identifier[skip_cache] :
identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
identifier[cache_path] = identifier[_get_cache_path] ( identifier[source_file] )
identifier[cache_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[cache_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[cache_dir] ):
identifier[os] . identifier[makedirs] ( identifier[cache_dir] )
keyword[if] identifier[os] . identifier[access] ( identifier[cache_dir] , identifier[os] . identifier[R_OK] ):
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[cache_dir] , identifier[os] . identifier[W_OK] ):
keyword[raise] identifier[TaxonomyError] ( literal[string]
literal[string]
literal[string] % identifier[cache_dir] )
keyword[else] :
keyword[raise] identifier[TaxonomyError] ( literal[string]
literal[string] % identifier[cache_dir] )
identifier[timer_start] = identifier[time] . identifier[clock] ()
identifier[namespace] = keyword[None]
identifier[single_keywords] , identifier[composite_keywords] ={},{}
keyword[try] :
identifier[current_app] . identifier[logger] . identifier[info] (
literal[string] % identifier[source_file] )
keyword[try] :
identifier[store] . identifier[parse] ( identifier[source_file] )
keyword[except] identifier[urllib_error] . identifier[URLError] :
keyword[if] identifier[source_file] [ literal[int] ]== literal[string] :
identifier[store] . identifier[parse] ( literal[string] + identifier[source_file] )
keyword[else] :
identifier[store] . identifier[parse] ( literal[string] + identifier[source_file] )
keyword[except] identifier[rdflib] . identifier[exceptions] . identifier[Error] keyword[as] identifier[e] :
identifier[current_app] . identifier[logger] . identifier[exception] ( literal[string] )
keyword[raise]
keyword[except] ( identifier[xml] . identifier[sax] . identifier[SAXParseException] , identifier[ImportError] ) keyword[as] identifier[e] :
identifier[current_app] . identifier[logger] . identifier[error] ( identifier[e] )
identifier[current_app] . identifier[logger] . identifier[warning] ( literal[string] )
identifier[filestream] = identifier[open] ( identifier[source_file] , literal[string] )
keyword[for] identifier[line] keyword[in] identifier[filestream] :
identifier[keyword] = identifier[line] . identifier[strip] ()
identifier[kt] = identifier[KeywordToken] ( identifier[keyword] )
identifier[single_keywords] [ identifier[kt] . identifier[short_id] ]= identifier[kt]
keyword[if] keyword[not] identifier[len] ( identifier[single_keywords] ):
keyword[raise] identifier[TaxonomyError] ( literal[string] )
keyword[else] :
identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] )
identifier[namespace] = identifier[rdflib] . identifier[Namespace] ( literal[string] )
identifier[single_count] = literal[int]
identifier[composite_count] = literal[int]
identifier[subject_objects] = identifier[store] . identifier[subject_objects] ( identifier[namespace] [ literal[string] ])
keyword[for] identifier[subject] , identifier[pref_label] keyword[in] identifier[subject_objects] :
identifier[kt] = identifier[KeywordToken] ( identifier[subject] , identifier[store] = identifier[store] , identifier[namespace] = identifier[namespace] )
keyword[if] identifier[kt] . identifier[isComposite] ():
identifier[composite_count] += literal[int]
identifier[composite_keywords] [ identifier[kt] . identifier[short_id] ]= identifier[kt]
keyword[else] :
identifier[single_keywords] [ identifier[kt] . identifier[short_id] ]= identifier[kt]
identifier[single_count] += literal[int]
identifier[cached_data] ={}
identifier[cached_data] [ literal[string] ]= identifier[single_keywords]
identifier[cached_data] [ literal[string] ]= identifier[composite_keywords]
identifier[cached_data] [ literal[string] ]= identifier[time] . identifier[gmtime] ()
identifier[cached_data] [ literal[string] ]={ literal[string] : identifier[rdflib] . identifier[__version__] }
identifier[current_app] . identifier[logger] . identifier[debug] (
literal[string] %
( identifier[len] ( identifier[single_keywords] )+ identifier[len] ( identifier[composite_keywords] ),
identifier[time] . identifier[clock] ()- identifier[timer_start] ))
identifier[current_app] . identifier[logger] . identifier[info] (
literal[string]
% identifier[len] ( identifier[single_keywords] )
)
identifier[current_app] . identifier[logger] . identifier[info] (
literal[string]
% identifier[len] ( identifier[composite_keywords] )
)
keyword[if] keyword[not] identifier[skip_cache] :
identifier[cache_path] = identifier[_get_cache_path] ( identifier[source_file] )
identifier[cache_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[cache_path] )
identifier[current_app] . identifier[logger] . identifier[debug] ( literal[string] % identifier[cache_path] )
keyword[if] identifier[os] . identifier[access] ( identifier[cache_dir] , identifier[os] . identifier[R_OK] ):
keyword[if] identifier[os] . identifier[access] ( identifier[cache_dir] , identifier[os] . identifier[W_OK] ):
identifier[filestream] = keyword[None]
keyword[try] :
identifier[filestream] = identifier[open] ( identifier[cache_path] , literal[string] )
keyword[except] identifier[IOError] keyword[as] identifier[msg] :
identifier[current_app] . identifier[logger] . identifier[error] (
literal[string]
% identifier[cache_path] )
identifier[current_app] . identifier[logger] . identifier[error] ( identifier[msg] )
keyword[else] :
identifier[current_app] . identifier[logger] . identifier[debug] (
literal[string] % identifier[cache_path] )
identifier[cPickle] . identifier[dump] ( identifier[cached_data] , identifier[filestream] , literal[int] )
keyword[if] identifier[filestream] :
identifier[filestream] . identifier[close] ()
keyword[else] :
keyword[raise] identifier[TaxonomyError] ( literal[string]
literal[string]
literal[string] % identifier[cache_dir] )
keyword[else] :
keyword[raise] identifier[TaxonomyError] ( literal[string]
literal[string] % identifier[cache_dir] )
keyword[for] identifier[kt] keyword[in] identifier[composite_keywords] . identifier[values] ():
identifier[kt] . identifier[refreshCompositeOf] ( identifier[single_keywords] , identifier[composite_keywords] ,
identifier[store] = identifier[store] , identifier[namespace] = identifier[namespace] )
keyword[if] identifier[store] :
identifier[store] . identifier[close] ()
keyword[return] ( identifier[single_keywords] , identifier[composite_keywords] ) | def _build_cache(source_file, skip_cache=False):
"""Build the cached data.
Either by parsing the RDF taxonomy file or a vocabulary file.
:param source_file: source file of the taxonomy, RDF file
:param skip_cache: if True, build cache will not be
saved (pickled) - it is saved as <source_file.db>
"""
store = rdflib.ConjunctiveGraph()
if skip_cache:
current_app.logger.info('You requested not to save the cache to disk.') # depends on [control=['if'], data=[]]
else:
cache_path = _get_cache_path(source_file)
cache_dir = os.path.dirname(cache_path)
# Make sure we have a cache_dir readable and writable.
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir) # depends on [control=['if'], data=[]]
if os.access(cache_dir, os.R_OK):
if not os.access(cache_dir, os.W_OK):
raise TaxonomyError('Cache directory exists but is not writable. Check your permissions for: %s' % cache_dir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise TaxonomyError('Cache directory does not exist (and could not be created): %s' % cache_dir)
timer_start = time.clock()
namespace = None
(single_keywords, composite_keywords) = ({}, {})
try:
current_app.logger.info("Building RDFLib's conjunctive graph from: %s" % source_file)
try:
store.parse(source_file) # depends on [control=['try'], data=[]]
except urllib_error.URLError:
if source_file[0] == '/':
store.parse('file://' + source_file) # depends on [control=['if'], data=[]]
else:
store.parse('file:///' + source_file) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except rdflib.exceptions.Error as e:
current_app.logger.exception('Serious error reading RDF file')
raise # depends on [control=['except'], data=[]]
except (xml.sax.SAXParseException, ImportError) as e:
# File is not a RDF file. We assume it is a controlled vocabulary.
current_app.logger.error(e)
current_app.logger.warning('The ontology file is probably not a valid RDF file. Assuming it is a controlled vocabulary file.')
filestream = open(source_file, 'r')
for line in filestream:
keyword = line.strip()
kt = KeywordToken(keyword)
single_keywords[kt.short_id] = kt # depends on [control=['for'], data=['line']]
if not len(single_keywords):
raise TaxonomyError('The ontology file is not well formated') # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
else: # ok, no exception happened
current_app.logger.info('Now building cache of keywords')
# File is a RDF file.
namespace = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
single_count = 0
composite_count = 0
subject_objects = store.subject_objects(namespace['prefLabel'])
for (subject, pref_label) in subject_objects:
kt = KeywordToken(subject, store=store, namespace=namespace)
if kt.isComposite():
composite_count += 1
composite_keywords[kt.short_id] = kt # depends on [control=['if'], data=[]]
else:
single_keywords[kt.short_id] = kt
single_count += 1 # depends on [control=['for'], data=[]]
cached_data = {}
cached_data['single'] = single_keywords
cached_data['composite'] = composite_keywords
cached_data['creation_time'] = time.gmtime()
cached_data['version_info'] = {'rdflib': rdflib.__version__}
current_app.logger.debug('Building taxonomy... %d terms built in %.1f sec.' % (len(single_keywords) + len(composite_keywords), time.clock() - timer_start))
current_app.logger.info('Total count of single keywords: %d ' % len(single_keywords))
current_app.logger.info('Total count of composite keywords: %d ' % len(composite_keywords))
if not skip_cache:
cache_path = _get_cache_path(source_file)
cache_dir = os.path.dirname(cache_path)
current_app.logger.debug('Writing the cache into: %s' % cache_path)
# test again, it could have changed
if os.access(cache_dir, os.R_OK):
if os.access(cache_dir, os.W_OK):
# Serialize.
filestream = None
try:
filestream = open(cache_path, 'wb') # depends on [control=['try'], data=[]]
except IOError as msg:
# Impossible to write the cache.
current_app.logger.error("Impossible to write cache to '%s'." % cache_path)
current_app.logger.error(msg) # depends on [control=['except'], data=['msg']]
else:
current_app.logger.debug('Writing cache to file %s' % cache_path)
cPickle.dump(cached_data, filestream, 1)
if filestream:
filestream.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise TaxonomyError('Cache directory exists but is not writable. Check your permissions for: %s' % cache_dir) # depends on [control=['if'], data=[]]
else:
raise TaxonomyError('Cache directory does not exist (and could not be created): %s' % cache_dir) # depends on [control=['if'], data=[]]
# now when the whole taxonomy was parsed,
# find sub-components of the composite kws
# it is important to keep this call after the taxonomy was saved,
# because we don't want to pickle regexes multiple times
# (as they are must be re-compiled at load time)
for kt in composite_keywords.values():
kt.refreshCompositeOf(single_keywords, composite_keywords, store=store, namespace=namespace) # depends on [control=['for'], data=['kt']]
# house-cleaning
if store:
store.close() # depends on [control=['if'], data=[]]
return (single_keywords, composite_keywords) |
def deblind(rInv, y):
    """
    Strip the blinding applied with ephemeral key @rInv from the
    (intermediate result) @y \in Gt.
    """
    # Type-check both operands before exponentiating.
    assertScalarType(rInv)
    assertType(y, GtElement)
    # Raising to the inverse scalar undoes the earlier blinding exponent.
    deblinded = y ** rInv
    return deblinded
return y ** rInv | def function[deblind, parameter[rInv, y]]:
constant[
Removes blinding using ephemeral key @rInv on (intermediate result)
@y \in Gt.
]
call[name[assertScalarType], parameter[name[rInv]]]
call[name[assertType], parameter[name[y], name[GtElement]]]
return[binary_operation[name[y] ** name[rInv]]] | keyword[def] identifier[deblind] ( identifier[rInv] , identifier[y] ):
literal[string]
identifier[assertScalarType] ( identifier[rInv] )
identifier[assertType] ( identifier[y] , identifier[GtElement] )
keyword[return] identifier[y] ** identifier[rInv] | def deblind(rInv, y):
"""
Removes blinding using ephemeral key @rInv on (intermediate result)
@y \\in Gt.
"""
# Verify types, then deblind using the values provided.
assertScalarType(rInv)
assertType(y, GtElement)
return y ** rInv |
def datetime_to_knx(datetimeval, clock_synced_external=1):
    """Convert a Python timestamp to an 8 byte KNX time and date object"""
    year = datetimeval.year
    if (year < 1900) or (year > 2155):
        raise KNXException("Only years between 1900 and 2155 supported")

    # European DST window: from the last Sunday in March up to (but not
    # including) the last Sunday in October, compared on naive datetimes.
    march_anchor = datetime(year, 4, 1)
    dst_start = march_anchor - timedelta(days=march_anchor.weekday() + 1)
    october_anchor = datetime(year, 11, 1)
    dst_end = october_anchor - timedelta(days=october_anchor.weekday() + 1)
    in_dst = 1 if dst_start <= datetimeval.replace(tzinfo=None) < dst_end else 0

    weekday = datetimeval.isoweekday()        # 1 = Monday ... 7 = Sunday
    working_day = 1 if weekday < 6 else 0     # Sat/Sun are non-working

    return [
        year - 1900,                          # byte 0: year offset
        datetimeval.month,                    # byte 1
        datetimeval.day,                      # byte 2
        (weekday << 5) + datetimeval.hour,    # byte 3: day-of-week | hour
        datetimeval.minute,                   # byte 4
        datetimeval.second,                   # byte 5
        (working_day << 6) + (1 << 5) + in_dst,  # byte 6: flags
        128 if clock_synced_external else 0,  # byte 7: clock quality
    ]
constant[Convert a Python timestamp to an 8 byte KNX time and date object]
variable[res] assign[=] list[[<ast.Constant object at 0x7da2046228f0>, <ast.Constant object at 0x7da204622140>, <ast.Constant object at 0x7da2046224d0>, <ast.Constant object at 0x7da204620880>, <ast.Constant object at 0x7da2046206a0>, <ast.Constant object at 0x7da204621030>, <ast.Constant object at 0x7da204620640>, <ast.Constant object at 0x7da2046223b0>]]
variable[year] assign[=] name[datetimeval].year
if <ast.BoolOp object at 0x7da2046203d0> begin[:]
<ast.Raise object at 0x7da204622830>
call[name[res]][constant[0]] assign[=] binary_operation[name[year] - constant[1900]]
call[name[res]][constant[1]] assign[=] name[datetimeval].month
call[name[res]][constant[2]] assign[=] name[datetimeval].day
call[name[res]][constant[3]] assign[=] binary_operation[binary_operation[call[name[datetimeval].isoweekday, parameter[]] <ast.LShift object at 0x7da2590d69e0> constant[5]] + name[datetimeval].hour]
call[name[res]][constant[4]] assign[=] name[datetimeval].minute
call[name[res]][constant[5]] assign[=] name[datetimeval].second
if compare[call[name[datetimeval].isoweekday, parameter[]] less[<] constant[6]] begin[:]
variable[is_working_day] assign[=] constant[1]
variable[date1] assign[=] call[name[datetime], parameter[name[year], constant[4], constant[1]]]
variable[dston] assign[=] binary_operation[name[date1] - call[name[timedelta], parameter[]]]
variable[date2] assign[=] call[name[datetime], parameter[name[year], constant[11], constant[1]]]
variable[dstoff] assign[=] binary_operation[name[date2] - call[name[timedelta], parameter[]]]
if compare[name[dston] less_or_equal[<=] call[name[datetimeval].replace, parameter[]]] begin[:]
variable[dst] assign[=] constant[1]
call[name[res]][constant[6]] assign[=] binary_operation[binary_operation[binary_operation[name[is_working_day] <ast.LShift object at 0x7da2590d69e0> constant[6]] + binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> constant[5]]] + name[dst]]
if name[clock_synced_external] begin[:]
call[name[res]][constant[7]] assign[=] constant[128]
return[name[res]] | keyword[def] identifier[datetime_to_knx] ( identifier[datetimeval] , identifier[clock_synced_external] = literal[int] ):
literal[string]
identifier[res] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[year] = identifier[datetimeval] . identifier[year]
keyword[if] ( identifier[year] < literal[int] ) keyword[or] ( identifier[year] > literal[int] ):
keyword[raise] identifier[KNXException] ( literal[string] )
identifier[res] [ literal[int] ]= identifier[year] - literal[int]
identifier[res] [ literal[int] ]= identifier[datetimeval] . identifier[month]
identifier[res] [ literal[int] ]= identifier[datetimeval] . identifier[day]
identifier[res] [ literal[int] ]=( identifier[datetimeval] . identifier[isoweekday] ()<< literal[int] )+ identifier[datetimeval] . identifier[hour]
identifier[res] [ literal[int] ]= identifier[datetimeval] . identifier[minute]
identifier[res] [ literal[int] ]= identifier[datetimeval] . identifier[second]
keyword[if] identifier[datetimeval] . identifier[isoweekday] ()< literal[int] :
identifier[is_working_day] = literal[int]
keyword[else] :
identifier[is_working_day] = literal[int]
identifier[date1] = identifier[datetime] ( identifier[year] , literal[int] , literal[int] )
identifier[dston] = identifier[date1] - identifier[timedelta] ( identifier[days] = identifier[date1] . identifier[weekday] ()+ literal[int] )
identifier[date2] = identifier[datetime] ( identifier[year] , literal[int] , literal[int] )
identifier[dstoff] = identifier[date2] - identifier[timedelta] ( identifier[days] = identifier[date2] . identifier[weekday] ()+ literal[int] )
keyword[if] identifier[dston] <= identifier[datetimeval] . identifier[replace] ( identifier[tzinfo] = keyword[None] )< identifier[dstoff] :
identifier[dst] = literal[int]
keyword[else] :
identifier[dst] = literal[int]
identifier[res] [ literal[int] ]=( identifier[is_working_day] << literal[int] )+( literal[int] << literal[int] )+ identifier[dst]
keyword[if] identifier[clock_synced_external] :
identifier[res] [ literal[int] ]= literal[int]
keyword[else] :
identifier[res] [ literal[int] ]= literal[int]
keyword[return] identifier[res] | def datetime_to_knx(datetimeval, clock_synced_external=1):
"""Convert a Python timestamp to an 8 byte KNX time and date object"""
res = [0, 0, 0, 0, 0, 0, 0, 0]
year = datetimeval.year
if year < 1900 or year > 2155:
raise KNXException('Only years between 1900 and 2155 supported') # depends on [control=['if'], data=[]]
res[0] = year - 1900
res[1] = datetimeval.month
res[2] = datetimeval.day
res[3] = (datetimeval.isoweekday() << 5) + datetimeval.hour
res[4] = datetimeval.minute
res[5] = datetimeval.second
if datetimeval.isoweekday() < 6:
is_working_day = 1 # depends on [control=['if'], data=[]]
else:
is_working_day = 0
# DST starts last Sunday in March
date1 = datetime(year, 4, 1)
dston = date1 - timedelta(days=date1.weekday() + 1)
# ends last Sunday in October
date2 = datetime(year, 11, 1)
dstoff = date2 - timedelta(days=date2.weekday() + 1)
if dston <= datetimeval.replace(tzinfo=None) < dstoff:
dst = 1 # depends on [control=['if'], data=[]]
else:
dst = 0
res[6] = (is_working_day << 6) + (1 << 5) + dst
if clock_synced_external:
res[7] = 128 # depends on [control=['if'], data=[]]
else:
res[7] = 0
return res |
def validate(self, instance, value):
    """Check shape and dtype of vector.

    validate also coerces the vector from valid strings (these
    include ZERO, X, Y, Z, -X, -Y, -Z, EAST, WEST, NORTH, SOUTH, UP,
    and DOWN) and scales it to the given length.

    :param instance: object the property is attached to
    :param value: candidate vector value (tuple/list/ndarray, possibly
        containing direction-name strings)
    :return: the validated value as produced by the parent class
    """
    if not isinstance(value, (tuple, list, np.ndarray)):
        self.error(instance, value)
    if isinstance(value, (tuple, list)):
        # Tuples are immutable, so the in-place coercion below would
        # raise a TypeError; switch to a list copy when (and only when)
        # a string entry actually needs replacing. Lists are still
        # mutated in place, as before.
        if isinstance(value, tuple) and any(
                isinstance(val, string_types) for val in value):
            value = list(value)
        for i, val in enumerate(value):
            if isinstance(val, string_types):
                if val.upper() not in VECTOR_DIRECTIONS:
                    self.error(
                        instance=instance,
                        value=val,
                        extra='This is an invalid Vector3 representation.',
                    )
                value[i] = VECTOR_DIRECTIONS[val.upper()]
    return super(Vector3Array, self).validate(instance, value)
constant[Check shape and dtype of vector
validate also coerces the vector from valid strings (these
include ZERO, X, Y, Z, -X, -Y, -Z, EAST, WEST, NORTH, SOUTH, UP,
and DOWN) and scales it to the given length.
]
if <ast.UnaryOp object at 0x7da1b04bc160> begin[:]
call[name[self].error, parameter[name[instance], name[value]]]
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b04bc760>, <ast.Name object at 0x7da1b04bc190>]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b04bc250>, <ast.Name object at 0x7da1b04bc6a0>]]] in starred[call[name[enumerate], parameter[name[value]]]] begin[:]
if call[name[isinstance], parameter[name[val], name[string_types]]] begin[:]
if compare[call[name[val].upper, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[VECTOR_DIRECTIONS]] begin[:]
call[name[self].error, parameter[]]
call[name[value]][name[i]] assign[=] call[name[VECTOR_DIRECTIONS]][call[name[val].upper, parameter[]]]
return[call[call[name[super], parameter[name[Vector3Array], name[self]]].validate, parameter[name[instance], name[value]]]] | keyword[def] identifier[validate] ( identifier[self] , identifier[instance] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] , identifier[np] . identifier[ndarray] )):
identifier[self] . identifier[error] ( identifier[instance] , identifier[value] )
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] )):
keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[value] ):
keyword[if] identifier[isinstance] ( identifier[val] , identifier[string_types] ):
keyword[if] identifier[val] . identifier[upper] () keyword[not] keyword[in] identifier[VECTOR_DIRECTIONS] :
identifier[self] . identifier[error] (
identifier[instance] = identifier[instance] ,
identifier[value] = identifier[val] ,
identifier[extra] = literal[string] ,
)
identifier[value] [ identifier[i] ]= identifier[VECTOR_DIRECTIONS] [ identifier[val] . identifier[upper] ()]
keyword[return] identifier[super] ( identifier[Vector3Array] , identifier[self] ). identifier[validate] ( identifier[instance] , identifier[value] ) | def validate(self, instance, value):
"""Check shape and dtype of vector
validate also coerces the vector from valid strings (these
include ZERO, X, Y, Z, -X, -Y, -Z, EAST, WEST, NORTH, SOUTH, UP,
and DOWN) and scales it to the given length.
"""
if not isinstance(value, (tuple, list, np.ndarray)):
self.error(instance, value) # depends on [control=['if'], data=[]]
if isinstance(value, (tuple, list)):
for (i, val) in enumerate(value):
if isinstance(val, string_types):
if val.upper() not in VECTOR_DIRECTIONS:
self.error(instance=instance, value=val, extra='This is an invalid Vector3 representation.') # depends on [control=['if'], data=[]]
value[i] = VECTOR_DIRECTIONS[val.upper()] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return super(Vector3Array, self).validate(instance, value) |
def list(self, **params):
"""
Retrieve all orders
Returns all orders available to the user according to the parameters provided
:calls: ``get /orders``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of Orders.
:rtype: list
"""
_, _, orders = self.http_client.get("/orders", params=params)
return orders | def function[list, parameter[self]]:
constant[
Retrieve all orders
Returns all orders available to the user according to the parameters provided
:calls: ``get /orders``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of Orders.
:rtype: list
]
<ast.Tuple object at 0x7da18f7230d0> assign[=] call[name[self].http_client.get, parameter[constant[/orders]]]
return[name[orders]] | keyword[def] identifier[list] ( identifier[self] ,** identifier[params] ):
literal[string]
identifier[_] , identifier[_] , identifier[orders] = identifier[self] . identifier[http_client] . identifier[get] ( literal[string] , identifier[params] = identifier[params] )
keyword[return] identifier[orders] | def list(self, **params):
"""
Retrieve all orders
Returns all orders available to the user according to the parameters provided
:calls: ``get /orders``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of Orders.
:rtype: list
"""
(_, _, orders) = self.http_client.get('/orders', params=params)
return orders |
def _registerPickleType(name, typedef):
'''
Register a type with the specified name. After registration, NamedStruct with this type
(and any sub-types) can be successfully pickled and transfered.
'''
NamedStruct._pickleNames[typedef] = name
NamedStruct._pickleTypes[name] = typedef | def function[_registerPickleType, parameter[name, typedef]]:
constant[
Register a type with the specified name. After registration, NamedStruct with this type
(and any sub-types) can be successfully pickled and transfered.
]
call[name[NamedStruct]._pickleNames][name[typedef]] assign[=] name[name]
call[name[NamedStruct]._pickleTypes][name[name]] assign[=] name[typedef] | keyword[def] identifier[_registerPickleType] ( identifier[name] , identifier[typedef] ):
literal[string]
identifier[NamedStruct] . identifier[_pickleNames] [ identifier[typedef] ]= identifier[name]
identifier[NamedStruct] . identifier[_pickleTypes] [ identifier[name] ]= identifier[typedef] | def _registerPickleType(name, typedef):
"""
Register a type with the specified name. After registration, NamedStruct with this type
(and any sub-types) can be successfully pickled and transfered.
"""
NamedStruct._pickleNames[typedef] = name
NamedStruct._pickleTypes[name] = typedef |
def main_loop():
'''main processing loop, display graphs and maps'''
global grui, last_xlim
while True:
if mestate is None or mestate.exit:
return
while not mestate.input_queue.empty():
line = mestate.input_queue.get()
cmds = line.split(';')
for c in cmds:
process_stdin(c)
for i in range(0, len(grui)):
xlim = grui[i].check_xlim_change()
if xlim is not None and mestate.settings.sync_xzoom:
remlist = []
for j in range(0, len(grui)):
#print("set_xlim: ", j, xlim)
if not grui[j].set_xlim(xlim):
remlist.append(j)
last_xlim = xlim
if len(remlist) > 0:
# remove stale graphs
new_grui = []
for j in range(0, len(grui)):
if j not in remlist:
new_grui.append(grui[j])
grui = new_grui
break
time.sleep(0.1) | def function[main_loop, parameter[]]:
constant[main processing loop, display graphs and maps]
<ast.Global object at 0x7da18f09fd90>
while constant[True] begin[:]
if <ast.BoolOp object at 0x7da18f09dd80> begin[:]
return[None]
while <ast.UnaryOp object at 0x7da18f09ca60> begin[:]
variable[line] assign[=] call[name[mestate].input_queue.get, parameter[]]
variable[cmds] assign[=] call[name[line].split, parameter[constant[;]]]
for taget[name[c]] in starred[name[cmds]] begin[:]
call[name[process_stdin], parameter[name[c]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[grui]]]]]] begin[:]
variable[xlim] assign[=] call[call[name[grui]][name[i]].check_xlim_change, parameter[]]
if <ast.BoolOp object at 0x7da18f09d300> begin[:]
variable[remlist] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[grui]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f09c130> begin[:]
call[name[remlist].append, parameter[name[j]]]
variable[last_xlim] assign[=] name[xlim]
if compare[call[name[len], parameter[name[remlist]]] greater[>] constant[0]] begin[:]
variable[new_grui] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[grui]]]]]] begin[:]
if compare[name[j] <ast.NotIn object at 0x7da2590d7190> name[remlist]] begin[:]
call[name[new_grui].append, parameter[call[name[grui]][name[j]]]]
variable[grui] assign[=] name[new_grui]
break
call[name[time].sleep, parameter[constant[0.1]]] | keyword[def] identifier[main_loop] ():
literal[string]
keyword[global] identifier[grui] , identifier[last_xlim]
keyword[while] keyword[True] :
keyword[if] identifier[mestate] keyword[is] keyword[None] keyword[or] identifier[mestate] . identifier[exit] :
keyword[return]
keyword[while] keyword[not] identifier[mestate] . identifier[input_queue] . identifier[empty] ():
identifier[line] = identifier[mestate] . identifier[input_queue] . identifier[get] ()
identifier[cmds] = identifier[line] . identifier[split] ( literal[string] )
keyword[for] identifier[c] keyword[in] identifier[cmds] :
identifier[process_stdin] ( identifier[c] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[grui] )):
identifier[xlim] = identifier[grui] [ identifier[i] ]. identifier[check_xlim_change] ()
keyword[if] identifier[xlim] keyword[is] keyword[not] keyword[None] keyword[and] identifier[mestate] . identifier[settings] . identifier[sync_xzoom] :
identifier[remlist] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[grui] )):
keyword[if] keyword[not] identifier[grui] [ identifier[j] ]. identifier[set_xlim] ( identifier[xlim] ):
identifier[remlist] . identifier[append] ( identifier[j] )
identifier[last_xlim] = identifier[xlim]
keyword[if] identifier[len] ( identifier[remlist] )> literal[int] :
identifier[new_grui] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[grui] )):
keyword[if] identifier[j] keyword[not] keyword[in] identifier[remlist] :
identifier[new_grui] . identifier[append] ( identifier[grui] [ identifier[j] ])
identifier[grui] = identifier[new_grui]
keyword[break]
identifier[time] . identifier[sleep] ( literal[int] ) | def main_loop():
"""main processing loop, display graphs and maps"""
global grui, last_xlim
while True:
if mestate is None or mestate.exit:
return # depends on [control=['if'], data=[]]
while not mestate.input_queue.empty():
line = mestate.input_queue.get()
cmds = line.split(';')
for c in cmds:
process_stdin(c) # depends on [control=['for'], data=['c']] # depends on [control=['while'], data=[]]
for i in range(0, len(grui)):
xlim = grui[i].check_xlim_change()
if xlim is not None and mestate.settings.sync_xzoom:
remlist = []
for j in range(0, len(grui)):
#print("set_xlim: ", j, xlim)
if not grui[j].set_xlim(xlim):
remlist.append(j) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
last_xlim = xlim
if len(remlist) > 0:
# remove stale graphs
new_grui = []
for j in range(0, len(grui)):
if j not in remlist:
new_grui.append(grui[j]) # depends on [control=['if'], data=['j']] # depends on [control=['for'], data=['j']]
grui = new_grui # depends on [control=['if'], data=[]]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
time.sleep(0.1) # depends on [control=['while'], data=[]] |
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in long_range(1, broadcast - network + 1):
yield self._address_class(network + x) | def function[hosts, parameter[self]]:
constant[Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
]
variable[network] assign[=] call[name[int], parameter[name[self].network_address]]
variable[broadcast] assign[=] call[name[int], parameter[name[self].broadcast_address]]
for taget[name[x]] in starred[call[name[long_range], parameter[constant[1], binary_operation[binary_operation[name[broadcast] - name[network]] + constant[1]]]]] begin[:]
<ast.Yield object at 0x7da18bc70c70> | keyword[def] identifier[hosts] ( identifier[self] ):
literal[string]
identifier[network] = identifier[int] ( identifier[self] . identifier[network_address] )
identifier[broadcast] = identifier[int] ( identifier[self] . identifier[broadcast_address] )
keyword[for] identifier[x] keyword[in] identifier[long_range] ( literal[int] , identifier[broadcast] - identifier[network] + literal[int] ):
keyword[yield] identifier[self] . identifier[_address_class] ( identifier[network] + identifier[x] ) | def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in long_range(1, broadcast - network + 1):
yield self._address_class(network + x) # depends on [control=['for'], data=['x']] |
def parse_mcast_grps(family, grp_attr):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code.
"""
remaining = c_int()
if not grp_attr:
raise BUG
for nla in nla_for_each_nested(grp_attr, remaining):
tb = dict()
err = nla_parse_nested(tb, CTRL_ATTR_MCAST_GRP_MAX, nla, family_grp_policy)
if err < 0:
return err
if not tb[CTRL_ATTR_MCAST_GRP_ID] or not tb[CTRL_ATTR_MCAST_GRP_NAME]:
return -NLE_MISSING_ATTR
id_ = nla_get_u32(tb[CTRL_ATTR_MCAST_GRP_ID])
name = nla_get_string(tb[CTRL_ATTR_MCAST_GRP_NAME])
err = genl_family_add_grp(family, id_, name)
if err < 0:
return err
return 0 | def function[parse_mcast_grps, parameter[family, grp_attr]]:
constant[https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code.
]
variable[remaining] assign[=] call[name[c_int], parameter[]]
if <ast.UnaryOp object at 0x7da1b2871d50> begin[:]
<ast.Raise object at 0x7da1b2871f30>
for taget[name[nla]] in starred[call[name[nla_for_each_nested], parameter[name[grp_attr], name[remaining]]]] begin[:]
variable[tb] assign[=] call[name[dict], parameter[]]
variable[err] assign[=] call[name[nla_parse_nested], parameter[name[tb], name[CTRL_ATTR_MCAST_GRP_MAX], name[nla], name[family_grp_policy]]]
if compare[name[err] less[<] constant[0]] begin[:]
return[name[err]]
if <ast.BoolOp object at 0x7da1b2635840> begin[:]
return[<ast.UnaryOp object at 0x7da1b2637670>]
variable[id_] assign[=] call[name[nla_get_u32], parameter[call[name[tb]][name[CTRL_ATTR_MCAST_GRP_ID]]]]
variable[name] assign[=] call[name[nla_get_string], parameter[call[name[tb]][name[CTRL_ATTR_MCAST_GRP_NAME]]]]
variable[err] assign[=] call[name[genl_family_add_grp], parameter[name[family], name[id_], name[name]]]
if compare[name[err] less[<] constant[0]] begin[:]
return[name[err]]
return[constant[0]] | keyword[def] identifier[parse_mcast_grps] ( identifier[family] , identifier[grp_attr] ):
literal[string]
identifier[remaining] = identifier[c_int] ()
keyword[if] keyword[not] identifier[grp_attr] :
keyword[raise] identifier[BUG]
keyword[for] identifier[nla] keyword[in] identifier[nla_for_each_nested] ( identifier[grp_attr] , identifier[remaining] ):
identifier[tb] = identifier[dict] ()
identifier[err] = identifier[nla_parse_nested] ( identifier[tb] , identifier[CTRL_ATTR_MCAST_GRP_MAX] , identifier[nla] , identifier[family_grp_policy] )
keyword[if] identifier[err] < literal[int] :
keyword[return] identifier[err]
keyword[if] keyword[not] identifier[tb] [ identifier[CTRL_ATTR_MCAST_GRP_ID] ] keyword[or] keyword[not] identifier[tb] [ identifier[CTRL_ATTR_MCAST_GRP_NAME] ]:
keyword[return] - identifier[NLE_MISSING_ATTR]
identifier[id_] = identifier[nla_get_u32] ( identifier[tb] [ identifier[CTRL_ATTR_MCAST_GRP_ID] ])
identifier[name] = identifier[nla_get_string] ( identifier[tb] [ identifier[CTRL_ATTR_MCAST_GRP_NAME] ])
identifier[err] = identifier[genl_family_add_grp] ( identifier[family] , identifier[id_] , identifier[name] )
keyword[if] identifier[err] < literal[int] :
keyword[return] identifier[err]
keyword[return] literal[int] | def parse_mcast_grps(family, grp_attr):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code.
"""
remaining = c_int()
if not grp_attr:
raise BUG # depends on [control=['if'], data=[]]
for nla in nla_for_each_nested(grp_attr, remaining):
tb = dict()
err = nla_parse_nested(tb, CTRL_ATTR_MCAST_GRP_MAX, nla, family_grp_policy)
if err < 0:
return err # depends on [control=['if'], data=['err']]
if not tb[CTRL_ATTR_MCAST_GRP_ID] or not tb[CTRL_ATTR_MCAST_GRP_NAME]:
return -NLE_MISSING_ATTR # depends on [control=['if'], data=[]]
id_ = nla_get_u32(tb[CTRL_ATTR_MCAST_GRP_ID])
name = nla_get_string(tb[CTRL_ATTR_MCAST_GRP_NAME])
err = genl_family_add_grp(family, id_, name)
if err < 0:
return err # depends on [control=['if'], data=['err']] # depends on [control=['for'], data=['nla']]
return 0 |
def push(self, x):
"""
append items to the stack; input can be a single value or a list
"""
if isinstance(x, list):
for item in x:
self.stack.append(item)
else:
self.stack.append(x) | def function[push, parameter[self, x]]:
constant[
append items to the stack; input can be a single value or a list
]
if call[name[isinstance], parameter[name[x], name[list]]] begin[:]
for taget[name[item]] in starred[name[x]] begin[:]
call[name[self].stack.append, parameter[name[item]]] | keyword[def] identifier[push] ( identifier[self] , identifier[x] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[list] ):
keyword[for] identifier[item] keyword[in] identifier[x] :
identifier[self] . identifier[stack] . identifier[append] ( identifier[item] )
keyword[else] :
identifier[self] . identifier[stack] . identifier[append] ( identifier[x] ) | def push(self, x):
"""
append items to the stack; input can be a single value or a list
"""
if isinstance(x, list):
for item in x:
self.stack.append(item) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
else:
self.stack.append(x) |
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes | def function[_minutes_to_exclude, parameter[self]]:
constant[
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
]
variable[market_opens] assign[=] call[name[self]._market_opens.values.astype, parameter[constant[datetime64[m]]]]
variable[market_closes] assign[=] call[name[self]._market_closes.values.astype, parameter[constant[datetime64[m]]]]
variable[minutes_per_day] assign[=] call[binary_operation[name[market_closes] - name[market_opens]].astype, parameter[name[np].int64]]
variable[early_indices] assign[=] call[call[name[np].where, parameter[compare[name[minutes_per_day] not_equal[!=] binary_operation[name[self]._minutes_per_day - constant[1]]]]]][constant[0]]
variable[early_opens] assign[=] call[name[self]._market_opens][name[early_indices]]
variable[early_closes] assign[=] call[name[self]._market_closes][name[early_indices]]
variable[minutes] assign[=] <ast.ListComp object at 0x7da1b1e8efe0>
return[name[minutes]] | keyword[def] identifier[_minutes_to_exclude] ( identifier[self] ):
literal[string]
identifier[market_opens] = identifier[self] . identifier[_market_opens] . identifier[values] . identifier[astype] ( literal[string] )
identifier[market_closes] = identifier[self] . identifier[_market_closes] . identifier[values] . identifier[astype] ( literal[string] )
identifier[minutes_per_day] =( identifier[market_closes] - identifier[market_opens] ). identifier[astype] ( identifier[np] . identifier[int64] )
identifier[early_indices] = identifier[np] . identifier[where] (
identifier[minutes_per_day] != identifier[self] . identifier[_minutes_per_day] - literal[int] )[ literal[int] ]
identifier[early_opens] = identifier[self] . identifier[_market_opens] [ identifier[early_indices] ]
identifier[early_closes] = identifier[self] . identifier[_market_closes] [ identifier[early_indices] ]
identifier[minutes] =[( identifier[market_open] , identifier[early_close] )
keyword[for] identifier[market_open] , identifier[early_close]
keyword[in] identifier[zip] ( identifier[early_opens] , identifier[early_closes] )]
keyword[return] identifier[minutes] | def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close) for (market_open, early_close) in zip(early_opens, early_closes)]
return minutes |
def map_vnics(vm):
"""
maps the vnic on the vm by name
:param vm: virtual machine
:return: dictionary: {'vnic_name': vnic}
"""
return {device.deviceInfo.label: device
for device in vm.config.hardware.device
if isinstance(device, vim.vm.device.VirtualEthernetCard)} | def function[map_vnics, parameter[vm]]:
constant[
maps the vnic on the vm by name
:param vm: virtual machine
:return: dictionary: {'vnic_name': vnic}
]
return[<ast.DictComp object at 0x7da207f9af50>] | keyword[def] identifier[map_vnics] ( identifier[vm] ):
literal[string]
keyword[return] { identifier[device] . identifier[deviceInfo] . identifier[label] : identifier[device]
keyword[for] identifier[device] keyword[in] identifier[vm] . identifier[config] . identifier[hardware] . identifier[device]
keyword[if] identifier[isinstance] ( identifier[device] , identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualEthernetCard] )} | def map_vnics(vm):
"""
maps the vnic on the vm by name
:param vm: virtual machine
:return: dictionary: {'vnic_name': vnic}
"""
return {device.deviceInfo.label: device for device in vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)} |
def set_version(request, response):
"""Set version and revision to response
"""
settings = request.registry.settings
resolver = DottedNameResolver()
# get version config
version_header = settings.get(
'api.version_header',
'X-Version',
)
version_header_value = settings.get('api.version_header_value')
if callable(version_header_value):
version_header_value = version_header_value()
elif version_header_value:
version_header_value = resolver.resolve(version_header_value)
# get revision config
revision_header = settings.get(
'api.revision_header',
'X-Revision',
)
revision_header_value = settings.get('api.revision_header_value')
if callable(revision_header_value):
revision_header_value = revision_header_value()
elif revision_header_value:
revision_header_value = resolver.resolve(revision_header_value)
if version_header and version_header_value:
response.headers[str(version_header)] = str(version_header_value)
if revision_header and revision_header_value:
response.headers[str(revision_header)] = str(revision_header_value) | def function[set_version, parameter[request, response]]:
constant[Set version and revision to response
]
variable[settings] assign[=] name[request].registry.settings
variable[resolver] assign[=] call[name[DottedNameResolver], parameter[]]
variable[version_header] assign[=] call[name[settings].get, parameter[constant[api.version_header], constant[X-Version]]]
variable[version_header_value] assign[=] call[name[settings].get, parameter[constant[api.version_header_value]]]
if call[name[callable], parameter[name[version_header_value]]] begin[:]
variable[version_header_value] assign[=] call[name[version_header_value], parameter[]]
variable[revision_header] assign[=] call[name[settings].get, parameter[constant[api.revision_header], constant[X-Revision]]]
variable[revision_header_value] assign[=] call[name[settings].get, parameter[constant[api.revision_header_value]]]
if call[name[callable], parameter[name[revision_header_value]]] begin[:]
variable[revision_header_value] assign[=] call[name[revision_header_value], parameter[]]
if <ast.BoolOp object at 0x7da1b14721d0> begin[:]
call[name[response].headers][call[name[str], parameter[name[version_header]]]] assign[=] call[name[str], parameter[name[version_header_value]]]
if <ast.BoolOp object at 0x7da20e963a90> begin[:]
call[name[response].headers][call[name[str], parameter[name[revision_header]]]] assign[=] call[name[str], parameter[name[revision_header_value]]] | keyword[def] identifier[set_version] ( identifier[request] , identifier[response] ):
literal[string]
identifier[settings] = identifier[request] . identifier[registry] . identifier[settings]
identifier[resolver] = identifier[DottedNameResolver] ()
identifier[version_header] = identifier[settings] . identifier[get] (
literal[string] ,
literal[string] ,
)
identifier[version_header_value] = identifier[settings] . identifier[get] ( literal[string] )
keyword[if] identifier[callable] ( identifier[version_header_value] ):
identifier[version_header_value] = identifier[version_header_value] ()
keyword[elif] identifier[version_header_value] :
identifier[version_header_value] = identifier[resolver] . identifier[resolve] ( identifier[version_header_value] )
identifier[revision_header] = identifier[settings] . identifier[get] (
literal[string] ,
literal[string] ,
)
identifier[revision_header_value] = identifier[settings] . identifier[get] ( literal[string] )
keyword[if] identifier[callable] ( identifier[revision_header_value] ):
identifier[revision_header_value] = identifier[revision_header_value] ()
keyword[elif] identifier[revision_header_value] :
identifier[revision_header_value] = identifier[resolver] . identifier[resolve] ( identifier[revision_header_value] )
keyword[if] identifier[version_header] keyword[and] identifier[version_header_value] :
identifier[response] . identifier[headers] [ identifier[str] ( identifier[version_header] )]= identifier[str] ( identifier[version_header_value] )
keyword[if] identifier[revision_header] keyword[and] identifier[revision_header_value] :
identifier[response] . identifier[headers] [ identifier[str] ( identifier[revision_header] )]= identifier[str] ( identifier[revision_header_value] ) | def set_version(request, response):
"""Set version and revision to response
"""
settings = request.registry.settings
resolver = DottedNameResolver()
# get version config
version_header = settings.get('api.version_header', 'X-Version')
version_header_value = settings.get('api.version_header_value')
if callable(version_header_value):
version_header_value = version_header_value() # depends on [control=['if'], data=[]]
elif version_header_value:
version_header_value = resolver.resolve(version_header_value) # depends on [control=['if'], data=[]]
# get revision config
revision_header = settings.get('api.revision_header', 'X-Revision')
revision_header_value = settings.get('api.revision_header_value')
if callable(revision_header_value):
revision_header_value = revision_header_value() # depends on [control=['if'], data=[]]
elif revision_header_value:
revision_header_value = resolver.resolve(revision_header_value) # depends on [control=['if'], data=[]]
if version_header and version_header_value:
response.headers[str(version_header)] = str(version_header_value) # depends on [control=['if'], data=[]]
if revision_header and revision_header_value:
response.headers[str(revision_header)] = str(revision_header_value) # depends on [control=['if'], data=[]] |
def _create_sagemaker_model(self, instance_type, accelerator_type=None, tags=None):
"""Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be used for, this is only
used to determine if the image needs GPU support or not.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
tags(List[dict[str, str]]): Optional. The list of tags to add to the model. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation\
/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
"""
container_def = self.prepare_container_def(instance_type, accelerator_type=accelerator_type)
self.name = self.name or utils.name_from_image(container_def['Image'])
enable_network_isolation = self.enable_network_isolation()
self.sagemaker_session.create_model(self.name, self.role,
container_def, vpc_config=self.vpc_config,
enable_network_isolation=enable_network_isolation,
tags=tags) | def function[_create_sagemaker_model, parameter[self, instance_type, accelerator_type, tags]]:
constant[Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be used for, this is only
used to determine if the image needs GPU support or not.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
tags(List[dict[str, str]]): Optional. The list of tags to add to the model. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
]
variable[container_def] assign[=] call[name[self].prepare_container_def, parameter[name[instance_type]]]
name[self].name assign[=] <ast.BoolOp object at 0x7da1b1c1a6e0>
variable[enable_network_isolation] assign[=] call[name[self].enable_network_isolation, parameter[]]
call[name[self].sagemaker_session.create_model, parameter[name[self].name, name[self].role, name[container_def]]] | keyword[def] identifier[_create_sagemaker_model] ( identifier[self] , identifier[instance_type] , identifier[accelerator_type] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[container_def] = identifier[self] . identifier[prepare_container_def] ( identifier[instance_type] , identifier[accelerator_type] = identifier[accelerator_type] )
identifier[self] . identifier[name] = identifier[self] . identifier[name] keyword[or] identifier[utils] . identifier[name_from_image] ( identifier[container_def] [ literal[string] ])
identifier[enable_network_isolation] = identifier[self] . identifier[enable_network_isolation] ()
identifier[self] . identifier[sagemaker_session] . identifier[create_model] ( identifier[self] . identifier[name] , identifier[self] . identifier[role] ,
identifier[container_def] , identifier[vpc_config] = identifier[self] . identifier[vpc_config] ,
identifier[enable_network_isolation] = identifier[enable_network_isolation] ,
identifier[tags] = identifier[tags] ) | def _create_sagemaker_model(self, instance_type, accelerator_type=None, tags=None):
"""Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be used for, this is only
used to determine if the image needs GPU support or not.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
tags(List[dict[str, str]]): Optional. The list of tags to add to the model. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
"""
container_def = self.prepare_container_def(instance_type, accelerator_type=accelerator_type)
self.name = self.name or utils.name_from_image(container_def['Image'])
enable_network_isolation = self.enable_network_isolation()
self.sagemaker_session.create_model(self.name, self.role, container_def, vpc_config=self.vpc_config, enable_network_isolation=enable_network_isolation, tags=tags) |
def intersection(source, mask):
    """Intersect two layers.

    Issue https://github.com/inasafe/inasafe/issues/3186

    :param source: The vector layer to clip.
    :type source: QgsVectorLayer

    :param mask: The vector layer to use for clipping.
    :type mask: QgsVectorLayer

    :return: The clip vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    layer_name = intersection_steps['output_layer_name'] % (
        source.keywords['layer_purpose'])

    # TODO implement callback through QgsProcessingFeedback object
    initialize_processing()
    feedback = create_processing_feedback()
    context = create_processing_context(feedback=feedback)

    result = processing.run(
        'native:intersection',
        {'INPUT': source, 'OVERLAY': mask, 'OUTPUT': 'memory:'},
        context=context)
    if result is None:
        raise ProcessingInstallationError

    layer = result['OUTPUT']
    layer.setName(layer_name)

    # Merge the keyword metadata carried by both input layers onto the
    # freshly produced intersection layer.
    keywords = dict(source.keywords)
    keywords['title'] = layer_name
    keywords['layer_purpose'] = layer_purpose_exposure_summary['key']
    keywords['inasafe_fields'] = dict(source.keywords['inasafe_fields'])
    keywords['inasafe_fields'].update(mask.keywords['inasafe_fields'])
    keywords['hazard_keywords'] = dict(mask.keywords['hazard_keywords'])
    keywords['exposure_keywords'] = dict(source.keywords)
    keywords['aggregation_keywords'] = dict(
        mask.keywords['aggregation_keywords'])
    layer.keywords = keywords

    check_layer(layer)
    return layer
constant[Intersect two layers.
Issue https://github.com/inasafe/inasafe/issues/3186
:param source: The vector layer to clip.
:type source: QgsVectorLayer
:param mask: The vector layer to use for clipping.
:type mask: QgsVectorLayer
:return: The clip vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
]
variable[output_layer_name] assign[=] call[name[intersection_steps]][constant[output_layer_name]]
variable[output_layer_name] assign[=] binary_operation[name[output_layer_name] <ast.Mod object at 0x7da2590d6920> call[name[source].keywords][constant[layer_purpose]]]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347fa0>, <ast.Constant object at 0x7da1b2345e70>, <ast.Constant object at 0x7da1b2347af0>], [<ast.Name object at 0x7da1b2347b50>, <ast.Name object at 0x7da1b2346080>, <ast.Constant object at 0x7da1b23446a0>]]
call[name[initialize_processing], parameter[]]
variable[feedback] assign[=] call[name[create_processing_feedback], parameter[]]
variable[context] assign[=] call[name[create_processing_context], parameter[]]
variable[result] assign[=] call[name[processing].run, parameter[constant[native:intersection], name[parameters]]]
if compare[name[result] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f8130a0>
variable[intersect] assign[=] call[name[result]][constant[OUTPUT]]
call[name[intersect].setName, parameter[name[output_layer_name]]]
name[intersect].keywords assign[=] call[name[dict], parameter[name[source].keywords]]
call[name[intersect].keywords][constant[title]] assign[=] name[output_layer_name]
call[name[intersect].keywords][constant[layer_purpose]] assign[=] call[name[layer_purpose_exposure_summary]][constant[key]]
call[name[intersect].keywords][constant[inasafe_fields]] assign[=] call[name[dict], parameter[call[name[source].keywords][constant[inasafe_fields]]]]
call[call[name[intersect].keywords][constant[inasafe_fields]].update, parameter[call[name[mask].keywords][constant[inasafe_fields]]]]
call[name[intersect].keywords][constant[hazard_keywords]] assign[=] call[name[dict], parameter[call[name[mask].keywords][constant[hazard_keywords]]]]
call[name[intersect].keywords][constant[exposure_keywords]] assign[=] call[name[dict], parameter[name[source].keywords]]
call[name[intersect].keywords][constant[aggregation_keywords]] assign[=] call[name[dict], parameter[call[name[mask].keywords][constant[aggregation_keywords]]]]
call[name[check_layer], parameter[name[intersect]]]
return[name[intersect]] | keyword[def] identifier[intersection] ( identifier[source] , identifier[mask] ):
literal[string]
identifier[output_layer_name] = identifier[intersection_steps] [ literal[string] ]
identifier[output_layer_name] = identifier[output_layer_name] %(
identifier[source] . identifier[keywords] [ literal[string] ])
identifier[parameters] ={ literal[string] : identifier[source] ,
literal[string] : identifier[mask] ,
literal[string] : literal[string] }
identifier[initialize_processing] ()
identifier[feedback] = identifier[create_processing_feedback] ()
identifier[context] = identifier[create_processing_context] ( identifier[feedback] = identifier[feedback] )
identifier[result] = identifier[processing] . identifier[run] ( literal[string] , identifier[parameters] , identifier[context] = identifier[context] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[raise] identifier[ProcessingInstallationError]
identifier[intersect] = identifier[result] [ literal[string] ]
identifier[intersect] . identifier[setName] ( identifier[output_layer_name] )
identifier[intersect] . identifier[keywords] = identifier[dict] ( identifier[source] . identifier[keywords] )
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[output_layer_name]
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[layer_purpose_exposure_summary] [ literal[string] ]
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[dict] ( identifier[source] . identifier[keywords] [ literal[string] ])
identifier[intersect] . identifier[keywords] [ literal[string] ]. identifier[update] (
identifier[mask] . identifier[keywords] [ literal[string] ])
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[dict] ( identifier[mask] . identifier[keywords] [ literal[string] ])
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[dict] ( identifier[source] . identifier[keywords] )
identifier[intersect] . identifier[keywords] [ literal[string] ]= identifier[dict] (
identifier[mask] . identifier[keywords] [ literal[string] ])
identifier[check_layer] ( identifier[intersect] )
keyword[return] identifier[intersect] | def intersection(source, mask):
"""Intersect two layers.
Issue https://github.com/inasafe/inasafe/issues/3186
:param source: The vector layer to clip.
:type source: QgsVectorLayer
:param mask: The vector layer to use for clipping.
:type mask: QgsVectorLayer
:return: The clip vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = intersection_steps['output_layer_name']
output_layer_name = output_layer_name % source.keywords['layer_purpose']
parameters = {'INPUT': source, 'OVERLAY': mask, 'OUTPUT': 'memory:'}
# TODO implement callback through QgsProcessingFeedback object
initialize_processing()
feedback = create_processing_feedback()
context = create_processing_context(feedback=feedback)
result = processing.run('native:intersection', parameters, context=context)
if result is None:
raise ProcessingInstallationError # depends on [control=['if'], data=[]]
intersect = result['OUTPUT']
intersect.setName(output_layer_name)
intersect.keywords = dict(source.keywords)
intersect.keywords['title'] = output_layer_name
intersect.keywords['layer_purpose'] = layer_purpose_exposure_summary['key']
intersect.keywords['inasafe_fields'] = dict(source.keywords['inasafe_fields'])
intersect.keywords['inasafe_fields'].update(mask.keywords['inasafe_fields'])
intersect.keywords['hazard_keywords'] = dict(mask.keywords['hazard_keywords'])
intersect.keywords['exposure_keywords'] = dict(source.keywords)
intersect.keywords['aggregation_keywords'] = dict(mask.keywords['aggregation_keywords'])
check_layer(intersect)
return intersect |
def QA_fetch_index_min_adv(
        code,
        start, end=None,
        frequence='1min',
        if_drop_index=True,
        collections=DATABASE.index_min):
    '''
    Fetch index minute bars as a QA_DataStruct_Index_min.

    :param code: index code(s) to fetch.
    :param start: start datetime; a bare date ('YYYY-MM-DD') is expanded
        to '09:30:00' of that day.
    :param end: end datetime; defaults to ``start``; a bare date is
        expanded to '15:00:00' of that day.
    :param frequence: bar frequence; short aliases such as '1m' are
        normalised to '1min' etc.
    :param if_drop_index: passed to ``DataFrame.set_index(drop=...)``.
    :param collections: target mongo collection (unused here, kept for
        interface compatibility).
    :return: QA_DataStruct_Index_min, or None when no data was found.
    '''
    # Normalise short frequence aliases; unknown values pass through
    # unchanged, matching the old if/elif chain.
    aliases = {'1m': '1min', '5m': '5min', '15m': '15min',
               '30m': '30min', '60m': '60min'}
    frequence = aliases.get(frequence, frequence)

    end = start if end is None else end
    if len(start) == 10:
        start = '{} 09:30:00'.format(start)
    if len(end) == 10:
        end = '{} 15:00:00'.format(end)
    # 🛠 todo 报告错误 如果开始时间 在 结束时间之后
    res = QA_fetch_index_min(
        code, start, end, format='pd', frequence=frequence)
    if res is None:
        # BUGFIX: the original fell through after printing and raised a
        # NameError on the undefined ``res_reset_index``; return None
        # explicitly instead.
        print("QA Error QA_fetch_index_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_index_min return None" % (
            code, start, end, frequence))
        return None
    res_reset_index = res.set_index(
        ['datetime', 'code'], drop=if_drop_index)
    return QA_DataStruct_Index_min(res_reset_index)
constant[
'获取股票分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
]
if compare[name[frequence] in list[[<ast.Constant object at 0x7da1b1ea1060>, <ast.Constant object at 0x7da1b1ea0af0>]]] begin[:]
variable[frequence] assign[=] constant[1min]
variable[end] assign[=] <ast.IfExp object at 0x7da1b1ea1a20>
if compare[call[name[len], parameter[name[start]]] equal[==] constant[10]] begin[:]
variable[start] assign[=] call[constant[{} 09:30:00].format, parameter[name[start]]]
if compare[call[name[len], parameter[name[end]]] equal[==] constant[10]] begin[:]
variable[end] assign[=] call[constant[{} 15:00:00].format, parameter[name[end]]]
variable[res] assign[=] call[name[QA_fetch_index_min], parameter[name[code], name[start], name[end]]]
if compare[name[res] is constant[None]] begin[:]
call[name[print], parameter[binary_operation[constant[QA Error QA_fetch_index_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_index_min return None] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1fe5ff0>, <ast.Name object at 0x7da1b1fe6050>, <ast.Name object at 0x7da1b1fe5f60>, <ast.Name object at 0x7da1b1fe5f30>]]]]] | keyword[def] identifier[QA_fetch_index_min_adv] (
identifier[code] ,
identifier[start] , identifier[end] = keyword[None] ,
identifier[frequence] = literal[string] ,
identifier[if_drop_index] = keyword[True] ,
identifier[collections] = identifier[DATABASE] . identifier[index_min] ):
literal[string]
keyword[if] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]:
identifier[frequence] = literal[string]
keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]:
identifier[frequence] = literal[string]
keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]:
identifier[frequence] = literal[string]
keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]:
identifier[frequence] = literal[string]
keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]:
identifier[frequence] = literal[string]
identifier[end] = identifier[start] keyword[if] identifier[end] keyword[is] keyword[None] keyword[else] identifier[end]
keyword[if] identifier[len] ( identifier[start] )== literal[int] :
identifier[start] = literal[string] . identifier[format] ( identifier[start] )
keyword[if] identifier[len] ( identifier[end] )== literal[int] :
identifier[end] = literal[string] . identifier[format] ( identifier[end] )
identifier[res] = identifier[QA_fetch_index_min] (
identifier[code] , identifier[start] , identifier[end] , identifier[format] = literal[string] , identifier[frequence] = identifier[frequence] )
keyword[if] identifier[res] keyword[is] keyword[None] :
identifier[print] ( literal[string] %(
identifier[code] , identifier[start] , identifier[end] , identifier[frequence] ))
keyword[else] :
identifier[res_reset_index] = identifier[res] . identifier[set_index] (
[ literal[string] , literal[string] ], identifier[drop] = identifier[if_drop_index] )
keyword[return] identifier[QA_DataStruct_Index_min] ( identifier[res_reset_index] ) | def QA_fetch_index_min_adv(code, start, end=None, frequence='1min', if_drop_index=True, collections=DATABASE.index_min):
"""
'获取股票分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
"""
if frequence in ['1min', '1m']:
frequence = '1min' # depends on [control=['if'], data=['frequence']]
elif frequence in ['5min', '5m']:
frequence = '5min' # depends on [control=['if'], data=['frequence']]
elif frequence in ['15min', '15m']:
frequence = '15min' # depends on [control=['if'], data=['frequence']]
elif frequence in ['30min', '30m']:
frequence = '30min' # depends on [control=['if'], data=['frequence']]
elif frequence in ['60min', '60m']:
frequence = '60min' # depends on [control=['if'], data=['frequence']]
# __data = [] 没有使用
end = start if end is None else end
if len(start) == 10:
start = '{} 09:30:00'.format(start) # depends on [control=['if'], data=[]]
if len(end) == 10:
end = '{} 15:00:00'.format(end) # depends on [control=['if'], data=[]]
# 🛠 todo 报告错误 如果开始时间 在 结束时间之后
# if start == end:
# 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的
#print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
# return None
res = QA_fetch_index_min(code, start, end, format='pd', frequence=frequence)
if res is None:
print('QA Error QA_fetch_index_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_index_min return None' % (code, start, end, frequence)) # depends on [control=['if'], data=[]]
else:
res_reset_index = res.set_index(['datetime', 'code'], drop=if_drop_index)
# if res_reset_index is None:
# print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
return QA_DataStruct_Index_min(res_reset_index) |
def settimeout(self, timeout):
    """set the timeout for this specific socket

    :param timeout:
        the number of seconds the socket's blocking operations should block
        before raising a ``socket.timeout``
    :type timeout: float or None
    """
    # ``None`` means "block forever"; any other value is coerced to float.
    self._timeout = None if timeout is None else float(timeout)
constant[set the timeout for this specific socket
:param timeout:
the number of seconds the socket's blocking operations should block
before raising a ``socket.timeout``
:type timeout: float or None
]
if compare[name[timeout] is_not constant[None]] begin[:]
variable[timeout] assign[=] call[name[float], parameter[name[timeout]]]
name[self]._timeout assign[=] name[timeout] | keyword[def] identifier[settimeout] ( identifier[self] , identifier[timeout] ):
literal[string]
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
identifier[timeout] = identifier[float] ( identifier[timeout] )
identifier[self] . identifier[_timeout] = identifier[timeout] | def settimeout(self, timeout):
"""set the timeout for this specific socket
:param timeout:
the number of seconds the socket's blocking operations should block
before raising a ``socket.timeout``
:type timeout: float or None
"""
if timeout is not None:
timeout = float(timeout) # depends on [control=['if'], data=['timeout']]
self._timeout = timeout |
def multivariate_multiply(m1, c1, m2, c2):
    """
    Multiply two multivariate Gaussians and return the product as the
    tuple (mean, covariance).

    Examples
    --------
    .. code-block:: Python

        m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
                                     [3.2, 0], [[8.0, 1.1], [1.1, 8.0]])

    Parameters
    ----------
    m1 : array-like
        Mean of the first Gaussian; anything convertible to a 1D array via
        numpy.asarray() (e.g. 6, [6], [6, 5], np.array([3, 4, 5, 6])).
    c1 : matrix-like
        Covariance of the first Gaussian; convertible to a 2D array via
        numpy.asarray().
    m2 : array-like
        Mean of the second Gaussian, same conventions as ``m1``.
    c2 : matrix-like
        Covariance of the second Gaussian, same conventions as ``c1``.

    Returns
    -------
    m : ndarray
        Mean of the product.
    c : ndarray
        Covariance of the product.
    """
    mean1 = np.asarray(m1)
    mean2 = np.asarray(m2)
    cov1 = np.asarray(c1)
    cov2 = np.asarray(c2)

    # inv(C1 + C2) appears in both the mean and the covariance formulas,
    # so compute it once.
    sum_inv = np.linalg.inv(cov1 + cov2)
    gain1 = np.dot(cov1, sum_inv)
    gain2 = np.dot(cov2, sum_inv)

    new_cov = gain1.dot(cov2)
    new_mean = gain2.dot(mean1) + gain1.dot(mean2)
    return new_mean, new_cov
constant[
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
]
variable[C1] assign[=] call[name[np].asarray, parameter[name[c1]]]
variable[C2] assign[=] call[name[np].asarray, parameter[name[c2]]]
variable[M1] assign[=] call[name[np].asarray, parameter[name[m1]]]
variable[M2] assign[=] call[name[np].asarray, parameter[name[m2]]]
variable[sum_inv] assign[=] call[name[np].linalg.inv, parameter[binary_operation[name[C1] + name[C2]]]]
variable[C3] assign[=] call[call[name[np].dot, parameter[name[C1], name[sum_inv]]].dot, parameter[name[C2]]]
variable[M3] assign[=] binary_operation[call[call[name[np].dot, parameter[name[C2], name[sum_inv]]].dot, parameter[name[M1]]] + call[call[name[np].dot, parameter[name[C1], name[sum_inv]]].dot, parameter[name[M2]]]]
return[tuple[[<ast.Name object at 0x7da20c76d900>, <ast.Name object at 0x7da20c76c430>]]] | keyword[def] identifier[multivariate_multiply] ( identifier[m1] , identifier[c1] , identifier[m2] , identifier[c2] ):
literal[string]
identifier[C1] = identifier[np] . identifier[asarray] ( identifier[c1] )
identifier[C2] = identifier[np] . identifier[asarray] ( identifier[c2] )
identifier[M1] = identifier[np] . identifier[asarray] ( identifier[m1] )
identifier[M2] = identifier[np] . identifier[asarray] ( identifier[m2] )
identifier[sum_inv] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[C1] + identifier[C2] )
identifier[C3] = identifier[np] . identifier[dot] ( identifier[C1] , identifier[sum_inv] ). identifier[dot] ( identifier[C2] )
identifier[M3] =( identifier[np] . identifier[dot] ( identifier[C2] , identifier[sum_inv] ). identifier[dot] ( identifier[M1] )+
identifier[np] . identifier[dot] ( identifier[C1] , identifier[sum_inv] ). identifier[dot] ( identifier[M2] ))
keyword[return] identifier[M3] , identifier[C3] | def multivariate_multiply(m1, c1, m2, c2):
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
Mean of first Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
Covariance of first Gaussian. Must be convertable to an 2D array via
numpy.asarray().
m2 : array-like
Mean of second Gaussian. Must be convertable to an 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
Covariance of second Gaussian. Must be convertable to an 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
"""
C1 = np.asarray(c1)
C2 = np.asarray(c2)
M1 = np.asarray(m1)
M2 = np.asarray(m2)
sum_inv = np.linalg.inv(C1 + C2)
C3 = np.dot(C1, sum_inv).dot(C2)
M3 = np.dot(C2, sum_inv).dot(M1) + np.dot(C1, sum_inv).dot(M2)
return (M3, C3) |
def run_dssp(pdb, path=True, outfile=None):
    """Uses DSSP to find helices and extracts helices from a pdb file or string.

    Parameters
    ----------
    pdb : str
        Path to pdb file or string.
    path : bool, optional
        Indicates if pdb is a path or a string.
    outfile : str, optional
        Filepath for storing the dssp output.

    Returns
    -------
    dssp_out : str
        Std out from DSSP.
    """
    dssp_exe = global_settings['dssp']['path']
    if not path:
        if isinstance(pdb, str):
            pdb = pdb.encode()
        # Write the structure to a named temporary file so DSSP can read it
        # by path.  The file is created *outside* the try block so the
        # finally clause never hits an unbound name, and it is closed
        # before DSSP runs: closing guarantees the data is flushed and is
        # required on Windows, where an open file cannot be reopened by
        # another process (the original relied on seek(0) flushing the
        # buffer and kept the handle open).
        temp_pdb = tempfile.NamedTemporaryFile(delete=False)
        try:
            temp_pdb.write(pdb)
            temp_pdb.close()
            dssp_out = subprocess.check_output([dssp_exe, temp_pdb.name])
        finally:
            if not temp_pdb.closed:
                temp_pdb.close()
            os.remove(temp_pdb.name)
    else:
        dssp_out = subprocess.check_output([dssp_exe, pdb])
    # subprocess returns bytes; decode for Python 3 string handling.
    dssp_out = dssp_out.decode()
    if outfile:
        with open(outfile, 'w') as outf:
            outf.write(dssp_out)
    return dssp_out
constant[Uses DSSP to find helices and extracts helices from a pdb file or string.
Parameters
----------
pdb : str
Path to pdb file or string.
path : bool, optional
Indicates if pdb is a path or a string.
outfile : str, optional
Filepath for storing the dssp output.
Returns
-------
dssp_out : str
Std out from DSSP.
]
if <ast.UnaryOp object at 0x7da1b2678220> begin[:]
if compare[call[name[type], parameter[name[pdb]]] equal[==] name[str]] begin[:]
variable[pdb] assign[=] call[name[pdb].encode, parameter[]]
<ast.Try object at 0x7da1b2679f90>
variable[dssp_out] assign[=] call[name[dssp_out].decode, parameter[]]
if name[outfile] begin[:]
with call[name[open], parameter[name[outfile], constant[w]]] begin[:]
call[name[outf].write, parameter[name[dssp_out]]]
return[name[dssp_out]] | keyword[def] identifier[run_dssp] ( identifier[pdb] , identifier[path] = keyword[True] , identifier[outfile] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
keyword[if] identifier[type] ( identifier[pdb] )== identifier[str] :
identifier[pdb] = identifier[pdb] . identifier[encode] ()
keyword[try] :
identifier[temp_pdb] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] )
identifier[temp_pdb] . identifier[write] ( identifier[pdb] )
identifier[temp_pdb] . identifier[seek] ( literal[int] )
identifier[dssp_out] = identifier[subprocess] . identifier[check_output] (
[ identifier[global_settings] [ literal[string] ][ literal[string] ], identifier[temp_pdb] . identifier[name] ])
identifier[temp_pdb] . identifier[close] ()
keyword[finally] :
identifier[os] . identifier[remove] ( identifier[temp_pdb] . identifier[name] )
keyword[else] :
identifier[dssp_out] = identifier[subprocess] . identifier[check_output] (
[ identifier[global_settings] [ literal[string] ][ literal[string] ], identifier[pdb] ])
identifier[dssp_out] = identifier[dssp_out] . identifier[decode] ()
keyword[if] identifier[outfile] :
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[outf] :
identifier[outf] . identifier[write] ( identifier[dssp_out] )
keyword[return] identifier[dssp_out] | def run_dssp(pdb, path=True, outfile=None):
"""Uses DSSP to find helices and extracts helices from a pdb file or string.
Parameters
----------
pdb : str
Path to pdb file or string.
path : bool, optional
Indicates if pdb is a path or a string.
outfile : str, optional
Filepath for storing the dssp output.
Returns
-------
dssp_out : str
Std out from DSSP.
"""
if not path:
if type(pdb) == str:
pdb = pdb.encode() # depends on [control=['if'], data=[]]
try:
temp_pdb = tempfile.NamedTemporaryFile(delete=False)
temp_pdb.write(pdb)
temp_pdb.seek(0)
dssp_out = subprocess.check_output([global_settings['dssp']['path'], temp_pdb.name])
temp_pdb.close() # depends on [control=['try'], data=[]]
finally:
os.remove(temp_pdb.name) # depends on [control=['if'], data=[]]
else:
dssp_out = subprocess.check_output([global_settings['dssp']['path'], pdb])
# Python 3 string formatting.
dssp_out = dssp_out.decode()
if outfile:
with open(outfile, 'w') as outf:
outf.write(dssp_out) # depends on [control=['with'], data=['outf']] # depends on [control=['if'], data=[]]
return dssp_out |
def setup(self):
    """Build the per-delimiter regexes and escape pattern from the config.

    Populates ``self.delimiters`` with ``(compiled_pattern, group_name)``
    pairs and ``self.escapes`` with a compiled escape regex (or None).
    """
    self.context_visible_first = self.config['context_visible_first']
    self.delimiters = []
    self.escapes = None
    self.line_endings = self.config['normalize_line_endings']
    # NOTE: the original also pre-initialised a local ``escapes = []``
    # here; it was dead code, unconditionally overwritten after the loop.
    for delimiter in self.config['delimiters']:
        if not isinstance(delimiter, dict):
            continue
        # Pick a random capture-group name that cannot collide with any
        # literal text appearing in the delimiter patterns themselves.
        group = util.random_name_gen()
        while (
            group in delimiter['open'] or
            group in delimiter['close'] or
            group in delimiter.get('content', DEFAULT_CONTENT)
        ):
            group = util.random_name_gen()
        pattern = r'%s(?P<%s>%s)(?:%s|\Z)' % (
            delimiter['open'],
            group,
            delimiter.get('content', DEFAULT_CONTENT),
            delimiter['close']
        )
        self.delimiters.append((re.compile(pattern, re.M), group))
    escapes = self.config['escapes']
    if escapes:
        self.escapes = re.compile(escapes)
constant[Setup.]
name[self].context_visible_first assign[=] call[name[self].config][constant[context_visible_first]]
name[self].delimiters assign[=] list[[]]
name[self].escapes assign[=] constant[None]
name[self].line_endings assign[=] call[name[self].config][constant[normalize_line_endings]]
variable[escapes] assign[=] list[[]]
for taget[name[delimiter]] in starred[call[name[self].config][constant[delimiters]]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cd270> begin[:]
continue
variable[group] assign[=] call[name[util].random_name_gen, parameter[]]
while <ast.BoolOp object at 0x7da18c4ce5c0> begin[:]
variable[group] assign[=] call[name[util].random_name_gen, parameter[]]
variable[pattern] assign[=] binary_operation[constant[%s(?P<%s>%s)(?:%s|\Z)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20e961600>, <ast.Name object at 0x7da20e961f00>, <ast.Call object at 0x7da20e9606a0>, <ast.Subscript object at 0x7da20e9633a0>]]]
call[name[self].delimiters.append, parameter[tuple[[<ast.Call object at 0x7da20e9619c0>, <ast.Name object at 0x7da20e9631f0>]]]]
variable[escapes] assign[=] call[name[self].config][constant[escapes]]
if name[escapes] begin[:]
name[self].escapes assign[=] call[name[re].compile, parameter[name[escapes]]] | keyword[def] identifier[setup] ( identifier[self] ):
literal[string]
identifier[self] . identifier[context_visible_first] = identifier[self] . identifier[config] [ literal[string] ]
identifier[self] . identifier[delimiters] =[]
identifier[self] . identifier[escapes] = keyword[None]
identifier[self] . identifier[line_endings] = identifier[self] . identifier[config] [ literal[string] ]
identifier[escapes] =[]
keyword[for] identifier[delimiter] keyword[in] identifier[self] . identifier[config] [ literal[string] ]:
keyword[if] keyword[not] identifier[isinstance] ( identifier[delimiter] , identifier[dict] ):
keyword[continue]
identifier[group] = identifier[util] . identifier[random_name_gen] ()
keyword[while] (
identifier[group] keyword[in] identifier[delimiter] [ literal[string] ] keyword[or]
identifier[group] keyword[in] identifier[delimiter] [ literal[string] ] keyword[or]
identifier[group] keyword[in] identifier[delimiter] . identifier[get] ( literal[string] , identifier[DEFAULT_CONTENT] )
):
identifier[group] = identifier[util] . identifier[random_name_gen] ()
identifier[pattern] = literal[string] %(
identifier[delimiter] [ literal[string] ],
identifier[group] ,
identifier[delimiter] . identifier[get] ( literal[string] , identifier[DEFAULT_CONTENT] ),
identifier[delimiter] [ literal[string] ]
)
identifier[self] . identifier[delimiters] . identifier[append] (( identifier[re] . identifier[compile] ( identifier[pattern] , identifier[re] . identifier[M] ), identifier[group] ))
identifier[escapes] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] identifier[escapes] :
identifier[self] . identifier[escapes] = identifier[re] . identifier[compile] ( identifier[escapes] ) | def setup(self):
"""Setup."""
self.context_visible_first = self.config['context_visible_first']
self.delimiters = []
self.escapes = None
self.line_endings = self.config['normalize_line_endings']
escapes = []
for delimiter in self.config['delimiters']:
if not isinstance(delimiter, dict):
continue # depends on [control=['if'], data=[]]
group = util.random_name_gen()
while group in delimiter['open'] or group in delimiter['close'] or group in delimiter.get('content', DEFAULT_CONTENT):
group = util.random_name_gen() # depends on [control=['while'], data=[]]
pattern = '%s(?P<%s>%s)(?:%s|\\Z)' % (delimiter['open'], group, delimiter.get('content', DEFAULT_CONTENT), delimiter['close'])
self.delimiters.append((re.compile(pattern, re.M), group)) # depends on [control=['for'], data=['delimiter']]
escapes = self.config['escapes']
if escapes:
self.escapes = re.compile(escapes) # depends on [control=['if'], data=[]] |
def extend(self, new_leaves: List[bytes]):
    """Extend this tree with new_leaves on the end.

    Repeatedly pushes the largest full subtree that the current minimal
    subtree height allows via _push_subtree(), then hashes whatever leaves
    remain as a single valid (possibly non-full) subtree in one go.
    """
    count = len(new_leaves)
    target_size = self.tree_size + count
    consumed = 0
    # Push maximal full subtrees while the minimal-subtree height permits.
    while True:
        height = self.__mintree_height
        if height <= 0:
            break
        subtree_size = 1 << (height - 1)
        if count - consumed < subtree_size:
            break
        self._push_subtree(new_leaves[consumed:consumed + subtree_size])
        consumed += subtree_size
    # Hash any remaining leaves as one non-full subtree, now that we can.
    if consumed < count:
        root_hash, hashes = self.__hasher._hash_full(new_leaves, consumed, count)
        self._update(target_size, self.hashes + hashes)
    assert self.tree_size == target_size
constant[Extend this tree with new_leaves on the end.
The algorithm works by using _push_subtree() as a primitive, calling
it with the maximum number of allowed leaves until we can add the
remaining leaves as a valid entire (non-full) subtree in one go.
]
variable[size] assign[=] call[name[len], parameter[name[new_leaves]]]
variable[final_size] assign[=] binary_operation[name[self].tree_size + name[size]]
variable[idx] assign[=] constant[0]
while constant[True] begin[:]
variable[max_h] assign[=] name[self].__mintree_height
variable[max_size] assign[=] <ast.IfExp object at 0x7da2054a7820>
if <ast.BoolOp object at 0x7da2054a7eb0> begin[:]
call[name[self]._push_subtree, parameter[call[name[new_leaves]][<ast.Slice object at 0x7da2054a7f40>]]]
<ast.AugAssign object at 0x7da2054a7910>
if compare[name[idx] less[<] name[size]] begin[:]
<ast.Tuple object at 0x7da2054a7d30> assign[=] call[name[self].__hasher._hash_full, parameter[name[new_leaves], name[idx], name[size]]]
call[name[self]._update, parameter[name[final_size], binary_operation[name[self].hashes + name[hashes]]]]
assert[compare[name[self].tree_size equal[==] name[final_size]]] | keyword[def] identifier[extend] ( identifier[self] , identifier[new_leaves] : identifier[List] [ identifier[bytes] ]):
literal[string]
identifier[size] = identifier[len] ( identifier[new_leaves] )
identifier[final_size] = identifier[self] . identifier[tree_size] + identifier[size]
identifier[idx] = literal[int]
keyword[while] keyword[True] :
identifier[max_h] = identifier[self] . identifier[__mintree_height]
identifier[max_size] = literal[int] <<( identifier[max_h] - literal[int] ) keyword[if] identifier[max_h] > literal[int] keyword[else] literal[int]
keyword[if] identifier[max_h] > literal[int] keyword[and] identifier[size] - identifier[idx] >= identifier[max_size] :
identifier[self] . identifier[_push_subtree] ( identifier[new_leaves] [ identifier[idx] : identifier[idx] + identifier[max_size] ])
identifier[idx] += identifier[max_size]
keyword[else] :
keyword[break]
keyword[if] identifier[idx] < identifier[size] :
identifier[root_hash] , identifier[hashes] = identifier[self] . identifier[__hasher] . identifier[_hash_full] ( identifier[new_leaves] , identifier[idx] , identifier[size] )
identifier[self] . identifier[_update] ( identifier[final_size] , identifier[self] . identifier[hashes] + identifier[hashes] )
keyword[assert] identifier[self] . identifier[tree_size] == identifier[final_size] | def extend(self, new_leaves: List[bytes]):
"""Extend this tree with new_leaves on the end.
The algorithm works by using _push_subtree() as a primitive, calling
it with the maximum number of allowed leaves until we can add the
remaining leaves as a valid entire (non-full) subtree in one go.
"""
size = len(new_leaves)
final_size = self.tree_size + size
idx = 0
while True:
# keep pushing subtrees until mintree_size > remaining
max_h = self.__mintree_height
max_size = 1 << max_h - 1 if max_h > 0 else 0
if max_h > 0 and size - idx >= max_size:
self._push_subtree(new_leaves[idx:idx + max_size])
idx += max_size # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
# fill in rest of tree in one go, now that we can
if idx < size:
(root_hash, hashes) = self.__hasher._hash_full(new_leaves, idx, size)
self._update(final_size, self.hashes + hashes) # depends on [control=['if'], data=['idx', 'size']]
assert self.tree_size == final_size |
def eval_hessian(self, ordered_parameters=[], **parameters):
"""
Hessian for log-likelihood is defined as
:math:`\\nabla^2_{\\vec{p}}( \\log( L(\\vec{p} | \\vec{x})))`.
:param parameters: values for the fit parameters.
:return: array of length number of ``Parameter``'s in the model, with all partial derivatives evaluated at p, data.
"""
evaluated_func = super(LogLikelihood, self).__call__(
ordered_parameters, **parameters
)
evaluated_jac = super(LogLikelihood, self).eval_jacobian(
ordered_parameters, **parameters
)
evaluated_hess = super(LogLikelihood, self).eval_hessian(
ordered_parameters, **parameters
)
result = 0
for f, jac_comp, hess_comp in zip(evaluated_func, evaluated_jac, evaluated_hess):
# Outer product
jac_outer_jac = np.einsum('i...,j...->ij...', jac_comp, jac_comp)
dd_logf = - hess_comp / f[np.newaxis, np.newaxis, ...] + \
(1 / f**2)[np.newaxis, np.newaxis, ...] * jac_outer_jac
# We sum away everything except the matrices in the axes 0 & 1.
axes = tuple(range(2, len(dd_logf.shape)))
result += np.sum(dd_logf, axis=axes, keepdims=False)
else:
return np.atleast_2d(np.squeeze(np.array(result))) | def function[eval_hessian, parameter[self, ordered_parameters]]:
constant[
Hessian for log-likelihood is defined as
:math:`\nabla^2_{\vec{p}}( \log( L(\vec{p} | \vec{x})))`.
:param parameters: values for the fit parameters.
:return: array of length number of ``Parameter``'s in the model, with all partial derivatives evaluated at p, data.
]
variable[evaluated_func] assign[=] call[call[name[super], parameter[name[LogLikelihood], name[self]]].__call__, parameter[name[ordered_parameters]]]
variable[evaluated_jac] assign[=] call[call[name[super], parameter[name[LogLikelihood], name[self]]].eval_jacobian, parameter[name[ordered_parameters]]]
variable[evaluated_hess] assign[=] call[call[name[super], parameter[name[LogLikelihood], name[self]]].eval_hessian, parameter[name[ordered_parameters]]]
variable[result] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da207f03be0>, <ast.Name object at 0x7da207f00a00>, <ast.Name object at 0x7da207f01000>]]] in starred[call[name[zip], parameter[name[evaluated_func], name[evaluated_jac], name[evaluated_hess]]]] begin[:]
variable[jac_outer_jac] assign[=] call[name[np].einsum, parameter[constant[i...,j...->ij...], name[jac_comp], name[jac_comp]]]
variable[dd_logf] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da207f01b70> / call[name[f]][tuple[[<ast.Attribute object at 0x7da207f024d0>, <ast.Attribute object at 0x7da207f03c10>, <ast.Constant object at 0x7da207f00c40>]]]] + binary_operation[call[binary_operation[constant[1] / binary_operation[name[f] ** constant[2]]]][tuple[[<ast.Attribute object at 0x7da207f026e0>, <ast.Attribute object at 0x7da207f01480>, <ast.Constant object at 0x7da207f030d0>]]] * name[jac_outer_jac]]]
variable[axes] assign[=] call[name[tuple], parameter[call[name[range], parameter[constant[2], call[name[len], parameter[name[dd_logf].shape]]]]]]
<ast.AugAssign object at 0x7da207f01ba0> | keyword[def] identifier[eval_hessian] ( identifier[self] , identifier[ordered_parameters] =[],** identifier[parameters] ):
literal[string]
identifier[evaluated_func] = identifier[super] ( identifier[LogLikelihood] , identifier[self] ). identifier[__call__] (
identifier[ordered_parameters] ,** identifier[parameters]
)
identifier[evaluated_jac] = identifier[super] ( identifier[LogLikelihood] , identifier[self] ). identifier[eval_jacobian] (
identifier[ordered_parameters] ,** identifier[parameters]
)
identifier[evaluated_hess] = identifier[super] ( identifier[LogLikelihood] , identifier[self] ). identifier[eval_hessian] (
identifier[ordered_parameters] ,** identifier[parameters]
)
identifier[result] = literal[int]
keyword[for] identifier[f] , identifier[jac_comp] , identifier[hess_comp] keyword[in] identifier[zip] ( identifier[evaluated_func] , identifier[evaluated_jac] , identifier[evaluated_hess] ):
identifier[jac_outer_jac] = identifier[np] . identifier[einsum] ( literal[string] , identifier[jac_comp] , identifier[jac_comp] )
identifier[dd_logf] =- identifier[hess_comp] / identifier[f] [ identifier[np] . identifier[newaxis] , identifier[np] . identifier[newaxis] ,...]+( literal[int] / identifier[f] ** literal[int] )[ identifier[np] . identifier[newaxis] , identifier[np] . identifier[newaxis] ,...]* identifier[jac_outer_jac]
identifier[axes] = identifier[tuple] ( identifier[range] ( literal[int] , identifier[len] ( identifier[dd_logf] . identifier[shape] )))
identifier[result] += identifier[np] . identifier[sum] ( identifier[dd_logf] , identifier[axis] = identifier[axes] , identifier[keepdims] = keyword[False] )
keyword[else] :
keyword[return] identifier[np] . identifier[atleast_2d] ( identifier[np] . identifier[squeeze] ( identifier[np] . identifier[array] ( identifier[result] ))) | def eval_hessian(self, ordered_parameters=[], **parameters):
"""
Hessian for log-likelihood is defined as
:math:`\\nabla^2_{\\vec{p}}( \\log( L(\\vec{p} | \\vec{x})))`.
:param parameters: values for the fit parameters.
:return: array of length number of ``Parameter``'s in the model, with all partial derivatives evaluated at p, data.
"""
evaluated_func = super(LogLikelihood, self).__call__(ordered_parameters, **parameters)
evaluated_jac = super(LogLikelihood, self).eval_jacobian(ordered_parameters, **parameters)
evaluated_hess = super(LogLikelihood, self).eval_hessian(ordered_parameters, **parameters)
result = 0
for (f, jac_comp, hess_comp) in zip(evaluated_func, evaluated_jac, evaluated_hess):
# Outer product
jac_outer_jac = np.einsum('i...,j...->ij...', jac_comp, jac_comp)
dd_logf = -hess_comp / f[np.newaxis, np.newaxis, ...] + (1 / f ** 2)[np.newaxis, np.newaxis, ...] * jac_outer_jac
# We sum away everything except the matrices in the axes 0 & 1.
axes = tuple(range(2, len(dd_logf.shape)))
result += np.sum(dd_logf, axis=axes, keepdims=False) # depends on [control=['for'], data=[]]
else:
return np.atleast_2d(np.squeeze(np.array(result))) |
def cleanup(self):
"""
Cleans up references to the plot after the plot has been
deleted. Traverses through all plots cleaning up Callbacks and
Stream subscribers.
"""
plots = self.traverse(lambda x: x, [BokehPlot])
for plot in plots:
if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):
continue
streams = list(plot.streams)
plot.streams = []
plot._document = None
if plot.subplots:
plot.subplots.clear()
if isinstance(plot, GenericElementPlot):
for callback in plot.callbacks:
streams += callback.streams
callback.cleanup()
for stream in set(streams):
stream._subscribers = [
(p, subscriber) for p, subscriber in stream._subscribers
if get_method_owner(subscriber) not in plots
]
if self.comm and self.root is self.handles.get('plot'):
self.comm.close() | def function[cleanup, parameter[self]]:
constant[
Cleans up references to the plot after the plot has been
deleted. Traverses through all plots cleaning up Callbacks and
Stream subscribers.
]
variable[plots] assign[=] call[name[self].traverse, parameter[<ast.Lambda object at 0x7da18f09f3d0>, list[[<ast.Name object at 0x7da18f09f940>]]]]
for taget[name[plot]] in starred[name[plots]] begin[:]
if <ast.UnaryOp object at 0x7da18f09c970> begin[:]
continue
variable[streams] assign[=] call[name[list], parameter[name[plot].streams]]
name[plot].streams assign[=] list[[]]
name[plot]._document assign[=] constant[None]
if name[plot].subplots begin[:]
call[name[plot].subplots.clear, parameter[]]
if call[name[isinstance], parameter[name[plot], name[GenericElementPlot]]] begin[:]
for taget[name[callback]] in starred[name[plot].callbacks] begin[:]
<ast.AugAssign object at 0x7da18f09d600>
call[name[callback].cleanup, parameter[]]
for taget[name[stream]] in starred[call[name[set], parameter[name[streams]]]] begin[:]
name[stream]._subscribers assign[=] <ast.ListComp object at 0x7da2045655d0>
if <ast.BoolOp object at 0x7da2054a7c10> begin[:]
call[name[self].comm.close, parameter[]] | keyword[def] identifier[cleanup] ( identifier[self] ):
literal[string]
identifier[plots] = identifier[self] . identifier[traverse] ( keyword[lambda] identifier[x] : identifier[x] ,[ identifier[BokehPlot] ])
keyword[for] identifier[plot] keyword[in] identifier[plots] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[plot] ,( identifier[GenericCompositePlot] , identifier[GenericElementPlot] , identifier[GenericOverlayPlot] )):
keyword[continue]
identifier[streams] = identifier[list] ( identifier[plot] . identifier[streams] )
identifier[plot] . identifier[streams] =[]
identifier[plot] . identifier[_document] = keyword[None]
keyword[if] identifier[plot] . identifier[subplots] :
identifier[plot] . identifier[subplots] . identifier[clear] ()
keyword[if] identifier[isinstance] ( identifier[plot] , identifier[GenericElementPlot] ):
keyword[for] identifier[callback] keyword[in] identifier[plot] . identifier[callbacks] :
identifier[streams] += identifier[callback] . identifier[streams]
identifier[callback] . identifier[cleanup] ()
keyword[for] identifier[stream] keyword[in] identifier[set] ( identifier[streams] ):
identifier[stream] . identifier[_subscribers] =[
( identifier[p] , identifier[subscriber] ) keyword[for] identifier[p] , identifier[subscriber] keyword[in] identifier[stream] . identifier[_subscribers]
keyword[if] identifier[get_method_owner] ( identifier[subscriber] ) keyword[not] keyword[in] identifier[plots]
]
keyword[if] identifier[self] . identifier[comm] keyword[and] identifier[self] . identifier[root] keyword[is] identifier[self] . identifier[handles] . identifier[get] ( literal[string] ):
identifier[self] . identifier[comm] . identifier[close] () | def cleanup(self):
"""
Cleans up references to the plot after the plot has been
deleted. Traverses through all plots cleaning up Callbacks and
Stream subscribers.
"""
plots = self.traverse(lambda x: x, [BokehPlot])
for plot in plots:
if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):
continue # depends on [control=['if'], data=[]]
streams = list(plot.streams)
plot.streams = []
plot._document = None
if plot.subplots:
plot.subplots.clear() # depends on [control=['if'], data=[]]
if isinstance(plot, GenericElementPlot):
for callback in plot.callbacks:
streams += callback.streams
callback.cleanup() # depends on [control=['for'], data=['callback']] # depends on [control=['if'], data=[]]
for stream in set(streams):
stream._subscribers = [(p, subscriber) for (p, subscriber) in stream._subscribers if get_method_owner(subscriber) not in plots] # depends on [control=['for'], data=['stream']] # depends on [control=['for'], data=['plot']]
if self.comm and self.root is self.handles.get('plot'):
self.comm.close() # depends on [control=['if'], data=[]] |
def __get_files(dir_name, extensions):
    '''Collect the files in a single directory matching any of the extensions'''
    # Resolve '~' and make the path absolute before globbing.
    dir_name = os.path.abspath(os.path.expanduser(dir_name))
    # Union of the glob matches for every requested extension.
    myfiles = set()
    for ext in extensions:
        pattern = os.path.join(dir_name, '*' + os.path.extsep + ext)
        myfiles.update(glob.glob(pattern))
return myfiles | def function[__get_files, parameter[dir_name, extensions]]:
constant[Helper function to get files in a single directory]
variable[dir_name] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.expanduser, parameter[name[dir_name]]]]]
variable[myfiles] assign[=] call[name[set], parameter[]]
for taget[name[sub_ext]] in starred[name[extensions]] begin[:]
variable[globstr] assign[=] call[name[os].path.join, parameter[name[dir_name], binary_operation[binary_operation[constant[*] + name[os].path.extsep] + name[sub_ext]]]]
<ast.AugAssign object at 0x7da207f997e0>
return[name[myfiles]] | keyword[def] identifier[__get_files] ( identifier[dir_name] , identifier[extensions] ):
literal[string]
identifier[dir_name] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[dir_name] ))
identifier[myfiles] = identifier[set] ()
keyword[for] identifier[sub_ext] keyword[in] identifier[extensions] :
identifier[globstr] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] , literal[string] + identifier[os] . identifier[path] . identifier[extsep] + identifier[sub_ext] )
identifier[myfiles] |= identifier[set] ( identifier[glob] . identifier[glob] ( identifier[globstr] ))
keyword[return] identifier[myfiles] | def __get_files(dir_name, extensions):
"""Helper function to get files in a single directory"""
# Expand out the directory
dir_name = os.path.abspath(os.path.expanduser(dir_name))
myfiles = set()
for sub_ext in extensions:
globstr = os.path.join(dir_name, '*' + os.path.extsep + sub_ext)
myfiles |= set(glob.glob(globstr)) # depends on [control=['for'], data=['sub_ext']]
return myfiles |
def joint_sfs(dac1, dac2, n1=None, n2=None):
    """Compute the joint site frequency spectrum between two populations.
    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.
    Returns
    -------
    joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
        Array where the (i, j)th element is the number of variant sites with i
        derived alleles in the first population and j derived alleles in the
        second population.
    """
    # Validate inputs and infer the sample sizes where not given.
    dac1, n1 = _check_dac_n(dac1, n1)
    dac2, n2 = _check_dac_n(dac2, n2)
    # One row/column per possible allele count (0..n inclusive).
    n_rows = n1 + 1
    n_cols = n2 + 1
    # Flatten each (i, j) coordinate pair into a single linear index;
    # bincount requires a platform integer dtype.
    flat = (dac1 * n_cols + dac2).astype(int, copy=False)
    # minlength pads missing (i, j) combinations with zero counts.
    s = np.bincount(flat, minlength=n_rows * n_cols).reshape(n_rows, n_cols)
return s | def function[joint_sfs, parameter[dac1, dac2, n1, n2]]:
constant[Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population.
]
<ast.Tuple object at 0x7da20c6e6da0> assign[=] call[name[_check_dac_n], parameter[name[dac1], name[n1]]]
<ast.Tuple object at 0x7da20c6e45e0> assign[=] call[name[_check_dac_n], parameter[name[dac2], name[n2]]]
variable[x] assign[=] binary_operation[name[n1] + constant[1]]
variable[y] assign[=] binary_operation[name[n2] + constant[1]]
variable[tmp] assign[=] call[binary_operation[binary_operation[name[dac1] * name[y]] + name[dac2]].astype, parameter[name[int]]]
variable[s] assign[=] call[name[np].bincount, parameter[name[tmp]]]
call[name[s].resize, parameter[name[x], name[y]]]
return[name[s]] | keyword[def] identifier[joint_sfs] ( identifier[dac1] , identifier[dac2] , identifier[n1] = keyword[None] , identifier[n2] = keyword[None] ):
literal[string]
identifier[dac1] , identifier[n1] = identifier[_check_dac_n] ( identifier[dac1] , identifier[n1] )
identifier[dac2] , identifier[n2] = identifier[_check_dac_n] ( identifier[dac2] , identifier[n2] )
identifier[x] = identifier[n1] + literal[int]
identifier[y] = identifier[n2] + literal[int]
identifier[tmp] =( identifier[dac1] * identifier[y] + identifier[dac2] ). identifier[astype] ( identifier[int] , identifier[copy] = keyword[False] )
identifier[s] = identifier[np] . identifier[bincount] ( identifier[tmp] )
identifier[s] . identifier[resize] ( identifier[x] , identifier[y] )
keyword[return] identifier[s] | def joint_sfs(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population.
"""
# check inputs
(dac1, n1) = _check_dac_n(dac1, n1)
(dac2, n2) = _check_dac_n(dac2, n2)
# compute site frequency spectrum
x = n1 + 1
y = n2 + 1
# need platform integer for bincount
tmp = (dac1 * y + dac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s |
def search_process(process, pattern, minAddr=None,
                   maxAddr=None,
                   bufferPages=None,
                   overlapping=False):
    """
    Search for the given pattern within the process memory.

    @type  process: L{Process}
    @param process: Process to search.

    @type  pattern: L{Pattern}
    @param pattern: Pattern to search for.
        It must be an instance of a subclass of L{Pattern}.

        The following L{Pattern} subclasses are provided by WinAppDbg:
         - L{BytePattern}
         - L{TextPattern}
         - L{RegExpPattern}
         - L{HexPattern}

        You can also write your own subclass of L{Pattern} for customized
        searches.

    @type  minAddr: int
    @param minAddr: (Optional) Start the search at this memory address.

    @type  maxAddr: int
    @param maxAddr: (Optional) Stop the search at this memory address.

    @type  bufferPages: int
    @param bufferPages: (Optional) Number of memory pages to buffer when
        performing the search. Valid values are:
         - C{0} or C{None}:
           Automatically determine the required buffer size. May not give
           complete results for regular expressions that match variable
           sized strings.
         - C{> 0}: Set the buffer size, in memory pages.
         - C{< 0}: Disable buffering entirely. This may give you a little
           speed gain at the cost of an increased memory usage. If the
           target process has very large contiguous memory regions it may
           actually be slower or even fail. It's also the only way to
           guarantee complete results for regular expressions that match
           variable sized strings.

    @type  overlapping: bool
    @param overlapping: C{True} to allow overlapping results, C{False}
        otherwise.

        Overlapping results yield the maximum possible number of results.
        For example, if searching for "AAAA" within "AAAAAAAA" at address
        C{0x10000}, when overlapping is turned off the following matches
        are yielded::
            (0x10000, 4, "AAAA")
            (0x10004, 4, "AAAA")
        If overlapping is turned on, the following matches are yielded::
            (0x10000, 4, "AAAA")
            (0x10001, 4, "AAAA")
            (0x10002, 4, "AAAA")
            (0x10003, 4, "AAAA")
            (0x10004, 4, "AAAA")
        As you can see, the middle results are overlapping the last two.

    @rtype:  iterator of tuple( int, int, str )
    @return: An iterator of tuples. Each tuple contains the following:
         - The memory address where the pattern was found.
         - The size of the data that matches the pattern.
         - The data that matches the pattern.

    @raise WindowsError: An error occurred when querying or reading the
        process memory.
    """
    # Do some namespace lookups of symbols we'll be using frequently.
    # (Dead locals removed: MEM_COMMIT and PAGE_GUARD were cached here
    # but never referenced anywhere in this function.)
    page = MemoryAddresses.pageSize
    read = pattern.read
    find = pattern.find

    # Calculate the address range.
    if minAddr is None:
        minAddr = 0
    if maxAddr is None:
        maxAddr = win32.LPVOID(-1).value    # XXX HACK

    # Calculate the buffer size from the number of pages.
    if bufferPages is None:
        try:
            size = MemoryAddresses.\
                        align_address_to_page_end(len(pattern)) + page
        except NotImplementedError:
            # Variable sized patterns (e.g. some regular expressions)
            # can't be buffered reliably, so buffering is disabled.
            size = None
    elif bufferPages > 0:
        size = page * (bufferPages + 1)
    else:
        size = None

    # Get the memory map of the process.
    memory_map = process.iter_memory_map(minAddr, maxAddr)

    # Perform search with buffering enabled.
    if size:

        # Loop through all memory blocks containing data.
        buffer    = ""      # buffer to hold the memory data
        prev_addr = 0       # previous memory block address
        last      = 0       # position of the last match
        delta     = 0       # delta of last read address and start of buffer
        for mbi in memory_map:

            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue

            # Get the address and size of this block.
            address    = mbi.BaseAddress    # current address to search on
            block_size = mbi.RegionSize     # total size of the block
            if address >= maxAddr:
                break
            end = address + block_size      # end address of the block

            # If the block is contiguous to the previous block,
            # coalesce the new data in the buffer.
            if delta and address == prev_addr:
                buffer += read(process, address, page)

            # If not, clear the buffer and read new data.
            else:
                buffer = read(process, address, min(size, block_size))
                last   = 0
                delta  = 0

            # Search for the pattern in this block.
            while 1:

                # Yield each match of the pattern in the buffer.
                pos, length = find(buffer, last)
                while pos >= last:
                    match_addr = address + pos - delta
                    if minAddr <= match_addr < maxAddr:
                        result = pattern.found(
                            match_addr, length,
                            buffer[pos : pos + length])
                        if result is not None:
                            yield result
                    if overlapping:
                        last = pos + 1
                    else:
                        last = pos + length
                    pos, length = find(buffer, last)

                # Advance to the next page.
                address    = address + page
                block_size = block_size - page
                prev_addr  = address

                # Fix the position of the last match.
                last = last - page
                if last < 0:
                    last = 0

                # Remove the first page in the buffer.
                buffer = buffer[page:]
                delta  = page

                # If we haven't reached the end of the block yet,
                # read the next page in the block and keep searching.
                if address < end:
                    buffer = buffer + read(process, address, page)

                # Otherwise, we're done searching this block.
                else:
                    break

    # Perform search with buffering disabled.
    else:

        # Loop through all memory blocks containing data.
        for mbi in memory_map:

            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue

            # Get the address and size of this block.
            address    = mbi.BaseAddress
            block_size = mbi.RegionSize
            if address >= maxAddr:
                break

            # Read the whole memory region.
            buffer = process.read(address, block_size)

            # Search for the pattern in this region.
            pos, length = find(buffer)
            last = 0
            while pos >= last:
                match_addr = address + pos
                if minAddr <= match_addr < maxAddr:
                    result = pattern.found(
                        match_addr, length,
                        buffer[pos : pos + length])
                    if result is not None:
                        yield result
                if overlapping:
                    last = pos + 1
                else:
                    last = pos + length
pos, length = find(buffer, last) | def function[search_process, parameter[process, pattern, minAddr, maxAddr, bufferPages, overlapping]]:
constant[
Search for the given pattern within the process memory.
@type process: L{Process}
@param process: Process to search.
@type pattern: L{Pattern}
@param pattern: Pattern to search for.
It must be an instance of a subclass of L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@type bufferPages: int
@param bufferPages: (Optional) Number of memory pages to buffer when
performing the search. Valid values are:
- C{0} or C{None}:
Automatically determine the required buffer size. May not give
complete results for regular expressions that match variable
sized strings.
- C{> 0}: Set the buffer size, in memory pages.
- C{< 0}: Disable buffering entirely. This may give you a little
speed gain at the cost of an increased memory usage. If the
target process has very large contiguous memory regions it may
actually be slower or even fail. It's also the only way to
guarantee complete results for regular expressions that match
variable sized strings.
@type overlapping: bool
@param overlapping: C{True} to allow overlapping results, C{False}
otherwise.
Overlapping results yield the maximum possible number of results.
For example, if searching for "AAAA" within "AAAAAAAA" at address
C{0x10000}, when overlapping is turned off the following matches
are yielded::
(0x10000, 4, "AAAA")
(0x10004, 4, "AAAA")
If overlapping is turned on, the following matches are yielded::
(0x10000, 4, "AAAA")
(0x10001, 4, "AAAA")
(0x10002, 4, "AAAA")
(0x10003, 4, "AAAA")
(0x10004, 4, "AAAA")
As you can see, the middle results are overlapping the last two.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory.
]
variable[MEM_COMMIT] assign[=] name[win32].MEM_COMMIT
variable[PAGE_GUARD] assign[=] name[win32].PAGE_GUARD
variable[page] assign[=] name[MemoryAddresses].pageSize
variable[read] assign[=] name[pattern].read
variable[find] assign[=] name[pattern].find
if compare[name[minAddr] is constant[None]] begin[:]
variable[minAddr] assign[=] constant[0]
if compare[name[maxAddr] is constant[None]] begin[:]
variable[maxAddr] assign[=] call[name[win32].LPVOID, parameter[<ast.UnaryOp object at 0x7da207f00df0>]].value
if compare[name[bufferPages] is constant[None]] begin[:]
<ast.Try object at 0x7da207f034f0>
variable[memory_map] assign[=] call[name[process].iter_memory_map, parameter[name[minAddr], name[maxAddr]]]
if name[size] begin[:]
variable[buffer] assign[=] constant[]
variable[prev_addr] assign[=] constant[0]
variable[last] assign[=] constant[0]
variable[delta] assign[=] constant[0]
for taget[name[mbi]] in starred[name[memory_map]] begin[:]
if <ast.UnaryOp object at 0x7da207f008e0> begin[:]
continue
variable[address] assign[=] name[mbi].BaseAddress
variable[block_size] assign[=] name[mbi].RegionSize
if compare[name[address] greater_or_equal[>=] name[maxAddr]] begin[:]
break
variable[end] assign[=] binary_operation[name[address] + name[block_size]]
if <ast.BoolOp object at 0x7da207f00f70> begin[:]
<ast.AugAssign object at 0x7da207f03f70>
while constant[1] begin[:]
<ast.Tuple object at 0x7da207f011e0> assign[=] call[name[find], parameter[name[buffer], name[last]]]
while compare[name[pos] greater_or_equal[>=] name[last]] begin[:]
variable[match_addr] assign[=] binary_operation[binary_operation[name[address] + name[pos]] - name[delta]]
if compare[name[minAddr] less_or_equal[<=] name[match_addr]] begin[:]
variable[result] assign[=] call[name[pattern].found, parameter[name[match_addr], name[length], call[name[buffer]][<ast.Slice object at 0x7da1b08d9e40>]]]
if compare[name[result] is_not constant[None]] begin[:]
<ast.Yield object at 0x7da1b08d8550>
if name[overlapping] begin[:]
variable[last] assign[=] binary_operation[name[pos] + constant[1]]
<ast.Tuple object at 0x7da1b08d9240> assign[=] call[name[find], parameter[name[buffer], name[last]]]
variable[address] assign[=] binary_operation[name[address] + name[page]]
variable[block_size] assign[=] binary_operation[name[block_size] - name[page]]
variable[prev_addr] assign[=] name[address]
variable[last] assign[=] binary_operation[name[last] - name[page]]
if compare[name[last] less[<] constant[0]] begin[:]
variable[last] assign[=] constant[0]
variable[buffer] assign[=] call[name[buffer]][<ast.Slice object at 0x7da18f723b80>]
variable[delta] assign[=] name[page]
if compare[name[address] less[<] name[end]] begin[:]
variable[buffer] assign[=] binary_operation[name[buffer] + call[name[read], parameter[name[process], name[address], name[page]]]] | keyword[def] identifier[search_process] ( identifier[process] , identifier[pattern] , identifier[minAddr] = keyword[None] ,
identifier[maxAddr] = keyword[None] ,
identifier[bufferPages] = keyword[None] ,
identifier[overlapping] = keyword[False] ):
literal[string]
identifier[MEM_COMMIT] = identifier[win32] . identifier[MEM_COMMIT]
identifier[PAGE_GUARD] = identifier[win32] . identifier[PAGE_GUARD]
identifier[page] = identifier[MemoryAddresses] . identifier[pageSize]
identifier[read] = identifier[pattern] . identifier[read]
identifier[find] = identifier[pattern] . identifier[find]
keyword[if] identifier[minAddr] keyword[is] keyword[None] :
identifier[minAddr] = literal[int]
keyword[if] identifier[maxAddr] keyword[is] keyword[None] :
identifier[maxAddr] = identifier[win32] . identifier[LPVOID] (- literal[int] ). identifier[value]
keyword[if] identifier[bufferPages] keyword[is] keyword[None] :
keyword[try] :
identifier[size] = identifier[MemoryAddresses] . identifier[align_address_to_page_end] ( identifier[len] ( identifier[pattern] ))+ identifier[page]
keyword[except] identifier[NotImplementedError] :
identifier[size] = keyword[None]
keyword[elif] identifier[bufferPages] > literal[int] :
identifier[size] = identifier[page] *( identifier[bufferPages] + literal[int] )
keyword[else] :
identifier[size] = keyword[None]
identifier[memory_map] = identifier[process] . identifier[iter_memory_map] ( identifier[minAddr] , identifier[maxAddr] )
keyword[if] identifier[size] :
identifier[buffer] = literal[string]
identifier[prev_addr] = literal[int]
identifier[last] = literal[int]
identifier[delta] = literal[int]
keyword[for] identifier[mbi] keyword[in] identifier[memory_map] :
keyword[if] keyword[not] identifier[mbi] . identifier[has_content] ():
keyword[continue]
identifier[address] = identifier[mbi] . identifier[BaseAddress]
identifier[block_size] = identifier[mbi] . identifier[RegionSize]
keyword[if] identifier[address] >= identifier[maxAddr] :
keyword[break]
identifier[end] = identifier[address] + identifier[block_size]
keyword[if] identifier[delta] keyword[and] identifier[address] == identifier[prev_addr] :
identifier[buffer] += identifier[read] ( identifier[process] , identifier[address] , identifier[page] )
keyword[else] :
identifier[buffer] = identifier[read] ( identifier[process] , identifier[address] , identifier[min] ( identifier[size] , identifier[block_size] ))
identifier[last] = literal[int]
identifier[delta] = literal[int]
keyword[while] literal[int] :
identifier[pos] , identifier[length] = identifier[find] ( identifier[buffer] , identifier[last] )
keyword[while] identifier[pos] >= identifier[last] :
identifier[match_addr] = identifier[address] + identifier[pos] - identifier[delta]
keyword[if] identifier[minAddr] <= identifier[match_addr] < identifier[maxAddr] :
identifier[result] = identifier[pattern] . identifier[found] (
identifier[match_addr] , identifier[length] ,
identifier[buffer] [ identifier[pos] : identifier[pos] + identifier[length] ])
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[result]
keyword[if] identifier[overlapping] :
identifier[last] = identifier[pos] + literal[int]
keyword[else] :
identifier[last] = identifier[pos] + identifier[length]
identifier[pos] , identifier[length] = identifier[find] ( identifier[buffer] , identifier[last] )
identifier[address] = identifier[address] + identifier[page]
identifier[block_size] = identifier[block_size] - identifier[page]
identifier[prev_addr] = identifier[address]
identifier[last] = identifier[last] - identifier[page]
keyword[if] identifier[last] < literal[int] :
identifier[last] = literal[int]
identifier[buffer] = identifier[buffer] [ identifier[page] :]
identifier[delta] = identifier[page]
keyword[if] identifier[address] < identifier[end] :
identifier[buffer] = identifier[buffer] + identifier[read] ( identifier[process] , identifier[address] , identifier[page] )
keyword[else] :
keyword[break]
keyword[else] :
keyword[for] identifier[mbi] keyword[in] identifier[memory_map] :
keyword[if] keyword[not] identifier[mbi] . identifier[has_content] ():
keyword[continue]
identifier[address] = identifier[mbi] . identifier[BaseAddress]
identifier[block_size] = identifier[mbi] . identifier[RegionSize]
keyword[if] identifier[address] >= identifier[maxAddr] :
keyword[break] ;
identifier[buffer] = identifier[process] . identifier[read] ( identifier[address] , identifier[block_size] )
identifier[pos] , identifier[length] = identifier[find] ( identifier[buffer] )
identifier[last] = literal[int]
keyword[while] identifier[pos] >= identifier[last] :
identifier[match_addr] = identifier[address] + identifier[pos]
keyword[if] identifier[minAddr] <= identifier[match_addr] < identifier[maxAddr] :
identifier[result] = identifier[pattern] . identifier[found] (
identifier[match_addr] , identifier[length] ,
identifier[buffer] [ identifier[pos] : identifier[pos] + identifier[length] ])
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[result]
keyword[if] identifier[overlapping] :
identifier[last] = identifier[pos] + literal[int]
keyword[else] :
identifier[last] = identifier[pos] + identifier[length]
identifier[pos] , identifier[length] = identifier[find] ( identifier[buffer] , identifier[last] ) | def search_process(process, pattern, minAddr=None, maxAddr=None, bufferPages=None, overlapping=False):
"""
Search for the given pattern within the process memory.
@type process: L{Process}
@param process: Process to search.
@type pattern: L{Pattern}
@param pattern: Pattern to search for.
It must be an instance of a subclass of L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@type bufferPages: int
@param bufferPages: (Optional) Number of memory pages to buffer when
performing the search. Valid values are:
- C{0} or C{None}:
Automatically determine the required buffer size. May not give
complete results for regular expressions that match variable
sized strings.
- C{> 0}: Set the buffer size, in memory pages.
- C{< 0}: Disable buffering entirely. This may give you a little
speed gain at the cost of an increased memory usage. If the
target process has very large contiguous memory regions it may
actually be slower or even fail. It's also the only way to
guarantee complete results for regular expressions that match
variable sized strings.
@type overlapping: bool
@param overlapping: C{True} to allow overlapping results, C{False}
otherwise.
Overlapping results yield the maximum possible number of results.
For example, if searching for "AAAA" within "AAAAAAAA" at address
C{0x10000}, when overlapping is turned off the following matches
are yielded::
(0x10000, 4, "AAAA")
(0x10004, 4, "AAAA")
If overlapping is turned on, the following matches are yielded::
(0x10000, 4, "AAAA")
(0x10001, 4, "AAAA")
(0x10002, 4, "AAAA")
(0x10003, 4, "AAAA")
(0x10004, 4, "AAAA")
As you can see, the middle results are overlapping the last two.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory.
"""
# Do some namespace lookups of symbols we'll be using frequently.
MEM_COMMIT = win32.MEM_COMMIT
PAGE_GUARD = win32.PAGE_GUARD
page = MemoryAddresses.pageSize
read = pattern.read
find = pattern.find
# Calculate the address range.
if minAddr is None:
minAddr = 0 # depends on [control=['if'], data=['minAddr']]
if maxAddr is None:
maxAddr = win32.LPVOID(-1).value # XXX HACK # depends on [control=['if'], data=['maxAddr']]
# Calculate the buffer size from the number of pages.
if bufferPages is None:
try:
size = MemoryAddresses.align_address_to_page_end(len(pattern)) + page # depends on [control=['try'], data=[]]
except NotImplementedError:
size = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif bufferPages > 0:
size = page * (bufferPages + 1) # depends on [control=['if'], data=['bufferPages']]
else:
size = None
# Get the memory map of the process.
memory_map = process.iter_memory_map(minAddr, maxAddr)
# Perform search with buffering enabled.
if size:
# Loop through all memory blocks containing data.
buffer = '' # buffer to hold the memory data
prev_addr = 0 # previous memory block address
last = 0 # position of the last match
delta = 0 # delta of last read address and start of buffer
for mbi in memory_map:
# Skip blocks with no data to search on.
if not mbi.has_content():
continue # depends on [control=['if'], data=[]]
# Get the address and size of this block.
address = mbi.BaseAddress # current address to search on
block_size = mbi.RegionSize # total size of the block
if address >= maxAddr:
break # depends on [control=['if'], data=[]]
end = address + block_size # end address of the block
# If the block is contiguous to the previous block,
# coalesce the new data in the buffer.
if delta and address == prev_addr:
buffer += read(process, address, page) # depends on [control=['if'], data=[]]
else:
# If not, clear the buffer and read new data.
buffer = read(process, address, min(size, block_size))
last = 0
delta = 0
# Search for the pattern in this block.
while 1:
# Yield each match of the pattern in the buffer.
(pos, length) = find(buffer, last)
while pos >= last:
match_addr = address + pos - delta
if minAddr <= match_addr < maxAddr:
result = pattern.found(match_addr, length, buffer[pos:pos + length])
if result is not None:
yield result # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=['match_addr']]
if overlapping:
last = pos + 1 # depends on [control=['if'], data=[]]
else:
last = pos + length
(pos, length) = find(buffer, last) # depends on [control=['while'], data=['pos', 'last']]
# Advance to the next page.
address = address + page
block_size = block_size - page
prev_addr = address
# Fix the position of the last match.
last = last - page
if last < 0:
last = 0 # depends on [control=['if'], data=['last']]
# Remove the first page in the buffer.
buffer = buffer[page:]
delta = page
# If we haven't reached the end of the block yet,
# read the next page in the block and keep seaching.
if address < end:
buffer = buffer + read(process, address, page) # depends on [control=['if'], data=['address']]
else:
# Otherwise, we're done searching this block.
break # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['mbi']] # depends on [control=['if'], data=[]]
else:
# Perform search with buffering disabled.
# Loop through all memory blocks containing data.
for mbi in memory_map:
# Skip blocks with no data to search on.
if not mbi.has_content():
continue # depends on [control=['if'], data=[]]
# Get the address and size of this block.
address = mbi.BaseAddress
block_size = mbi.RegionSize
if address >= maxAddr:
break # depends on [control=['if'], data=[]]
# Read the whole memory region.
buffer = process.read(address, block_size)
# Search for the pattern in this region.
(pos, length) = find(buffer)
last = 0
while pos >= last:
match_addr = address + pos
if minAddr <= match_addr < maxAddr:
result = pattern.found(match_addr, length, buffer[pos:pos + length])
if result is not None:
yield result # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=['match_addr']]
if overlapping:
last = pos + 1 # depends on [control=['if'], data=[]]
else:
last = pos + length
(pos, length) = find(buffer, last) # depends on [control=['while'], data=['pos', 'last']] # depends on [control=['for'], data=['mbi']] |
def setup_allelespecific_database(fasta_file, database_folder, allele_list):
    """Write a genus-specific rMLST allele database.

    Some genera are missing certain rMLST genes or carry duplicate copies,
    so a genus-specific database is built by pulling only the alleles known
    to belong to the genus out of the combined database.

    :param fasta_file: Path to the fasta file the allele-specific database is written to.
    :param database_folder: Path to the folder where rMLST_combined.fasta is stored.
    :param allele_list: Allele names produced by find_genusspecific_allele_list.
    """
    combined_fasta = os.path.join(database_folder, 'rMLST_combined.fasta')
    record_index = SeqIO.index(combined_fasta, 'fasta')
    records = []
    for allele in allele_list:
        try:
            records.append(record_index[allele])
        except KeyError:
            # Allele names can be absent from the combined database; warn and keep going.
            logging.warning('Tried to add {} to allele-specific database, but could not find it.'.format(allele))
    SeqIO.write(records, fasta_file, 'fasta')
constant[
Since some genera have some rMLST genes missing, or two copies of some genes, genus-specific databases are needed.
This will take only the alleles known to be part of each genus and write them to a genus-specific file.
:param database_folder: Path to folder where rMLST_combined is stored.
:param fasta_file: Path to fasta file to write allelespecific database to.
:param allele_list: allele list generated by find_genusspecific_allele_list
]
variable[index] assign[=] call[name[SeqIO].index, parameter[call[name[os].path.join, parameter[name[database_folder], constant[rMLST_combined.fasta]]], constant[fasta]]]
variable[seqs] assign[=] call[name[list], parameter[]]
for taget[name[s]] in starred[name[allele_list]] begin[:]
<ast.Try object at 0x7da1b1a9e530>
call[name[SeqIO].write, parameter[name[seqs], name[fasta_file], constant[fasta]]] | keyword[def] identifier[setup_allelespecific_database] ( identifier[fasta_file] , identifier[database_folder] , identifier[allele_list] ):
literal[string]
identifier[index] = identifier[SeqIO] . identifier[index] ( identifier[os] . identifier[path] . identifier[join] ( identifier[database_folder] , literal[string] ), literal[string] )
identifier[seqs] = identifier[list] ()
keyword[for] identifier[s] keyword[in] identifier[allele_list] :
keyword[try] :
identifier[seqs] . identifier[append] ( identifier[index] [ identifier[s] ])
keyword[except] identifier[KeyError] :
identifier[logging] . identifier[warning] ( literal[string] . identifier[format] ( identifier[s] ))
identifier[SeqIO] . identifier[write] ( identifier[seqs] , identifier[fasta_file] , literal[string] ) | def setup_allelespecific_database(fasta_file, database_folder, allele_list):
"""
Since some genera have some rMLST genes missing, or two copies of some genes, genus-specific databases are needed.
This will take only the alleles known to be part of each genus and write them to a genus-specific file.
:param database_folder: Path to folder where rMLST_combined is stored.
:param fasta_file: Path to fasta file to write allelespecific database to.
:param allele_list: allele list generated by find_genusspecific_allele_list
"""
index = SeqIO.index(os.path.join(database_folder, 'rMLST_combined.fasta'), 'fasta')
seqs = list()
for s in allele_list:
try:
seqs.append(index[s]) # depends on [control=['try'], data=[]]
except KeyError:
logging.warning('Tried to add {} to allele-specific database, but could not find it.'.format(s)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['s']]
SeqIO.write(seqs, fasta_file, 'fasta') |
def delete_file(dk_api, kitchen, recipe_name, message, files_to_delete_param):
    """Delete one or more recipe files through the API.

    Stops at the first failure; the accumulated per-file status text is
    stored on the returned code via set_message().

    :param dk_api: api object
    :param kitchen: kitchen name, string
    :param recipe_name: recipe name, string
    :param message: commit message, string
    :param files_to_delete_param: a single path or a list of paths to delete
    :rtype: DKReturnCode
    """
    rc = DKReturnCode()
    if any(arg is None for arg in (kitchen, recipe_name, message, files_to_delete_param)):
        rc.set(rc.DK_FAIL, 'ERROR: DKCloudCommandRunner bad input parameters')
        return rc
    # Accept either a single path string or an iterable of paths.
    if isinstance(files_to_delete_param, basestring):
        files_to_delete = [files_to_delete_param]
    else:
        files_to_delete = files_to_delete_param
    msg = ''
    for path in files_to_delete:
        rc = dk_api.delete_file(kitchen, recipe_name, message, path, os.path.basename(path))
        if rc.ok():
            msg += 'DKCloudCommand.delete_file for %s succeed' % path
        else:
            # Abort on the first failure, reporting everything gathered so far.
            msg += '\nDKCloudCommand.delete_file for %s failed\nmessage: %s' % (path, rc.get_message())
            rc.set_message(msg)
            return rc
    rc.set_message(msg)
    return rc
constant[
returns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_delete_param: path to the files to delete
:rtype: DKReturnCode
]
variable[rc] assign[=] call[name[DKReturnCode], parameter[]]
if <ast.BoolOp object at 0x7da18fe916c0> begin[:]
variable[s] assign[=] constant[ERROR: DKCloudCommandRunner bad input parameters]
call[name[rc].set, parameter[name[rc].DK_FAIL, name[s]]]
return[name[rc]]
if call[name[isinstance], parameter[name[files_to_delete_param], name[basestring]]] begin[:]
variable[files_to_delete] assign[=] list[[<ast.Name object at 0x7da207f01f60>]]
variable[msg] assign[=] constant[]
for taget[name[file_to_delete]] in starred[name[files_to_delete]] begin[:]
variable[basename] assign[=] call[name[os].path.basename, parameter[name[file_to_delete]]]
variable[rc] assign[=] call[name[dk_api].delete_file, parameter[name[kitchen], name[recipe_name], name[message], name[file_to_delete], name[basename]]]
if <ast.UnaryOp object at 0x7da207f02da0> begin[:]
<ast.AugAssign object at 0x7da207f03c70>
call[name[rc].set_message, parameter[name[msg]]]
return[name[rc]]
call[name[rc].set_message, parameter[name[msg]]]
return[name[rc]] | keyword[def] identifier[delete_file] ( identifier[dk_api] , identifier[kitchen] , identifier[recipe_name] , identifier[message] , identifier[files_to_delete_param] ):
literal[string]
identifier[rc] = identifier[DKReturnCode] ()
keyword[if] identifier[kitchen] keyword[is] keyword[None] keyword[or] identifier[recipe_name] keyword[is] keyword[None] keyword[or] identifier[message] keyword[is] keyword[None] keyword[or] identifier[files_to_delete_param] keyword[is] keyword[None] :
identifier[s] = literal[string]
identifier[rc] . identifier[set] ( identifier[rc] . identifier[DK_FAIL] , identifier[s] )
keyword[return] identifier[rc]
keyword[if] identifier[isinstance] ( identifier[files_to_delete_param] , identifier[basestring] ):
identifier[files_to_delete] =[ identifier[files_to_delete_param] ]
keyword[else] :
identifier[files_to_delete] = identifier[files_to_delete_param]
identifier[msg] = literal[string]
keyword[for] identifier[file_to_delete] keyword[in] identifier[files_to_delete] :
identifier[basename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[file_to_delete] )
identifier[rc] = identifier[dk_api] . identifier[delete_file] ( identifier[kitchen] , identifier[recipe_name] , identifier[message] , identifier[file_to_delete] , identifier[basename] )
keyword[if] keyword[not] identifier[rc] . identifier[ok] ():
identifier[msg] += literal[string] %( identifier[file_to_delete] , identifier[rc] . identifier[get_message] ())
identifier[rc] . identifier[set_message] ( identifier[msg] )
keyword[return] identifier[rc]
keyword[else] :
identifier[msg] += literal[string] % identifier[file_to_delete]
identifier[rc] . identifier[set_message] ( identifier[msg] )
keyword[return] identifier[rc] | def delete_file(dk_api, kitchen, recipe_name, message, files_to_delete_param):
"""
returns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_delete_param: path to the files to delete
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if kitchen is None or recipe_name is None or message is None or (files_to_delete_param is None):
s = 'ERROR: DKCloudCommandRunner bad input parameters'
rc.set(rc.DK_FAIL, s)
return rc # depends on [control=['if'], data=[]]
# Take a simple string or an array
if isinstance(files_to_delete_param, basestring):
files_to_delete = [files_to_delete_param] # depends on [control=['if'], data=[]]
else:
files_to_delete = files_to_delete_param
msg = ''
for file_to_delete in files_to_delete:
basename = os.path.basename(file_to_delete)
rc = dk_api.delete_file(kitchen, recipe_name, message, file_to_delete, basename)
if not rc.ok():
msg += '\nDKCloudCommand.delete_file for %s failed\nmessage: %s' % (file_to_delete, rc.get_message())
rc.set_message(msg)
return rc # depends on [control=['if'], data=[]]
else:
msg += 'DKCloudCommand.delete_file for %s succeed' % file_to_delete # depends on [control=['for'], data=['file_to_delete']]
rc.set_message(msg)
return rc |
def _parse_area_km2(valor):
"""O campo ``area_km2`` é uma string com um número em formato pt-br, com
casas decimais que representam m2.
Exemplos: "331,401", "248.222,801"
"""
if valor is None:
return None
elif isinstance(valor, Decimal):
return valor
try:
int_, dec = valor.split(',', 1)
except ValueError:
# valor não tem separador decimal
int_, dec = valor, '000'
# remove os separadores de milhar
int_ = int_.replace('.', '')
return Decimal('%s.%s' % (int_, dec)) | def function[_parse_area_km2, parameter[valor]]:
constant[O campo ``area_km2`` é uma string com um número em formato pt-br, com
casas decimais que representam m2.
Exemplos: "331,401", "248.222,801"
]
if compare[name[valor] is constant[None]] begin[:]
return[constant[None]]
<ast.Try object at 0x7da1b2298460>
variable[int_] assign[=] call[name[int_].replace, parameter[constant[.], constant[]]]
return[call[name[Decimal], parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b229a8f0>, <ast.Name object at 0x7da1b2298eb0>]]]]]] | keyword[def] identifier[_parse_area_km2] ( identifier[valor] ):
literal[string]
keyword[if] identifier[valor] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[valor] , identifier[Decimal] ):
keyword[return] identifier[valor]
keyword[try] :
identifier[int_] , identifier[dec] = identifier[valor] . identifier[split] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
identifier[int_] , identifier[dec] = identifier[valor] , literal[string]
identifier[int_] = identifier[int_] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[Decimal] ( literal[string] %( identifier[int_] , identifier[dec] )) | def _parse_area_km2(valor):
"""O campo ``area_km2`` é uma string com um número em formato pt-br, com
casas decimais que representam m2.
Exemplos: "331,401", "248.222,801"
"""
if valor is None:
return None # depends on [control=['if'], data=[]]
elif isinstance(valor, Decimal):
return valor # depends on [control=['if'], data=[]]
try:
(int_, dec) = valor.split(',', 1) # depends on [control=['try'], data=[]]
except ValueError:
# valor não tem separador decimal
(int_, dec) = (valor, '000') # depends on [control=['except'], data=[]]
# remove os separadores de milhar
int_ = int_.replace('.', '')
return Decimal('%s.%s' % (int_, dec)) |
def click(self, *args, **kwds):
    """
    Click on the current element when called with no arguments. With
    arguments, the target element is looked up first, as in the snippet
    below; the accepted arguments are those of
    :py:meth:`~._WebdriverBaseWrapper.get_elm`.
    .. code-block:: python
        driver.get_elm('someid').click()
    """
    if not args and not kwds:
        super().click()
        return
    self.get_elm(*args, **kwds).click()
constant[
When you not pass any argument, it clicks on current element. If you
pass some arguments, it works as following snippet. For more info what
you can pass check out method :py:meth:`~._WebdriverBaseWrapper.get_elm`.
.. code-block:: python
driver.get_elm('someid').click()
]
if <ast.BoolOp object at 0x7da1b0ebe890> begin[:]
variable[elm] assign[=] call[name[self].get_elm, parameter[<ast.Starred object at 0x7da1b0ebce20>]]
call[name[elm].click, parameter[]] | keyword[def] identifier[click] ( identifier[self] ,* identifier[args] ,** identifier[kwds] ):
literal[string]
keyword[if] identifier[args] keyword[or] identifier[kwds] :
identifier[elm] = identifier[self] . identifier[get_elm] (* identifier[args] ,** identifier[kwds] )
identifier[elm] . identifier[click] ()
keyword[else] :
identifier[super] (). identifier[click] () | def click(self, *args, **kwds):
"""
When you not pass any argument, it clicks on current element. If you
pass some arguments, it works as following snippet. For more info what
you can pass check out method :py:meth:`~._WebdriverBaseWrapper.get_elm`.
.. code-block:: python
driver.get_elm('someid').click()
"""
if args or kwds:
elm = self.get_elm(*args, **kwds)
elm.click() # depends on [control=['if'], data=[]]
else:
super().click() |
def pair_new_device(self, pairing_mode, pairing_mode_duration=60, pairing_device_type_selector=None,
                    kidde_radio_code=None):
    """Put the hub into pairing mode for a new device.

    :param pairing_mode: one of "zigbee", "zwave", "zwave_exclusion",
        "zwave_network_rediscovery", "lutron", "bluetooth", "kidde"
    :param pairing_mode_duration: duration in seconds, defaults to 60
    :param pairing_device_type_selector: a string; believed to apply to
        bluetooth devices only (defaults to "switchmate" for bluetooth)
    :param kidde_radio_code: a string of 8 1s and 0s, one per dip switch on
        the kidde device, left --> right = 1 --> 8
    :return: nothing
    """
    # Per-mode adjustments mirroring the hub's requirements.
    if pairing_mode == "lutron":
        # Lutron needs at least two minutes of pairing time.
        pairing_mode_duration = max(pairing_mode_duration, 120)
    elif pairing_mode == "zwave_network_rediscovery":
        pairing_mode_duration = 0
    elif pairing_mode == "bluetooth" and pairing_device_type_selector is None:
        pairing_device_type_selector = "switchmate"
    desired_state = {"pairing_mode": pairing_mode,
                     "pairing_mode_duration": pairing_mode_duration}
    if pairing_mode == "kidde" and kidde_radio_code is not None:
        try:
            # Interpret the dip-switch 1s and 0s as a binary integer.
            desired_state = {"kidde_radio_code": int(kidde_radio_code, 2),
                            "pairing_mode": None}
        except (TypeError, ValueError):
            # Keep the original desired_state; just report the bad code.
            _LOGGER.error("An invalid Kidde radio code was provided. %s", kidde_radio_code)
    if pairing_device_type_selector is not None:
        desired_state["pairing_device_type_selector"] = pairing_device_type_selector
    response = self.api_interface.set_device_state(self, {
        "desired_state": desired_state
    })
    self._update_state_from_response(response)
constant[
:param pairing_mode: a string one of ["zigbee", "zwave", "zwave_exclusion",
"zwave_network_rediscovery", "lutron", "bluetooth", "kidde"]
:param pairing_mode_duration: an int in seconds defaults to 60
:param pairing_device_type_selector: a string I believe this is only for bluetooth devices.
:param kidde_radio_code: a string of 8 1s and 0s one for each dip switch on the kidde device
left --> right = 1 --> 8
:return: nothing
]
if <ast.BoolOp object at 0x7da1b265e4d0> begin[:]
variable[pairing_mode_duration] assign[=] constant[120]
variable[desired_state] assign[=] dictionary[[<ast.Constant object at 0x7da1b265e140>, <ast.Constant object at 0x7da1b265e320>], [<ast.Name object at 0x7da1b265ff70>, <ast.Name object at 0x7da1b265f670>]]
if <ast.BoolOp object at 0x7da1b265dea0> begin[:]
<ast.Try object at 0x7da1b265ed10>
if compare[name[pairing_device_type_selector] is_not constant[None]] begin[:]
call[name[desired_state].update, parameter[dictionary[[<ast.Constant object at 0x7da1b25317e0>], [<ast.Name object at 0x7da1b2606440>]]]]
variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], dictionary[[<ast.Constant object at 0x7da1b2606aa0>], [<ast.Name object at 0x7da1b2604a90>]]]]
call[name[self]._update_state_from_response, parameter[name[response]]] | keyword[def] identifier[pair_new_device] ( identifier[self] , identifier[pairing_mode] , identifier[pairing_mode_duration] = literal[int] , identifier[pairing_device_type_selector] = keyword[None] ,
identifier[kidde_radio_code] = keyword[None] ):
literal[string]
keyword[if] identifier[pairing_mode] == literal[string] keyword[and] identifier[pairing_mode_duration] < literal[int] :
identifier[pairing_mode_duration] = literal[int]
keyword[elif] identifier[pairing_mode] == literal[string] :
identifier[pairing_mode_duration] = literal[int]
keyword[elif] identifier[pairing_mode] == literal[string] keyword[and] identifier[pairing_device_type_selector] keyword[is] keyword[None] :
identifier[pairing_device_type_selector] = literal[string]
identifier[desired_state] ={ literal[string] : identifier[pairing_mode] ,
literal[string] : identifier[pairing_mode_duration] }
keyword[if] identifier[pairing_mode] == literal[string] keyword[and] identifier[kidde_radio_code] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[kidde_radio_code_int] = identifier[int] ( identifier[kidde_radio_code] , literal[int] )
identifier[desired_state] ={ literal[string] : identifier[kidde_radio_code_int] , literal[string] : keyword[None] }
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[_LOGGER] . identifier[error] ( literal[string] , identifier[kidde_radio_code] )
keyword[if] identifier[pairing_device_type_selector] keyword[is] keyword[not] keyword[None] :
identifier[desired_state] . identifier[update] ({ literal[string] : identifier[pairing_device_type_selector] })
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] ,{
literal[string] : identifier[desired_state]
})
identifier[self] . identifier[_update_state_from_response] ( identifier[response] ) | def pair_new_device(self, pairing_mode, pairing_mode_duration=60, pairing_device_type_selector=None, kidde_radio_code=None):
"""
:param pairing_mode: a string one of ["zigbee", "zwave", "zwave_exclusion",
"zwave_network_rediscovery", "lutron", "bluetooth", "kidde"]
:param pairing_mode_duration: an int in seconds defaults to 60
:param pairing_device_type_selector: a string I believe this is only for bluetooth devices.
:param kidde_radio_code: a string of 8 1s and 0s one for each dip switch on the kidde device
left --> right = 1 --> 8
:return: nothing
"""
if pairing_mode == 'lutron' and pairing_mode_duration < 120:
pairing_mode_duration = 120 # depends on [control=['if'], data=[]]
elif pairing_mode == 'zwave_network_rediscovery':
pairing_mode_duration = 0 # depends on [control=['if'], data=[]]
elif pairing_mode == 'bluetooth' and pairing_device_type_selector is None:
pairing_device_type_selector = 'switchmate' # depends on [control=['if'], data=[]]
desired_state = {'pairing_mode': pairing_mode, 'pairing_mode_duration': pairing_mode_duration}
if pairing_mode == 'kidde' and kidde_radio_code is not None: # Convert dip switch 1 and 0s to an int
try:
kidde_radio_code_int = int(kidde_radio_code, 2)
desired_state = {'kidde_radio_code': kidde_radio_code_int, 'pairing_mode': None} # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
_LOGGER.error('An invalid Kidde radio code was provided. %s', kidde_radio_code) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if pairing_device_type_selector is not None:
desired_state.update({'pairing_device_type_selector': pairing_device_type_selector}) # depends on [control=['if'], data=['pairing_device_type_selector']]
response = self.api_interface.set_device_state(self, {'desired_state': desired_state})
self._update_state_from_response(response) |
def Jkpw(dW, h, n=5):
    """matrix J approximating repeated Stratonovich integrals for each of N
    time intervals, based on the method of Kloeden, Platen and Wright (1992).
    Args:
        dW (array of shape (N, m)): giving m independent Weiner increments for
          each time step N. (You can make this array using sdeint.deltaW())
        h (float): the time step size
        n (int, optional): how many terms to take in the series expansion
    Returns:
        (A, J) where
        A: array of shape (N, m, m) giving the Levy areas that were used.
        J: array of shape (N, m, m) giving an m x m matrix of repeated
        Stratonovich integral values for each of the N time intervals.
    """
    m = dW.shape[1]
    areas, ito = Ikpw(dW, h, n)
    # Stratonovich integrals differ from the Ito ones by h/2 on the diagonal.
    correction = 0.5 * h * np.eye(m)[np.newaxis, :, :]
    return (areas, ito + correction)
constant[matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
]
variable[m] assign[=] call[name[dW].shape][constant[1]]
<ast.Tuple object at 0x7da1b12a9d80> assign[=] call[name[Ikpw], parameter[name[dW], name[h], name[n]]]
variable[J] assign[=] binary_operation[name[I] + binary_operation[binary_operation[constant[0.5] * name[h]] * call[call[name[np].eye, parameter[name[m]]].reshape, parameter[tuple[[<ast.Constant object at 0x7da1b12a8580>, <ast.Name object at 0x7da1b12a9fc0>, <ast.Name object at 0x7da1b12a8880>]]]]]]
return[tuple[[<ast.Name object at 0x7da1b12a9e70>, <ast.Name object at 0x7da1b12aa980>]]] | keyword[def] identifier[Jkpw] ( identifier[dW] , identifier[h] , identifier[n] = literal[int] ):
literal[string]
identifier[m] = identifier[dW] . identifier[shape] [ literal[int] ]
identifier[A] , identifier[I] = identifier[Ikpw] ( identifier[dW] , identifier[h] , identifier[n] )
identifier[J] = identifier[I] + literal[int] * identifier[h] * identifier[np] . identifier[eye] ( identifier[m] ). identifier[reshape] (( literal[int] , identifier[m] , identifier[m] ))
keyword[return] ( identifier[A] , identifier[J] ) | def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
(A, I) = Ikpw(dW, h, n)
J = I + 0.5 * h * np.eye(m).reshape((1, m, m))
return (A, J) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.