| code (string, 75–104k chars) | code_sememe (string, 47–309k chars) | token_type (string, 215–214k chars) | code_dependency (string, 75–155k chars) |
|---|---|---|---|
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
|
def function[_configure_csv_file, parameter[self, file_handle, schema]]:
constant[Configure a csv writer with the file_handle and write schema
as headers for the new file.
]
variable[csv_writer] assign[=] call[name[csv].writer, parameter[name[file_handle]]]
call[name[csv_writer].writerow, parameter[name[schema]]]
return[name[csv_writer]]
|
keyword[def] identifier[_configure_csv_file] ( identifier[self] , identifier[file_handle] , identifier[schema] ):
literal[string]
identifier[csv_writer] = identifier[csv] . identifier[writer] ( identifier[file_handle] , identifier[encoding] = literal[string] ,
identifier[delimiter] = identifier[self] . identifier[field_delimiter] )
identifier[csv_writer] . identifier[writerow] ( identifier[schema] )
keyword[return] identifier[csv_writer]
|
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8', delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
|
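A note on the row above: it passes `encoding='utf-8'` to `csv.writer`, which the Python 3 standard library does not accept (the encoding belongs to `open()`); the snippet appears to target Python 2 or a drop-in wrapper such as `unicodecsv`. A minimal runnable sketch of the same pattern against stdlib `csv`, with a hypothetical `CsvExporter` class standing in for the unseen owner of `field_delimiter`:

```python
import csv

class CsvExporter:
    """Hypothetical stand-in for the class that owns field_delimiter."""
    def __init__(self, field_delimiter=','):
        self.field_delimiter = field_delimiter

    def _configure_csv_file(self, file_handle, schema):
        # In Python 3 the encoding is set when the file is opened,
        # not on csv.writer.
        csv_writer = csv.writer(file_handle, delimiter=self.field_delimiter)
        csv_writer.writerow(schema)
        return csv_writer

with open('out.csv', 'w', newline='', encoding='utf-8') as fh:
    writer = CsvExporter()._configure_csv_file(fh, ['id', 'name'])
    writer.writerow([1, 'example'])
```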
def list_prior_model_tuples(self):
"""
Returns
-------
list_prior_model_tuples: [(String, ListPriorModel)]
"""
return list(filter(lambda t: isinstance(t[1], CollectionPriorModel), self.__dict__.items()))
|
def function[list_prior_model_tuples, parameter[self]]:
constant[
Returns
-------
list_prior_model_tuples: [(String, ListPriorModel)]
]
return[call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da18fe93c40>, call[name[self].__dict__.items, parameter[]]]]]]]
|
keyword[def] identifier[list_prior_model_tuples] ( identifier[self] ):
literal[string]
keyword[return] identifier[list] ( identifier[filter] ( keyword[lambda] identifier[t] : identifier[isinstance] ( identifier[t] [ literal[int] ], identifier[CollectionPriorModel] ), identifier[self] . identifier[__dict__] . identifier[items] ()))
|
def list_prior_model_tuples(self):
"""
Returns
-------
list_prior_model_tuples: [(String, ListPriorModel)]
"""
return list(filter(lambda t: isinstance(t[1], CollectionPriorModel), self.__dict__.items()))
|
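Note that the docstring in this row advertises `ListPriorModel` tuples while the body filters on `CollectionPriorModel`; the filter-over-`__dict__` pattern itself is straightforward. A self-contained sketch with a stand-in class in place of the real one (which is not shown in the row):

```python
class CollectionPriorModel:
    """Stand-in for the real model class, which is not shown in the row."""

class Container:
    def __init__(self):
        self.models = CollectionPriorModel()
        self.count = 3

    def list_prior_model_tuples(self):
        # Keep only instance attributes whose value is a CollectionPriorModel.
        return list(filter(lambda t: isinstance(t[1], CollectionPriorModel),
                           self.__dict__.items()))

print(Container().list_prior_model_tuples())
# [('models', <__main__.CollectionPriorModel object at 0x...>)]
```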
def get_protein_targets_only(target_chembl_ids):
"""Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets
Parameters
----------
target_chembl_ids : list
list of chembl_ids as strings
Returns
-------
protein_targets : dict
dictionary keyed to ChEMBL target ids with lists of activity ids
"""
protein_targets = {}
for target_chembl_id in target_chembl_ids:
target = query_target(target_chembl_id)
if 'SINGLE PROTEIN' in target['target_type']:
protein_targets[target_chembl_id] = target
return protein_targets
|
def function[get_protein_targets_only, parameter[target_chembl_ids]]:
constant[Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets
Parameters
----------
target_chembl_ids : list
list of chembl_ids as strings
Returns
-------
protein_targets : dict
dictionary keyed to ChEMBL target ids with lists of activity ids
]
variable[protein_targets] assign[=] dictionary[[], []]
for taget[name[target_chembl_id]] in starred[name[target_chembl_ids]] begin[:]
variable[target] assign[=] call[name[query_target], parameter[name[target_chembl_id]]]
if compare[constant[SINGLE PROTEIN] in call[name[target]][constant[target_type]]] begin[:]
call[name[protein_targets]][name[target_chembl_id]] assign[=] name[target]
return[name[protein_targets]]
|
keyword[def] identifier[get_protein_targets_only] ( identifier[target_chembl_ids] ):
literal[string]
identifier[protein_targets] ={}
keyword[for] identifier[target_chembl_id] keyword[in] identifier[target_chembl_ids] :
identifier[target] = identifier[query_target] ( identifier[target_chembl_id] )
keyword[if] literal[string] keyword[in] identifier[target] [ literal[string] ]:
identifier[protein_targets] [ identifier[target_chembl_id] ]= identifier[target]
keyword[return] identifier[protein_targets]
|
def get_protein_targets_only(target_chembl_ids):
"""Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets
Parameters
----------
target_chembl_ids : list
list of chembl_ids as strings
Returns
-------
protein_targets : dict
dictionary keyed to ChEMBL target ids with lists of activity ids
"""
protein_targets = {}
for target_chembl_id in target_chembl_ids:
target = query_target(target_chembl_id)
if 'SINGLE PROTEIN' in target['target_type']:
protein_targets[target_chembl_id] = target # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_chembl_id']]
return protein_targets
|
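One thing to keep in mind when reading this row: the docstring promises "lists of activity ids" as values, but the loop stores the whole target record. A sketch of the filtering loop with `query_target` stubbed out, since the real function queries the ChEMBL web services:

```python
def query_target(chembl_id):
    """Stub standing in for the real ChEMBL lookup; types are fabricated."""
    fake_types = {'CHEMBL203': 'SINGLE PROTEIN', 'CHEMBL2111': 'PROTEIN COMPLEX'}
    return {'target_chembl_id': chembl_id, 'target_type': fake_types[chembl_id]}

def get_protein_targets_only(target_chembl_ids):
    protein_targets = {}
    for target_chembl_id in target_chembl_ids:
        target = query_target(target_chembl_id)
        if 'SINGLE PROTEIN' in target['target_type']:
            protein_targets[target_chembl_id] = target
    return protein_targets

print(list(get_protein_targets_only(['CHEMBL203', 'CHEMBL2111'])))
# ['CHEMBL203']
```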
def mkdir(directory, exists_okay):
"""
Create a directory on the board.
Mkdir will create the specified directory on the board. One argument is
required, the full path of the directory to create.
Note that you cannot recursively create a hierarchy of directories with one
mkdir command, instead you must create each parent directory with separate
mkdir command calls.
For example to make a directory under the root called 'code':
ampy --port /board/serial/port mkdir /code
"""
# Run the mkdir command.
board_files = files.Files(_board)
board_files.mkdir(directory, exists_okay=exists_okay)
|
def function[mkdir, parameter[directory, exists_okay]]:
constant[
Create a directory on the board.
Mkdir will create the specified directory on the board. One argument is
required, the full path of the directory to create.
Note that you cannot recursively create a hierarchy of directories with one
mkdir command, instead you must create each parent directory with separate
mkdir command calls.
For example to make a directory under the root called 'code':
ampy --port /board/serial/port mkdir /code
]
variable[board_files] assign[=] call[name[files].Files, parameter[name[_board]]]
call[name[board_files].mkdir, parameter[name[directory]]]
|
keyword[def] identifier[mkdir] ( identifier[directory] , identifier[exists_okay] ):
literal[string]
identifier[board_files] = identifier[files] . identifier[Files] ( identifier[_board] )
identifier[board_files] . identifier[mkdir] ( identifier[directory] , identifier[exists_okay] = identifier[exists_okay] )
|
def mkdir(directory, exists_okay):
"""
Create a directory on the board.
Mkdir will create the specified directory on the board. One argument is
required, the full path of the directory to create.
Note that you cannot recursively create a hierarchy of directories with one
mkdir command, instead you must create each parent directory with separate
mkdir command calls.
For example to make a directory under the root called 'code':
ampy --port /board/serial/port mkdir /code
"""
# Run the mkdir command.
board_files = files.Files(_board)
board_files.mkdir(directory, exists_okay=exists_okay)
|
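As the docstring in this row notes, `mkdir` is not recursive, so a hierarchy has to be built one level at a time. A sketch of that loop, assuming an ampy-style files object like the `files.Files(_board)` instance in the row:

```python
def mkdir_recursive(board_files, path):
    """Create each parent of `path` in turn with single-level mkdir calls."""
    current = ''
    for part in (p for p in path.split('/') if p):
        current += '/' + part
        # exists_okay=True so already-present parents do not raise.
        board_files.mkdir(current, exists_okay=True)

# Usage sketch (requires a connected board):
#   mkdir_recursive(files.Files(_board), '/code/lib/util')
```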
def remove_frequencies(self, fmin, fmax):
"""Remove frequencies from the dataset
"""
self.data.query(
'frequency > {0} and frequency < {1}'.format(fmin, fmax),
inplace=True
)
g = self.data.groupby('frequency')
print('Remaining frequencies:')
print(sorted(g.groups.keys()))
|
def function[remove_frequencies, parameter[self, fmin, fmax]]:
constant[Remove frequencies from the dataset
]
call[name[self].data.query, parameter[call[constant[frequency > {0} and frequency < {1}].format, parameter[name[fmin], name[fmax]]]]]
variable[g] assign[=] call[name[self].data.groupby, parameter[constant[frequency]]]
call[name[print], parameter[constant[Remaining frequencies:]]]
call[name[print], parameter[call[name[sorted], parameter[call[name[g].groups.keys, parameter[]]]]]]
|
keyword[def] identifier[remove_frequencies] ( identifier[self] , identifier[fmin] , identifier[fmax] ):
literal[string]
identifier[self] . identifier[data] . identifier[query] (
literal[string] . identifier[format] ( identifier[fmin] , identifier[fmax] ),
identifier[inplace] = keyword[True]
)
identifier[g] = identifier[self] . identifier[data] . identifier[groupby] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( identifier[sorted] ( identifier[g] . identifier[groups] . identifier[keys] ()))
|
def remove_frequencies(self, fmin, fmax):
"""Remove frequencies from the dataset
"""
self.data.query('frequency > {0} and frequency < {1}'.format(fmin, fmax), inplace=True)
g = self.data.groupby('frequency')
print('Remaining frequencies:')
print(sorted(g.groups.keys()))
|
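One subtlety in this row: `DataFrame.query(..., inplace=True)` keeps the rows that match, so despite the name, the method retains frequencies strictly inside `(fmin, fmax)` and drops everything else. A runnable sketch on a plain DataFrame:

```python
import pandas as pd

data = pd.DataFrame({'frequency': [1, 10, 100, 1000], 'value': [4, 3, 2, 1]})
fmin, fmax = 5, 500
# query() keeps matching rows: frequencies outside (5, 500) are dropped.
data.query('frequency > {0} and frequency < {1}'.format(fmin, fmax), inplace=True)
print('Remaining frequencies:')
print(sorted(data.groupby('frequency').groups.keys()))  # [10, 100]
```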
def _check_flag_meanings(self, ds, name):
'''
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_meanings = getattr(variable, 'flag_meanings', None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_meanings.assert_true(flag_meanings is not None,
"{}'s flag_meanings attribute is required for flag variables".format(name))
valid_meanings.assert_true(isinstance(flag_meanings, basestring),
"{}'s flag_meanings attribute must be a string".format(name))
# We can't perform any additional checks if it's not a string
if not isinstance(flag_meanings, basestring):
return valid_meanings.to_result()
valid_meanings.assert_true(len(flag_meanings) > 0,
"{}'s flag_meanings can't be empty".format(name))
flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(False,
"{}'s flag_meanings attribute defined an illegal flag meaning ".format(name)+\
"{}".format(meaning))
return valid_meanings.to_result()
|
def function[_check_flag_meanings, parameter[self, ds, name]]:
constant[
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
]
variable[variable] assign[=] call[name[ds].variables][name[name]]
variable[flag_meanings] assign[=] call[name[getattr], parameter[name[variable], constant[flag_meanings], constant[None]]]
variable[valid_meanings] assign[=] call[name[TestCtx], parameter[name[BaseCheck].HIGH, call[name[self].section_titles][constant[3.5]]]]
call[name[valid_meanings].assert_true, parameter[compare[name[flag_meanings] is_not constant[None]], call[constant[{}'s flag_meanings attribute is required for flag variables].format, parameter[name[name]]]]]
call[name[valid_meanings].assert_true, parameter[call[name[isinstance], parameter[name[flag_meanings], name[basestring]]], call[constant[{}'s flag_meanings attribute must be a string].format, parameter[name[name]]]]]
if <ast.UnaryOp object at 0x7da2041d9600> begin[:]
return[call[name[valid_meanings].to_result, parameter[]]]
call[name[valid_meanings].assert_true, parameter[compare[call[name[len], parameter[name[flag_meanings]]] greater[>] constant[0]], call[constant[{}'s flag_meanings can't be empty].format, parameter[name[name]]]]]
variable[flag_regx] assign[=] call[name[regex].compile, parameter[constant[^[0-9A-Za-z_\-.+@]+$]]]
variable[meanings] assign[=] call[name[flag_meanings].split, parameter[]]
for taget[name[meaning]] in starred[name[meanings]] begin[:]
if compare[call[name[flag_regx].match, parameter[name[meaning]]] is constant[None]] begin[:]
call[name[valid_meanings].assert_true, parameter[constant[False], binary_operation[call[constant[{}'s flag_meanings attribute defined an illegal flag meaning ].format, parameter[name[name]]] + call[constant[{}].format, parameter[name[meaning]]]]]]
return[call[name[valid_meanings].to_result, parameter[]]]
|
keyword[def] identifier[_check_flag_meanings] ( identifier[self] , identifier[ds] , identifier[name] ):
literal[string]
identifier[variable] = identifier[ds] . identifier[variables] [ identifier[name] ]
identifier[flag_meanings] = identifier[getattr] ( identifier[variable] , literal[string] , keyword[None] )
identifier[valid_meanings] = identifier[TestCtx] ( identifier[BaseCheck] . identifier[HIGH] , identifier[self] . identifier[section_titles] [ literal[string] ])
identifier[valid_meanings] . identifier[assert_true] ( identifier[flag_meanings] keyword[is] keyword[not] keyword[None] ,
literal[string] . identifier[format] ( identifier[name] ))
identifier[valid_meanings] . identifier[assert_true] ( identifier[isinstance] ( identifier[flag_meanings] , identifier[basestring] ),
literal[string] . identifier[format] ( identifier[name] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[flag_meanings] , identifier[basestring] ):
keyword[return] identifier[valid_meanings] . identifier[to_result] ()
identifier[valid_meanings] . identifier[assert_true] ( identifier[len] ( identifier[flag_meanings] )> literal[int] ,
literal[string] . identifier[format] ( identifier[name] ))
identifier[flag_regx] = identifier[regex] . identifier[compile] ( literal[string] )
identifier[meanings] = identifier[flag_meanings] . identifier[split] ()
keyword[for] identifier[meaning] keyword[in] identifier[meanings] :
keyword[if] identifier[flag_regx] . identifier[match] ( identifier[meaning] ) keyword[is] keyword[None] :
identifier[valid_meanings] . identifier[assert_true] ( keyword[False] ,
literal[string] . identifier[format] ( identifier[name] )+ literal[string] . identifier[format] ( identifier[meaning] ))
keyword[return] identifier[valid_meanings] . identifier[to_result] ()
|
def _check_flag_meanings(self, ds, name):
"""
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
"""
variable = ds.variables[name]
flag_meanings = getattr(variable, 'flag_meanings', None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_meanings.assert_true(flag_meanings is not None, "{}'s flag_meanings attribute is required for flag variables".format(name))
valid_meanings.assert_true(isinstance(flag_meanings, basestring), "{}'s flag_meanings attribute must be a string".format(name))
# We can't perform any additional checks if it's not a string
if not isinstance(flag_meanings, basestring):
return valid_meanings.to_result() # depends on [control=['if'], data=[]]
valid_meanings.assert_true(len(flag_meanings) > 0, "{}'s flag_meanings can't be empty".format(name))
flag_regx = regex.compile('^[0-9A-Za-z_\\-.+@]+$')
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(False, "{}'s flag_meanings attribute defined an illegal flag meaning ".format(name) + '{}'.format(meaning)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['meaning']]
return valid_meanings.to_result()
|
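This checker leans on two Python 2 era pieces: the `basestring` builtin and the third-party `regex` module (used here like stdlib `re`). The validation rule itself is easy to try in isolation; a sketch using stdlib `re`:

```python
import re

# Each whitespace-separated flag meaning must match this token pattern.
flag_regx = re.compile(r"^[0-9A-Za-z_\-.+@]+$")

def illegal_meanings(flag_meanings):
    """Return the meanings that would fail the row's per-token check."""
    return [m for m in flag_meanings.split() if flag_regx.match(m) is None]

print(illegal_meanings('below_detection_limit suspect bad/value'))
# ['bad/value']
```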
def init_gl(self):
"""
Perform the magic incantations to create an
OpenGL scene using pyglet.
"""
# default background color is white-ish
background = [.99, .99, .99, 1.0]
# if user passed a background color use it
if 'background' in self.kwargs:
try:
# convert to (4,) uint8 RGBA
background = to_rgba(self.kwargs['background'])
# convert to 0.0 - 1.0 float
background = background.astype(np.float64) / 255.0
except BaseException:
log.error('background color set but wrong!',
exc_info=True)
self._gl_set_background(background)
self._gl_enable_depth(self.scene)
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
self._gl_enable_lighting(self.scene)
|
def function[init_gl, parameter[self]]:
constant[
Perform the magic incantations to create an
OpenGL scene using pyglet.
]
variable[background] assign[=] list[[<ast.Constant object at 0x7da20c992b00>, <ast.Constant object at 0x7da20c9918d0>, <ast.Constant object at 0x7da20c991c60>, <ast.Constant object at 0x7da20c991b40>]]
if compare[constant[background] in name[self].kwargs] begin[:]
<ast.Try object at 0x7da20c990160>
call[name[self]._gl_set_background, parameter[name[background]]]
call[name[self]._gl_enable_depth, parameter[name[self].scene]]
call[name[self]._gl_enable_color_material, parameter[]]
call[name[self]._gl_enable_blending, parameter[]]
call[name[self]._gl_enable_smooth_lines, parameter[]]
call[name[self]._gl_enable_lighting, parameter[name[self].scene]]
|
keyword[def] identifier[init_gl] ( identifier[self] ):
literal[string]
identifier[background] =[ literal[int] , literal[int] , literal[int] , literal[int] ]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[kwargs] :
keyword[try] :
identifier[background] = identifier[to_rgba] ( identifier[self] . identifier[kwargs] [ literal[string] ])
identifier[background] = identifier[background] . identifier[astype] ( identifier[np] . identifier[float64] )/ literal[int]
keyword[except] identifier[BaseException] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[exc_info] = keyword[True] )
identifier[self] . identifier[_gl_set_background] ( identifier[background] )
identifier[self] . identifier[_gl_enable_depth] ( identifier[self] . identifier[scene] )
identifier[self] . identifier[_gl_enable_color_material] ()
identifier[self] . identifier[_gl_enable_blending] ()
identifier[self] . identifier[_gl_enable_smooth_lines] ()
identifier[self] . identifier[_gl_enable_lighting] ( identifier[self] . identifier[scene] )
|
def init_gl(self):
"""
Perform the magic incantations to create an
OpenGL scene using pyglet.
"""
# default background color is white-ish
background = [0.99, 0.99, 0.99, 1.0]
# if user passed a background color use it
if 'background' in self.kwargs:
try:
# convert to (4,) uint8 RGBA
background = to_rgba(self.kwargs['background'])
# convert to 0.0 - 1.0 float
background = background.astype(np.float64) / 255.0 # depends on [control=['try'], data=[]]
except BaseException:
log.error('background color set but wrong!', exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
self._gl_set_background(background)
self._gl_enable_depth(self.scene)
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
self._gl_enable_lighting(self.scene)
|
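The interesting step in this row is the color normalization: a user-supplied background is converted to a `(4,)` uint8 RGBA array and then rescaled to 0.0-1.0 floats. A sketch with a minimal `to_rgba` stand-in (the real helper is more permissive about input forms):

```python
import numpy as np

def to_rgba(color):
    """Minimal stand-in: assume the input is already a 4-element RGBA list."""
    return np.array(color, dtype=np.uint8)

background = to_rgba([252, 252, 252, 255])
background = background.astype(np.float64) / 255.0
print(background)  # approximately [0.988 0.988 0.988 1.0]
```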
def update_profile_banner(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
:allowed_param:'width', 'height', 'offset_left', 'offset_right'
"""
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 700, form_field='banner', f=f)
bind_api(
api=self,
path='/account/update_profile_banner.json',
method='POST',
allowed_param=['width', 'height', 'offset_left', 'offset_right'],
require_auth=True
)(post_data=post_data, headers=headers)
|
def function[update_profile_banner, parameter[self, filename]]:
constant[ :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
:allowed_param:'width', 'height', 'offset_left', 'offset_right'
]
variable[f] assign[=] call[name[kargs].pop, parameter[constant[file], constant[None]]]
<ast.Tuple object at 0x7da18dc07700> assign[=] call[name[API]._pack_image, parameter[name[filename], constant[700]]]
call[call[name[bind_api], parameter[]], parameter[]]
|
keyword[def] identifier[update_profile_banner] ( identifier[self] , identifier[filename] ,** identifier[kargs] ):
literal[string]
identifier[f] = identifier[kargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[headers] , identifier[post_data] = identifier[API] . identifier[_pack_image] ( identifier[filename] , literal[int] , identifier[form_field] = literal[string] , identifier[f] = identifier[f] )
identifier[bind_api] (
identifier[api] = identifier[self] ,
identifier[path] = literal[string] ,
identifier[method] = literal[string] ,
identifier[allowed_param] =[ literal[string] , literal[string] , literal[string] , literal[string] ],
identifier[require_auth] = keyword[True]
)( identifier[post_data] = identifier[post_data] , identifier[headers] = identifier[headers] )
|
def update_profile_banner(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
:allowed_param:'width', 'height', 'offset_left', 'offset_right'
"""
f = kargs.pop('file', None)
(headers, post_data) = API._pack_image(filename, 700, form_field='banner', f=f)
bind_api(api=self, path='/account/update_profile_banner.json', method='POST', allowed_param=['width', 'height', 'offset_left', 'offset_right'], require_auth=True)(post_data=post_data, headers=headers)
|
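Here `allowed_param` tells the tweepy-style `bind_api` helper which keyword arguments may become request parameters. A toy illustration of such whitelisting (an assumption for illustration, not tweepy's actual implementation):

```python
def filter_params(allowed_param, **kwargs):
    """Keep only whitelisted keyword arguments as request parameters."""
    return {k: v for k, v in kwargs.items() if k in allowed_param}

allowed = ['width', 'height', 'offset_left', 'offset_right']
print(filter_params(allowed, width=1500, height=500, bogus=1))
# {'width': 1500, 'height': 500}
```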
def get_file_report(self, this_hash):
""" Get the scan results for a file.
:param this_hash: The md5/sha1/sha256/scan_ids hash of the file whose dynamic behavioural report you want to
retrieve or scan_ids from a previous call to scan_file.
:return:
"""
params = {'api_key': self.api_key, 'hash': this_hash}
try:
response_info = requests.get(self.base + 'file/mwsinfo', params=params)
response_additional = requests.get(self.base + 'file/addinfo', params=params)
except requests.RequestException as e:
return dict(error=e.message)
ri = _return_response_and_status_code(response_info)
ra = _return_response_and_status_code(response_additional)
if ri['response_code'] == '1' and ra['response_code'] == '1': # both ok
both = ri['results'].copy()
both.update(ra['results'])
response = dict(results=both, response_code=1)
elif ri['response_code'] == '1' and ra['response_code'] == '0': # advance non exists but standard ok
response = ri
elif ri['response_code'] == '2': # main is still loading
response = dict(results={}, response_code=2)
else: # error generic
response = ri
return response
|
def function[get_file_report, parameter[self, this_hash]]:
constant[ Get the scan results for a file.
:param this_hash: The md5/sha1/sha256/scan_ids hash of the file whose dynamic behavioural report you want to
retrieve or scan_ids from a previous call to scan_file.
:return:
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18eb541f0>, <ast.Constant object at 0x7da18eb578b0>], [<ast.Attribute object at 0x7da18eb54d90>, <ast.Name object at 0x7da18eb55e70>]]
<ast.Try object at 0x7da18eb56410>
variable[ri] assign[=] call[name[_return_response_and_status_code], parameter[name[response_info]]]
variable[ra] assign[=] call[name[_return_response_and_status_code], parameter[name[response_additional]]]
if <ast.BoolOp object at 0x7da18eb565f0> begin[:]
variable[both] assign[=] call[call[name[ri]][constant[results]].copy, parameter[]]
call[name[both].update, parameter[call[name[ra]][constant[results]]]]
variable[response] assign[=] call[name[dict], parameter[]]
return[name[response]]
|
keyword[def] identifier[get_file_report] ( identifier[self] , identifier[this_hash] ):
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[api_key] , literal[string] : identifier[this_hash] }
keyword[try] :
identifier[response_info] = identifier[requests] . identifier[get] ( identifier[self] . identifier[base] + literal[string] , identifier[params] = identifier[params] )
identifier[response_additional] = identifier[requests] . identifier[get] ( identifier[self] . identifier[base] + literal[string] , identifier[params] = identifier[params] )
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[return] identifier[dict] ( identifier[error] = identifier[e] . identifier[message] )
identifier[ri] = identifier[_return_response_and_status_code] ( identifier[response_info] )
identifier[ra] = identifier[_return_response_and_status_code] ( identifier[response_additional] )
keyword[if] identifier[ri] [ literal[string] ]== literal[string] keyword[and] identifier[ra] [ literal[string] ]== literal[string] :
identifier[both] = identifier[ri] [ literal[string] ]. identifier[copy] ()
identifier[both] . identifier[update] ( identifier[ra] [ literal[string] ])
identifier[response] = identifier[dict] ( identifier[results] = identifier[both] , identifier[response_code] = literal[int] )
keyword[elif] identifier[ri] [ literal[string] ]== literal[string] keyword[and] identifier[ra] [ literal[string] ]== literal[string] :
identifier[response] = identifier[ri]
keyword[elif] identifier[ri] [ literal[string] ]== literal[string] :
identifier[response] = identifier[dict] ( identifier[results] ={}, identifier[response_code] = literal[int] )
keyword[else] :
identifier[response] = identifier[ri]
keyword[return] identifier[response]
|
def get_file_report(self, this_hash):
""" Get the scan results for a file.
:param this_hash: The md5/sha1/sha256/scan_ids hash of the file whose dynamic behavioural report you want to
retrieve or scan_ids from a previous call to scan_file.
:return:
"""
params = {'api_key': self.api_key, 'hash': this_hash}
try:
response_info = requests.get(self.base + 'file/mwsinfo', params=params)
response_additional = requests.get(self.base + 'file/addinfo', params=params) # depends on [control=['try'], data=[]]
except requests.RequestException as e:
return dict(error=e.message) # depends on [control=['except'], data=['e']]
ri = _return_response_and_status_code(response_info)
ra = _return_response_and_status_code(response_additional)
if ri['response_code'] == '1' and ra['response_code'] == '1': # both ok
both = ri['results'].copy()
both.update(ra['results'])
response = dict(results=both, response_code=1) # depends on [control=['if'], data=[]]
elif ri['response_code'] == '1' and ra['response_code'] == '0': # advance non exists but standard ok
response = ri # depends on [control=['if'], data=[]]
elif ri['response_code'] == '2': # main is still loading
response = dict(results={}, response_code=2) # depends on [control=['if'], data=[]]
else: # error generic
response = ri
return response
|
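Two endpoints are queried in this row and their payloads merged when both succeed; note also that `e.message` is a Python 2 idiom (`str(e)` is the modern spelling). The merge branch in isolation, with hard-coded stand-ins for the parsed responses:

```python
ri = {'response_code': '1', 'results': {'md5': 'abc123', 'severity': 'high'}}
ra = {'response_code': '1', 'results': {'imports': ['kernel32.dll']}}

if ri['response_code'] == '1' and ra['response_code'] == '1':
    both = ri['results'].copy()   # copy so ri itself is not mutated
    both.update(ra['results'])
    response = dict(results=both, response_code=1)
    print(response)
```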
def exchange_declare(self, exchange, type, passive=False, durable=False,
auto_delete=True, nowait=False, arguments=None):
"""Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(type)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(auto_delete)
args.write_bit(False) # internal: deprecated
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 10), args)
if auto_delete:
warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED))
if not nowait:
return self.wait(allowed_methods=[
(40, 11), # Channel.exchange_declare_ok
])
|
def function[exchange_declare, parameter[self, exchange, type, passive, durable, auto_delete, nowait, arguments]]:
constant[Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
]
variable[arguments] assign[=] <ast.IfExp object at 0x7da1b17ed1e0>
variable[args] assign[=] call[name[AMQPWriter], parameter[]]
call[name[args].write_short, parameter[constant[0]]]
call[name[args].write_shortstr, parameter[name[exchange]]]
call[name[args].write_shortstr, parameter[name[type]]]
call[name[args].write_bit, parameter[name[passive]]]
call[name[args].write_bit, parameter[name[durable]]]
call[name[args].write_bit, parameter[name[auto_delete]]]
call[name[args].write_bit, parameter[constant[False]]]
call[name[args].write_bit, parameter[name[nowait]]]
call[name[args].write_table, parameter[name[arguments]]]
call[name[self]._send_method, parameter[tuple[[<ast.Constant object at 0x7da1b17ed120>, <ast.Constant object at 0x7da1b17ef0d0>]], name[args]]]
if name[auto_delete] begin[:]
call[name[warn], parameter[call[name[VDeprecationWarning], parameter[name[EXCHANGE_AUTODELETE_DEPRECATED]]]]]
if <ast.UnaryOp object at 0x7da1b17ec310> begin[:]
return[call[name[self].wait, parameter[]]]
|
keyword[def] identifier[exchange_declare] ( identifier[self] , identifier[exchange] , identifier[type] , identifier[passive] = keyword[False] , identifier[durable] = keyword[False] ,
identifier[auto_delete] = keyword[True] , identifier[nowait] = keyword[False] , identifier[arguments] = keyword[None] ):
literal[string]
identifier[arguments] ={} keyword[if] identifier[arguments] keyword[is] keyword[None] keyword[else] identifier[arguments]
identifier[args] = identifier[AMQPWriter] ()
identifier[args] . identifier[write_short] ( literal[int] )
identifier[args] . identifier[write_shortstr] ( identifier[exchange] )
identifier[args] . identifier[write_shortstr] ( identifier[type] )
identifier[args] . identifier[write_bit] ( identifier[passive] )
identifier[args] . identifier[write_bit] ( identifier[durable] )
identifier[args] . identifier[write_bit] ( identifier[auto_delete] )
identifier[args] . identifier[write_bit] ( keyword[False] )
identifier[args] . identifier[write_bit] ( identifier[nowait] )
identifier[args] . identifier[write_table] ( identifier[arguments] )
identifier[self] . identifier[_send_method] (( literal[int] , literal[int] ), identifier[args] )
keyword[if] identifier[auto_delete] :
identifier[warn] ( identifier[VDeprecationWarning] ( identifier[EXCHANGE_AUTODELETE_DEPRECATED] ))
keyword[if] keyword[not] identifier[nowait] :
keyword[return] identifier[self] . identifier[wait] ( identifier[allowed_methods] =[
( literal[int] , literal[int] ),
])
|
def exchange_declare(self, exchange, type, passive=False, durable=False, auto_delete=True, nowait=False, arguments=None):
"""Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(type)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(auto_delete)
args.write_bit(False) # internal: deprecated
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 10), args)
if auto_delete:
warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED)) # depends on [control=['if'], data=[]]
if not nowait: # Channel.exchange_declare_ok
return self.wait(allowed_methods=[(40, 11)]) # depends on [control=['if'], data=[]]
|
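The five booleans in this row are written with `write_bit` in declaration order (passive, durable, auto_delete, the deprecated internal flag, nowait); AMQP 0-9-1 packs consecutive bit fields into shared octets. A small, loose illustration of that packing:

```python
def pack_bits(*bits):
    """Pack up to eight booleans into one octet, low bit first."""
    octet = 0
    for i, bit in enumerate(bits):
        if bit:
            octet |= 1 << i
    return octet

# passive, durable, auto_delete, internal (deprecated), nowait
print(bin(pack_bits(False, True, True, False, False)))  # 0b110
```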
async def get_offers(self, **params):
"""Receives all users input (by cid) or output offers
Accepts:
- public key
- cid (optional)
- coinid (optional)
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
cid = params.get("cid")
public_key = params.get("public_key")
coinid = params.get("coinid")
# Get all input offers by cid
if cid and coinid:
cid = int(cid)
database = client[coinid]
offer_collection = database[settings.OFFER]
content_collection = database[settings.CONTENT]
offers = [{i:document[i] for i in document if i == "confirmed"}
async for document in offer_collection.find({"cid":cid, "confirmed":None})]
# Get all output users offers
elif not cid:
database = client[coinid]
offer_collection = database[settings.OFFER]
offers = [{i:document[i] for i in document if i == "confirmed"}
async for document in offer_collection.find({"public_key":public_key,
"confirmed":None})]
# Return list with offers
return offers
|
<ast.AsyncFunctionDef object at 0x7da1b0a4a9e0>
|
keyword[async] keyword[def] identifier[get_offers] ( identifier[self] ,** identifier[params] ):
literal[string]
keyword[if] identifier[params] . identifier[get] ( literal[string] ):
identifier[params] = identifier[json] . identifier[loads] ( identifier[params] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[params] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[string] }
identifier[cid] = identifier[params] . identifier[get] ( literal[string] )
identifier[public_key] = identifier[params] . identifier[get] ( literal[string] )
identifier[coinid] = identifier[params] . identifier[get] ( literal[string] )
keyword[if] identifier[cid] keyword[and] identifier[coinid] :
identifier[cid] = identifier[int] ( identifier[cid] )
identifier[database] = identifier[client] [ identifier[coinid] ]
identifier[offer_collection] = identifier[database] [ identifier[settings] . identifier[OFFER] ]
identifier[content_collection] = identifier[database] [ identifier[settings] . identifier[CONTENT] ]
identifier[offers] =[{ identifier[i] : identifier[document] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[document] keyword[if] identifier[i] == literal[string] }
keyword[async] keyword[for] identifier[document] keyword[in] identifier[offer_collection] . identifier[find] ({ literal[string] : identifier[cid] , literal[string] : keyword[None] })]
keyword[elif] keyword[not] identifier[cid] :
identifier[database] = identifier[client] [ identifier[coinid] ]
identifier[offer_collection] = identifier[database] [ identifier[settings] . identifier[OFFER] ]
identifier[offers] =[{ identifier[i] : identifier[document] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[document] keyword[if] identifier[i] == literal[string] }
keyword[async] keyword[for] identifier[document] keyword[in] identifier[offer_collection] . identifier[find] ({ literal[string] : identifier[public_key] ,
literal[string] : keyword[None] })]
keyword[return] identifier[offers]
|
async def get_offers(self, **params):
"""Receives all users input (by cid) or output offers
Accepts:
- public key
- cid (optional)
- coinid (optional)
"""
if params.get('message'):
params = json.loads(params.get('message', '{}')) # depends on [control=['if'], data=[]]
if not params:
return {'error': 400, 'reason': 'Missed required fields'} # depends on [control=['if'], data=[]]
cid = params.get('cid')
public_key = params.get('public_key')
coinid = params.get('coinid') # Get all input offers by cid
if cid and coinid:
cid = int(cid)
database = client[coinid]
offer_collection = database[settings.OFFER]
content_collection = database[settings.CONTENT]
offers = [{i: document[i] for i in document if i == 'confirmed'} async for document in offer_collection.find({'cid': cid, 'confirmed': None})] # depends on [control=['if'], data=[]] # Get all output users offers
elif not cid:
database = client[coinid]
offer_collection = database[settings.OFFER]
offers = [{i: document[i] for i in document if i == 'confirmed'} async for document in offer_collection.find({'public_key': public_key, 'confirmed': None})] # depends on [control=['if'], data=[]] # Return list with offers
return offers
|
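Worth noticing in this row: the `code_sememe` cell collapsed to a bare `<ast.AsyncFunctionDef ...>` repr, so the serializer evidently does not descend into async defs. The core pattern is the async list comprehension over what looks like a Motor-style cursor (an assumption); a runnable sketch with a fake async cursor:

```python
import asyncio

async def fake_find(docs):
    """Fake async cursor standing in for collection.find(...)."""
    for doc in docs:
        yield doc

async def main():
    docs = [{'cid': 1, 'confirmed': None}, {'cid': 2, 'confirmed': True}]
    offers = [{i: doc[i] for i in doc if i == 'confirmed'}
              async for doc in fake_find(docs)]
    print(offers)  # [{'confirmed': None}, {'confirmed': True}]

asyncio.run(main())
```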
def to_imgur_format(params):
"""Convert the parameters to the format Imgur expects."""
if params is None:
return None
return dict((k, convert_general(val)) for (k, val) in params.items())
|
def function[to_imgur_format, parameter[params]]:
constant[Convert the parameters to the format Imgur expects.]
if compare[name[params] is constant[None]] begin[:]
return[constant[None]]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b23445b0>]]]
|
keyword[def] identifier[to_imgur_format] ( identifier[params] ):
literal[string]
keyword[if] identifier[params] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[dict] (( identifier[k] , identifier[convert_general] ( identifier[val] )) keyword[for] ( identifier[k] , identifier[val] ) keyword[in] identifier[params] . identifier[items] ())
|
def to_imgur_format(params):
"""Convert the parameters to the format Imgur expects."""
if params is None:
return None # depends on [control=['if'], data=[]]
return dict(((k, convert_general(val)) for (k, val) in params.items()))
|
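`convert_general` is not shown in this row; a sketch with one plausible stand-in (lower-casing booleans, a common requirement for HTTP form parameters) makes the shape of the helper concrete:

```python
def convert_general(val):
    """Hypothetical stand-in: Imgur-style APIs want 'true'/'false' strings."""
    return str(val).lower() if isinstance(val, bool) else val

def to_imgur_format(params):
    if params is None:
        return None
    return dict((k, convert_general(val)) for (k, val) in params.items())

print(to_imgur_format({'album': True, 'title': 'cat'}))
# {'album': 'true', 'title': 'cat'}
```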
def generate_code_challenge(verifier):
"""
source: https://github.com/openstack/deb-python-oauth2client
Creates a 'code_challenge' as described in section 4.2 of RFC 7636
by taking the sha256 hash of the verifier and then urlsafe
base64-encoding it.
Args:
verifier: bytestring, representing a code_verifier as generated by
generate_code_verifier().
Returns:
Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
without '=' padding.
"""
digest = hashlib.sha256(verifier.encode('utf-8')).digest()
return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')
|
def function[generate_code_challenge, parameter[verifier]]:
constant[
source: https://github.com/openstack/deb-python-oauth2client
Creates a 'code_challenge' as described in section 4.2 of RFC 7636
by taking the sha256 hash of the verifier and then urlsafe
base64-encoding it.
Args:
verifier: bytestring, representing a code_verifier as generated by
generate_code_verifier().
Returns:
Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
without '=' padding.
]
variable[digest] assign[=] call[call[name[hashlib].sha256, parameter[call[name[verifier].encode, parameter[constant[utf-8]]]]].digest, parameter[]]
return[call[call[call[name[base64].urlsafe_b64encode, parameter[name[digest]]].rstrip, parameter[constant[b'=']]].decode, parameter[constant[utf-8]]]]
|
keyword[def] identifier[generate_code_challenge] ( identifier[verifier] ):
literal[string]
identifier[digest] = identifier[hashlib] . identifier[sha256] ( identifier[verifier] . identifier[encode] ( literal[string] )). identifier[digest] ()
keyword[return] identifier[base64] . identifier[urlsafe_b64encode] ( identifier[digest] ). identifier[rstrip] ( literal[string] ). identifier[decode] ( literal[string] )
|
def generate_code_challenge(verifier):
"""
source: https://github.com/openstack/deb-python-oauth2client
Creates a 'code_challenge' as described in section 4.2 of RFC 7636
by taking the sha256 hash of the verifier and then urlsafe
base64-encoding it.
Args:
verifier: bytestring, representing a code_verifier as generated by
generate_code_verifier().
Returns:
Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
without '=' padding.
"""
digest = hashlib.sha256(verifier.encode('utf-8')).digest()
return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')
|
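A companion sketch pairing the function with a `code_verifier` generator modeled on RFC 7636 section 4.1; the verifier recipe here is an assumption, since the docstring only references `generate_code_verifier()`:

```python
import base64
import hashlib
import os

def generate_code_verifier():
    """Assumed recipe: 32 random bytes, urlsafe-base64, padding stripped."""
    return base64.urlsafe_b64encode(os.urandom(32)).rstrip(b'=').decode('utf-8')

def generate_code_challenge(verifier):
    digest = hashlib.sha256(verifier.encode('utf-8')).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')

verifier = generate_code_verifier()
print(generate_code_challenge(verifier))  # 43-char urlsafe string, no '='
```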
def get_config_window_bounds(self):
"""Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window.
"""
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
monitor = screeninfo.get_monitors()[monitor_index]
bounds_x += monitor.x
bounds_y += monitor.y
except NotImplementedError:
self.logger.warn('Current environment doesn\'t support get_monitors')
return bounds_x, bounds_y
|
def function[get_config_window_bounds, parameter[self]]:
constant[Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window.
]
variable[bounds_x] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da18eb56230>]]
variable[bounds_y] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da1b244ec50>]]
variable[monitor_index] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da1b244de40>]]
if compare[name[monitor_index] greater[>] <ast.UnaryOp object at 0x7da1b244f430>] begin[:]
<ast.Try object at 0x7da1b244e650>
return[tuple[[<ast.Name object at 0x7da1b244f850>, <ast.Name object at 0x7da1b244ee90>]]]
|
keyword[def] identifier[get_config_window_bounds] ( identifier[self] ):
literal[string]
identifier[bounds_x] = identifier[int] ( identifier[self] . identifier[config] . identifier[get_optional] ( literal[string] , literal[string] ) keyword[or] literal[int] )
identifier[bounds_y] = identifier[int] ( identifier[self] . identifier[config] . identifier[get_optional] ( literal[string] , literal[string] ) keyword[or] literal[int] )
identifier[monitor_index] = identifier[int] ( identifier[self] . identifier[config] . identifier[get_optional] ( literal[string] , literal[string] ) keyword[or] - literal[int] )
keyword[if] identifier[monitor_index] >- literal[int] :
keyword[try] :
identifier[monitor] = identifier[screeninfo] . identifier[get_monitors] ()[ identifier[monitor_index] ]
identifier[bounds_x] += identifier[monitor] . identifier[x]
identifier[bounds_y] += identifier[monitor] . identifier[y]
keyword[except] identifier[NotImplementedError] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[return] identifier[bounds_x] , identifier[bounds_y]
|
def get_config_window_bounds(self):
"""Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window.
"""
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
monitor = screeninfo.get_monitors()[monitor_index]
bounds_x += monitor.x
bounds_y += monitor.y # depends on [control=['try'], data=[]]
except NotImplementedError:
self.logger.warn("Current environment doesn't support get_monitors") # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['monitor_index']]
return (bounds_x, bounds_y)
|
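The monitor adjustment in this row is plain offset arithmetic: screeninfo reports each monitor's top-left corner in virtual-desktop coordinates, and the configured bounds are shifted by it. A sketch with a fake monitor record so it runs without a display:

```python
class FakeMonitor:
    """Stand-in for a screeninfo Monitor: second display right of the primary."""
    x, y = 1920, 0

bounds_x, bounds_y = 10, 20
monitor = FakeMonitor()
bounds_x += monitor.x
bounds_y += monitor.y
print((bounds_x, bounds_y))  # (1930, 20)
```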
def get_queryset(self):
"""
This viewset provides a helper attribute to prefetch related models
based on the include specified in the URL.
__all__ can be used to specify a prefetch which should be done regardless of the include
.. code:: python
# When MyViewSet is called with ?include=author it will prefetch author and authorbio
class MyViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
prefetch_for_includes = {
'__all__': [],
'author': ['author', 'author__authorbio'],
'category.section': ['category']
}
"""
qs = super(PrefetchForIncludesHelperMixin, self).get_queryset()
if not hasattr(self, 'prefetch_for_includes'):
return qs
includes = self.request.GET.get('include', '').split(',')
for inc in includes + ['__all__']:
prefetches = self.prefetch_for_includes.get(inc)
if prefetches:
qs = qs.prefetch_related(*prefetches)
return qs
|
def function[get_queryset, parameter[self]]:
constant[
This viewset provides a helper attribute to prefetch related models
based on the include specified in the URL.
__all__ can be used to specify a prefetch which should be done regardless of the include
.. code:: python
# When MyViewSet is called with ?include=author it will prefetch author and authorbio
class MyViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
prefetch_for_includes = {
'__all__': [],
'author': ['author', 'author__authorbio'],
'category.section': ['category']
}
]
variable[qs] assign[=] call[call[name[super], parameter[name[PrefetchForIncludesHelperMixin], name[self]]].get_queryset, parameter[]]
if <ast.UnaryOp object at 0x7da1b1720370> begin[:]
return[name[qs]]
variable[includes] assign[=] call[call[name[self].request.GET.get, parameter[constant[include], constant[]]].split, parameter[constant[,]]]
for taget[name[inc]] in starred[binary_operation[name[includes] + list[[<ast.Constant object at 0x7da1b17fa4a0>]]]] begin[:]
variable[prefetches] assign[=] call[name[self].prefetch_for_includes.get, parameter[name[inc]]]
if name[prefetches] begin[:]
variable[qs] assign[=] call[name[qs].prefetch_related, parameter[<ast.Starred object at 0x7da1b17fb040>]]
return[name[qs]]
|
keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[qs] = identifier[super] ( identifier[PrefetchForIncludesHelperMixin] , identifier[self] ). identifier[get_queryset] ()
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[qs]
identifier[includes] = identifier[self] . identifier[request] . identifier[GET] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[inc] keyword[in] identifier[includes] +[ literal[string] ]:
identifier[prefetches] = identifier[self] . identifier[prefetch_for_includes] . identifier[get] ( identifier[inc] )
keyword[if] identifier[prefetches] :
identifier[qs] = identifier[qs] . identifier[prefetch_related] (* identifier[prefetches] )
keyword[return] identifier[qs]
|
def get_queryset(self):
"""
This viewset provides a helper attribute to prefetch related models
based on the include specified in the URL.
__all__ can be used to specify a prefetch which should be done regardless of the include
.. code:: python
# When MyViewSet is called with ?include=author it will prefetch author and authorbio
class MyViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
prefetch_for_includes = {
'__all__': [],
'author': ['author', 'author__authorbio'],
'category.section': ['category']
}
"""
qs = super(PrefetchForIncludesHelperMixin, self).get_queryset()
if not hasattr(self, 'prefetch_for_includes'):
return qs # depends on [control=['if'], data=[]]
includes = self.request.GET.get('include', '').split(',')
for inc in includes + ['__all__']:
prefetches = self.prefetch_for_includes.get(inc)
if prefetches:
qs = qs.prefetch_related(*prefetches) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['inc']]
return qs
|
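The include-to-prefetch resolution in this row can be exercised without Django: split the `include` parameter, look each key up in the mapping, and always append the `'__all__'` entry. A standalone sketch with a hypothetical mapping:

```python
prefetch_for_includes = {
    '__all__': ['publisher'],
    'author': ['author', 'author__authorbio'],
    'category.section': ['category'],
}

def resolve_prefetches(include_param):
    prefetches = []
    for inc in include_param.split(',') + ['__all__']:
        prefetches.extend(prefetch_for_includes.get(inc) or [])
    return prefetches

print(resolve_prefetches('author'))
# ['author', 'author__authorbio', 'publisher']
```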
def __stream(self, endpoint, listener, params={}, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
"""
Internal streaming API helper.
Returns a handle to the open connection that the user can close if they
wish to terminate it.
"""
# Check if we have to redirect
instance = self.instance()
if "streaming_api" in instance["urls"] and instance["urls"]["streaming_api"] != self.api_base_url:
# This is probably a websockets URL, which is really for the browser, but requests can't handle it
# So we do this below to turn it into an HTTPS or HTTP URL
parse = urlparse(instance["urls"]["streaming_api"])
if parse.scheme == 'wss':
url = "https://" + parse.netloc
elif parse.scheme == 'ws':
url = "http://" + parse.netloc
else:
raise MastodonAPIError(
"Could not parse streaming api location returned from server: {}.".format(
instance["urls"]["streaming_api"]))
else:
url = self.api_base_url
# The streaming server can't handle two slashes in a path, so remove trailing slashes
if url[-1] == '/':
url = url[:-1]
# Connect function (called and then potentially passed to async handler)
def connect_func():
headers = {"Authorization": "Bearer " + self.access_token}
connection = self.session.get(url + endpoint, headers = headers, data = params, stream = True,
timeout=(self.request_timeout, timeout))
if connection.status_code != 200:
raise MastodonNetworkError("Could not connect to streaming server: %s" % connection.reason)
return connection
connection = None
# Async stream handler
class __stream_handle():
def __init__(self, connection, connect_func, reconnect_async, reconnect_async_wait_sec):
self.closed = False
self.running = True
self.connection = connection
self.connect_func = connect_func
self.reconnect_async = reconnect_async
self.reconnect_async_wait_sec = reconnect_async_wait_sec
self.reconnecting = False
def close(self):
self.closed = True
self.connection.close()
def is_alive(self):
return self._thread.is_alive()
def is_receiving(self):
if self.closed or not self.running or self.reconnecting or not self.is_alive():
return False
else:
return True
def _threadproc(self):
self._thread = threading.current_thread()
# Run until closed or until error if not autoreconnecting
while self.running:
if not self.connection is None:
with closing(self.connection) as r:
try:
listener.handle_stream(r)
except (AttributeError, MastodonMalformedEventError, MastodonNetworkError) as e:
if not (self.closed or self.reconnect_async):
raise e
else:
if self.closed:
self.running = False
# Reconnect loop. Try immediately once, then with delays on error.
if (self.reconnect_async and not self.closed) or self.connection is None:
self.reconnecting = True
connect_success = False
while not connect_success:
connect_success = True
try:
self.connection = self.connect_func()
if self.connection.status_code != 200:
time.sleep(self.reconnect_async_wait_sec)
connect_success = False
exception = MastodonNetworkError("Could not connect to server.")
listener.on_abort(exception)
except:
time.sleep(self.reconnect_async_wait_sec)
connect_success = False
self.reconnecting = False
else:
self.running = False
return 0
if run_async:
handle = __stream_handle(connection, connect_func, reconnect_async, reconnect_async_wait_sec)
t = threading.Thread(args=(), target=handle._threadproc)
t.daemon = True
t.start()
return handle
else:
# Blocking, never returns (can only leave via exception)
connection = connect_func()
with closing(connection) as r:
listener.handle_stream(r)
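# Hedged usage sketch: __stream is private; the public streaming wrappers in
# Mastodon.py (e.g. Mastodon.stream_user) are assumed to delegate to it. The
# listener class below is hypothetical and only implements the one callback
# it needs.
#
# from mastodon import Mastodon, StreamListener
#
# class PrintListener(StreamListener):
#     def on_update(self, status):
#         print(status)
#
# api = Mastodon(access_token="...", api_base_url="https://example.social")
# handle = api.stream_user(PrintListener(), run_async=True, reconnect_async=True)
# handle.close()  # terminate the open connection returned by the helper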
|
def function[__stream, parameter[self, endpoint, listener, params, run_async, timeout, reconnect_async, reconnect_async_wait_sec]]:
constant[
Internal streaming API helper.
Returns a handle to the open connection that the user can close if they
wish to terminate it.
]
variable[instance] assign[=] call[name[self].instance, parameter[]]
if <ast.BoolOp object at 0x7da20e9579a0> begin[:]
variable[parse] assign[=] call[name[urlparse], parameter[call[call[name[instance]][constant[urls]]][constant[streaming_api]]]]
if compare[name[parse].scheme equal[==] constant[wss]] begin[:]
variable[url] assign[=] binary_operation[constant[https://] + name[parse].netloc]
if compare[call[name[url]][<ast.UnaryOp object at 0x7da20e956590>] equal[==] constant[/]] begin[:]
variable[url] assign[=] call[name[url]][<ast.Slice object at 0x7da20e956320>]
def function[connect_func, parameter[]]:
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20e954820>], [<ast.BinOp object at 0x7da20e955ed0>]]
variable[connection] assign[=] call[name[self].session.get, parameter[binary_operation[name[url] + name[endpoint]]]]
if compare[name[connection].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da20e9571c0>
return[name[connection]]
variable[connection] assign[=] constant[None]
class class[__stream_handle, parameter[]] begin[:]
def function[__init__, parameter[self, connection, connect_func, reconnect_async, reconnect_async_wait_sec]]:
name[self].closed assign[=] constant[False]
name[self].running assign[=] constant[True]
name[self].connection assign[=] name[connection]
name[self].connect_func assign[=] name[connect_func]
name[self].reconnect_async assign[=] name[reconnect_async]
name[self].reconnect_async_wait_sec assign[=] name[reconnect_async_wait_sec]
name[self].reconnecting assign[=] constant[False]
def function[close, parameter[self]]:
name[self].closed assign[=] constant[True]
call[name[self].connection.close, parameter[]]
def function[is_alive, parameter[self]]:
return[call[name[self]._thread.is_alive, parameter[]]]
def function[is_receiving, parameter[self]]:
if <ast.BoolOp object at 0x7da20e954340> begin[:]
return[constant[False]]
def function[_threadproc, parameter[self]]:
name[self]._thread assign[=] call[name[threading].current_thread, parameter[]]
while name[self].running begin[:]
if <ast.UnaryOp object at 0x7da20e9546d0> begin[:]
with call[name[closing], parameter[name[self].connection]] begin[:]
<ast.Try object at 0x7da20e9b1180>
if <ast.BoolOp object at 0x7da20e9b14b0> begin[:]
name[self].reconnecting assign[=] constant[True]
variable[connect_success] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da18eb57ee0> begin[:]
variable[connect_success] assign[=] constant[True]
<ast.Try object at 0x7da18eb57b20>
name[self].reconnecting assign[=] constant[False]
return[constant[0]]
if name[run_async] begin[:]
variable[handle] assign[=] call[name[__stream_handle], parameter[name[connection], name[connect_func], name[reconnect_async], name[reconnect_async_wait_sec]]]
variable[t] assign[=] call[name[threading].Thread, parameter[]]
name[t].daemon assign[=] constant[True]
call[name[t].start, parameter[]]
return[name[handle]]
|
keyword[def] identifier[__stream] ( identifier[self] , identifier[endpoint] , identifier[listener] , identifier[params] ={}, identifier[run_async] = keyword[False] , identifier[timeout] = identifier[__DEFAULT_STREAM_TIMEOUT] , identifier[reconnect_async] = keyword[False] , identifier[reconnect_async_wait_sec] = identifier[__DEFAULT_STREAM_RECONNECT_WAIT_SEC] ):
literal[string]
identifier[instance] = identifier[self] . identifier[instance] ()
keyword[if] literal[string] keyword[in] identifier[instance] [ literal[string] ] keyword[and] identifier[instance] [ literal[string] ][ literal[string] ]!= identifier[self] . identifier[api_base_url] :
identifier[parse] = identifier[urlparse] ( identifier[instance] [ literal[string] ][ literal[string] ])
keyword[if] identifier[parse] . identifier[scheme] == literal[string] :
identifier[url] = literal[string] + identifier[parse] . identifier[netloc]
keyword[elif] identifier[parse] . identifier[scheme] == literal[string] :
identifier[url] = literal[string] + identifier[parse] . identifier[netloc]
keyword[else] :
keyword[raise] identifier[MastodonAPIError] (
literal[string] . identifier[format] (
identifier[instance] [ literal[string] ][ literal[string] ]))
keyword[else] :
identifier[url] = identifier[self] . identifier[api_base_url]
keyword[if] identifier[url] [- literal[int] ]== literal[string] :
identifier[url] = identifier[url] [:- literal[int] ]
keyword[def] identifier[connect_func] ():
identifier[headers] ={ literal[string] : literal[string] + identifier[self] . identifier[access_token] }
identifier[connection] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] + identifier[endpoint] , identifier[headers] = identifier[headers] , identifier[data] = identifier[params] , identifier[stream] = keyword[True] ,
identifier[timeout] =( identifier[self] . identifier[request_timeout] , identifier[timeout] ))
keyword[if] identifier[connection] . identifier[status_code] != literal[int] :
keyword[raise] identifier[MastodonNetworkError] ( literal[string] % identifier[connection] . identifier[reason] )
keyword[return] identifier[connection]
identifier[connection] = keyword[None]
keyword[class] identifier[__stream_handle] ():
keyword[def] identifier[__init__] ( identifier[self] , identifier[connection] , identifier[connect_func] , identifier[reconnect_async] , identifier[reconnect_async_wait_sec] ):
identifier[self] . identifier[closed] = keyword[False]
identifier[self] . identifier[running] = keyword[True]
identifier[self] . identifier[connection] = identifier[connection]
identifier[self] . identifier[connect_func] = identifier[connect_func]
identifier[self] . identifier[reconnect_async] = identifier[reconnect_async]
identifier[self] . identifier[reconnect_async_wait_sec] = identifier[reconnect_async_wait_sec]
identifier[self] . identifier[reconnecting] = keyword[False]
keyword[def] identifier[close] ( identifier[self] ):
identifier[self] . identifier[closed] = keyword[True]
identifier[self] . identifier[connection] . identifier[close] ()
keyword[def] identifier[is_alive] ( identifier[self] ):
keyword[return] identifier[self] . identifier[_thread] . identifier[is_alive] ()
keyword[def] identifier[is_receiving] ( identifier[self] ):
keyword[if] identifier[self] . identifier[closed] keyword[or] keyword[not] identifier[self] . identifier[running] keyword[or] identifier[self] . identifier[reconnecting] keyword[or] keyword[not] identifier[self] . identifier[is_alive] ():
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True]
keyword[def] identifier[_threadproc] ( identifier[self] ):
identifier[self] . identifier[_thread] = identifier[threading] . identifier[current_thread] ()
keyword[while] identifier[self] . identifier[running] :
keyword[if] keyword[not] identifier[self] . identifier[connection] keyword[is] keyword[None] :
keyword[with] identifier[closing] ( identifier[self] . identifier[connection] ) keyword[as] identifier[r] :
keyword[try] :
identifier[listener] . identifier[handle_stream] ( identifier[r] )
keyword[except] ( identifier[AttributeError] , identifier[MastodonMalformedEventError] , identifier[MastodonNetworkError] ) keyword[as] identifier[e] :
keyword[if] keyword[not] ( identifier[self] . identifier[closed] keyword[or] identifier[self] . identifier[reconnect_async] ):
keyword[raise] identifier[e]
keyword[else] :
keyword[if] identifier[self] . identifier[closed] :
identifier[self] . identifier[running] = keyword[False]
keyword[if] ( identifier[self] . identifier[reconnect_async] keyword[and] keyword[not] identifier[self] . identifier[closed] ) keyword[or] identifier[self] . identifier[connection] keyword[is] keyword[None] :
identifier[self] . identifier[reconnecting] = keyword[True]
identifier[connect_success] = keyword[False]
keyword[while] keyword[not] identifier[connect_success] :
identifier[connect_success] = keyword[True]
keyword[try] :
identifier[self] . identifier[connection] = identifier[self] . identifier[connect_func] ()
keyword[if] identifier[self] . identifier[connection] . identifier[status_code] != literal[int] :
identifier[time] . identifier[sleep] ( identifier[self] . identifier[reconnect_async_wait_sec] )
identifier[connect_success] = keyword[False]
identifier[exception] = identifier[MastodonNetworkError] ( literal[string] )
identifier[listener] . identifier[on_abort] ( identifier[exception] )
keyword[except] :
identifier[time] . identifier[sleep] ( identifier[self] . identifier[reconnect_async_wait_sec] )
identifier[connect_success] = keyword[False]
identifier[self] . identifier[reconnecting] = keyword[False]
keyword[else] :
identifier[self] . identifier[running] = keyword[False]
keyword[return] literal[int]
keyword[if] identifier[run_async] :
identifier[handle] = identifier[__stream_handle] ( identifier[connection] , identifier[connect_func] , identifier[reconnect_async] , identifier[reconnect_async_wait_sec] )
identifier[t] = identifier[threading] . identifier[Thread] ( identifier[args] =(), identifier[target] = identifier[handle] . identifier[_threadproc] )
identifier[t] . identifier[daemon] = keyword[True]
identifier[t] . identifier[start] ()
keyword[return] identifier[handle]
keyword[else] :
identifier[connection] = identifier[connect_func] ()
keyword[with] identifier[closing] ( identifier[connection] ) keyword[as] identifier[r] :
identifier[listener] . identifier[handle_stream] ( identifier[r] )
|
def __stream(self, endpoint, listener, params={}, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
"""
Internal streaming API helper.
Returns a handle to the open connection that the user can close if they
wish to terminate it.
"""
# Check if we have to redirect
instance = self.instance()
if 'streaming_api' in instance['urls'] and instance['urls']['streaming_api'] != self.api_base_url:
# This is probably a websockets URL, which is really for the browser, but requests can't handle it
# So we do this below to turn it into an HTTPS or HTTP URL
parse = urlparse(instance['urls']['streaming_api'])
if parse.scheme == 'wss':
url = 'https://' + parse.netloc # depends on [control=['if'], data=[]]
elif parse.scheme == 'ws':
url = 'http://' + parse.netloc # depends on [control=['if'], data=[]]
else:
raise MastodonAPIError('Could not parse streaming api location returned from server: {}.'.format(instance['urls']['streaming_api'])) # depends on [control=['if'], data=[]]
else:
url = self.api_base_url
# The streaming server can't handle two slashes in a path, so remove trailing slashes
if url[-1] == '/':
url = url[:-1] # depends on [control=['if'], data=[]]
# Connect function (called and then potentially passed to async handler)
def connect_func():
headers = {'Authorization': 'Bearer ' + self.access_token}
connection = self.session.get(url + endpoint, headers=headers, data=params, stream=True, timeout=(self.request_timeout, timeout))
if connection.status_code != 200:
raise MastodonNetworkError('Could not connect to streaming server: %s' % connection.reason) # depends on [control=['if'], data=[]]
return connection
connection = None
# Async stream handler
class __stream_handle:
def __init__(self, connection, connect_func, reconnect_async, reconnect_async_wait_sec):
self.closed = False
self.running = True
self.connection = connection
self.connect_func = connect_func
self.reconnect_async = reconnect_async
self.reconnect_async_wait_sec = reconnect_async_wait_sec
self.reconnecting = False
def close(self):
self.closed = True
self.connection.close()
def is_alive(self):
return self._thread.is_alive()
def is_receiving(self):
if self.closed or not self.running or self.reconnecting or (not self.is_alive()):
return False # depends on [control=['if'], data=[]]
else:
return True
def _threadproc(self):
self._thread = threading.current_thread()
# Run until closed or until error if not autoreconnecting
while self.running:
if not self.connection is None:
with closing(self.connection) as r:
try:
listener.handle_stream(r) # depends on [control=['try'], data=[]]
except (AttributeError, MastodonMalformedEventError, MastodonNetworkError) as e:
if not (self.closed or self.reconnect_async):
raise e # depends on [control=['if'], data=[]]
elif self.closed:
self.running = False # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['with'], data=['r']] # depends on [control=['if'], data=[]]
# Reconnect loop. Try immediately once, then with delays on error.
if self.reconnect_async and (not self.closed) or self.connection is None:
self.reconnecting = True
connect_success = False
while not connect_success:
connect_success = True
try:
self.connection = self.connect_func()
if self.connection.status_code != 200:
time.sleep(self.reconnect_async_wait_sec)
connect_success = False
exception = MastodonNetworkError('Could not connect to server.')
listener.on_abort(exception) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
time.sleep(self.reconnect_async_wait_sec)
connect_success = False # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
self.reconnecting = False # depends on [control=['if'], data=[]]
else:
self.running = False # depends on [control=['while'], data=[]]
return 0
if run_async:
handle = __stream_handle(connection, connect_func, reconnect_async, reconnect_async_wait_sec)
t = threading.Thread(args=(), target=handle._threadproc)
t.daemon = True
t.start()
return handle # depends on [control=['if'], data=[]]
else:
# Blocking, never returns (can only leave via exception)
connection = connect_func()
with closing(connection) as r:
listener.handle_stream(r) # depends on [control=['with'], data=['r']]
|
def _setup_imports(self):
"""
        Ensure the local importer and PushFileService have everything for the
        Ansible module before setup() completes, but before detach() is called
        in an asynchronous task.
        The master automatically streams modules towards us concurrently with
        the runner invocation; however, there is no public API to synchronize
        on the completion of those preloads. Instead, simply reuse the importer's
synchronization mechanism by importing everything the module will need
prior to detaching.
"""
for fullname, _, _ in self.module_map['custom']:
mitogen.core.import_module(fullname)
for fullname in self.module_map['builtin']:
mitogen.core.import_module(fullname)
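# Hedged sketch of the module_map shape this method expects (assumption:
# 'custom' entries are triples whose first element is the dotted module name,
# and 'builtin' entries are plain dotted names; the entries below are
# hypothetical):
#
# self.module_map = {
#     'custom': [('ansible.module_utils.my_helper', None, None)],
#     'builtin': ['ansible.module_utils.basic'],
# }
# self._setup_imports()  # returns once every listed module has been imported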
|
def function[_setup_imports, parameter[self]]:
constant[
    Ensure the local importer and PushFileService have everything for the
    Ansible module before setup() completes, but before detach() is called
    in an asynchronous task.
    The master automatically streams modules towards us concurrently with
    the runner invocation; however, there is no public API to synchronize
    on the completion of those preloads. Instead, simply reuse the importer's
synchronization mechanism by importing everything the module will need
prior to detaching.
]
for taget[tuple[[<ast.Name object at 0x7da1b1d4d3c0>, <ast.Name object at 0x7da1b1d4c2e0>, <ast.Name object at 0x7da1b1d4ea40>]]] in starred[call[name[self].module_map][constant[custom]]] begin[:]
call[name[mitogen].core.import_module, parameter[name[fullname]]]
for taget[name[fullname]] in starred[call[name[self].module_map][constant[builtin]]] begin[:]
call[name[mitogen].core.import_module, parameter[name[fullname]]]
|
keyword[def] identifier[_setup_imports] ( identifier[self] ):
literal[string]
keyword[for] identifier[fullname] , identifier[_] , identifier[_] keyword[in] identifier[self] . identifier[module_map] [ literal[string] ]:
identifier[mitogen] . identifier[core] . identifier[import_module] ( identifier[fullname] )
keyword[for] identifier[fullname] keyword[in] identifier[self] . identifier[module_map] [ literal[string] ]:
identifier[mitogen] . identifier[core] . identifier[import_module] ( identifier[fullname] )
|
def _setup_imports(self):
"""
    Ensure the local importer and PushFileService have everything for the
    Ansible module before setup() completes, but before detach() is called
    in an asynchronous task.
    The master automatically streams modules towards us concurrently with
    the runner invocation; however, there is no public API to synchronize
    on the completion of those preloads. Instead, simply reuse the importer's
synchronization mechanism by importing everything the module will need
prior to detaching.
"""
for (fullname, _, _) in self.module_map['custom']:
mitogen.core.import_module(fullname) # depends on [control=['for'], data=[]]
for fullname in self.module_map['builtin']:
mitogen.core.import_module(fullname) # depends on [control=['for'], data=['fullname']]
|
def generate_bq_schema(df, default_type="STRING"):
"""DEPRECATED: Given a passed df, generate the associated Google BigQuery
schema.
Parameters
----------
df : DataFrame
default_type : string
        The default BigQuery type in case the type of the column
does not exist in the schema.
"""
# deprecation TimeSeries, #11121
warnings.warn(
"generate_bq_schema is deprecated and will be removed in "
"a future version",
FutureWarning,
stacklevel=2,
)
return _generate_bq_schema(df, default_type=default_type)
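# Minimal sketch (assumption: the private _generate_bq_schema helper returns a
# {'fields': [...]} mapping of column names to BigQuery types):
#
# import pandas as pd
# df = pd.DataFrame({'name': ['a'], 'score': [1.5]})
# schema = generate_bq_schema(df)  # emits a FutureWarning
# # e.g. {'fields': [{'name': 'name', 'type': 'STRING'},
# #                  {'name': 'score', 'type': 'FLOAT'}]}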
|
def function[generate_bq_schema, parameter[df, default_type]]:
constant[DEPRECATED: Given a passed df, generate the associated Google BigQuery
schema.
Parameters
----------
df : DataFrame
default_type : string
    The default BigQuery type in case the type of the column
does not exist in the schema.
]
call[name[warnings].warn, parameter[constant[generate_bq_schema is deprecated and will be removed in a future version], name[FutureWarning]]]
return[call[name[_generate_bq_schema], parameter[name[df]]]]
|
keyword[def] identifier[generate_bq_schema] ( identifier[df] , identifier[default_type] = literal[string] ):
literal[string]
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[FutureWarning] ,
identifier[stacklevel] = literal[int] ,
)
keyword[return] identifier[_generate_bq_schema] ( identifier[df] , identifier[default_type] = identifier[default_type] )
|
def generate_bq_schema(df, default_type='STRING'):
"""DEPRECATED: Given a passed df, generate the associated Google BigQuery
schema.
Parameters
----------
df : DataFrame
default_type : string
        The default BigQuery type in case the type of the column
does not exist in the schema.
"""
# deprecation TimeSeries, #11121
warnings.warn('generate_bq_schema is deprecated and will be removed in a future version', FutureWarning, stacklevel=2)
return _generate_bq_schema(df, default_type=default_type)
|
def remove_from_parent(self):
'''
Removes this frame from its parent, and nulls the parent link
'''
if self.parent:
self.parent._children.remove(self)
self.parent._invalidate_time_caches()
self.parent = None
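# Usage sketch with a hypothetical minimal Frame that exposes the attributes
# this method relies on (parent, _children, _invalidate_time_caches):
#
# child, parent = Frame(), Frame()
# child.parent = parent
# parent._children.append(child)
# child.remove_from_parent()
# assert child.parent is None and child not in parent._children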
|
def function[remove_from_parent, parameter[self]]:
constant[
Removes this frame from its parent, and nulls the parent link
]
if name[self].parent begin[:]
call[name[self].parent._children.remove, parameter[name[self]]]
call[name[self].parent._invalidate_time_caches, parameter[]]
name[self].parent assign[=] constant[None]
|
keyword[def] identifier[remove_from_parent] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] :
identifier[self] . identifier[parent] . identifier[_children] . identifier[remove] ( identifier[self] )
identifier[self] . identifier[parent] . identifier[_invalidate_time_caches] ()
identifier[self] . identifier[parent] = keyword[None]
|
def remove_from_parent(self):
"""
Removes this frame from its parent, and nulls the parent link
"""
if self.parent:
self.parent._children.remove(self)
self.parent._invalidate_time_caches()
self.parent = None # depends on [control=['if'], data=[]]
|
def list_config_map_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ConfigMap
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_config_map_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ConfigMapList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_config_map_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_config_map_for_all_namespaces_with_http_info(**kwargs)
return data
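# Usage sketch against the Kubernetes Python client (CoreV1Api exposes this
# method); the kubeconfig, label selector, and limit are illustrative:
#
# from kubernetes import client, config
# config.load_kube_config()
# v1 = client.CoreV1Api()
# cm_list = v1.list_config_map_for_all_namespaces(label_selector='app=web',
#                                                 limit=50)
# for cm in cm_list.items:
#     print(cm.metadata.namespace, cm.metadata.name)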
|
def function[list_config_map_for_all_namespaces, parameter[self]]:
constant[
list or watch objects of kind ConfigMap
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_config_map_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ConfigMapList
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].list_config_map_for_all_namespaces_with_http_info, parameter[]]]
|
keyword[def] identifier[list_config_map_for_all_namespaces] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[list_config_map_for_all_namespaces_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[list_config_map_for_all_namespaces_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data]
|
def list_config_map_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ConfigMap
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_config_map_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ConfigMapList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_config_map_for_all_namespaces_with_http_info(**kwargs) # depends on [control=['if'], data=[]]
else:
data = self.list_config_map_for_all_namespaces_with_http_info(**kwargs)
return data
|
def _validate_importers(importers):
"""Validates the importers and decorates the callables with our output
formatter.
"""
# They could have no importers, that's chill
if importers is None:
return None
def _to_importer(priority, func):
assert isinstance(priority, int), priority
assert callable(func), func
return (priority, _importer_callback_wrapper(func))
# Our code assumes tuple of tuples
return tuple(_to_importer(priority, func) for priority, func in importers)
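# Sketch of the accepted shape: an iterable of (priority, callable) pairs.
# _importer_callback_wrapper is assumed to adapt each callable's return value
# for the underlying engine; the resolver below is hypothetical.
#
# def resolve(path):
#     return ((path, 'body { color: red; }'),)
#
# _validate_importers(None)            # -> None
# _validate_importers([(0, resolve)])  # -> ((0, <wrapped resolve>),)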
|
def function[_validate_importers, parameter[importers]]:
constant[Validates the importers and decorates the callables with our output
formatter.
]
if compare[name[importers] is constant[None]] begin[:]
return[constant[None]]
def function[_to_importer, parameter[priority, func]]:
assert[call[name[isinstance], parameter[name[priority], name[int]]]]
assert[call[name[callable], parameter[name[func]]]]
return[tuple[[<ast.Name object at 0x7da18f812230>, <ast.Call object at 0x7da18f8110c0>]]]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18f812260>]]]
|
keyword[def] identifier[_validate_importers] ( identifier[importers] ):
literal[string]
keyword[if] identifier[importers] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[def] identifier[_to_importer] ( identifier[priority] , identifier[func] ):
keyword[assert] identifier[isinstance] ( identifier[priority] , identifier[int] ), identifier[priority]
keyword[assert] identifier[callable] ( identifier[func] ), identifier[func]
keyword[return] ( identifier[priority] , identifier[_importer_callback_wrapper] ( identifier[func] ))
keyword[return] identifier[tuple] ( identifier[_to_importer] ( identifier[priority] , identifier[func] ) keyword[for] identifier[priority] , identifier[func] keyword[in] identifier[importers] )
|
def _validate_importers(importers):
"""Validates the importers and decorates the callables with our output
formatter.
"""
# They could have no importers, that's chill
if importers is None:
return None # depends on [control=['if'], data=[]]
def _to_importer(priority, func):
assert isinstance(priority, int), priority
assert callable(func), func
return (priority, _importer_callback_wrapper(func))
# Our code assumes tuple of tuples
return tuple((_to_importer(priority, func) for (priority, func) in importers))
|
def getIndexForValue(self, value):
"""
Return the ramp index for the given value
:param value: Lookup value
:rtype: int
"""
return math.trunc(self.slope * float(value) + self.intercept)
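# Worked example: with self.slope = 2.0 and self.intercept = 1.0, a lookup
# value of 3.5 maps to trunc(2.0 * 3.5 + 1.0) = trunc(8.0) = 8. Note the use
# of truncation rather than rounding: with slope 1.0 and intercept 0.0, a
# value of 3.9 maps to index 3, not 4.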
|
def function[getIndexForValue, parameter[self, value]]:
constant[
Return the ramp index for the given value
:param value: Lookup value
:rtype: int
]
return[call[name[math].trunc, parameter[binary_operation[binary_operation[name[self].slope * call[name[float], parameter[name[value]]]] + name[self].intercept]]]]
|
keyword[def] identifier[getIndexForValue] ( identifier[self] , identifier[value] ):
literal[string]
keyword[return] identifier[math] . identifier[trunc] ( identifier[self] . identifier[slope] * identifier[float] ( identifier[value] )+ identifier[self] . identifier[intercept] )
|
def getIndexForValue(self, value):
"""
Return the ramp index for the given value
:param value: Lookup value
:rtype: int
"""
return math.trunc(self.slope * float(value) + self.intercept)
|
def map_navigation(self):
"""
This is a wrapper for depth-first recursive analysis of the article
"""
#All articles should have titles
title_id = 'titlepage-{0}'.format(self.article_doi)
title_label = self.article.publisher.nav_title()
title_source = 'main.{0}.xhtml#title'.format(self.article_doi)
title_navpoint = navpoint(title_id, title_label, self.play_order,
title_source, [])
self.nav.append(title_navpoint)
#When processing a collection of articles, we will want all subsequent
#navpoints for this article to be located under the title
if self.collection:
nav_insertion = title_navpoint.children
else:
nav_insertion = self.nav
#If the article has a body, we'll need to parse it for navigation
if self.article.body is not None:
#Here is where we invoke the recursive parsing!
for nav_pt in self.recursive_article_navmap(self.article.body):
nav_insertion.append(nav_pt)
#Add a navpoint to the references if appropriate
if self.article.root.xpath('./back/ref'):
ref_id = 'references-{0}'.format(self.article_doi)
ref_label = 'References'
ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi)
ref_navpoint = navpoint(ref_id, ref_label, self.play_order,
ref_source, [])
nav_insertion.append(ref_navpoint)
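# Hedged sketch of the resulting structure (assumption: navpoint is a
# namedtuple-like (id, label, play_order, source, children) record; the DOI is
# illustrative):
#
# navpoint('titlepage-10.1371/journal.pone.0012345', 'Article Title', 1,
#          'main.10.1371/journal.pone.0012345.xhtml#title',
#          [<body navpoints>, <references navpoint if ./back/ref exists>])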
|
def function[map_navigation, parameter[self]]:
constant[
This is a wrapper for depth-first recursive analysis of the article
]
variable[title_id] assign[=] call[constant[titlepage-{0}].format, parameter[name[self].article_doi]]
variable[title_label] assign[=] call[name[self].article.publisher.nav_title, parameter[]]
variable[title_source] assign[=] call[constant[main.{0}.xhtml#title].format, parameter[name[self].article_doi]]
variable[title_navpoint] assign[=] call[name[navpoint], parameter[name[title_id], name[title_label], name[self].play_order, name[title_source], list[[]]]]
call[name[self].nav.append, parameter[name[title_navpoint]]]
if name[self].collection begin[:]
variable[nav_insertion] assign[=] name[title_navpoint].children
if compare[name[self].article.body is_not constant[None]] begin[:]
for taget[name[nav_pt]] in starred[call[name[self].recursive_article_navmap, parameter[name[self].article.body]]] begin[:]
call[name[nav_insertion].append, parameter[name[nav_pt]]]
if call[name[self].article.root.xpath, parameter[constant[./back/ref]]] begin[:]
variable[ref_id] assign[=] call[constant[references-{0}].format, parameter[name[self].article_doi]]
variable[ref_label] assign[=] constant[References]
variable[ref_source] assign[=] call[constant[biblio.{0}.xhtml#references].format, parameter[name[self].article_doi]]
variable[ref_navpoint] assign[=] call[name[navpoint], parameter[name[ref_id], name[ref_label], name[self].play_order, name[ref_source], list[[]]]]
call[name[nav_insertion].append, parameter[name[ref_navpoint]]]
|
keyword[def] identifier[map_navigation] ( identifier[self] ):
literal[string]
identifier[title_id] = literal[string] . identifier[format] ( identifier[self] . identifier[article_doi] )
identifier[title_label] = identifier[self] . identifier[article] . identifier[publisher] . identifier[nav_title] ()
identifier[title_source] = literal[string] . identifier[format] ( identifier[self] . identifier[article_doi] )
identifier[title_navpoint] = identifier[navpoint] ( identifier[title_id] , identifier[title_label] , identifier[self] . identifier[play_order] ,
identifier[title_source] ,[])
identifier[self] . identifier[nav] . identifier[append] ( identifier[title_navpoint] )
keyword[if] identifier[self] . identifier[collection] :
identifier[nav_insertion] = identifier[title_navpoint] . identifier[children]
keyword[else] :
identifier[nav_insertion] = identifier[self] . identifier[nav]
keyword[if] identifier[self] . identifier[article] . identifier[body] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nav_pt] keyword[in] identifier[self] . identifier[recursive_article_navmap] ( identifier[self] . identifier[article] . identifier[body] ):
identifier[nav_insertion] . identifier[append] ( identifier[nav_pt] )
keyword[if] identifier[self] . identifier[article] . identifier[root] . identifier[xpath] ( literal[string] ):
identifier[ref_id] = literal[string] . identifier[format] ( identifier[self] . identifier[article_doi] )
identifier[ref_label] = literal[string]
identifier[ref_source] = literal[string] . identifier[format] ( identifier[self] . identifier[article_doi] )
identifier[ref_navpoint] = identifier[navpoint] ( identifier[ref_id] , identifier[ref_label] , identifier[self] . identifier[play_order] ,
identifier[ref_source] ,[])
identifier[nav_insertion] . identifier[append] ( identifier[ref_navpoint] )
|
def map_navigation(self):
"""
This is a wrapper for depth-first recursive analysis of the article
"""
#All articles should have titles
title_id = 'titlepage-{0}'.format(self.article_doi)
title_label = self.article.publisher.nav_title()
title_source = 'main.{0}.xhtml#title'.format(self.article_doi)
title_navpoint = navpoint(title_id, title_label, self.play_order, title_source, [])
self.nav.append(title_navpoint)
#When processing a collection of articles, we will want all subsequent
#navpoints for this article to be located under the title
if self.collection:
nav_insertion = title_navpoint.children # depends on [control=['if'], data=[]]
else:
nav_insertion = self.nav
#If the article has a body, we'll need to parse it for navigation
if self.article.body is not None:
#Here is where we invoke the recursive parsing!
for nav_pt in self.recursive_article_navmap(self.article.body):
nav_insertion.append(nav_pt) # depends on [control=['for'], data=['nav_pt']] # depends on [control=['if'], data=[]]
#Add a navpoint to the references if appropriate
if self.article.root.xpath('./back/ref'):
ref_id = 'references-{0}'.format(self.article_doi)
ref_label = 'References'
ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi)
ref_navpoint = navpoint(ref_id, ref_label, self.play_order, ref_source, [])
nav_insertion.append(ref_navpoint) # depends on [control=['if'], data=[]]
|
def lang_match_rdf(triple, accepted_languages):
'''Find if the RDF triple contains acceptable language data'''
if not accepted_languages:
return True
languages = set([n.language for n in triple if isinstance(n, Literal)])
return (not languages) or (languages & accepted_languages)
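# Runnable sketch (assumption: Literal and the triple types come from rdflib):
from rdflib import Literal, URIRef
_triple = (URIRef('http://ex.org/s'), URIRef('http://ex.org/p'),
           Literal('hello', lang='en'))
assert lang_match_rdf(_triple, {'en'})      # 'en' is in the accepted set
assert not lang_match_rdf(_triple, {'fr'})  # 'en' is not accepted
assert lang_match_rdf(_triple, set())       # empty set accepts everything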
|
def function[lang_match_rdf, parameter[triple, accepted_languages]]:
constant[Find if the RDF triple contains acceptable language data]
if <ast.UnaryOp object at 0x7da18f09edd0> begin[:]
return[constant[True]]
variable[languages] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da18f813970>]]
return[<ast.BoolOp object at 0x7da18f813520>]
|
keyword[def] identifier[lang_match_rdf] ( identifier[triple] , identifier[accepted_languages] ):
literal[string]
keyword[if] keyword[not] identifier[accepted_languages] :
keyword[return] keyword[True]
identifier[languages] = identifier[set] ([ identifier[n] . identifier[language] keyword[for] identifier[n] keyword[in] identifier[triple] keyword[if] identifier[isinstance] ( identifier[n] , identifier[Literal] )])
keyword[return] ( keyword[not] identifier[languages] ) keyword[or] ( identifier[languages] & identifier[accepted_languages] )
|
def lang_match_rdf(triple, accepted_languages):
"""Find if the RDF triple contains acceptable language data"""
if not accepted_languages:
return True # depends on [control=['if'], data=[]]
languages = set([n.language for n in triple if isinstance(n, Literal)])
return not languages or languages & accepted_languages
|
def Rx(rads: Union[float, sympy.Basic]) -> XPowGate:
"""Returns a gate with the matrix e^{-i X rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return XPowGate(exponent=rads / pi, global_shift=-0.5)
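# Worked example: Rx(np.pi) has unitary exp(-i * X * pi / 2) = -i * X, i.e.
# the Pauli X gate up to a global phase of -i. Passing a sympy expression
# such as sympy.Symbol('theta') keeps the exponent symbolic, which is why the
# sympy.pi branch above is needed.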
|
def function[Rx, parameter[rads]]:
constant[Returns a gate with the matrix e^{-i X rads / 2}.]
variable[pi] assign[=] <ast.IfExp object at 0x7da1b1c62770>
return[call[name[XPowGate], parameter[]]]
|
keyword[def] identifier[Rx] ( identifier[rads] : identifier[Union] [ identifier[float] , identifier[sympy] . identifier[Basic] ])-> identifier[XPowGate] :
literal[string]
identifier[pi] = identifier[sympy] . identifier[pi] keyword[if] identifier[protocols] . identifier[is_parameterized] ( identifier[rads] ) keyword[else] identifier[np] . identifier[pi]
keyword[return] identifier[XPowGate] ( identifier[exponent] = identifier[rads] / identifier[pi] , identifier[global_shift] =- literal[int] )
|
def Rx(rads: Union[float, sympy.Basic]) -> XPowGate:
"""Returns a gate with the matrix e^{-i X rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return XPowGate(exponent=rads / pi, global_shift=-0.5)
|
def publish_synchronous(self, *args, **kwargs):
'''
Helper for publishing a message using transactions. If 'cb' keyword
        arg is supplied, it will be called when the transaction is committed.
'''
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb)
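# Hedged usage sketch (assumptions: `channel` is an open channel instance of
# the class defining this method, and the callback fires when the broker
# acknowledges tx.commit):
#
# def on_commit():
#     print('message committed')
#
# channel.publish_synchronous(msg, exchange='logs', routing_key='info',
#                             cb=on_commit)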
|
def function[publish_synchronous, parameter[self]]:
constant[
Helper for publishing a message using transactions. If 'cb' keyword
    arg is supplied, it will be called when the transaction is committed.
]
variable[cb] assign[=] call[name[kwargs].pop, parameter[constant[cb], constant[None]]]
call[name[self].tx.select, parameter[]]
call[name[self].basic.publish, parameter[<ast.Starred object at 0x7da1b06982e0>]]
call[name[self].tx.commit, parameter[]]
|
keyword[def] identifier[publish_synchronous] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[cb] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[self] . identifier[tx] . identifier[select] ()
identifier[self] . identifier[basic] . identifier[publish] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[tx] . identifier[commit] ( identifier[cb] = identifier[cb] )
|
def publish_synchronous(self, *args, **kwargs):
"""
Helper for publishing a message using transactions. If 'cb' keyword
    arg is supplied, it will be called when the transaction is committed.
"""
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb)
|
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):
"""
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),
or using a specific dynamic library on Linux/BSD (.so).
This can also be used to configure oscrypto to use LibreSSL dynamic
libraries.
This method must be called before any oscrypto submodules are imported.
:param libcrypto_path:
A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
dynamic library.
:param libssl_path:
A unicode string of the file path to the OpenSSL/LibreSSL libssl
dynamic library.
:param trust_list_path:
An optional unicode string of the path to a file containing
OpenSSL-compatible CA certificates in PEM format. If this is not
provided and the platform is OS X or Windows, the system trust roots
will be exported from the OS and used for all TLS connections.
:raises:
ValueError - when one of the paths is not a unicode string
OSError - when the trust_list_path does not exist on the filesystem
        oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem
RuntimeError - when this function is called after another part of oscrypto has been imported
"""
if not isinstance(libcrypto_path, str_cls):
raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path))
if not isinstance(libssl_path, str_cls):
raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path))
if not os.path.exists(libcrypto_path):
raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path)
if not os.path.exists(libssl_path):
raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path)
if trust_list_path is not None:
if not isinstance(trust_list_path, str_cls):
raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path))
if not os.path.exists(trust_list_path):
raise OSError('trust_list_path does not exist at %s' % trust_list_path)
with _backend_lock:
if _module_values['backend'] is not None:
raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL')
_module_values['backend'] = 'openssl'
_module_values['backend_config'] = {
'libcrypto_path': libcrypto_path,
'libssl_path': libssl_path,
'trust_list_path': trust_list_path,
}
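# Usage sketch: call this before importing any other oscrypto submodule. The
# LibreSSL paths below are assumptions for illustration.
#
# import oscrypto
# oscrypto.use_openssl(
#     '/usr/local/opt/libressl/lib/libcrypto.dylib',
#     '/usr/local/opt/libressl/lib/libssl.dylib',
#     trust_list_path='/usr/local/etc/libressl/cert.pem',
# )
# from oscrypto import tls  # now backed by the libraries configured above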
|
def function[use_openssl, parameter[libcrypto_path, libssl_path, trust_list_path]]:
constant[
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),
or using a specific dynamic library on Linux/BSD (.so).
This can also be used to configure oscrypto to use LibreSSL dynamic
libraries.
This method must be called before any oscrypto submodules are imported.
:param libcrypto_path:
A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
dynamic library.
:param libssl_path:
A unicode string of the file path to the OpenSSL/LibreSSL libssl
dynamic library.
:param trust_list_path:
An optional unicode string of the path to a file containing
OpenSSL-compatible CA certificates in PEM format. If this is not
provided and the platform is OS X or Windows, the system trust roots
will be exported from the OS and used for all TLS connections.
:raises:
ValueError - when one of the paths is not a unicode string
OSError - when the trust_list_path does not exist on the filesystem
        oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem
RuntimeError - when this function is called after another part of oscrypto has been imported
]
if <ast.UnaryOp object at 0x7da18bccaf20> begin[:]
<ast.Raise object at 0x7da18bcc9d80>
if <ast.UnaryOp object at 0x7da212db5090> begin[:]
<ast.Raise object at 0x7da18c4cd870>
if <ast.UnaryOp object at 0x7da18bcc8d00> begin[:]
<ast.Raise object at 0x7da18bcc9780>
if <ast.UnaryOp object at 0x7da18bcc8430> begin[:]
<ast.Raise object at 0x7da18bcc8ca0>
if compare[name[trust_list_path] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da18bccacb0> begin[:]
<ast.Raise object at 0x7da18bcca440>
if <ast.UnaryOp object at 0x7da18bcc9750> begin[:]
<ast.Raise object at 0x7da18bccb520>
with name[_backend_lock] begin[:]
if compare[call[name[_module_values]][constant[backend]] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da18bcc89a0>
call[name[_module_values]][constant[backend]] assign[=] constant[openssl]
call[name[_module_values]][constant[backend_config]] assign[=] dictionary[[<ast.Constant object at 0x7da18bccadd0>, <ast.Constant object at 0x7da18bccbdf0>, <ast.Constant object at 0x7da18bccb310>], [<ast.Name object at 0x7da18bccab60>, <ast.Name object at 0x7da18bcc9540>, <ast.Name object at 0x7da18bccb220>]]
|
keyword[def] identifier[use_openssl] ( identifier[libcrypto_path] , identifier[libssl_path] , identifier[trust_list_path] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[libcrypto_path] , identifier[str_cls] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[type_name] ( identifier[libcrypto_path] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[libssl_path] , identifier[str_cls] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[type_name] ( identifier[libssl_path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[libcrypto_path] ):
keyword[raise] identifier[LibraryNotFoundError] ( literal[string] % identifier[libcrypto_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[libssl_path] ):
keyword[raise] identifier[LibraryNotFoundError] ( literal[string] % identifier[libssl_path] )
keyword[if] identifier[trust_list_path] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[trust_list_path] , identifier[str_cls] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[type_name] ( identifier[trust_list_path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[trust_list_path] ):
keyword[raise] identifier[OSError] ( literal[string] % identifier[trust_list_path] )
keyword[with] identifier[_backend_lock] :
keyword[if] identifier[_module_values] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[_module_values] [ literal[string] ]= literal[string]
identifier[_module_values] [ literal[string] ]={
literal[string] : identifier[libcrypto_path] ,
literal[string] : identifier[libssl_path] ,
literal[string] : identifier[trust_list_path] ,
}
|
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):
"""
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),
or using a specific dynamic library on Linux/BSD (.so).
This can also be used to configure oscrypto to use LibreSSL dynamic
libraries.
This method must be called before any oscrypto submodules are imported.
:param libcrypto_path:
A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
dynamic library.
:param libssl_path:
A unicode string of the file path to the OpenSSL/LibreSSL libssl
dynamic library.
:param trust_list_path:
An optional unicode string of the path to a file containing
OpenSSL-compatible CA certificates in PEM format. If this is not
provided and the platform is OS X or Windows, the system trust roots
will be exported from the OS and used for all TLS connections.
:raises:
ValueError - when one of the paths is not a unicode string
OSError - when the trust_list_path does not exist on the filesystem
        oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem
RuntimeError - when this function is called after another part of oscrypto has been imported
"""
if not isinstance(libcrypto_path, str_cls):
raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path)) # depends on [control=['if'], data=[]]
if not isinstance(libssl_path, str_cls):
raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path)) # depends on [control=['if'], data=[]]
if not os.path.exists(libcrypto_path):
raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path) # depends on [control=['if'], data=[]]
if not os.path.exists(libssl_path):
raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path) # depends on [control=['if'], data=[]]
if trust_list_path is not None:
if not isinstance(trust_list_path, str_cls):
raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path)) # depends on [control=['if'], data=[]]
if not os.path.exists(trust_list_path):
raise OSError('trust_list_path does not exist at %s' % trust_list_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['trust_list_path']]
with _backend_lock:
if _module_values['backend'] is not None:
raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL') # depends on [control=['if'], data=[]]
_module_values['backend'] = 'openssl'
_module_values['backend_config'] = {'libcrypto_path': libcrypto_path, 'libssl_path': libssl_path, 'trust_list_path': trust_list_path} # depends on [control=['with'], data=[]]
|
def setup_rate_limit_handler(self, sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT,
rate_limit_header=RATE_LIMIT_HEADER,
rate_limit_reset_header=RATE_LIMIT_RESET_HEADER):
"""Setup the rate limit handler.
:param sleep_for_rate: sleep until rate limit is reset
        :param min_rate_to_sleep: minimum rate needed to make the fetching process sleep
        :param rate_limit_header: header from which to extract the rate limit data
        :param rate_limit_reset_header: header from which to extract the rate limit reset data
"""
self.rate_limit = None
self.rate_limit_reset_ts = None
self.sleep_for_rate = sleep_for_rate
self.rate_limit_header = rate_limit_header
self.rate_limit_reset_header = rate_limit_reset_header
if min_rate_to_sleep > self.MAX_RATE_LIMIT:
            msg = "Minimum rate to sleep value exceeded (%d)."
            msg += " High values might cause the client to sleep forever."
            msg += " Reset to %d."
self.min_rate_to_sleep = self.MAX_RATE_LIMIT
logger.warning(msg, min_rate_to_sleep, self.MAX_RATE_LIMIT)
else:
self.min_rate_to_sleep = min_rate_to_sleep
|
def function[setup_rate_limit_handler, parameter[self, sleep_for_rate, min_rate_to_sleep, rate_limit_header, rate_limit_reset_header]]:
constant[Setup the rate limit handler.
:param sleep_for_rate: sleep until rate limit is reset
    :param min_rate_to_sleep: minimum rate needed to make the fetching process sleep
    :param rate_limit_header: header from which to extract the rate limit data
    :param rate_limit_reset_header: header from which to extract the rate limit reset data
]
name[self].rate_limit assign[=] constant[None]
name[self].rate_limit_reset_ts assign[=] constant[None]
name[self].sleep_for_rate assign[=] name[sleep_for_rate]
name[self].rate_limit_header assign[=] name[rate_limit_header]
name[self].rate_limit_reset_header assign[=] name[rate_limit_reset_header]
if compare[name[min_rate_to_sleep] greater[>] name[self].MAX_RATE_LIMIT] begin[:]
variable[msg] assign[=] constant[Minimum rate to sleep value exceeded (%d).]
<ast.AugAssign object at 0x7da1b0382530>
<ast.AugAssign object at 0x7da1b03835b0>
name[self].min_rate_to_sleep assign[=] name[self].MAX_RATE_LIMIT
call[name[logger].warning, parameter[name[msg], name[min_rate_to_sleep], name[self].MAX_RATE_LIMIT]]
|
keyword[def] identifier[setup_rate_limit_handler] ( identifier[self] , identifier[sleep_for_rate] = keyword[False] , identifier[min_rate_to_sleep] = identifier[MIN_RATE_LIMIT] ,
identifier[rate_limit_header] = identifier[RATE_LIMIT_HEADER] ,
identifier[rate_limit_reset_header] = identifier[RATE_LIMIT_RESET_HEADER] ):
literal[string]
identifier[self] . identifier[rate_limit] = keyword[None]
identifier[self] . identifier[rate_limit_reset_ts] = keyword[None]
identifier[self] . identifier[sleep_for_rate] = identifier[sleep_for_rate]
identifier[self] . identifier[rate_limit_header] = identifier[rate_limit_header]
identifier[self] . identifier[rate_limit_reset_header] = identifier[rate_limit_reset_header]
keyword[if] identifier[min_rate_to_sleep] > identifier[self] . identifier[MAX_RATE_LIMIT] :
identifier[msg] = literal[string]
identifier[msg] += literal[string]
identifier[msg] += literal[string]
identifier[self] . identifier[min_rate_to_sleep] = identifier[self] . identifier[MAX_RATE_LIMIT]
identifier[logger] . identifier[warning] ( identifier[msg] , identifier[min_rate_to_sleep] , identifier[self] . identifier[MAX_RATE_LIMIT] )
keyword[else] :
identifier[self] . identifier[min_rate_to_sleep] = identifier[min_rate_to_sleep]
|
def setup_rate_limit_handler(self, sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT, rate_limit_header=RATE_LIMIT_HEADER, rate_limit_reset_header=RATE_LIMIT_RESET_HEADER):
"""Setup the rate limit handler.
:param sleep_for_rate: sleep until rate limit is reset
        :param min_rate_to_sleep: minimum rate needed to make the fetching process sleep
        :param rate_limit_header: header from which to extract the rate limit data
        :param rate_limit_reset_header: header from which to extract the rate limit reset data
"""
self.rate_limit = None
self.rate_limit_reset_ts = None
self.sleep_for_rate = sleep_for_rate
self.rate_limit_header = rate_limit_header
self.rate_limit_reset_header = rate_limit_reset_header
if min_rate_to_sleep > self.MAX_RATE_LIMIT:
            msg = 'Minimum rate to sleep value exceeded (%d).'
            msg += ' High values might cause the client to sleep forever.'
            msg += ' Reset to %d.'
self.min_rate_to_sleep = self.MAX_RATE_LIMIT
logger.warning(msg, min_rate_to_sleep, self.MAX_RATE_LIMIT) # depends on [control=['if'], data=['min_rate_to_sleep']]
else:
self.min_rate_to_sleep = min_rate_to_sleep
|
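A short call sketch for the rate limit handler above; 'client' is assumed to be an instance of a class that mixes in this method, and the header names are hypothetical:

# 'client' mixes in setup_rate_limit_handler; headers are illustrative.
client.setup_rate_limit_handler(
    sleep_for_rate=True,                          # sleep instead of failing near the limit
    min_rate_to_sleep=25,                         # sleep once fewer than 25 requests remain
    rate_limit_header='X-RateLimit-Remaining',    # hypothetical header name
    rate_limit_reset_header='X-RateLimit-Reset',  # hypothetical header name
)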
def dedent(string, indent_str=' ', max_levels=None):
"""Revert the effect of indentation.
Examples
--------
Remove a simple one-level indentation:
>>> text = '''<->This is line 1.
... <->Next line.
... <->And another one.'''
>>> print(text)
<->This is line 1.
<->Next line.
<->And another one.
>>> print(dedent(text, '<->'))
This is line 1.
Next line.
And another one.
Multiple levels of indentation:
>>> text = '''<->Level 1.
... <-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<->Level 1.
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 1.
<->Level 2.
<-><->Level 3.
>>> text = '''<-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 2.
<->Level 3.
>>> print(dedent(text, '<->', max_levels=1))
<->Level 2.
<-><->Level 3.
"""
if len(indent_str) == 0:
return string
lines = string.splitlines()
    # Determine common (minimum) number of indentation levels, capped at
# `max_levels` if given
def num_indents(line):
max_num = int(np.ceil(len(line) / len(indent_str)))
for i in range(max_num):
if line.startswith(indent_str):
line = line[len(indent_str):]
else:
break
return i
num_levels = num_indents(min(lines, key=num_indents))
if max_levels is not None:
num_levels = min(num_levels, max_levels)
# Dedent
dedent_len = num_levels * len(indent_str)
return '\n'.join(line[dedent_len:] for line in lines)
|
def function[dedent, parameter[string, indent_str, max_levels]]:
constant[Revert the effect of indentation.
Examples
--------
Remove a simple one-level indentation:
>>> text = '''<->This is line 1.
... <->Next line.
... <->And another one.'''
>>> print(text)
<->This is line 1.
<->Next line.
<->And another one.
>>> print(dedent(text, '<->'))
This is line 1.
Next line.
And another one.
Multiple levels of indentation:
>>> text = '''<->Level 1.
... <-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<->Level 1.
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 1.
<->Level 2.
<-><->Level 3.
>>> text = '''<-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 2.
<->Level 3.
>>> print(dedent(text, '<->', max_levels=1))
<->Level 2.
<-><->Level 3.
]
if compare[call[name[len], parameter[name[indent_str]]] equal[==] constant[0]] begin[:]
return[name[string]]
variable[lines] assign[=] call[name[string].splitlines, parameter[]]
def function[num_indents, parameter[line]]:
variable[max_num] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[call[name[len], parameter[name[line]]] / call[name[len], parameter[name[indent_str]]]]]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[max_num]]]] begin[:]
if call[name[line].startswith, parameter[name[indent_str]]] begin[:]
variable[line] assign[=] call[name[line]][<ast.Slice object at 0x7da18f58e710>]
return[name[i]]
variable[num_levels] assign[=] call[name[num_indents], parameter[call[name[min], parameter[name[lines]]]]]
if compare[name[max_levels] is_not constant[None]] begin[:]
variable[num_levels] assign[=] call[name[min], parameter[name[num_levels], name[max_levels]]]
variable[dedent_len] assign[=] binary_operation[name[num_levels] * call[name[len], parameter[name[indent_str]]]]
return[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da1b1e448b0>]]]
|
keyword[def] identifier[dedent] ( identifier[string] , identifier[indent_str] = literal[string] , identifier[max_levels] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[indent_str] )== literal[int] :
keyword[return] identifier[string]
identifier[lines] = identifier[string] . identifier[splitlines] ()
keyword[def] identifier[num_indents] ( identifier[line] ):
identifier[max_num] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[len] ( identifier[line] )/ identifier[len] ( identifier[indent_str] )))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max_num] ):
keyword[if] identifier[line] . identifier[startswith] ( identifier[indent_str] ):
identifier[line] = identifier[line] [ identifier[len] ( identifier[indent_str] ):]
keyword[else] :
keyword[break]
keyword[return] identifier[i]
identifier[num_levels] = identifier[num_indents] ( identifier[min] ( identifier[lines] , identifier[key] = identifier[num_indents] ))
keyword[if] identifier[max_levels] keyword[is] keyword[not] keyword[None] :
identifier[num_levels] = identifier[min] ( identifier[num_levels] , identifier[max_levels] )
identifier[dedent_len] = identifier[num_levels] * identifier[len] ( identifier[indent_str] )
keyword[return] literal[string] . identifier[join] ( identifier[line] [ identifier[dedent_len] :] keyword[for] identifier[line] keyword[in] identifier[lines] )
|
def dedent(string, indent_str=' ', max_levels=None):
"""Revert the effect of indentation.
Examples
--------
Remove a simple one-level indentation:
>>> text = '''<->This is line 1.
... <->Next line.
... <->And another one.'''
>>> print(text)
<->This is line 1.
<->Next line.
<->And another one.
>>> print(dedent(text, '<->'))
This is line 1.
Next line.
And another one.
Multiple levels of indentation:
>>> text = '''<->Level 1.
... <-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<->Level 1.
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 1.
<->Level 2.
<-><->Level 3.
>>> text = '''<-><->Level 2.
... <-><-><->Level 3.'''
>>> print(text)
<-><->Level 2.
<-><-><->Level 3.
>>> print(dedent(text, '<->'))
Level 2.
<->Level 3.
>>> print(dedent(text, '<->', max_levels=1))
<->Level 2.
<-><->Level 3.
"""
if len(indent_str) == 0:
return string # depends on [control=['if'], data=[]]
lines = string.splitlines()
    # Determine common (minimum) number of indentation levels, capped at
# `max_levels` if given
def num_indents(line):
max_num = int(np.ceil(len(line) / len(indent_str)))
for i in range(max_num):
if line.startswith(indent_str):
line = line[len(indent_str):] # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=[]]
return i
num_levels = num_indents(min(lines, key=num_indents))
if max_levels is not None:
num_levels = min(num_levels, max_levels) # depends on [control=['if'], data=['max_levels']]
# Dedent
dedent_len = num_levels * len(indent_str)
return '\n'.join((line[dedent_len:] for line in lines))
|
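A quick worked example of dedent's common-prefix behaviour, passing a four-space indent string explicitly since the default's width is ambiguous in the listing above:

text = '        a\n    b'   # two levels, then one level, of 4-space indent
print(dedent(text, indent_str='    '))
# Only the single common level is removed:
#     a
# b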
def dragEnterEvent(self, event):
"""Allow user to drag files"""
if mimedata2url(event.mimeData()):
event.accept()
else:
event.ignore()
|
def function[dragEnterEvent, parameter[self, event]]:
constant[Allow user to drag files]
if call[name[mimedata2url], parameter[call[name[event].mimeData, parameter[]]]] begin[:]
call[name[event].accept, parameter[]]
|
keyword[def] identifier[dragEnterEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[mimedata2url] ( identifier[event] . identifier[mimeData] ()):
identifier[event] . identifier[accept] ()
keyword[else] :
identifier[event] . identifier[ignore] ()
|
def dragEnterEvent(self, event):
"""Allow user to drag files"""
if mimedata2url(event.mimeData()):
event.accept() # depends on [control=['if'], data=[]]
else:
event.ignore()
|
def _execute_wakeup_tasks(self):
"""Executes wakeup tasks, should only be called from loop()"""
# Check the length of wakeup tasks first to avoid concurrent issues
size = len(self.wakeup_tasks)
for i in range(size):
self.wakeup_tasks[i]()
|
def function[_execute_wakeup_tasks, parameter[self]]:
constant[Executes wakeup tasks, should only be called from loop()]
variable[size] assign[=] call[name[len], parameter[name[self].wakeup_tasks]]
for taget[name[i]] in starred[call[name[range], parameter[name[size]]]] begin[:]
call[call[name[self].wakeup_tasks][name[i]], parameter[]]
|
keyword[def] identifier[_execute_wakeup_tasks] ( identifier[self] ):
literal[string]
identifier[size] = identifier[len] ( identifier[self] . identifier[wakeup_tasks] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] ):
identifier[self] . identifier[wakeup_tasks] [ identifier[i] ]()
|
def _execute_wakeup_tasks(self):
"""Executes wakeup tasks, should only be called from loop()"""
# Check the length of wakeup tasks first to avoid concurrent issues
size = len(self.wakeup_tasks)
for i in range(size):
self.wakeup_tasks[i]() # depends on [control=['for'], data=['i']]
|
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
|
def function[write, parameter[self, filepath, skip_unknown]]:
constant[Write the metadata fields to filepath.]
variable[fp] assign[=] call[name[codecs].open, parameter[name[filepath], constant[w]]]
<ast.Try object at 0x7da1b1de1c60>
|
keyword[def] identifier[write] ( identifier[self] , identifier[filepath] , identifier[skip_unknown] = keyword[False] ):
literal[string]
identifier[fp] = identifier[codecs] . identifier[open] ( identifier[filepath] , literal[string] , identifier[encoding] = literal[string] )
keyword[try] :
identifier[self] . identifier[write_file] ( identifier[fp] , identifier[skip_unknown] )
keyword[finally] :
identifier[fp] . identifier[close] ()
|
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown) # depends on [control=['try'], data=[]]
finally:
fp.close()
|
def getLinkProperties(self, wanInterfaceId=1, timeout=1):
"""Execute GetCommonLinkProperties action to get WAN link properties.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN link properties
:rtype: WanLinkProperties
"""
namespace = Wan.getServiceType("getLinkProperties") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetCommonLinkProperties", timeout=timeout)
return WanLinkProperties(results)
|
def function[getLinkProperties, parameter[self, wanInterfaceId, timeout]]:
constant[Execute GetCommonLinkProperties action to get WAN link properties.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN link properties
:rtype: WanLinkProperties
]
variable[namespace] assign[=] binary_operation[call[name[Wan].getServiceType, parameter[constant[getLinkProperties]]] + call[name[str], parameter[name[wanInterfaceId]]]]
variable[uri] assign[=] call[name[self].getControlURL, parameter[name[namespace]]]
variable[results] assign[=] call[name[self].execute, parameter[name[uri], name[namespace], constant[GetCommonLinkProperties]]]
return[call[name[WanLinkProperties], parameter[name[results]]]]
|
keyword[def] identifier[getLinkProperties] ( identifier[self] , identifier[wanInterfaceId] = literal[int] , identifier[timeout] = literal[int] ):
literal[string]
identifier[namespace] = identifier[Wan] . identifier[getServiceType] ( literal[string] )+ identifier[str] ( identifier[wanInterfaceId] )
identifier[uri] = identifier[self] . identifier[getControlURL] ( identifier[namespace] )
identifier[results] = identifier[self] . identifier[execute] ( identifier[uri] , identifier[namespace] , literal[string] , identifier[timeout] = identifier[timeout] )
keyword[return] identifier[WanLinkProperties] ( identifier[results] )
|
def getLinkProperties(self, wanInterfaceId=1, timeout=1):
"""Execute GetCommonLinkProperties action to get WAN link properties.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN link properties
:rtype: WanLinkProperties
"""
namespace = Wan.getServiceType('getLinkProperties') + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, 'GetCommonLinkProperties', timeout=timeout)
return WanLinkProperties(results)
|
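A usage sketch for getLinkProperties; the Wan constructor and device address are assumptions, not shown in the code above:

wan = Wan(hostname='192.168.178.1')   # hypothetical constructor and address
props = wan.getLinkProperties(wanInterfaceId=1, timeout=2)
print(props)                          # a WanLinkProperties instance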
def patch_namespaced_service_status(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_service_status # noqa: E501
partially update status of the specified Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_service_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Service
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data
|
def function[patch_namespaced_service_status, parameter[self, name, namespace, body]]:
constant[patch_namespaced_service_status # noqa: E501
partially update status of the specified Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_service_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Service
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].patch_namespaced_service_status_with_http_info, parameter[name[name], name[namespace], name[body]]]]
|
keyword[def] identifier[patch_namespaced_service_status] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[patch_namespaced_service_status_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[patch_namespaced_service_status_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def patch_namespaced_service_status(self, name, namespace, body, **kwargs): # noqa: E501
    """patch_namespaced_service_status # noqa: E501

    partially update status of the specified Service # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_service_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Service (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Service
             If the method is called asynchronously,
             returns the request thread.
    """
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.patch_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data
|
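A sketch of invoking this endpoint through the Kubernetes Python client; the service name, namespace, and status payload are illustrative:

from kubernetes import client, config

config.load_kube_config()   # or config.load_incluster_config() inside a pod
v1 = client.CoreV1Api()
# Example status payload; real fields depend on the service type.
body = {'status': {'loadBalancer': {'ingress': [{'ip': '203.0.113.10'}]}}}
svc = v1.patch_namespaced_service_status('my-service', 'default', body)
print(svc.metadata.name)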
def save_task(self, task):
"""Save a task into TaskWarrior database using add/modify call"""
args = [task['uuid'], 'modify'] if task.saved else ['add']
args.extend(self._get_modified_task_fields_as_args(task))
output = self.execute_command(args)
# Parse out the new ID, if the task is being added for the first time
if not task.saved:
id_lines = [l for l in output if l.startswith('Created task ')]
# Complain loudly if it seems that more tasks were created
# Should not happen.
# Expected output: Created task 1.
# Created task 1 (recurrence template).
if len(id_lines) != 1 or len(id_lines[0].split(' ')) not in (3, 5):
raise TaskWarriorException(
'Unexpected output when creating '
'task: %s' % '\n'.join(id_lines),
)
# Circumvent the ID storage, since ID is considered read-only
identifier = id_lines[0].split(' ')[2].rstrip('.')
# Identifier can be either ID or UUID for completed tasks
try:
task._data['id'] = int(identifier)
except ValueError:
task._data['uuid'] = identifier
# Refreshing is very important here, as not only modification time
        # is updated, but arbitrary attributes may have changed due to hooks
# altering the data before saving
task.refresh(after_save=True)
|
def function[save_task, parameter[self, task]]:
constant[Save a task into TaskWarrior database using add/modify call]
variable[args] assign[=] <ast.IfExp object at 0x7da1b05481f0>
call[name[args].extend, parameter[call[name[self]._get_modified_task_fields_as_args, parameter[name[task]]]]]
variable[output] assign[=] call[name[self].execute_command, parameter[name[args]]]
if <ast.UnaryOp object at 0x7da1b05be890> begin[:]
variable[id_lines] assign[=] <ast.ListComp object at 0x7da1b05ff2b0>
if <ast.BoolOp object at 0x7da1b05ff220> begin[:]
<ast.Raise object at 0x7da1b05ffc70>
variable[identifier] assign[=] call[call[call[call[name[id_lines]][constant[0]].split, parameter[constant[ ]]]][constant[2]].rstrip, parameter[constant[.]]]
<ast.Try object at 0x7da1b05fcbb0>
call[name[task].refresh, parameter[]]
|
keyword[def] identifier[save_task] ( identifier[self] , identifier[task] ):
literal[string]
identifier[args] =[ identifier[task] [ literal[string] ], literal[string] ] keyword[if] identifier[task] . identifier[saved] keyword[else] [ literal[string] ]
identifier[args] . identifier[extend] ( identifier[self] . identifier[_get_modified_task_fields_as_args] ( identifier[task] ))
identifier[output] = identifier[self] . identifier[execute_command] ( identifier[args] )
keyword[if] keyword[not] identifier[task] . identifier[saved] :
identifier[id_lines] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[output] keyword[if] identifier[l] . identifier[startswith] ( literal[string] )]
keyword[if] identifier[len] ( identifier[id_lines] )!= literal[int] keyword[or] identifier[len] ( identifier[id_lines] [ literal[int] ]. identifier[split] ( literal[string] )) keyword[not] keyword[in] ( literal[int] , literal[int] ):
keyword[raise] identifier[TaskWarriorException] (
literal[string]
literal[string] % literal[string] . identifier[join] ( identifier[id_lines] ),
)
identifier[identifier] = identifier[id_lines] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[rstrip] ( literal[string] )
keyword[try] :
identifier[task] . identifier[_data] [ literal[string] ]= identifier[int] ( identifier[identifier] )
keyword[except] identifier[ValueError] :
identifier[task] . identifier[_data] [ literal[string] ]= identifier[identifier]
identifier[task] . identifier[refresh] ( identifier[after_save] = keyword[True] )
|
def save_task(self, task):
"""Save a task into TaskWarrior database using add/modify call"""
args = [task['uuid'], 'modify'] if task.saved else ['add']
args.extend(self._get_modified_task_fields_as_args(task))
output = self.execute_command(args)
# Parse out the new ID, if the task is being added for the first time
if not task.saved:
id_lines = [l for l in output if l.startswith('Created task ')]
# Complain loudly if it seems that more tasks were created
# Should not happen.
# Expected output: Created task 1.
# Created task 1 (recurrence template).
if len(id_lines) != 1 or len(id_lines[0].split(' ')) not in (3, 5):
raise TaskWarriorException('Unexpected output when creating task: %s' % '\n'.join(id_lines)) # depends on [control=['if'], data=[]]
# Circumvent the ID storage, since ID is considered read-only
identifier = id_lines[0].split(' ')[2].rstrip('.')
# Identifier can be either ID or UUID for completed tasks
try:
task._data['id'] = int(identifier) # depends on [control=['try'], data=[]]
except ValueError:
task._data['uuid'] = identifier # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Refreshing is very important here, as not only modification time
    # is updated, but arbitrary attributes may have changed due to hooks
# altering the data before saving
task.refresh(after_save=True)
|
def prepare_rsem_reference(gtf, multifasta, build):
"""
gtf: path to GTF file (must have gene_id and transcript_id)
multifasta: path to multifasta file
build: name of organism build (e.g. hg19)
"""
if not utils.which("rsem-prepare-reference"):
logger.info("Skipping prepping RSEM reference because "
"rsem-prepare-reference could not be found.")
return None
command = PREPARE_REFERENCE.format(gtf=gtf, multifasta=multifasta,
build=build)
with transaction.tx_tmpdir(remove=False) as rsem_genome_dir:
with utils.chdir(rsem_genome_dir):
message = "Preparing rsem reference from %s" % gtf
do.run(command, message)
return rsem_genome_dir
|
def function[prepare_rsem_reference, parameter[gtf, multifasta, build]]:
constant[
gtf: path to GTF file (must have gene_id and transcript_id)
multifasta: path to multifasta file
build: name of organism build (e.g. hg19)
]
if <ast.UnaryOp object at 0x7da1b1881540> begin[:]
call[name[logger].info, parameter[constant[Skipping prepping RSEM reference because rsem-prepare-reference could not be found.]]]
return[constant[None]]
variable[command] assign[=] call[name[PREPARE_REFERENCE].format, parameter[]]
with call[name[transaction].tx_tmpdir, parameter[]] begin[:]
with call[name[utils].chdir, parameter[name[rsem_genome_dir]]] begin[:]
variable[message] assign[=] binary_operation[constant[Preparing rsem reference from %s] <ast.Mod object at 0x7da2590d6920> name[gtf]]
call[name[do].run, parameter[name[command], name[message]]]
return[name[rsem_genome_dir]]
|
keyword[def] identifier[prepare_rsem_reference] ( identifier[gtf] , identifier[multifasta] , identifier[build] ):
literal[string]
keyword[if] keyword[not] identifier[utils] . identifier[which] ( literal[string] ):
identifier[logger] . identifier[info] ( literal[string]
literal[string] )
keyword[return] keyword[None]
identifier[command] = identifier[PREPARE_REFERENCE] . identifier[format] ( identifier[gtf] = identifier[gtf] , identifier[multifasta] = identifier[multifasta] ,
identifier[build] = identifier[build] )
keyword[with] identifier[transaction] . identifier[tx_tmpdir] ( identifier[remove] = keyword[False] ) keyword[as] identifier[rsem_genome_dir] :
keyword[with] identifier[utils] . identifier[chdir] ( identifier[rsem_genome_dir] ):
identifier[message] = literal[string] % identifier[gtf]
identifier[do] . identifier[run] ( identifier[command] , identifier[message] )
keyword[return] identifier[rsem_genome_dir]
|
def prepare_rsem_reference(gtf, multifasta, build):
"""
gtf: path to GTF file (must have gene_id and transcript_id)
multifasta: path to multifasta file
build: name of organism build (e.g. hg19)
"""
if not utils.which('rsem-prepare-reference'):
logger.info('Skipping prepping RSEM reference because rsem-prepare-reference could not be found.')
return None # depends on [control=['if'], data=[]]
command = PREPARE_REFERENCE.format(gtf=gtf, multifasta=multifasta, build=build)
with transaction.tx_tmpdir(remove=False) as rsem_genome_dir:
with utils.chdir(rsem_genome_dir):
message = 'Preparing rsem reference from %s' % gtf
do.run(command, message) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['rsem_genome_dir']]
return rsem_genome_dir
|
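A minimal call sketch for prepare_rsem_reference; the file paths and build name are placeholders:

# Assumes rsem-prepare-reference is on PATH; the function returns None otherwise.
rsem_dir = prepare_rsem_reference('annotation.gtf', 'genome.fa', 'hg19')
if rsem_dir is not None:
    print('RSEM reference written to', rsem_dir)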
def _par_vector2dict(v, pars, dims, starts=None):
"""Turn a vector of samples into an OrderedDict according to param dims.
Parameters
----------
    v : list of int or float
pars : list of str
parameter names
dims : list of list of int
list of dimensions of parameters
Returns
-------
d : dict
Examples
--------
>>> v = list(range(31))
>>> dims = [[5], [5, 5], []]
>>> pars = ['mu', 'Phi', 'eta']
>>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS
OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ...
"""
if starts is None:
starts = _calc_starts(dims)
d = OrderedDict()
for i in range(len(pars)):
l = int(np.prod(dims[i]))
start = starts[i]
end = start + l
y = np.asarray(v[start:end])
if len(dims[i]) > 1:
y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major
d[pars[i]] = y.squeeze() if y.shape == (1,) else y
return d
|
def function[_par_vector2dict, parameter[v, pars, dims, starts]]:
constant[Turn a vector of samples into an OrderedDict according to param dims.
Parameters
----------
    v : list of int or float
pars : list of str
parameter names
dims : list of list of int
list of dimensions of parameters
Returns
-------
d : dict
Examples
--------
>>> v = list(range(31))
>>> dims = [[5], [5, 5], []]
>>> pars = ['mu', 'Phi', 'eta']
>>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS
OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ...
]
if compare[name[starts] is constant[None]] begin[:]
variable[starts] assign[=] call[name[_calc_starts], parameter[name[dims]]]
variable[d] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[pars]]]]]] begin[:]
variable[l] assign[=] call[name[int], parameter[call[name[np].prod, parameter[call[name[dims]][name[i]]]]]]
variable[start] assign[=] call[name[starts]][name[i]]
variable[end] assign[=] binary_operation[name[start] + name[l]]
variable[y] assign[=] call[name[np].asarray, parameter[call[name[v]][<ast.Slice object at 0x7da2044c3430>]]]
if compare[call[name[len], parameter[call[name[dims]][name[i]]]] greater[>] constant[1]] begin[:]
variable[y] assign[=] call[name[y].reshape, parameter[call[name[dims]][name[i]]]]
call[name[d]][call[name[pars]][name[i]]] assign[=] <ast.IfExp object at 0x7da1b1de04f0>
return[name[d]]
|
keyword[def] identifier[_par_vector2dict] ( identifier[v] , identifier[pars] , identifier[dims] , identifier[starts] = keyword[None] ):
literal[string]
keyword[if] identifier[starts] keyword[is] keyword[None] :
identifier[starts] = identifier[_calc_starts] ( identifier[dims] )
identifier[d] = identifier[OrderedDict] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[pars] )):
identifier[l] = identifier[int] ( identifier[np] . identifier[prod] ( identifier[dims] [ identifier[i] ]))
identifier[start] = identifier[starts] [ identifier[i] ]
identifier[end] = identifier[start] + identifier[l]
identifier[y] = identifier[np] . identifier[asarray] ( identifier[v] [ identifier[start] : identifier[end] ])
keyword[if] identifier[len] ( identifier[dims] [ identifier[i] ])> literal[int] :
identifier[y] = identifier[y] . identifier[reshape] ( identifier[dims] [ identifier[i] ], identifier[order] = literal[string] )
identifier[d] [ identifier[pars] [ identifier[i] ]]= identifier[y] . identifier[squeeze] () keyword[if] identifier[y] . identifier[shape] ==( literal[int] ,) keyword[else] identifier[y]
keyword[return] identifier[d]
|
def _par_vector2dict(v, pars, dims, starts=None):
"""Turn a vector of samples into an OrderedDict according to param dims.
Parameters
----------
    v : list of int or float
pars : list of str
parameter names
dims : list of list of int
list of dimensions of parameters
Returns
-------
d : dict
Examples
--------
>>> v = list(range(31))
>>> dims = [[5], [5, 5], []]
>>> pars = ['mu', 'Phi', 'eta']
>>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS
OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ...
"""
if starts is None:
starts = _calc_starts(dims) # depends on [control=['if'], data=['starts']]
d = OrderedDict()
for i in range(len(pars)):
l = int(np.prod(dims[i]))
start = starts[i]
end = start + l
y = np.asarray(v[start:end])
if len(dims[i]) > 1:
y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major # depends on [control=['if'], data=[]]
d[pars[i]] = y.squeeze() if y.shape == (1,) else y # depends on [control=['for'], data=['i']]
return d
|
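A worked example of the column-major ('F') reshape in _par_vector2dict, with the start offsets passed explicitly so the _calc_starts helper is not needed:

v = list(range(31))
d = _par_vector2dict(v, ['mu', 'Phi', 'eta'], [[5], [5, 5], []], starts=[0, 5, 30])
print(d['Phi'][0, 1])   # 10 -- column-major, so entries 5..9 fill the first column
print(d['eta'])         # 30 -- shape-(1,) parameters are squeezed to a scalar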
def returnOneIndex(self, last=False):
'''Return the first origin index (integer) of the current list. That
        index refers to its placement in the original list of dictionaries.
This is very useful when one wants to reference the original entry by
index.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnOneIndex()
0
>>> print PLOD(test).sort("name").returnOneIndex()
3
:param last:
The last origin of the current list is returned rather than the first.
:return:
An integer representing the original placement of the first item in
the list. Returns None if the list is currently empty.
'''
if len(self.table)==0:
return None
else:
if last:
return self.index_track.pop()
else:
return self.index_track[0]
|
def function[returnOneIndex, parameter[self, last]]:
constant[Return the first origin index (integer) of the current list. That
    index refers to its placement in the original list of dictionaries.
This is very useful when one wants to reference the original entry by
index.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnOneIndex()
0
>>> print PLOD(test).sort("name").returnOneIndex()
3
:param last:
The last origin of the current list is returned rather than the first.
:return:
An integer representing the original placement of the first item in
the list. Returns None if the list is currently empty.
]
if compare[call[name[len], parameter[name[self].table]] equal[==] constant[0]] begin[:]
return[constant[None]]
|
keyword[def] identifier[returnOneIndex] ( identifier[self] , identifier[last] = keyword[False] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[table] )== literal[int] :
keyword[return] keyword[None]
keyword[else] :
keyword[if] identifier[last] :
keyword[return] identifier[self] . identifier[index_track] . identifier[pop] ()
keyword[else] :
keyword[return] identifier[self] . identifier[index_track] [ literal[int] ]
|
def returnOneIndex(self, last=False):
"""Return the first origin index (integer) of the current list. That
        index refers to its placement in the original list of dictionaries.
This is very useful when one wants to reference the original entry by
index.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnOneIndex()
0
>>> print PLOD(test).sort("name").returnOneIndex()
3
:param last:
The last origin of the current list is returned rather than the first.
:return:
An integer representing the original placement of the first item in
the list. Returns None if the list is currently empty.
"""
if len(self.table) == 0:
return None # depends on [control=['if'], data=[]]
elif last:
return self.index_track.pop() # depends on [control=['if'], data=[]]
else:
return self.index_track[0]
|
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
|
def function[autoscale, parameter[self]]:
constant[
Sets the view limits to the nearest multiples of base that contain the
data.
]
<ast.Tuple object at 0x7da18ede5330> assign[=] call[name[self].axis.get_data_interval, parameter[]]
variable[locs] assign[=] call[name[self]._get_default_locs, parameter[name[vmin], name[vmax]]]
<ast.Tuple object at 0x7da18f722530> assign[=] call[name[locs]][list[[<ast.Constant object at 0x7da18f721300>, <ast.UnaryOp object at 0x7da18f721360>]]]
if compare[name[vmin] equal[==] name[vmax]] begin[:]
<ast.AugAssign object at 0x7da18f723d00>
<ast.AugAssign object at 0x7da18f721cc0>
return[call[name[nonsingular], parameter[name[vmin], name[vmax]]]]
|
keyword[def] identifier[autoscale] ( identifier[self] ):
literal[string]
( identifier[vmin] , identifier[vmax] )= identifier[self] . identifier[axis] . identifier[get_data_interval] ()
identifier[locs] = identifier[self] . identifier[_get_default_locs] ( identifier[vmin] , identifier[vmax] )
( identifier[vmin] , identifier[vmax] )= identifier[locs] [[ literal[int] ,- literal[int] ]]
keyword[if] identifier[vmin] == identifier[vmax] :
identifier[vmin] -= literal[int]
identifier[vmax] += literal[int]
keyword[return] identifier[nonsingular] ( identifier[vmin] , identifier[vmax] )
|
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1 # depends on [control=['if'], data=['vmin', 'vmax']]
return nonsingular(vmin, vmax)
|
def edit_dataset_metadata(request, dataset_id=None):
"""Renders a template to upload or edit a Dataset.
Most of the heavy lifting is done by add_dataset(...).
"""
if request.method == 'POST':
return add_dataset(request, dataset_id)
elif request.method == 'GET':
# create a blank form
# Edit
if dataset_id:
metadata_form = DatasetUploadForm(
instance=get_object_or_404(Dataset, pk=dataset_id)
)
# Upload
else:
metadata_form = DatasetUploadForm()
return render(
request,
'datafreezer/upload.html',
{
'fileUploadForm': metadata_form,
}
)
|
def function[edit_dataset_metadata, parameter[request, dataset_id]]:
constant[Renders a template to upload or edit a Dataset.
Most of the heavy lifting is done by add_dataset(...).
]
if compare[name[request].method equal[==] constant[POST]] begin[:]
return[call[name[add_dataset], parameter[name[request], name[dataset_id]]]]
|
keyword[def] identifier[edit_dataset_metadata] ( identifier[request] , identifier[dataset_id] = keyword[None] ):
literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
keyword[return] identifier[add_dataset] ( identifier[request] , identifier[dataset_id] )
keyword[elif] identifier[request] . identifier[method] == literal[string] :
keyword[if] identifier[dataset_id] :
identifier[metadata_form] = identifier[DatasetUploadForm] (
identifier[instance] = identifier[get_object_or_404] ( identifier[Dataset] , identifier[pk] = identifier[dataset_id] )
)
keyword[else] :
identifier[metadata_form] = identifier[DatasetUploadForm] ()
keyword[return] identifier[render] (
identifier[request] ,
literal[string] ,
{
literal[string] : identifier[metadata_form] ,
}
)
|
def edit_dataset_metadata(request, dataset_id=None):
"""Renders a template to upload or edit a Dataset.
Most of the heavy lifting is done by add_dataset(...).
"""
if request.method == 'POST':
return add_dataset(request, dataset_id) # depends on [control=['if'], data=[]]
elif request.method == 'GET':
# create a blank form
# Edit
if dataset_id:
metadata_form = DatasetUploadForm(instance=get_object_or_404(Dataset, pk=dataset_id)) # depends on [control=['if'], data=[]]
else:
# Upload
metadata_form = DatasetUploadForm()
return render(request, 'datafreezer/upload.html', {'fileUploadForm': metadata_form}) # depends on [control=['if'], data=[]]
|
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
|
def function[create, parameter[self, name, plugin_data_dir, gzip]]:
constant[
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
]
call[name[self].client.api.create_plugin, parameter[name[name], name[plugin_data_dir], name[gzip]]]
return[call[name[self].get, parameter[name[name]]]]
|
keyword[def] identifier[create] ( identifier[self] , identifier[name] , identifier[plugin_data_dir] , identifier[gzip] = keyword[False] ):
literal[string]
identifier[self] . identifier[client] . identifier[api] . identifier[create_plugin] ( identifier[name] , identifier[plugin_data_dir] , identifier[gzip] )
keyword[return] identifier[self] . identifier[get] ( identifier[name] )
|
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
|
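A usage sketch with the Docker SDK for Python; the plugin name and data directory are placeholders:

import docker

client = docker.from_env()
# The directory must contain config.json and a rootfs/ subdirectory.
plugin = client.plugins.create('myorg/sample-plugin:latest', '/path/to/plugin-data')
print(plugin.name)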
def project(self, term, **kwargs):
"""Search for a project by id.
Args:
term (str): Term to search for.
            kwargs (dict): additional keyword arguments passed to
                requests.Session.get as the params keyword.
"""
params = kwargs
baseuri = self._BASE_URI + 'projects/' + term
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res
|
def function[project, parameter[self, term]]:
constant[Search for a project by id.
Args:
term (str): Term to search for.
    kwargs (dict): additional keyword arguments passed to
        requests.Session.get as the params keyword.
]
variable[params] assign[=] name[kwargs]
variable[baseuri] assign[=] binary_operation[binary_operation[name[self]._BASE_URI + constant[projects/]] + name[term]]
variable[res] assign[=] call[name[self].session.get, parameter[name[baseuri]]]
call[name[self].handle_http_error, parameter[name[res]]]
return[name[res]]
|
keyword[def] identifier[project] ( identifier[self] , identifier[term] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[kwargs]
identifier[baseuri] = identifier[self] . identifier[_BASE_URI] + literal[string] + identifier[term]
identifier[res] = identifier[self] . identifier[session] . identifier[get] ( identifier[baseuri] , identifier[params] = identifier[params] )
identifier[self] . identifier[handle_http_error] ( identifier[res] )
keyword[return] identifier[res]
|
def project(self, term, **kwargs):
"""Search for a project by id.
Args:
term (str): Term to search for.
            kwargs (dict): additional keyword arguments passed to
                requests.Session.get as the params keyword.
"""
params = kwargs
baseuri = self._BASE_URI + 'projects/' + term
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res
|
def get_bytes_to_image_callback(image_dims=(224, 224)):
"""Return a callback to process image bytes for ImageNet."""
from keras.preprocessing import image
import numpy as np
from PIL import Image
from io import BytesIO
def preprocess_image_bytes(data_bytes):
"""Process image bytes for ImageNet."""
try:
img = Image.open(BytesIO(data_bytes)) # open image
except OSError as e:
raise ValueError('Please provide a raw image')
img = img.resize(image_dims, Image.ANTIALIAS) # model requires 224x224 pixels
x = image.img_to_array(img) # convert image to numpy array
x = np.expand_dims(x, axis=0) # model expects dim 0 to be iterable across images
return x
return preprocess_image_bytes
|
def function[get_bytes_to_image_callback, parameter[image_dims]]:
constant[Return a callback to process image bytes for ImageNet.]
from relative_module[keras.preprocessing] import module[image]
import module[numpy] as alias[np]
from relative_module[PIL] import module[Image]
from relative_module[io] import module[BytesIO]
def function[preprocess_image_bytes, parameter[data_bytes]]:
constant[Process image bytes for ImageNet.]
<ast.Try object at 0x7da1b040b790>
variable[img] assign[=] call[name[img].resize, parameter[name[image_dims], name[Image].ANTIALIAS]]
variable[x] assign[=] call[name[image].img_to_array, parameter[name[img]]]
variable[x] assign[=] call[name[np].expand_dims, parameter[name[x]]]
return[name[x]]
return[name[preprocess_image_bytes]]
|
keyword[def] identifier[get_bytes_to_image_callback] ( identifier[image_dims] =( literal[int] , literal[int] )):
literal[string]
keyword[from] identifier[keras] . identifier[preprocessing] keyword[import] identifier[image]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[from] identifier[PIL] keyword[import] identifier[Image]
keyword[from] identifier[io] keyword[import] identifier[BytesIO]
keyword[def] identifier[preprocess_image_bytes] ( identifier[data_bytes] ):
literal[string]
keyword[try] :
identifier[img] = identifier[Image] . identifier[open] ( identifier[BytesIO] ( identifier[data_bytes] ))
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[img] = identifier[img] . identifier[resize] ( identifier[image_dims] , identifier[Image] . identifier[ANTIALIAS] )
identifier[x] = identifier[image] . identifier[img_to_array] ( identifier[img] )
identifier[x] = identifier[np] . identifier[expand_dims] ( identifier[x] , identifier[axis] = literal[int] )
keyword[return] identifier[x]
keyword[return] identifier[preprocess_image_bytes]
|
def get_bytes_to_image_callback(image_dims=(224, 224)):
"""Return a callback to process image bytes for ImageNet."""
from keras.preprocessing import image
import numpy as np
from PIL import Image
from io import BytesIO
def preprocess_image_bytes(data_bytes):
"""Process image bytes for ImageNet."""
try:
img = Image.open(BytesIO(data_bytes)) # open image # depends on [control=['try'], data=[]]
except OSError as e:
raise ValueError('Please provide a raw image') # depends on [control=['except'], data=[]]
img = img.resize(image_dims, Image.ANTIALIAS) # model requires 224x224 pixels
x = image.img_to_array(img) # convert image to numpy array
x = np.expand_dims(x, axis=0) # model expects dim 0 to be iterable across images
return x
return preprocess_image_bytes
|
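Typical use of the returned callback, feeding it raw image bytes; the file name is a placeholder:

preprocess = get_bytes_to_image_callback(image_dims=(224, 224))
with open('example.jpg', 'rb') as f:   # hypothetical image file
    x = preprocess(f.read())
print(x.shape)                          # (1, 224, 224, 3) for an RGB image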
def get_memberdef_nodes_and_signatures(self, node, kind):
"""Collects the memberdef nodes and corresponding signatures that
correspond to public function entries that are at most depth 2 deeper
than the current (compounddef) node. Returns a dictionary with
function signatures (what swig expects after the %feature directive)
as keys, and a list of corresponding memberdef nodes as values."""
sig_dict = {}
sig_prefix = ''
if kind in ('file', 'namespace'):
ns_node = node.getElementsByTagName('innernamespace')
if not ns_node and kind == 'namespace':
ns_node = node.getElementsByTagName('compoundname')
if ns_node:
sig_prefix = self.extract_text(ns_node[0]) + '::'
elif kind in ('class', 'struct'):
# Get the full function name.
cn_node = node.getElementsByTagName('compoundname')
sig_prefix = self.extract_text(cn_node[0]) + '::'
md_nodes = self.get_specific_subnodes(node, 'memberdef', recursive=2)
for n in md_nodes:
if n.attributes['prot'].value != 'public':
continue
if n.attributes['kind'].value in ['variable', 'typedef']:
continue
if not self.get_specific_subnodes(n, 'definition'):
continue
name = self.extract_text(self.get_specific_subnodes(n, 'name'))
if name[:8] == 'operator':
continue
sig = sig_prefix + name
if sig in sig_dict:
sig_dict[sig].append(n)
else:
sig_dict[sig] = [n]
return sig_dict
|
def function[get_memberdef_nodes_and_signatures, parameter[self, node, kind]]:
constant[Collects the memberdef nodes and corresponding signatures that
correspond to public function entries that are at most depth 2 deeper
than the current (compounddef) node. Returns a dictionary with
function signatures (what swig expects after the %feature directive)
as keys, and a list of corresponding memberdef nodes as values.]
variable[sig_dict] assign[=] dictionary[[], []]
variable[sig_prefix] assign[=] constant[]
if compare[name[kind] in tuple[[<ast.Constant object at 0x7da1b1608e80>, <ast.Constant object at 0x7da1b1608130>]]] begin[:]
variable[ns_node] assign[=] call[name[node].getElementsByTagName, parameter[constant[innernamespace]]]
if <ast.BoolOp object at 0x7da1b16097b0> begin[:]
variable[ns_node] assign[=] call[name[node].getElementsByTagName, parameter[constant[compoundname]]]
if name[ns_node] begin[:]
variable[sig_prefix] assign[=] binary_operation[call[name[self].extract_text, parameter[call[name[ns_node]][constant[0]]]] + constant[::]]
variable[md_nodes] assign[=] call[name[self].get_specific_subnodes, parameter[name[node], constant[memberdef]]]
for taget[name[n]] in starred[name[md_nodes]] begin[:]
if compare[call[name[n].attributes][constant[prot]].value not_equal[!=] constant[public]] begin[:]
continue
if compare[call[name[n].attributes][constant[kind]].value in list[[<ast.Constant object at 0x7da1b1505ff0>, <ast.Constant object at 0x7da1b1506f50>]]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b1505420> begin[:]
continue
variable[name] assign[=] call[name[self].extract_text, parameter[call[name[self].get_specific_subnodes, parameter[name[n], constant[name]]]]]
if compare[call[name[name]][<ast.Slice object at 0x7da1b1505c30>] equal[==] constant[operator]] begin[:]
continue
variable[sig] assign[=] binary_operation[name[sig_prefix] + name[name]]
if compare[name[sig] in name[sig_dict]] begin[:]
call[call[name[sig_dict]][name[sig]].append, parameter[name[n]]]
return[name[sig_dict]]
|
keyword[def] identifier[get_memberdef_nodes_and_signatures] ( identifier[self] , identifier[node] , identifier[kind] ):
literal[string]
identifier[sig_dict] ={}
identifier[sig_prefix] = literal[string]
keyword[if] identifier[kind] keyword[in] ( literal[string] , literal[string] ):
identifier[ns_node] = identifier[node] . identifier[getElementsByTagName] ( literal[string] )
keyword[if] keyword[not] identifier[ns_node] keyword[and] identifier[kind] == literal[string] :
identifier[ns_node] = identifier[node] . identifier[getElementsByTagName] ( literal[string] )
keyword[if] identifier[ns_node] :
identifier[sig_prefix] = identifier[self] . identifier[extract_text] ( identifier[ns_node] [ literal[int] ])+ literal[string]
keyword[elif] identifier[kind] keyword[in] ( literal[string] , literal[string] ):
identifier[cn_node] = identifier[node] . identifier[getElementsByTagName] ( literal[string] )
identifier[sig_prefix] = identifier[self] . identifier[extract_text] ( identifier[cn_node] [ literal[int] ])+ literal[string]
identifier[md_nodes] = identifier[self] . identifier[get_specific_subnodes] ( identifier[node] , literal[string] , identifier[recursive] = literal[int] )
keyword[for] identifier[n] keyword[in] identifier[md_nodes] :
keyword[if] identifier[n] . identifier[attributes] [ literal[string] ]. identifier[value] != literal[string] :
keyword[continue]
keyword[if] identifier[n] . identifier[attributes] [ literal[string] ]. identifier[value] keyword[in] [ literal[string] , literal[string] ]:
keyword[continue]
keyword[if] keyword[not] identifier[self] . identifier[get_specific_subnodes] ( identifier[n] , literal[string] ):
keyword[continue]
identifier[name] = identifier[self] . identifier[extract_text] ( identifier[self] . identifier[get_specific_subnodes] ( identifier[n] , literal[string] ))
keyword[if] identifier[name] [: literal[int] ]== literal[string] :
keyword[continue]
identifier[sig] = identifier[sig_prefix] + identifier[name]
keyword[if] identifier[sig] keyword[in] identifier[sig_dict] :
identifier[sig_dict] [ identifier[sig] ]. identifier[append] ( identifier[n] )
keyword[else] :
identifier[sig_dict] [ identifier[sig] ]=[ identifier[n] ]
keyword[return] identifier[sig_dict]
|
def get_memberdef_nodes_and_signatures(self, node, kind):
"""Collects the memberdef nodes and corresponding signatures that
correspond to public function entries that are at most depth 2 deeper
than the current (compounddef) node. Returns a dictionary with
function signatures (what swig expects after the %feature directive)
as keys, and a list of corresponding memberdef nodes as values."""
sig_dict = {}
sig_prefix = ''
if kind in ('file', 'namespace'):
ns_node = node.getElementsByTagName('innernamespace')
if not ns_node and kind == 'namespace':
ns_node = node.getElementsByTagName('compoundname') # depends on [control=['if'], data=[]]
if ns_node:
sig_prefix = self.extract_text(ns_node[0]) + '::' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kind']]
elif kind in ('class', 'struct'):
# Get the full function name.
cn_node = node.getElementsByTagName('compoundname')
sig_prefix = self.extract_text(cn_node[0]) + '::' # depends on [control=['if'], data=[]]
md_nodes = self.get_specific_subnodes(node, 'memberdef', recursive=2)
for n in md_nodes:
if n.attributes['prot'].value != 'public':
continue # depends on [control=['if'], data=[]]
if n.attributes['kind'].value in ['variable', 'typedef']:
continue # depends on [control=['if'], data=[]]
if not self.get_specific_subnodes(n, 'definition'):
continue # depends on [control=['if'], data=[]]
name = self.extract_text(self.get_specific_subnodes(n, 'name'))
if name[:8] == 'operator':
continue # depends on [control=['if'], data=[]]
sig = sig_prefix + name
if sig in sig_dict:
sig_dict[sig].append(n) # depends on [control=['if'], data=['sig', 'sig_dict']]
else:
sig_dict[sig] = [n] # depends on [control=['for'], data=['n']]
return sig_dict
|
def factory_reset(self, ids, except_ids=False, except_baudrate_and_ids=False):
""" Reset all motors on the bus to their factory default settings. """
mode = (0x02 if except_baudrate_and_ids else
0x01 if except_ids else 0xFF)
for id in ids:
try:
self._send_packet(self._protocol.DxlResetPacket(id, mode))
except (DxlTimeoutError, DxlCommunicationError):
pass
|
def function[factory_reset, parameter[self, ids, except_ids, except_baudrate_and_ids]]:
constant[ Reset all motors on the bus to their factory default settings. ]
variable[mode] assign[=] <ast.IfExp object at 0x7da1b1304ee0>
for taget[name[id]] in starred[name[ids]] begin[:]
<ast.Try object at 0x7da1b13066e0>
|
keyword[def] identifier[factory_reset] ( identifier[self] , identifier[ids] , identifier[except_ids] = keyword[False] , identifier[except_baudrate_and_ids] = keyword[False] ):
literal[string]
identifier[mode] =( literal[int] keyword[if] identifier[except_baudrate_and_ids] keyword[else]
literal[int] keyword[if] identifier[except_ids] keyword[else] literal[int] )
keyword[for] identifier[id] keyword[in] identifier[ids] :
keyword[try] :
identifier[self] . identifier[_send_packet] ( identifier[self] . identifier[_protocol] . identifier[DxlResetPacket] ( identifier[id] , identifier[mode] ))
keyword[except] ( identifier[DxlTimeoutError] , identifier[DxlCommunicationError] ):
keyword[pass]
|
def factory_reset(self, ids, except_ids=False, except_baudrate_and_ids=False):
""" Reset all motors on the bus to their factory default settings. """
mode = 2 if except_baudrate_and_ids else 1 if except_ids else 255
for id in ids:
try:
self._send_packet(self._protocol.DxlResetPacket(id, mode)) # depends on [control=['try'], data=[]]
except (DxlTimeoutError, DxlCommunicationError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['id']]
|
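A quick sketch of the mode-byte selection used by factory_reset; the flag combinations below exercise the same conditional expression and need no motor bus to verify:

# 0xFF = full factory reset, 0x01 = keep IDs, 0x02 = keep baudrate and IDs.
for except_ids, except_baudrate_and_ids in [(False, False), (True, False), (False, True)]:
    mode = (0x02 if except_baudrate_and_ids else
            0x01 if except_ids else 0xFF)
    print(except_ids, except_baudrate_and_ids, hex(mode))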
def get_event(self, name, default=_sentinel):
"""
Lookup an event by name.
        :param str name: Event name
:return Event: Event instance under key
"""
if name not in self.events:
if self.create_events_on_access:
self.add_event(name)
elif default is not _sentinel:
return default
return self.events[name]
|
def function[get_event, parameter[self, name, default]]:
constant[
Lookup an event by name.
        :param str name: Event name
:return Event: Event instance under key
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].events] begin[:]
if name[self].create_events_on_access begin[:]
call[name[self].add_event, parameter[name[name]]]
return[call[name[self].events][name[name]]]
|
keyword[def] identifier[get_event] ( identifier[self] , identifier[name] , identifier[default] = identifier[_sentinel] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[events] :
keyword[if] identifier[self] . identifier[create_events_on_access] :
identifier[self] . identifier[add_event] ( identifier[name] )
keyword[elif] identifier[default] keyword[is] keyword[not] identifier[_sentinel] :
keyword[return] identifier[default]
keyword[return] identifier[self] . identifier[events] [ identifier[name] ]
|
def get_event(self, name, default=_sentinel):
"""
Lookup an event by name.
    :param str name: Event name
:return Event: Event instance under key
"""
if name not in self.events:
if self.create_events_on_access:
self.add_event(name) # depends on [control=['if'], data=[]]
elif default is not _sentinel:
return default # depends on [control=['if'], data=['default']] # depends on [control=['if'], data=['name']]
return self.events[name]
|
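A self-contained sketch of the three lookup outcomes (existing event, auto-created event, default fallback); _Registry is a hypothetical stand-in for the real event container:

_sentinel = object()

class _Registry:
    def __init__(self, create_events_on_access=False):
        self.events = {}
        self.create_events_on_access = create_events_on_access

    def add_event(self, name):
        self.events[name] = 'Event({!r})'.format(name)  # placeholder Event

    def get_event(self, name, default=_sentinel):
        if name not in self.events:
            if self.create_events_on_access:
                self.add_event(name)
            elif default is not _sentinel:
                return default
        return self.events[name]

r = _Registry(create_events_on_access=True)
print(r.get_event('started'))           # auto-created on first access
print(_Registry().get_event('x', 42))   # missing, falls back to the default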
def materials(self):
"""
Property for accessing :class:`MaterialManager` instance, which is used to manage materials.
:rtype: yagocd.resources.material.MaterialManager
"""
if self._material_manager is None:
self._material_manager = MaterialManager(session=self._session)
return self._material_manager
|
def function[materials, parameter[self]]:
constant[
Property for accessing :class:`MaterialManager` instance, which is used to manage materials.
:rtype: yagocd.resources.material.MaterialManager
]
if compare[name[self]._material_manager is constant[None]] begin[:]
name[self]._material_manager assign[=] call[name[MaterialManager], parameter[]]
return[name[self]._material_manager]
|
keyword[def] identifier[materials] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_material_manager] keyword[is] keyword[None] :
identifier[self] . identifier[_material_manager] = identifier[MaterialManager] ( identifier[session] = identifier[self] . identifier[_session] )
keyword[return] identifier[self] . identifier[_material_manager]
|
def materials(self):
"""
Property for accessing :class:`MaterialManager` instance, which is used to manage materials.
:rtype: yagocd.resources.material.MaterialManager
"""
if self._material_manager is None:
self._material_manager = MaterialManager(session=self._session) # depends on [control=['if'], data=[]]
return self._material_manager
|
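The property above follows a plain lazy-initialisation pattern; a minimal sketch with a stand-in manager object shows that repeated access returns the one cached instance:

class _Client:
    def __init__(self):
        self._material_manager = None

    @property
    def materials(self):
        if self._material_manager is None:
            self._material_manager = object()  # stands in for MaterialManager
        return self._material_manager

c = _Client()
print(c.materials is c.materials)  # True: built once, cached afterwards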
def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
log.debug('Indexing objects {0}-{1}. [{2}]'.format(
ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj))
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(
obj, repr(exc)))
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index)
|
def function[index_objects, parameter[mapping_type, ids, chunk_size, es, index]]:
constant[Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
]
if name[settings].ES_DISABLED begin[:]
return[None]
call[name[log].debug, parameter[call[constant[Indexing objects {0}-{1}. [{2}]].format, parameter[call[name[ids]][constant[0]], call[name[ids]][<ast.UnaryOp object at 0x7da207f030d0>], call[name[len], parameter[name[ids]]]]]]]
variable[model] assign[=] call[name[mapping_type].get_model, parameter[]]
for taget[name[id_list]] in starred[call[name[chunked], parameter[name[ids], name[chunk_size]]]] begin[:]
variable[documents] assign[=] list[[]]
for taget[name[obj]] in starred[call[name[model].objects.filter, parameter[]]] begin[:]
<ast.Try object at 0x7da1b10e5f60>
if name[documents] begin[:]
call[name[mapping_type].bulk_index, parameter[name[documents]]]
|
keyword[def] identifier[index_objects] ( identifier[mapping_type] , identifier[ids] , identifier[chunk_size] = literal[int] , identifier[es] = keyword[None] , identifier[index] = keyword[None] ):
literal[string]
keyword[if] identifier[settings] . identifier[ES_DISABLED] :
keyword[return]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] (
identifier[ids] [ literal[int] ], identifier[ids] [- literal[int] ], identifier[len] ( identifier[ids] )))
identifier[model] = identifier[mapping_type] . identifier[get_model] ()
keyword[for] identifier[id_list] keyword[in] identifier[chunked] ( identifier[ids] , identifier[chunk_size] ):
identifier[documents] =[]
keyword[for] identifier[obj] keyword[in] identifier[model] . identifier[objects] . identifier[filter] ( identifier[id__in] = identifier[id_list] ):
keyword[try] :
identifier[documents] . identifier[append] ( identifier[mapping_type] . identifier[extract_document] ( identifier[obj] . identifier[id] , identifier[obj] ))
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[log] . identifier[exception] ( literal[string] . identifier[format] (
identifier[obj] , identifier[repr] ( identifier[exc] )))
keyword[if] identifier[documents] :
identifier[mapping_type] . identifier[bulk_index] ( identifier[documents] , identifier[id_field] = literal[string] , identifier[es] = identifier[es] , identifier[index] = identifier[index] )
|
def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return # depends on [control=['if'], data=[]]
log.debug('Indexing objects {0}-{1}. [{2}]'.format(ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj)) # depends on [control=['try'], data=[]]
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(obj, repr(exc))) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['obj']]
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['id_list']]
|
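A hedged sketch of the chunk-then-bulk pattern that index_objects relies on; `chunked` below is a local helper written for the example, not necessarily the helper the library imports:

def chunked(iterable, n):
    items = list(iterable)
    for i in range(0, len(items), n):
        yield items[i:i + n]

ids = list(range(1, 251))
for id_list in chunked(ids, 100):
    documents = [{'id': i} for i in id_list]  # stands in for extract_document
    print('bulk indexing', len(documents), 'docs')  # stands in for bulk_index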
def params(self):
""" Return a *copy* (we hope) of the parameters.
DANGER: Altering properties directly doesn't call model._cache
"""
params = odict([])
for key,model in self.models.items():
params.update(model.params)
return params
|
def function[params, parameter[self]]:
constant[ Return a *copy* (we hope) of the parameters.
DANGER: Altering properties directly doesn't call model._cache
]
variable[params] assign[=] call[name[odict], parameter[list[[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b232f970>, <ast.Name object at 0x7da1b232f7f0>]]] in starred[call[name[self].models.items, parameter[]]] begin[:]
call[name[params].update, parameter[name[model].params]]
return[name[params]]
|
keyword[def] identifier[params] ( identifier[self] ):
literal[string]
identifier[params] = identifier[odict] ([])
keyword[for] identifier[key] , identifier[model] keyword[in] identifier[self] . identifier[models] . identifier[items] ():
identifier[params] . identifier[update] ( identifier[model] . identifier[params] )
keyword[return] identifier[params]
|
def params(self):
""" Return a *copy* (we hope) of the parameters.
DANGER: Altering properties directly doesn't call model._cache
"""
params = odict([])
for (key, model) in self.models.items():
params.update(model.params) # depends on [control=['for'], data=[]]
return params
|
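The "copy (we hope)" caveat follows from dict.update semantics: when two models define the same parameter name, the later model wins. A sketch, assuming `odict` is an OrderedDict alias and using a made-up model class:

from collections import OrderedDict as odict

class _Model:
    def __init__(self, params):
        self.params = params

models = odict([('a', _Model({'x': 1})), ('b', _Model({'x': 2, 'y': 3}))])
params = odict([])
for key, model in models.items():
    params.update(model.params)
print(params)  # x is 2: the later model overwrote the earlier one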
def create(cls, name, ipv4_network=None, ipv6_network=None,
comment=None):
"""
Create the network element
:param str name: Name of element
:param str ipv4_network: network cidr (optional if ipv6)
:param str ipv6_network: network cidr (optional if ipv4)
:param str comment: comment (optional)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: Network
.. note:: Either an ipv4_network or ipv6_network must be specified
"""
ipv4_network = ipv4_network if ipv4_network else None
ipv6_network = ipv6_network if ipv6_network else None
json = {'name': name,
'ipv4_network': ipv4_network,
'ipv6_network': ipv6_network,
'comment': comment}
return ElementCreator(cls, json)
|
def function[create, parameter[cls, name, ipv4_network, ipv6_network, comment]]:
constant[
Create the network element
:param str name: Name of element
:param str ipv4_network: network cidr (optional if ipv6)
:param str ipv6_network: network cidr (optional if ipv4)
:param str comment: comment (optional)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: Network
.. note:: Either an ipv4_network or ipv6_network must be specified
]
variable[ipv4_network] assign[=] <ast.IfExp object at 0x7da1b1babc40>
variable[ipv6_network] assign[=] <ast.IfExp object at 0x7da1b1babbe0>
variable[json] assign[=] dictionary[[<ast.Constant object at 0x7da1b1babe20>, <ast.Constant object at 0x7da1b1ba89a0>, <ast.Constant object at 0x7da1b1baa9b0>, <ast.Constant object at 0x7da1b1baa890>], [<ast.Name object at 0x7da1b1ba94e0>, <ast.Name object at 0x7da1b1ba8bb0>, <ast.Name object at 0x7da1b1ba9ea0>, <ast.Name object at 0x7da1b1babbb0>]]
return[call[name[ElementCreator], parameter[name[cls], name[json]]]]
|
keyword[def] identifier[create] ( identifier[cls] , identifier[name] , identifier[ipv4_network] = keyword[None] , identifier[ipv6_network] = keyword[None] ,
identifier[comment] = keyword[None] ):
literal[string]
identifier[ipv4_network] = identifier[ipv4_network] keyword[if] identifier[ipv4_network] keyword[else] keyword[None]
identifier[ipv6_network] = identifier[ipv6_network] keyword[if] identifier[ipv6_network] keyword[else] keyword[None]
identifier[json] ={ literal[string] : identifier[name] ,
literal[string] : identifier[ipv4_network] ,
literal[string] : identifier[ipv6_network] ,
literal[string] : identifier[comment] }
keyword[return] identifier[ElementCreator] ( identifier[cls] , identifier[json] )
|
def create(cls, name, ipv4_network=None, ipv6_network=None, comment=None):
"""
Create the network element
:param str name: Name of element
:param str ipv4_network: network cidr (optional if ipv6)
:param str ipv6_network: network cidr (optional if ipv4)
:param str comment: comment (optional)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: Network
.. note:: Either an ipv4_network or ipv6_network must be specified
"""
ipv4_network = ipv4_network if ipv4_network else None
ipv6_network = ipv6_network if ipv6_network else None
json = {'name': name, 'ipv4_network': ipv4_network, 'ipv6_network': ipv6_network, 'comment': comment}
return ElementCreator(cls, json)
|
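The two normalisation lines above coerce any falsy value (notably the empty string) to an explicit None before the JSON payload is built; a one-liner check:

for ipv4_network in ('192.168.0.0/24', '', None):
    print(repr(ipv4_network if ipv4_network else None))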
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False,
with_superuser=True, accept_global_perms=True, perms_filter='pk__in'):
"""Return queryset with required permissions."""
if isinstance(perms, str):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError(
"Given perms must have same app label "
"({} != {})".format(app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError(
"ContentType was once computed to be {} and another "
"one {}".format(ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
elif klass is None:
raise WrongAppError("Cannot determine content type")
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model and perms_filter == 'pk__in':
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have `codenames` list
# First check if user is superuser and if so, return queryset immediately
if with_superuser and user.is_superuser:
return queryset
# Check if the user is anonymous. The
# django.contrib.auth.models.AnonymousUser object doesn't work for queries
# and it's nice to be able to pass in request.user blindly.
if user.is_anonymous:
user = get_anonymous_user()
global_perms = set()
has_global_perms = False
# a superuser has by default assigned global perms for any
if accept_global_perms and with_superuser:
for code in codenames:
if user.has_perm(ctype.app_label + '.' + code):
global_perms.add(code)
for code in global_perms:
codenames.remove(code)
# prerequisite: there must be elements in global_perms otherwise just
# follow the procedure for object based permissions only AND
# 1. codenames is empty, which means that permissions are ONLY set
# globally, therefore return the full queryset.
# OR
# 2. any_perm is True, then the global permission beats the object
# based permission anyway, therefore return full queryset
if global_perms and (not codenames or any_perm):
return queryset
# if we have global perms and still some object based perms differing
# from global perms and any_perm is set to false, then we have to flag
# that global perms exist in order to merge object based permissions by
# user and by group correctly. Scenario: global perm change_xx and
# object based perm delete_xx on object A for user, and object based
# permission delete_xx on object B for group, to which user is
# assigned.
# get_objects_for_user(user, [change_xx, delete_xx], use_groups=True,
# any_perm=False, accept_global_perms=True) must retrieve object A and
# B.
elif global_perms and codenames:
has_global_perms = True
# Now we should extract list of pk values for which we would filter
# queryset
user_model = get_user_obj_perms_model(queryset.model)
user_obj_perms_queryset = (user_model.objects
.filter(Q(user=user) | Q(user=get_anonymous_user()))
.filter(permission__content_type=ctype))
if codenames:
user_obj_perms_queryset = user_obj_perms_queryset.filter(
permission__codename__in=codenames)
direct_fields = ['content_object__pk', 'permission__codename']
generic_fields = ['object_pk', 'permission__codename']
if user_model.objects.is_generic():
user_fields = generic_fields
else:
user_fields = direct_fields
if use_groups:
group_model = get_group_obj_perms_model(queryset.model)
group_filters = {
'permission__content_type': ctype,
'group__{}'.format(get_user_model().groups.field.related_query_name()): user,
}
if codenames:
group_filters.update({
'permission__codename__in': codenames,
})
groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
if group_model.objects.is_generic():
group_fields = generic_fields
else:
group_fields = direct_fields
if not any_perm and codenames and not has_global_perms:
user_obj_perms = user_obj_perms_queryset.values_list(*user_fields)
groups_obj_perms = groups_obj_perms_queryset.values_list(*group_fields)
data = list(user_obj_perms) + list(groups_obj_perms)
# sorting/grouping by pk (first in result tuple)
data = sorted(data, key=lambda t: t[0])
pk_list = []
for pk, group in groupby(data, lambda t: t[0]):
obj_codenames = set((e[1] for e in group))
if codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(**{perms_filter: pk_list})
return objects
if not any_perm and len(codenames) > 1:
counts = user_obj_perms_queryset.values(
user_fields[0]).annotate(object_pk_count=Count(user_fields[0]))
user_obj_perms_queryset = counts.filter(
object_pk_count__gte=len(codenames))
values = user_obj_perms_queryset.values_list(user_fields[0], flat=True)
if user_model.objects.is_generic():
values = list(values)
query = Q(**{perms_filter: values})
if use_groups:
values = groups_obj_perms_queryset.values_list(group_fields[0], flat=True)
if group_model.objects.is_generic():
values = list(values)
query |= Q(**{perms_filter: values})
return queryset.filter(query)
|
def function[get_objects_for_user, parameter[user, perms, klass, use_groups, any_perm, with_superuser, accept_global_perms, perms_filter]]:
    constant[Return a queryset of objects for which ``user`` has all of the given permissions, or any of them when ``any_perm`` is True.]
if call[name[isinstance], parameter[name[perms], name[str]]] begin[:]
variable[perms] assign[=] list[[<ast.Name object at 0x7da1b1b68f70>]]
variable[ctype] assign[=] constant[None]
variable[app_label] assign[=] constant[None]
variable[codenames] assign[=] call[name[set], parameter[]]
for taget[name[perm]] in starred[name[perms]] begin[:]
if compare[constant[.] in name[perm]] begin[:]
<ast.Tuple object at 0x7da1b1b6a3e0> assign[=] call[name[perm].split, parameter[constant[.], constant[1]]]
if <ast.BoolOp object at 0x7da1b1b6ab60> begin[:]
<ast.Raise object at 0x7da1b1b69ea0>
call[name[codenames].add, parameter[name[codename]]]
if compare[name[app_label] is_not constant[None]] begin[:]
variable[new_ctype] assign[=] call[name[ContentType].objects.get, parameter[]]
if <ast.BoolOp object at 0x7da1b1b6afb0> begin[:]
<ast.Raise object at 0x7da1b1b683a0>
if <ast.BoolOp object at 0x7da1b1b68b20> begin[:]
variable[queryset] assign[=] call[name[_get_queryset], parameter[name[klass]]]
variable[ctype] assign[=] call[name[ContentType].objects.get_for_model, parameter[name[queryset].model]]
if <ast.BoolOp object at 0x7da1b1b68c70> begin[:]
return[name[queryset]]
if name[user].is_anonymous begin[:]
variable[user] assign[=] call[name[get_anonymous_user], parameter[]]
variable[global_perms] assign[=] call[name[set], parameter[]]
variable[has_global_perms] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1b68520> begin[:]
for taget[name[code]] in starred[name[codenames]] begin[:]
if call[name[user].has_perm, parameter[binary_operation[binary_operation[name[ctype].app_label + constant[.]] + name[code]]]] begin[:]
call[name[global_perms].add, parameter[name[code]]]
for taget[name[code]] in starred[name[global_perms]] begin[:]
call[name[codenames].remove, parameter[name[code]]]
if <ast.BoolOp object at 0x7da1b1b6be50> begin[:]
return[name[queryset]]
variable[user_model] assign[=] call[name[get_user_obj_perms_model], parameter[name[queryset].model]]
variable[user_obj_perms_queryset] assign[=] call[call[name[user_model].objects.filter, parameter[binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]]]].filter, parameter[]]
if name[codenames] begin[:]
variable[user_obj_perms_queryset] assign[=] call[name[user_obj_perms_queryset].filter, parameter[]]
variable[direct_fields] assign[=] list[[<ast.Constant object at 0x7da18dc06380>, <ast.Constant object at 0x7da18dc046d0>]]
variable[generic_fields] assign[=] list[[<ast.Constant object at 0x7da18dc05c60>, <ast.Constant object at 0x7da18dc06b90>]]
if call[name[user_model].objects.is_generic, parameter[]] begin[:]
variable[user_fields] assign[=] name[generic_fields]
if name[use_groups] begin[:]
variable[group_model] assign[=] call[name[get_group_obj_perms_model], parameter[name[queryset].model]]
variable[group_filters] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf910>, <ast.Call object at 0x7da18c4cc550>], [<ast.Name object at 0x7da18c4cc100>, <ast.Name object at 0x7da18c4cf220>]]
if name[codenames] begin[:]
call[name[group_filters].update, parameter[dictionary[[<ast.Constant object at 0x7da18c4cc250>], [<ast.Name object at 0x7da18c4cf970>]]]]
variable[groups_obj_perms_queryset] assign[=] call[name[group_model].objects.filter, parameter[]]
if call[name[group_model].objects.is_generic, parameter[]] begin[:]
variable[group_fields] assign[=] name[generic_fields]
if <ast.BoolOp object at 0x7da18c4cfac0> begin[:]
variable[user_obj_perms] assign[=] call[name[user_obj_perms_queryset].values_list, parameter[<ast.Starred object at 0x7da18c4cfd30>]]
variable[groups_obj_perms] assign[=] call[name[groups_obj_perms_queryset].values_list, parameter[<ast.Starred object at 0x7da18c4cf610>]]
variable[data] assign[=] binary_operation[call[name[list], parameter[name[user_obj_perms]]] + call[name[list], parameter[name[groups_obj_perms]]]]
variable[data] assign[=] call[name[sorted], parameter[name[data]]]
variable[pk_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cfc70>, <ast.Name object at 0x7da18c4cc2e0>]]] in starred[call[name[groupby], parameter[name[data], <ast.Lambda object at 0x7da18c4cddb0>]]] begin[:]
variable[obj_codenames] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b1adc310>]]
if call[name[codenames].issubset, parameter[name[obj_codenames]]] begin[:]
call[name[pk_list].append, parameter[name[pk]]]
variable[objects] assign[=] call[name[queryset].filter, parameter[]]
return[name[objects]]
if <ast.BoolOp object at 0x7da1b1adc190> begin[:]
variable[counts] assign[=] call[call[name[user_obj_perms_queryset].values, parameter[call[name[user_fields]][constant[0]]]].annotate, parameter[]]
variable[user_obj_perms_queryset] assign[=] call[name[counts].filter, parameter[]]
variable[values] assign[=] call[name[user_obj_perms_queryset].values_list, parameter[call[name[user_fields]][constant[0]]]]
if call[name[user_model].objects.is_generic, parameter[]] begin[:]
variable[values] assign[=] call[name[list], parameter[name[values]]]
variable[query] assign[=] call[name[Q], parameter[]]
if name[use_groups] begin[:]
variable[values] assign[=] call[name[groups_obj_perms_queryset].values_list, parameter[call[name[group_fields]][constant[0]]]]
if call[name[group_model].objects.is_generic, parameter[]] begin[:]
variable[values] assign[=] call[name[list], parameter[name[values]]]
<ast.AugAssign object at 0x7da1b1adc610>
return[call[name[queryset].filter, parameter[name[query]]]]
|
keyword[def] identifier[get_objects_for_user] ( identifier[user] , identifier[perms] , identifier[klass] = keyword[None] , identifier[use_groups] = keyword[True] , identifier[any_perm] = keyword[False] ,
identifier[with_superuser] = keyword[True] , identifier[accept_global_perms] = keyword[True] , identifier[perms_filter] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[perms] , identifier[str] ):
identifier[perms] =[ identifier[perms] ]
identifier[ctype] = keyword[None]
identifier[app_label] = keyword[None]
identifier[codenames] = identifier[set] ()
keyword[for] identifier[perm] keyword[in] identifier[perms] :
keyword[if] literal[string] keyword[in] identifier[perm] :
identifier[new_app_label] , identifier[codename] = identifier[perm] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[app_label] keyword[is] keyword[not] keyword[None] keyword[and] identifier[app_label] != identifier[new_app_label] :
keyword[raise] identifier[MixedContentTypeError] (
literal[string]
literal[string] . identifier[format] ( identifier[app_label] , identifier[new_app_label] ))
keyword[else] :
identifier[app_label] = identifier[new_app_label]
keyword[else] :
identifier[codename] = identifier[perm]
identifier[codenames] . identifier[add] ( identifier[codename] )
keyword[if] identifier[app_label] keyword[is] keyword[not] keyword[None] :
identifier[new_ctype] = identifier[ContentType] . identifier[objects] . identifier[get] ( identifier[app_label] = identifier[app_label] ,
identifier[permission__codename] = identifier[codename] )
keyword[if] identifier[ctype] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ctype] != identifier[new_ctype] :
keyword[raise] identifier[MixedContentTypeError] (
literal[string]
literal[string] . identifier[format] ( identifier[ctype] , identifier[new_ctype] ))
keyword[else] :
identifier[ctype] = identifier[new_ctype]
keyword[if] identifier[ctype] keyword[is] keyword[None] keyword[and] identifier[klass] keyword[is] keyword[not] keyword[None] :
identifier[queryset] = identifier[_get_queryset] ( identifier[klass] )
identifier[ctype] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[queryset] . identifier[model] )
keyword[elif] identifier[ctype] keyword[is] keyword[not] keyword[None] keyword[and] identifier[klass] keyword[is] keyword[None] :
identifier[queryset] = identifier[_get_queryset] ( identifier[ctype] . identifier[model_class] ())
keyword[elif] identifier[klass] keyword[is] keyword[None] :
keyword[raise] identifier[WrongAppError] ( literal[string] )
keyword[else] :
identifier[queryset] = identifier[_get_queryset] ( identifier[klass] )
keyword[if] identifier[ctype] . identifier[model_class] ()!= identifier[queryset] . identifier[model] keyword[and] identifier[perms_filter] == literal[string] :
keyword[raise] identifier[MixedContentTypeError] ( literal[string]
literal[string] )
keyword[if] identifier[with_superuser] keyword[and] identifier[user] . identifier[is_superuser] :
keyword[return] identifier[queryset]
keyword[if] identifier[user] . identifier[is_anonymous] :
identifier[user] = identifier[get_anonymous_user] ()
identifier[global_perms] = identifier[set] ()
identifier[has_global_perms] = keyword[False]
keyword[if] identifier[accept_global_perms] keyword[and] identifier[with_superuser] :
keyword[for] identifier[code] keyword[in] identifier[codenames] :
keyword[if] identifier[user] . identifier[has_perm] ( identifier[ctype] . identifier[app_label] + literal[string] + identifier[code] ):
identifier[global_perms] . identifier[add] ( identifier[code] )
keyword[for] identifier[code] keyword[in] identifier[global_perms] :
identifier[codenames] . identifier[remove] ( identifier[code] )
keyword[if] identifier[global_perms] keyword[and] ( keyword[not] identifier[codenames] keyword[or] identifier[any_perm] ):
keyword[return] identifier[queryset]
keyword[elif] identifier[global_perms] keyword[and] identifier[codenames] :
identifier[has_global_perms] = keyword[True]
identifier[user_model] = identifier[get_user_obj_perms_model] ( identifier[queryset] . identifier[model] )
identifier[user_obj_perms_queryset] =( identifier[user_model] . identifier[objects]
. identifier[filter] ( identifier[Q] ( identifier[user] = identifier[user] )| identifier[Q] ( identifier[user] = identifier[get_anonymous_user] ()))
. identifier[filter] ( identifier[permission__content_type] = identifier[ctype] ))
keyword[if] identifier[codenames] :
identifier[user_obj_perms_queryset] = identifier[user_obj_perms_queryset] . identifier[filter] (
identifier[permission__codename__in] = identifier[codenames] )
identifier[direct_fields] =[ literal[string] , literal[string] ]
identifier[generic_fields] =[ literal[string] , literal[string] ]
keyword[if] identifier[user_model] . identifier[objects] . identifier[is_generic] ():
identifier[user_fields] = identifier[generic_fields]
keyword[else] :
identifier[user_fields] = identifier[direct_fields]
keyword[if] identifier[use_groups] :
identifier[group_model] = identifier[get_group_obj_perms_model] ( identifier[queryset] . identifier[model] )
identifier[group_filters] ={
literal[string] : identifier[ctype] ,
literal[string] . identifier[format] ( identifier[get_user_model] (). identifier[groups] . identifier[field] . identifier[related_query_name] ()): identifier[user] ,
}
keyword[if] identifier[codenames] :
identifier[group_filters] . identifier[update] ({
literal[string] : identifier[codenames] ,
})
identifier[groups_obj_perms_queryset] = identifier[group_model] . identifier[objects] . identifier[filter] (** identifier[group_filters] )
keyword[if] identifier[group_model] . identifier[objects] . identifier[is_generic] ():
identifier[group_fields] = identifier[generic_fields]
keyword[else] :
identifier[group_fields] = identifier[direct_fields]
keyword[if] keyword[not] identifier[any_perm] keyword[and] identifier[codenames] keyword[and] keyword[not] identifier[has_global_perms] :
identifier[user_obj_perms] = identifier[user_obj_perms_queryset] . identifier[values_list] (* identifier[user_fields] )
identifier[groups_obj_perms] = identifier[groups_obj_perms_queryset] . identifier[values_list] (* identifier[group_fields] )
identifier[data] = identifier[list] ( identifier[user_obj_perms] )+ identifier[list] ( identifier[groups_obj_perms] )
identifier[data] = identifier[sorted] ( identifier[data] , identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ])
identifier[pk_list] =[]
keyword[for] identifier[pk] , identifier[group] keyword[in] identifier[groupby] ( identifier[data] , keyword[lambda] identifier[t] : identifier[t] [ literal[int] ]):
identifier[obj_codenames] = identifier[set] (( identifier[e] [ literal[int] ] keyword[for] identifier[e] keyword[in] identifier[group] ))
keyword[if] identifier[codenames] . identifier[issubset] ( identifier[obj_codenames] ):
identifier[pk_list] . identifier[append] ( identifier[pk] )
identifier[objects] = identifier[queryset] . identifier[filter] (**{ identifier[perms_filter] : identifier[pk_list] })
keyword[return] identifier[objects]
keyword[if] keyword[not] identifier[any_perm] keyword[and] identifier[len] ( identifier[codenames] )> literal[int] :
identifier[counts] = identifier[user_obj_perms_queryset] . identifier[values] (
identifier[user_fields] [ literal[int] ]). identifier[annotate] ( identifier[object_pk_count] = identifier[Count] ( identifier[user_fields] [ literal[int] ]))
identifier[user_obj_perms_queryset] = identifier[counts] . identifier[filter] (
identifier[object_pk_count__gte] = identifier[len] ( identifier[codenames] ))
identifier[values] = identifier[user_obj_perms_queryset] . identifier[values_list] ( identifier[user_fields] [ literal[int] ], identifier[flat] = keyword[True] )
keyword[if] identifier[user_model] . identifier[objects] . identifier[is_generic] ():
identifier[values] = identifier[list] ( identifier[values] )
identifier[query] = identifier[Q] (**{ identifier[perms_filter] : identifier[values] })
keyword[if] identifier[use_groups] :
identifier[values] = identifier[groups_obj_perms_queryset] . identifier[values_list] ( identifier[group_fields] [ literal[int] ], identifier[flat] = keyword[True] )
keyword[if] identifier[group_model] . identifier[objects] . identifier[is_generic] ():
identifier[values] = identifier[list] ( identifier[values] )
identifier[query] |= identifier[Q] (**{ identifier[perms_filter] : identifier[values] })
keyword[return] identifier[queryset] . identifier[filter] ( identifier[query] )
|
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False, with_superuser=True, accept_global_perms=True, perms_filter='pk__in'):
"""Return queryset with required permissions."""
if isinstance(perms, str):
perms = [perms] # depends on [control=['if'], data=[]]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
(new_app_label, codename) = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError('Given perms must have same app label ({} != {})'.format(app_label, new_app_label)) # depends on [control=['if'], data=[]]
else:
app_label = new_app_label # depends on [control=['if'], data=['perm']]
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label, permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError('ContentType was once computed to be {} and another one {}'.format(ctype, new_ctype)) # depends on [control=['if'], data=[]]
else:
ctype = new_ctype # depends on [control=['if'], data=['app_label']] # depends on [control=['for'], data=['perm']]
# Compute queryset and ctype if still missing
if ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model) # depends on [control=['if'], data=[]]
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class()) # depends on [control=['if'], data=[]]
elif klass is None:
raise WrongAppError('Cannot determine content type') # depends on [control=['if'], data=[]]
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model and perms_filter == 'pk__in':
raise MixedContentTypeError('Content type for given perms and klass differs') # depends on [control=['if'], data=[]]
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have `codenames` list
# First check if user is superuser and if so, return queryset immediately
if with_superuser and user.is_superuser:
return queryset # depends on [control=['if'], data=[]]
# Check if the user is anonymous. The
# django.contrib.auth.models.AnonymousUser object doesn't work for queries
# and it's nice to be able to pass in request.user blindly.
if user.is_anonymous:
user = get_anonymous_user() # depends on [control=['if'], data=[]]
global_perms = set()
has_global_perms = False
# a superuser has by default assigned global perms for any
if accept_global_perms and with_superuser:
for code in codenames:
if user.has_perm(ctype.app_label + '.' + code):
global_perms.add(code) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['code']]
for code in global_perms:
codenames.remove(code) # depends on [control=['for'], data=['code']]
# prerequisite: there must be elements in global_perms otherwise just
# follow the procedure for object based permissions only AND
# 1. codenames is empty, which means that permissions are ONLY set
# globally, therefore return the full queryset.
# OR
# 2. any_perm is True, then the global permission beats the object
# based permission anyway, therefore return full queryset
if global_perms and (not codenames or any_perm):
return queryset # depends on [control=['if'], data=[]]
# if we have global perms and still some object based perms differing
# from global perms and any_perm is set to false, then we have to flag
# that global perms exist in order to merge object based permissions by
# user and by group correctly. Scenario: global perm change_xx and
# object based perm delete_xx on object A for user, and object based
# permission delete_xx on object B for group, to which user is
# assigned.
# get_objects_for_user(user, [change_xx, delete_xx], use_groups=True,
# any_perm=False, accept_global_perms=True) must retrieve object A and
# B.
elif global_perms and codenames:
has_global_perms = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Now we should extract list of pk values for which we would filter
# queryset
user_model = get_user_obj_perms_model(queryset.model)
user_obj_perms_queryset = user_model.objects.filter(Q(user=user) | Q(user=get_anonymous_user())).filter(permission__content_type=ctype)
if codenames:
user_obj_perms_queryset = user_obj_perms_queryset.filter(permission__codename__in=codenames) # depends on [control=['if'], data=[]]
direct_fields = ['content_object__pk', 'permission__codename']
generic_fields = ['object_pk', 'permission__codename']
if user_model.objects.is_generic():
user_fields = generic_fields # depends on [control=['if'], data=[]]
else:
user_fields = direct_fields
if use_groups:
group_model = get_group_obj_perms_model(queryset.model)
group_filters = {'permission__content_type': ctype, 'group__{}'.format(get_user_model().groups.field.related_query_name()): user}
if codenames:
group_filters.update({'permission__codename__in': codenames}) # depends on [control=['if'], data=[]]
groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
if group_model.objects.is_generic():
group_fields = generic_fields # depends on [control=['if'], data=[]]
else:
group_fields = direct_fields
if not any_perm and codenames and (not has_global_perms):
user_obj_perms = user_obj_perms_queryset.values_list(*user_fields)
groups_obj_perms = groups_obj_perms_queryset.values_list(*group_fields)
data = list(user_obj_perms) + list(groups_obj_perms)
# sorting/grouping by pk (first in result tuple)
data = sorted(data, key=lambda t: t[0])
pk_list = []
for (pk, group) in groupby(data, lambda t: t[0]):
obj_codenames = set((e[1] for e in group))
if codenames.issubset(obj_codenames):
pk_list.append(pk) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
objects = queryset.filter(**{perms_filter: pk_list})
return objects # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not any_perm and len(codenames) > 1:
counts = user_obj_perms_queryset.values(user_fields[0]).annotate(object_pk_count=Count(user_fields[0]))
user_obj_perms_queryset = counts.filter(object_pk_count__gte=len(codenames)) # depends on [control=['if'], data=[]]
values = user_obj_perms_queryset.values_list(user_fields[0], flat=True)
if user_model.objects.is_generic():
values = list(values) # depends on [control=['if'], data=[]]
query = Q(**{perms_filter: values})
if use_groups:
values = groups_obj_perms_queryset.values_list(group_fields[0], flat=True)
if group_model.objects.is_generic():
values = list(values) # depends on [control=['if'], data=[]]
query |= Q(**{perms_filter: values}) # depends on [control=['if'], data=[]]
return queryset.filter(query)
|
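The non-any_perm branch keeps an object only when every required codename is attached to it, via the sort-then-groupby pass above. A standalone sketch with made-up permission data:

from itertools import groupby

codenames = {'change_task', 'delete_task'}
data = [(1, 'change_task'), (1, 'delete_task'), (2, 'change_task')]
data = sorted(data, key=lambda t: t[0])
pk_list = [pk for pk, group in groupby(data, lambda t: t[0])
           if codenames.issubset(e[1] for e in group)]
print(pk_list)  # [1] -- object 2 lacks delete_task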
def _refine_upcheck(merge, min_goodness):
"""Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
"""
# Remove any entries which would be covered by entries above the merge
# position.
changed = False
for i in sorted(merge.entries, reverse=True):
        # Get all the entries that sit between the entry we're looking at and
        # the insertion index of the proposed merge. If this entry would be
        # covered up by any of them then we remove it from the merge.
entry = merge.routing_table[i]
key, mask = entry.key, entry.mask
if any(intersect(key, mask, other.key, other.mask) for other in
merge.routing_table[i+1:merge.insertion_index]):
# The entry would be partially or wholly covered by another entry,
# remove it from the merge and return a new merge.
merge = _Merge(merge.routing_table, merge.entries - {i})
changed = True
# Check if the merge is sufficiently good
if merge.goodness <= min_goodness:
merge = _Merge(merge.routing_table) # Replace with empty merge
break
# Return the final merge
return merge, changed
|
def function[_refine_upcheck, parameter[merge, min_goodness]]:
constant[Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
]
variable[changed] assign[=] constant[False]
for taget[name[i]] in starred[call[name[sorted], parameter[name[merge].entries]]] begin[:]
variable[entry] assign[=] call[name[merge].routing_table][name[i]]
<ast.Tuple object at 0x7da1b19afb20> assign[=] tuple[[<ast.Attribute object at 0x7da1b19ad3c0>, <ast.Attribute object at 0x7da1b19af2e0>]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b19adc60>]] begin[:]
variable[merge] assign[=] call[name[_Merge], parameter[name[merge].routing_table, binary_operation[name[merge].entries - <ast.Set object at 0x7da1b19cbe20>]]]
variable[changed] assign[=] constant[True]
if compare[name[merge].goodness less_or_equal[<=] name[min_goodness]] begin[:]
variable[merge] assign[=] call[name[_Merge], parameter[name[merge].routing_table]]
break
return[tuple[[<ast.Name object at 0x7da1b19c8a60>, <ast.Name object at 0x7da1b19c9ab0>]]]
|
keyword[def] identifier[_refine_upcheck] ( identifier[merge] , identifier[min_goodness] ):
literal[string]
identifier[changed] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[sorted] ( identifier[merge] . identifier[entries] , identifier[reverse] = keyword[True] ):
identifier[entry] = identifier[merge] . identifier[routing_table] [ identifier[i] ]
identifier[key] , identifier[mask] = identifier[entry] . identifier[key] , identifier[entry] . identifier[mask]
keyword[if] identifier[any] ( identifier[intersect] ( identifier[key] , identifier[mask] , identifier[other] . identifier[key] , identifier[other] . identifier[mask] ) keyword[for] identifier[other] keyword[in]
identifier[merge] . identifier[routing_table] [ identifier[i] + literal[int] : identifier[merge] . identifier[insertion_index] ]):
identifier[merge] = identifier[_Merge] ( identifier[merge] . identifier[routing_table] , identifier[merge] . identifier[entries] -{ identifier[i] })
identifier[changed] = keyword[True]
keyword[if] identifier[merge] . identifier[goodness] <= identifier[min_goodness] :
identifier[merge] = identifier[_Merge] ( identifier[merge] . identifier[routing_table] )
keyword[break]
keyword[return] identifier[merge] , identifier[changed]
|
def _refine_upcheck(merge, min_goodness):
"""Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
"""
# Remove any entries which would be covered by entries above the merge
# position.
changed = False
for i in sorted(merge.entries, reverse=True):
        # Get all the entries that sit between the entry we're looking at and
        # the insertion index of the proposed merge. If this entry would be
        # covered up by any of them then we remove it from the merge.
entry = merge.routing_table[i]
(key, mask) = (entry.key, entry.mask)
if any((intersect(key, mask, other.key, other.mask) for other in merge.routing_table[i + 1:merge.insertion_index])):
# The entry would be partially or wholly covered by another entry,
# remove it from the merge and return a new merge.
merge = _Merge(merge.routing_table, merge.entries - {i})
changed = True
# Check if the merge is sufficiently good
if merge.goodness <= min_goodness:
merge = _Merge(merge.routing_table) # Replace with empty merge
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# Return the final merge
return (merge, changed)
|
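The function assumes an `intersect(key_a, mask_a, key_b, mask_b)` predicate. One plausible implementation of the usual key/mask semantics, matching the docstring's example but not necessarily the library's exact code:

def intersect(key_a, mask_a, key_b, mask_b):
    # Two key-mask pairs intersect when the keys agree on every bit that
    # both masks care about; X (unmasked) bits match anything.
    return (key_a ^ key_b) & mask_a & mask_b == 0

print(intersect(0b1000, 0b1111, 0b0000, 0b0111))  # True:  1000 vs X000
print(intersect(0b0011, 0b1111, 0b0000, 0b0111))  # False: 0011 vs X000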
def __get_activity_by_name(self, name, category_id = None, resurrect = True):
"""get most recent, preferably not deleted activity by it's name"""
if category_id:
query = """
SELECT a.id, a.name, a.deleted, coalesce(b.name, ?) as category
FROM activities a
LEFT JOIN categories b ON category_id = b.id
WHERE lower(a.name) = lower(?)
AND category_id = ?
ORDER BY a.deleted, a.id desc
LIMIT 1
"""
res = self.fetchone(query, (self._unsorted_localized, name, category_id))
else:
query = """
SELECT a.id, a.name, a.deleted, coalesce(b.name, ?) as category
FROM activities a
LEFT JOIN categories b ON category_id = b.id
WHERE lower(a.name) = lower(?)
ORDER BY a.deleted, a.id desc
LIMIT 1
"""
res = self.fetchone(query, (self._unsorted_localized, name, ))
if res:
keys = ('id', 'name', 'deleted', 'category')
res = dict([(key, res[key]) for key in keys])
res['deleted'] = res['deleted'] or False
# if the activity was marked as deleted, resurrect on first call
# and put in the unsorted category
if res['deleted'] and resurrect:
update = """
UPDATE activities
SET deleted = null, category_id = -1
WHERE id = ?
"""
self.execute(update, (res['id'], ))
return res
return None
|
def function[__get_activity_by_name, parameter[self, name, category_id, resurrect]]:
constant[get most recent, preferably not deleted activity by it's name]
if name[category_id] begin[:]
variable[query] assign[=] constant[
SELECT a.id, a.name, a.deleted, coalesce(b.name, ?) as category
FROM activities a
LEFT JOIN categories b ON category_id = b.id
WHERE lower(a.name) = lower(?)
AND category_id = ?
ORDER BY a.deleted, a.id desc
LIMIT 1
]
variable[res] assign[=] call[name[self].fetchone, parameter[name[query], tuple[[<ast.Attribute object at 0x7da204565630>, <ast.Name object at 0x7da204567d30>, <ast.Name object at 0x7da2045646d0>]]]]
if name[res] begin[:]
variable[keys] assign[=] tuple[[<ast.Constant object at 0x7da2045651b0>, <ast.Constant object at 0x7da2045663b0>, <ast.Constant object at 0x7da204565c90>, <ast.Constant object at 0x7da204566350>]]
variable[res] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da204565540>]]
call[name[res]][constant[deleted]] assign[=] <ast.BoolOp object at 0x7da204564400>
if <ast.BoolOp object at 0x7da2045659c0> begin[:]
variable[update] assign[=] constant[
UPDATE activities
SET deleted = null, category_id = -1
WHERE id = ?
]
call[name[self].execute, parameter[name[update], tuple[[<ast.Subscript object at 0x7da18bc734c0>]]]]
return[name[res]]
return[constant[None]]
|
keyword[def] identifier[__get_activity_by_name] ( identifier[self] , identifier[name] , identifier[category_id] = keyword[None] , identifier[resurrect] = keyword[True] ):
literal[string]
keyword[if] identifier[category_id] :
identifier[query] = literal[string]
identifier[res] = identifier[self] . identifier[fetchone] ( identifier[query] ,( identifier[self] . identifier[_unsorted_localized] , identifier[name] , identifier[category_id] ))
keyword[else] :
identifier[query] = literal[string]
identifier[res] = identifier[self] . identifier[fetchone] ( identifier[query] ,( identifier[self] . identifier[_unsorted_localized] , identifier[name] ,))
keyword[if] identifier[res] :
identifier[keys] =( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[res] = identifier[dict] ([( identifier[key] , identifier[res] [ identifier[key] ]) keyword[for] identifier[key] keyword[in] identifier[keys] ])
identifier[res] [ literal[string] ]= identifier[res] [ literal[string] ] keyword[or] keyword[False]
keyword[if] identifier[res] [ literal[string] ] keyword[and] identifier[resurrect] :
identifier[update] = literal[string]
identifier[self] . identifier[execute] ( identifier[update] ,( identifier[res] [ literal[string] ],))
keyword[return] identifier[res]
keyword[return] keyword[None]
|
def __get_activity_by_name(self, name, category_id=None, resurrect=True):
"""get most recent, preferably not deleted activity by it's name"""
if category_id:
query = '\n SELECT a.id, a.name, a.deleted, coalesce(b.name, ?) as category\n FROM activities a\n LEFT JOIN categories b ON category_id = b.id\n WHERE lower(a.name) = lower(?)\n AND category_id = ?\n ORDER BY a.deleted, a.id desc\n LIMIT 1\n '
res = self.fetchone(query, (self._unsorted_localized, name, category_id)) # depends on [control=['if'], data=[]]
else:
query = '\n SELECT a.id, a.name, a.deleted, coalesce(b.name, ?) as category\n FROM activities a\n LEFT JOIN categories b ON category_id = b.id\n WHERE lower(a.name) = lower(?)\n ORDER BY a.deleted, a.id desc\n LIMIT 1\n '
res = self.fetchone(query, (self._unsorted_localized, name))
if res:
keys = ('id', 'name', 'deleted', 'category')
res = dict([(key, res[key]) for key in keys])
res['deleted'] = res['deleted'] or False
# if the activity was marked as deleted, resurrect on first call
# and put in the unsorted category
if res['deleted'] and resurrect:
update = '\n UPDATE activities\n SET deleted = null, category_id = -1\n WHERE id = ?\n '
self.execute(update, (res['id'],)) # depends on [control=['if'], data=[]]
return res # depends on [control=['if'], data=[]]
return None
|
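The row-to-dict conversion near the end relies on the cursor returning mapping-style rows; a runnable sketch with sqlite3.Row reproduces the same key-list and coalesce step on a made-up row:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row
row = conn.execute("SELECT 1 AS id, 'Reading' AS name, NULL AS deleted").fetchone()
keys = ('id', 'name', 'deleted')
res = {key: row[key] for key in keys}
res['deleted'] = res['deleted'] or False
print(res)  # {'id': 1, 'name': 'Reading', 'deleted': False}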
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
|
def function[path, parameter[self]]:
constant[Node's relative path from the root node]
if name[self].parent begin[:]
<ast.Try object at 0x7da1b053acb0>
return[call[name[os].path.join, parameter[name[parent_path], name[self].name]]]
return[constant[b'/']]
|
keyword[def] identifier[path] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] :
keyword[try] :
identifier[parent_path] = identifier[self] . identifier[parent] . identifier[path] . identifier[encode] ()
keyword[except] identifier[AttributeError] :
identifier[parent_path] = identifier[self] . identifier[parent] . identifier[path]
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[parent_path] , identifier[self] . identifier[name] )
keyword[return] literal[string]
|
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode() # depends on [control=['try'], data=[]]
except AttributeError:
parent_path = self.parent.path # depends on [control=['except'], data=[]]
return os.path.join(parent_path, self.name) # depends on [control=['if'], data=[]]
return b'/'
|
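A simplified sketch of the recursive bytes-path construction above (the encode() fallback is dropped because every node in this toy tree already stores bytes); POSIX-style joining is assumed:

import os

class _Node:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

    @property
    def path(self):
        if self.parent:
            return os.path.join(self.parent.path, self.name)
        return b"/"

root = _Node(b"")
print(_Node(b"file.txt", _Node(b"etc", root)).path)  # b'/etc/file.txt'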
def db_import(self, urls=None, force_download=False):
"""Updates the CTD database
1. downloads all files from CTD
2. drops all tables in database
3. creates all tables in database
4. import all data from CTD files
:param iter[str] urls: An iterable of URL strings
:param bool force_download: force method to download
"""
if not urls:
urls = [
defaults.url_base + table_conf.tables[model]['file_name']
for model in table_conf.tables
]
log.info('Update CTD database from %s', urls)
self.drop_all()
self.download_urls(urls=urls, force_download=force_download)
self.create_all()
self.import_tables()
self.session.close()
|
def function[db_import, parameter[self, urls, force_download]]:
constant[Updates the CTD database
1. downloads all files from CTD
2. drops all tables in database
3. creates all tables in database
4. import all data from CTD files
:param iter[str] urls: An iterable of URL strings
:param bool force_download: force method to download
]
if <ast.UnaryOp object at 0x7da1b0a2d930> begin[:]
variable[urls] assign[=] <ast.ListComp object at 0x7da1b0a2eb30>
call[name[log].info, parameter[constant[Update CTD database from %s], name[urls]]]
call[name[self].drop_all, parameter[]]
call[name[self].download_urls, parameter[]]
call[name[self].create_all, parameter[]]
call[name[self].import_tables, parameter[]]
call[name[self].session.close, parameter[]]
|
keyword[def] identifier[db_import] ( identifier[self] , identifier[urls] = keyword[None] , identifier[force_download] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[urls] :
identifier[urls] =[
identifier[defaults] . identifier[url_base] + identifier[table_conf] . identifier[tables] [ identifier[model] ][ literal[string] ]
keyword[for] identifier[model] keyword[in] identifier[table_conf] . identifier[tables]
]
identifier[log] . identifier[info] ( literal[string] , identifier[urls] )
identifier[self] . identifier[drop_all] ()
identifier[self] . identifier[download_urls] ( identifier[urls] = identifier[urls] , identifier[force_download] = identifier[force_download] )
identifier[self] . identifier[create_all] ()
identifier[self] . identifier[import_tables] ()
identifier[self] . identifier[session] . identifier[close] ()
|
def db_import(self, urls=None, force_download=False):
"""Updates the CTD database
1. downloads all files from CTD
2. drops all tables in database
3. creates all tables in database
4. import all data from CTD files
:param iter[str] urls: An iterable of URL strings
:param bool force_download: force method to download
"""
if not urls:
urls = [defaults.url_base + table_conf.tables[model]['file_name'] for model in table_conf.tables] # depends on [control=['if'], data=[]]
log.info('Update CTD database from %s', urls)
self.drop_all()
self.download_urls(urls=urls, force_download=force_download)
self.create_all()
self.import_tables()
self.session.close()
|
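The default URL list assembled above is just the base URL plus each table's file name. A small sketch with invented config values (the real defaults.url_base and table_conf ship with the package):

# Illustrative values only; the package supplies the real ones.
url_base = 'http://ctdbase.org/reports/'
tables = {
    'chemical': {'file_name': 'CTD_chemicals.tsv.gz'},
    'disease': {'file_name': 'CTD_diseases.tsv.gz'},
}
urls = [url_base + tables[model]['file_name'] for model in tables]
# ['http://ctdbase.org/reports/CTD_chemicals.tsv.gz',
#  'http://ctdbase.org/reports/CTD_diseases.tsv.gz']
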
def set_href_prefix(self, prefix):
"""
Set the prefix of any hrefs associated with this thing.
prefix -- the prefix
"""
self.href_prefix = prefix
for property_ in self.properties.values():
property_.set_href_prefix(prefix)
for action_name in self.actions.keys():
for action in self.actions[action_name]:
action.set_href_prefix(prefix)
|
def function[set_href_prefix, parameter[self, prefix]]:
constant[
Set the prefix of any hrefs associated with this thing.
prefix -- the prefix
]
name[self].href_prefix assign[=] name[prefix]
for taget[name[property_]] in starred[call[name[self].properties.values, parameter[]]] begin[:]
call[name[property_].set_href_prefix, parameter[name[prefix]]]
for taget[name[action_name]] in starred[call[name[self].actions.keys, parameter[]]] begin[:]
for taget[name[action]] in starred[call[name[self].actions][name[action_name]]] begin[:]
call[name[action].set_href_prefix, parameter[name[prefix]]]
|
keyword[def] identifier[set_href_prefix] ( identifier[self] , identifier[prefix] ):
literal[string]
identifier[self] . identifier[href_prefix] = identifier[prefix]
keyword[for] identifier[property_] keyword[in] identifier[self] . identifier[properties] . identifier[values] ():
identifier[property_] . identifier[set_href_prefix] ( identifier[prefix] )
keyword[for] identifier[action_name] keyword[in] identifier[self] . identifier[actions] . identifier[keys] ():
keyword[for] identifier[action] keyword[in] identifier[self] . identifier[actions] [ identifier[action_name] ]:
identifier[action] . identifier[set_href_prefix] ( identifier[prefix] )
|
def set_href_prefix(self, prefix):
"""
Set the prefix of any hrefs associated with this thing.
prefix -- the prefix
"""
self.href_prefix = prefix
for property_ in self.properties.values():
property_.set_href_prefix(prefix) # depends on [control=['for'], data=['property_']]
for action_name in self.actions.keys():
for action in self.actions[action_name]:
action.set_href_prefix(prefix) # depends on [control=['for'], data=['action']] # depends on [control=['for'], data=['action_name']]
|
def find_global(self, pattern):
"""
Searches for the pattern in the whole process memory space and returns the first occurrence.
This is exhaustive!
"""
pos_s = self.reader.search(pattern)
if len(pos_s) == 0:
return -1
return pos_s[0]
|
def function[find_global, parameter[self, pattern]]:
constant[
Searches for the pattern in the whole process memory space and returns the first occurrence.
This is exhaustive!
]
variable[pos_s] assign[=] call[name[self].reader.search, parameter[name[pattern]]]
if compare[call[name[len], parameter[name[pos_s]]] equal[==] constant[0]] begin[:]
return[<ast.UnaryOp object at 0x7da1b100ca00>]
return[call[name[pos_s]][constant[0]]]
|
keyword[def] identifier[find_global] ( identifier[self] , identifier[pattern] ):
literal[string]
identifier[pos_s] = identifier[self] . identifier[reader] . identifier[search] ( identifier[pattern] )
keyword[if] identifier[len] ( identifier[pos_s] )== literal[int] :
keyword[return] - literal[int]
keyword[return] identifier[pos_s] [ literal[int] ]
|
def find_global(self, pattern):
"""
Searches for the pattern in the whole process memory space and returns the first occurrence.
This is exhaustive!
"""
pos_s = self.reader.search(pattern)
if len(pos_s) == 0:
return -1 # depends on [control=['if'], data=[]]
return pos_s[0]
|
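Hypothetical usage of find_global: the -1 sentinel mirrors str.find(), so callers should compare against -1 rather than test truthiness, since offset 0 is falsy but perfectly valid.

addr = scanner.find_global(b'\x90\x90\x90\x90')   # scanner object is assumed
if addr == -1:
    print('pattern not present in process memory')
else:
    print('first occurrence at', hex(addr))
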
def _query_init(k, oracle, query, method='all'):
"""A helper function for query-matching function initialization."""
if method == 'all':
a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]])
        dvec = (a * a).sum(axis=1)  # squared distance; sqrt skipped, argmin is unaffected
_d = dvec.argmin()
return oracle.latent[oracle.data[k]][_d], dvec[_d]
else:
a = np.subtract(query, oracle.f_array[k])
        dvec = (a * a).sum()  # squared distance; sqrt skipped
return k, dvec
|
def function[_query_init, parameter[k, oracle, query, method]]:
constant[A helper function for query-matching function initialization.]
if compare[name[method] equal[==] constant[all]] begin[:]
variable[a] assign[=] call[name[np].subtract, parameter[name[query], <ast.ListComp object at 0x7da2054a4340>]]
variable[dvec] assign[=] call[binary_operation[name[a] * name[a]].sum, parameter[]]
variable[_d] assign[=] call[name[dvec].argmin, parameter[]]
return[tuple[[<ast.Subscript object at 0x7da18dc04e80>, <ast.Subscript object at 0x7da18dc05b40>]]]
|
keyword[def] identifier[_query_init] ( identifier[k] , identifier[oracle] , identifier[query] , identifier[method] = literal[string] ):
literal[string]
keyword[if] identifier[method] == literal[string] :
identifier[a] = identifier[np] . identifier[subtract] ( identifier[query] ,[ identifier[oracle] . identifier[f_array] [ identifier[t] ] keyword[for] identifier[t] keyword[in] identifier[oracle] . identifier[latent] [ identifier[oracle] . identifier[data] [ identifier[k] ]]])
identifier[dvec] =( identifier[a] * identifier[a] ). identifier[sum] ( identifier[axis] = literal[int] )
identifier[_d] = identifier[dvec] . identifier[argmin] ()
keyword[return] identifier[oracle] . identifier[latent] [ identifier[oracle] . identifier[data] [ identifier[k] ]][ identifier[_d] ], identifier[dvec] [ identifier[_d] ]
keyword[else] :
identifier[a] = identifier[np] . identifier[subtract] ( identifier[query] , identifier[oracle] . identifier[f_array] [ identifier[k] ])
identifier[dvec] =( identifier[a] * identifier[a] ). identifier[sum] ()
keyword[return] identifier[k] , identifier[dvec]
|
def _query_init(k, oracle, query, method='all'):
"""A helper function for query-matching function initialization."""
if method == 'all':
a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]])
        dvec = (a * a).sum(axis=1)  # squared distance; sqrt skipped, argmin is unaffected
_d = dvec.argmin()
return (oracle.latent[oracle.data[k]][_d], dvec[_d]) # depends on [control=['if'], data=[]]
else:
a = np.subtract(query, oracle.f_array[k])
        dvec = (a * a).sum()  # squared distance; sqrt skipped
return (k, dvec)
|
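The 'all' branch in miniature: squared Euclidean distances from the query to a set of candidate frames, then argmin, on toy data. Skipping the sqrt is safe because it never changes which index is smallest.

import numpy as np

query = np.array([0.0, 1.0])                        # toy feature vector
frames = np.array([[0.0, 0.0], [0.2, 0.9], [1.0, 1.0]])
a = np.subtract(query, frames)                      # broadcasts over rows
dvec = (a * a).sum(axis=1)                          # [1.0, 0.05, 1.0]
best = dvec.argmin()
print(best, dvec[best])                             # 1 0.05 (approximately)
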
def pack_found_items(self, s_text, target):
""" pack up found items for search ctrl
:param target: treectrl obj
:param s_text: text to search, lower case
        :return: list of found items
"""
all_children = self.all_children
all_text = [target.GetItemText(i).lower() for i in all_children]
found_items = [child for i, child in enumerate(all_children)
if s_text in all_text[i]]
return found_items
|
def function[pack_found_items, parameter[self, s_text, target]]:
constant[ pack up found items for search ctrl
:param target: treectrl obj
:param s_text: text to search, lower case
:return: list of found items
]
variable[all_children] assign[=] name[self].all_children
variable[all_text] assign[=] <ast.ListComp object at 0x7da1b09bddb0>
variable[found_items] assign[=] <ast.ListComp object at 0x7da1b09becb0>
return[name[found_items]]
|
keyword[def] identifier[pack_found_items] ( identifier[self] , identifier[s_text] , identifier[target] ):
literal[string]
identifier[all_children] = identifier[self] . identifier[all_children]
identifier[all_text] =[ identifier[target] . identifier[GetItemText] ( identifier[i] ). identifier[lower] () keyword[for] identifier[i] keyword[in] identifier[all_children] ]
identifier[found_items] =[ identifier[child] keyword[for] identifier[i] , identifier[child] keyword[in] identifier[enumerate] ( identifier[all_children] )
keyword[if] identifier[s_text] keyword[in] identifier[all_text] [ identifier[i] ]]
keyword[return] identifier[found_items]
|
def pack_found_items(self, s_text, target):
""" pack up found items for search ctrl
:param target: treectrl obj
:param s_text: text to search, lower case
    :return: list of found items
"""
all_children = self.all_children
all_text = [target.GetItemText(i).lower() for i in all_children]
found_items = [child for (i, child) in enumerate(all_children) if s_text in all_text[i]]
return found_items
|
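The same case-insensitive substring filter on plain strings, with GetItemText() replaced by a list (illustrative only):

items = ['Node A', 'node b', 'Leaf']     # stand-ins for tree items
all_text = [t.lower() for t in items]    # mimics GetItemText().lower()
s_text = 'node'                          # caller passes lower case
found = [item for i, item in enumerate(items) if s_text in all_text[i]]
print(found)                             # ['Node A', 'node b']
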
def _api_on_write_error(self, status_code, **kwargs):
"""
        Catches errors and renders them as a JSON message. Adds the traceback if
debug is enabled.
"""
return_error = { "code": self.get_status() }
exc_info = kwargs.get("exc_info")
if exc_info and isinstance(exc_info[1], oz.json_api.ApiError):
return_error["error"] = exc_info[1].message
else:
return_error["error"] = API_ERROR_CODE_MAP.get(self.get_status(), "Unknown error")
if oz.settings.get("debug"):
return_error["trace"] = "".join(traceback.format_exception(*exc_info))
self.finish(return_error)
return oz.break_trigger
|
def function[_api_on_write_error, parameter[self, status_code]]:
constant[
Catches errors and renders them as a JSON message. Adds the traceback if
debug is enabled.
]
variable[return_error] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a31570>], [<ast.Call object at 0x7da1b0a32860>]]
variable[exc_info] assign[=] call[name[kwargs].get, parameter[constant[exc_info]]]
if <ast.BoolOp object at 0x7da1b0a32b00> begin[:]
call[name[return_error]][constant[error]] assign[=] call[name[exc_info]][constant[1]].message
if call[name[oz].settings.get, parameter[constant[debug]]] begin[:]
call[name[return_error]][constant[trace]] assign[=] call[constant[].join, parameter[call[name[traceback].format_exception, parameter[<ast.Starred object at 0x7da1b0b57280>]]]]
call[name[self].finish, parameter[name[return_error]]]
return[name[oz].break_trigger]
|
keyword[def] identifier[_api_on_write_error] ( identifier[self] , identifier[status_code] ,** identifier[kwargs] ):
literal[string]
identifier[return_error] ={ literal[string] : identifier[self] . identifier[get_status] ()}
identifier[exc_info] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[exc_info] keyword[and] identifier[isinstance] ( identifier[exc_info] [ literal[int] ], identifier[oz] . identifier[json_api] . identifier[ApiError] ):
identifier[return_error] [ literal[string] ]= identifier[exc_info] [ literal[int] ]. identifier[message]
keyword[else] :
identifier[return_error] [ literal[string] ]= identifier[API_ERROR_CODE_MAP] . identifier[get] ( identifier[self] . identifier[get_status] (), literal[string] )
keyword[if] identifier[oz] . identifier[settings] . identifier[get] ( literal[string] ):
identifier[return_error] [ literal[string] ]= literal[string] . identifier[join] ( identifier[traceback] . identifier[format_exception] (* identifier[exc_info] ))
identifier[self] . identifier[finish] ( identifier[return_error] )
keyword[return] identifier[oz] . identifier[break_trigger]
|
def _api_on_write_error(self, status_code, **kwargs):
"""
    Catches errors and renders them as a JSON message. Adds the traceback if
debug is enabled.
"""
return_error = {'code': self.get_status()}
exc_info = kwargs.get('exc_info')
if exc_info and isinstance(exc_info[1], oz.json_api.ApiError):
return_error['error'] = exc_info[1].message # depends on [control=['if'], data=[]]
else:
return_error['error'] = API_ERROR_CODE_MAP.get(self.get_status(), 'Unknown error')
if oz.settings.get('debug'):
return_error['trace'] = ''.join(traceback.format_exception(*exc_info)) # depends on [control=['if'], data=[]]
self.finish(return_error)
return oz.break_trigger
|
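The traceback formatting in the debug branch is plain stdlib; a self-contained sketch of the same pattern (sys.exc_info() yields the same triple Tornado passes in as exc_info):

import sys
import traceback

try:
    raise ValueError('boom')
except ValueError:
    exc_info = sys.exc_info()

trace = ''.join(traceback.format_exception(*exc_info))
print(trace)   # 'Traceback (most recent call last): ... ValueError: boom'
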
def start_capture(self, slot_number, port_number, output_file, data_link_type="DLT_EN10MB"):
"""
Starts a packet capture.
:param slot_number: slot number
:param port_number: port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
try:
open(output_file, 'w+').close()
except OSError as e:
raise DynamipsError('Can not write capture to "{}": {}'.format(output_file, str(e)))
try:
adapter = self._slots[slot_number]
except IndexError:
raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name,
slot_number=slot_number))
if not adapter.port_exists(port_number):
raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
port_number=port_number))
data_link_type = data_link_type.lower()
if data_link_type.startswith("dlt_"):
data_link_type = data_link_type[4:]
nio = adapter.get_nio(port_number)
if not nio:
raise DynamipsError("Port {slot_number}/{port_number} is not connected".format(slot_number=slot_number,
port_number=port_number))
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
raise DynamipsError("Port {port_number} has already a filter applied on {adapter}".format(adapter=adapter,
port_number=port_number))
yield from nio.bind_filter("both", "capture")
yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file))
log.info('Router "{name}" [{id}]: starting packet capture on port {slot_number}/{port_number}'.format(name=self._name,
id=self._id,
nio_name=nio.name,
slot_number=slot_number,
port_number=port_number))
|
def function[start_capture, parameter[self, slot_number, port_number, output_file, data_link_type]]:
constant[
Starts a packet capture.
:param slot_number: slot number
:param port_number: port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
]
<ast.Try object at 0x7da2047ebb50>
<ast.Try object at 0x7da2047e81c0>
if <ast.UnaryOp object at 0x7da2047ebe20> begin[:]
<ast.Raise object at 0x7da2047e99f0>
variable[data_link_type] assign[=] call[name[data_link_type].lower, parameter[]]
if call[name[data_link_type].startswith, parameter[constant[dlt_]]] begin[:]
variable[data_link_type] assign[=] call[name[data_link_type]][<ast.Slice object at 0x7da2045657e0>]
variable[nio] assign[=] call[name[adapter].get_nio, parameter[name[port_number]]]
if <ast.UnaryOp object at 0x7da204564430> begin[:]
<ast.Raise object at 0x7da204564250>
if <ast.BoolOp object at 0x7da20cabef50> begin[:]
<ast.Raise object at 0x7da20e74b700>
<ast.YieldFrom object at 0x7da2047e98d0>
<ast.YieldFrom object at 0x7da2047eb0a0>
call[name[log].info, parameter[call[constant[Router "{name}" [{id}]: starting packet capture on port {slot_number}/{port_number}].format, parameter[]]]]
|
keyword[def] identifier[start_capture] ( identifier[self] , identifier[slot_number] , identifier[port_number] , identifier[output_file] , identifier[data_link_type] = literal[string] ):
literal[string]
keyword[try] :
identifier[open] ( identifier[output_file] , literal[string] ). identifier[close] ()
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[output_file] , identifier[str] ( identifier[e] )))
keyword[try] :
identifier[adapter] = identifier[self] . identifier[_slots] [ identifier[slot_number] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[slot_number] = identifier[slot_number] ))
keyword[if] keyword[not] identifier[adapter] . identifier[port_exists] ( identifier[port_number] ):
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[adapter] = identifier[adapter] ,
identifier[port_number] = identifier[port_number] ))
identifier[data_link_type] = identifier[data_link_type] . identifier[lower] ()
keyword[if] identifier[data_link_type] . identifier[startswith] ( literal[string] ):
identifier[data_link_type] = identifier[data_link_type] [ literal[int] :]
identifier[nio] = identifier[adapter] . identifier[get_nio] ( identifier[port_number] )
keyword[if] keyword[not] identifier[nio] :
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[slot_number] = identifier[slot_number] ,
identifier[port_number] = identifier[port_number] ))
keyword[if] identifier[nio] . identifier[input_filter] [ literal[int] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nio] . identifier[output_filter] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[adapter] = identifier[adapter] ,
identifier[port_number] = identifier[port_number] ))
keyword[yield] keyword[from] identifier[nio] . identifier[bind_filter] ( literal[string] , literal[string] )
keyword[yield] keyword[from] identifier[nio] . identifier[setup_filter] ( literal[string] , literal[string] . identifier[format] ( identifier[data_link_type] , identifier[output_file] ))
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[id] = identifier[self] . identifier[_id] ,
identifier[nio_name] = identifier[nio] . identifier[name] ,
identifier[slot_number] = identifier[slot_number] ,
identifier[port_number] = identifier[port_number] ))
|
def start_capture(self, slot_number, port_number, output_file, data_link_type='DLT_EN10MB'):
"""
Starts a packet capture.
:param slot_number: slot number
:param port_number: port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
try:
open(output_file, 'w+').close() # depends on [control=['try'], data=[]]
except OSError as e:
raise DynamipsError('Can not write capture to "{}": {}'.format(output_file, str(e))) # depends on [control=['except'], data=['e']]
try:
adapter = self._slots[slot_number] # depends on [control=['try'], data=[]]
except IndexError:
raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name, slot_number=slot_number)) # depends on [control=['except'], data=[]]
if not adapter.port_exists(port_number):
raise DynamipsError('Port {port_number} does not exist in adapter {adapter}'.format(adapter=adapter, port_number=port_number)) # depends on [control=['if'], data=[]]
data_link_type = data_link_type.lower()
if data_link_type.startswith('dlt_'):
data_link_type = data_link_type[4:] # depends on [control=['if'], data=[]]
nio = adapter.get_nio(port_number)
if not nio:
raise DynamipsError('Port {slot_number}/{port_number} is not connected'.format(slot_number=slot_number, port_number=port_number)) # depends on [control=['if'], data=[]]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
        raise DynamipsError('Port {port_number} already has a filter applied on {adapter}'.format(adapter=adapter, port_number=port_number)) # depends on [control=['if'], data=[]]
yield from nio.bind_filter('both', 'capture')
yield from nio.setup_filter('both', '{} "{}"'.format(data_link_type, output_file))
log.info('Router "{name}" [{id}]: starting packet capture on port {slot_number}/{port_number}'.format(name=self._name, id=self._id, nio_name=nio.name, slot_number=slot_number, port_number=port_number))
|
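The data-link-type normalization and the filter spec it feeds are easy to trace in isolation (output path invented):

data_link_type = 'DLT_EN10MB'
data_link_type = data_link_type.lower()        # 'dlt_en10mb'
if data_link_type.startswith('dlt_'):
    data_link_type = data_link_type[4:]        # 'en10mb'
print('{} "{}"'.format(data_link_type, '/tmp/R1_f0_0.pcap'))
# en10mb "/tmp/R1_f0_0.pcap"
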
def task_date(self, task_re):
""" Get a datetime.date object for the last task that matches a regex.
        :param task_re: regex, e.g. re.compile('Development Freeze').
See txproductpages.milestones for some useful regex
constants to pass in here.
:returns: deferred that when fired returns a datetime.date object
:raises: TaskNotFoundException if no tasks matched.
"""
tasks = yield self.schedule_tasks()
task_date = None
for task in tasks:
if task_re.match(task['name']):
(y, m, d) = task['date_finish'].split('-')
task_date = date(int(y), int(m), int(d))
if task_date:
defer.returnValue(task_date)
raise TaskNotFoundException()
|
def function[task_date, parameter[self, task_re]]:
constant[ Get a datetime.date object for the last task that matches a regex.
:param task_re: regex, e.g. re.compile('Development Freeze').
See txproductpages.milestones for some useful regex
constants to pass in here.
:returns: deferred that when fired returns a datetime.date object
:raises: TaskNotFoundException if no tasks matched.
]
variable[tasks] assign[=] <ast.Yield object at 0x7da20c7969b0>
variable[task_date] assign[=] constant[None]
for taget[name[task]] in starred[name[tasks]] begin[:]
if call[name[task_re].match, parameter[call[name[task]][constant[name]]]] begin[:]
<ast.Tuple object at 0x7da20c7952a0> assign[=] call[call[name[task]][constant[date_finish]].split, parameter[constant[-]]]
variable[task_date] assign[=] call[name[date], parameter[call[name[int], parameter[name[y]]], call[name[int], parameter[name[m]]], call[name[int], parameter[name[d]]]]]
if name[task_date] begin[:]
call[name[defer].returnValue, parameter[name[task_date]]]
<ast.Raise object at 0x7da20e9b3a60>
|
keyword[def] identifier[task_date] ( identifier[self] , identifier[task_re] ):
literal[string]
identifier[tasks] = keyword[yield] identifier[self] . identifier[schedule_tasks] ()
identifier[task_date] = keyword[None]
keyword[for] identifier[task] keyword[in] identifier[tasks] :
keyword[if] identifier[task_re] . identifier[match] ( identifier[task] [ literal[string] ]):
( identifier[y] , identifier[m] , identifier[d] )= identifier[task] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[task_date] = identifier[date] ( identifier[int] ( identifier[y] ), identifier[int] ( identifier[m] ), identifier[int] ( identifier[d] ))
keyword[if] identifier[task_date] :
identifier[defer] . identifier[returnValue] ( identifier[task_date] )
keyword[raise] identifier[TaskNotFoundException] ()
|
def task_date(self, task_re):
""" Get a datetime.date object for the last task that matches a regex.
    :param task_re: regex, e.g. re.compile('Development Freeze').
See txproductpages.milestones for some useful regex
constants to pass in here.
:returns: deferred that when fired returns a datetime.date object
:raises: TaskNotFoundException if no tasks matched.
"""
tasks = (yield self.schedule_tasks())
task_date = None
for task in tasks:
if task_re.match(task['name']):
(y, m, d) = task['date_finish'].split('-')
task_date = date(int(y), int(m), int(d)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']]
if task_date:
defer.returnValue(task_date) # depends on [control=['if'], data=[]]
raise TaskNotFoundException()
|
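The date parsing in isolation, with a made-up task record (real schedule entries carry more fields):

from datetime import date

task = {'name': 'Development Freeze', 'date_finish': '2024-03-18'}
y, m, d = task['date_finish'].split('-')
print(date(int(y), int(m), int(d)))   # 2024-03-18
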
def renumber(args):
"""
%prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates.
"""
from jcvi.algorithms.lis import longest_increasing_subsequence
from jcvi.utils.grouper import Grouper
p = OptionParser(renumber.__doc__)
p.set_annot_reformat_opts()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
abedfile = pf + ".a.bed"
bbedfile = pf + ".b.bed"
if need_update(bedfile, (abedfile, bbedfile)):
prepare(bedfile)
mbed = Bed(bbedfile)
g = Grouper()
for s in mbed:
accn = s.accn
g.join(*accn.split(";"))
bed = Bed(abedfile)
for chr, sbed in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue
ranks = []
gg = set()
for s in sbed:
accn = s.accn
achr, arank = atg_name(accn)
if achr != current_chr:
continue
ranks.append(arank)
gg.add(accn)
lranks = longest_increasing_subsequence(ranks)
print(current_chr, len(sbed), "==>", len(ranks), \
"==>", len(lranks), file=sys.stderr)
granks = set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, uc=opts.uc) for x in lranks) | \
set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks)
tagstore = {}
for s in sbed:
achr, arank = atg_name(s.accn)
accn = s.accn
if accn in granks:
tag = (accn, FRAME)
elif accn in gg:
tag = (accn, RETAIN)
else:
tag = (".", NEW)
tagstore[accn] = tag
# Find cases where genes overlap
for s in sbed:
accn = s.accn
gaccn = g[accn]
tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn]
group = [(PRIORITY.index(tag), x) for tag, x in tags]
best = min(group)[-1]
if accn != best:
tag = (best, OVERLAP)
else:
tag = tagstore[accn]
print("\t".join((str(s), "|".join(tag))))
|
def function[renumber, parameter[args]]:
constant[
%prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates.
]
from relative_module[jcvi.algorithms.lis] import module[longest_increasing_subsequence]
from relative_module[jcvi.utils.grouper] import module[Grouper]
variable[p] assign[=] call[name[OptionParser], parameter[name[renumber].__doc__]]
call[name[p].set_annot_reformat_opts, parameter[]]
<ast.Tuple object at 0x7da20c6e6fb0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c6e52d0>]]
<ast.Tuple object at 0x7da20c6e7be0> assign[=] name[args]
variable[pf] assign[=] call[call[name[bedfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[abedfile] assign[=] binary_operation[name[pf] + constant[.a.bed]]
variable[bbedfile] assign[=] binary_operation[name[pf] + constant[.b.bed]]
if call[name[need_update], parameter[name[bedfile], tuple[[<ast.Name object at 0x7da20c6e5780>, <ast.Name object at 0x7da20c6e7550>]]]] begin[:]
call[name[prepare], parameter[name[bedfile]]]
variable[mbed] assign[=] call[name[Bed], parameter[name[bbedfile]]]
variable[g] assign[=] call[name[Grouper], parameter[]]
for taget[name[s]] in starred[name[mbed]] begin[:]
variable[accn] assign[=] name[s].accn
call[name[g].join, parameter[<ast.Starred object at 0x7da20c6e4940>]]
variable[bed] assign[=] call[name[Bed], parameter[name[abedfile]]]
for taget[tuple[[<ast.Name object at 0x7da1b0862e00>, <ast.Name object at 0x7da1b0861f30>]]] in starred[call[name[bed].sub_beds, parameter[]]] begin[:]
variable[current_chr] assign[=] call[name[chr_number], parameter[name[chr]]]
if <ast.UnaryOp object at 0x7da1b08620e0> begin[:]
continue
variable[ranks] assign[=] list[[]]
variable[gg] assign[=] call[name[set], parameter[]]
for taget[name[s]] in starred[name[sbed]] begin[:]
variable[accn] assign[=] name[s].accn
<ast.Tuple object at 0x7da1b0863a30> assign[=] call[name[atg_name], parameter[name[accn]]]
if compare[name[achr] not_equal[!=] name[current_chr]] begin[:]
continue
call[name[ranks].append, parameter[name[arank]]]
call[name[gg].add, parameter[name[accn]]]
variable[lranks] assign[=] call[name[longest_increasing_subsequence], parameter[name[ranks]]]
call[name[print], parameter[name[current_chr], call[name[len], parameter[name[sbed]]], constant[==>], call[name[len], parameter[name[ranks]]], constant[==>], call[name[len], parameter[name[lranks]]]]]
variable[granks] assign[=] binary_operation[call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b0863220>]] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b08626e0>]]]
variable[tagstore] assign[=] dictionary[[], []]
for taget[name[s]] in starred[name[sbed]] begin[:]
<ast.Tuple object at 0x7da1b0862080> assign[=] call[name[atg_name], parameter[name[s].accn]]
variable[accn] assign[=] name[s].accn
if compare[name[accn] in name[granks]] begin[:]
variable[tag] assign[=] tuple[[<ast.Name object at 0x7da1b0861bd0>, <ast.Name object at 0x7da1b08618d0>]]
call[name[tagstore]][name[accn]] assign[=] name[tag]
for taget[name[s]] in starred[name[sbed]] begin[:]
variable[accn] assign[=] name[s].accn
variable[gaccn] assign[=] call[name[g]][name[accn]]
variable[tags] assign[=] <ast.ListComp object at 0x7da1b08601c0>
variable[group] assign[=] <ast.ListComp object at 0x7da1b0861450>
variable[best] assign[=] call[call[name[min], parameter[name[group]]]][<ast.UnaryOp object at 0x7da1b0960940>]
if compare[name[accn] not_equal[!=] name[best]] begin[:]
variable[tag] assign[=] tuple[[<ast.Name object at 0x7da1b09609a0>, <ast.Name object at 0x7da1b0962800>]]
call[name[print], parameter[call[constant[ ].join, parameter[tuple[[<ast.Call object at 0x7da1b0961ea0>, <ast.Call object at 0x7da1b0962d70>]]]]]]
|
keyword[def] identifier[renumber] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[algorithms] . identifier[lis] keyword[import] identifier[longest_increasing_subsequence]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[grouper] keyword[import] identifier[Grouper]
identifier[p] = identifier[OptionParser] ( identifier[renumber] . identifier[__doc__] )
identifier[p] . identifier[set_annot_reformat_opts] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[bedfile] ,= identifier[args]
identifier[pf] = identifier[bedfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[abedfile] = identifier[pf] + literal[string]
identifier[bbedfile] = identifier[pf] + literal[string]
keyword[if] identifier[need_update] ( identifier[bedfile] ,( identifier[abedfile] , identifier[bbedfile] )):
identifier[prepare] ( identifier[bedfile] )
identifier[mbed] = identifier[Bed] ( identifier[bbedfile] )
identifier[g] = identifier[Grouper] ()
keyword[for] identifier[s] keyword[in] identifier[mbed] :
identifier[accn] = identifier[s] . identifier[accn]
identifier[g] . identifier[join] (* identifier[accn] . identifier[split] ( literal[string] ))
identifier[bed] = identifier[Bed] ( identifier[abedfile] )
keyword[for] identifier[chr] , identifier[sbed] keyword[in] identifier[bed] . identifier[sub_beds] ():
identifier[current_chr] = identifier[chr_number] ( identifier[chr] )
keyword[if] keyword[not] identifier[current_chr] :
keyword[continue]
identifier[ranks] =[]
identifier[gg] = identifier[set] ()
keyword[for] identifier[s] keyword[in] identifier[sbed] :
identifier[accn] = identifier[s] . identifier[accn]
identifier[achr] , identifier[arank] = identifier[atg_name] ( identifier[accn] )
keyword[if] identifier[achr] != identifier[current_chr] :
keyword[continue]
identifier[ranks] . identifier[append] ( identifier[arank] )
identifier[gg] . identifier[add] ( identifier[accn] )
identifier[lranks] = identifier[longest_increasing_subsequence] ( identifier[ranks] )
identifier[print] ( identifier[current_chr] , identifier[len] ( identifier[sbed] ), literal[string] , identifier[len] ( identifier[ranks] ), literal[string] , identifier[len] ( identifier[lranks] ), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[granks] = identifier[set] ( identifier[gene_name] ( identifier[current_chr] , identifier[x] , identifier[prefix] = identifier[opts] . identifier[prefix] , identifier[pad0] = identifier[opts] . identifier[pad0] , identifier[uc] = identifier[opts] . identifier[uc] ) keyword[for] identifier[x] keyword[in] identifier[lranks] )| identifier[set] ( identifier[gene_name] ( identifier[current_chr] , identifier[x] , identifier[prefix] = identifier[opts] . identifier[prefix] , identifier[pad0] = identifier[opts] . identifier[pad0] , identifier[sep] = literal[string] , identifier[uc] = identifier[opts] . identifier[uc] ) keyword[for] identifier[x] keyword[in] identifier[lranks] )
identifier[tagstore] ={}
keyword[for] identifier[s] keyword[in] identifier[sbed] :
identifier[achr] , identifier[arank] = identifier[atg_name] ( identifier[s] . identifier[accn] )
identifier[accn] = identifier[s] . identifier[accn]
keyword[if] identifier[accn] keyword[in] identifier[granks] :
identifier[tag] =( identifier[accn] , identifier[FRAME] )
keyword[elif] identifier[accn] keyword[in] identifier[gg] :
identifier[tag] =( identifier[accn] , identifier[RETAIN] )
keyword[else] :
identifier[tag] =( literal[string] , identifier[NEW] )
identifier[tagstore] [ identifier[accn] ]= identifier[tag]
keyword[for] identifier[s] keyword[in] identifier[sbed] :
identifier[accn] = identifier[s] . identifier[accn]
identifier[gaccn] = identifier[g] [ identifier[accn] ]
identifier[tags] =[(( identifier[tagstore] [ identifier[x] ][- literal[int] ] keyword[if] identifier[x] keyword[in] identifier[tagstore] keyword[else] identifier[NEW] ), identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[gaccn] ]
identifier[group] =[( identifier[PRIORITY] . identifier[index] ( identifier[tag] ), identifier[x] ) keyword[for] identifier[tag] , identifier[x] keyword[in] identifier[tags] ]
identifier[best] = identifier[min] ( identifier[group] )[- literal[int] ]
keyword[if] identifier[accn] != identifier[best] :
identifier[tag] =( identifier[best] , identifier[OVERLAP] )
keyword[else] :
identifier[tag] = identifier[tagstore] [ identifier[accn] ]
identifier[print] ( literal[string] . identifier[join] (( identifier[str] ( identifier[s] ), literal[string] . identifier[join] ( identifier[tag] ))))
|
def renumber(args):
"""
%prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates.
"""
from jcvi.algorithms.lis import longest_increasing_subsequence
from jcvi.utils.grouper import Grouper
p = OptionParser(renumber.__doc__)
p.set_annot_reformat_opts()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(bedfile,) = args
pf = bedfile.rsplit('.', 1)[0]
abedfile = pf + '.a.bed'
bbedfile = pf + '.b.bed'
if need_update(bedfile, (abedfile, bbedfile)):
prepare(bedfile) # depends on [control=['if'], data=[]]
mbed = Bed(bbedfile)
g = Grouper()
for s in mbed:
accn = s.accn
g.join(*accn.split(';')) # depends on [control=['for'], data=['s']]
bed = Bed(abedfile)
for (chr, sbed) in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue # depends on [control=['if'], data=[]]
ranks = []
gg = set()
for s in sbed:
accn = s.accn
(achr, arank) = atg_name(accn)
if achr != current_chr:
continue # depends on [control=['if'], data=[]]
ranks.append(arank)
gg.add(accn) # depends on [control=['for'], data=['s']]
lranks = longest_increasing_subsequence(ranks)
print(current_chr, len(sbed), '==>', len(ranks), '==>', len(lranks), file=sys.stderr)
granks = set((gene_name(current_chr, x, prefix=opts.prefix, pad0=opts.pad0, uc=opts.uc) for x in lranks)) | set((gene_name(current_chr, x, prefix=opts.prefix, pad0=opts.pad0, sep='te', uc=opts.uc) for x in lranks))
tagstore = {}
for s in sbed:
(achr, arank) = atg_name(s.accn)
accn = s.accn
if accn in granks:
tag = (accn, FRAME) # depends on [control=['if'], data=['accn']]
elif accn in gg:
tag = (accn, RETAIN) # depends on [control=['if'], data=['accn']]
else:
tag = ('.', NEW)
tagstore[accn] = tag # depends on [control=['for'], data=['s']]
# Find cases where genes overlap
for s in sbed:
accn = s.accn
gaccn = g[accn]
tags = [(tagstore[x][-1] if x in tagstore else NEW, x) for x in gaccn]
group = [(PRIORITY.index(tag), x) for (tag, x) in tags]
best = min(group)[-1]
if accn != best:
tag = (best, OVERLAP) # depends on [control=['if'], data=['best']]
else:
tag = tagstore[accn]
print('\t'.join((str(s), '|'.join(tag)))) # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=[]]
|
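The longest increasing subsequence is what keeps gene ranks monotone along a chromosome: ranks that fall outside the LIS get retagged. jcvi ships its own implementation; the O(n^2) sketch below only illustrates the idea on invented ranks.

def longest_increasing_subsequence(seq):
    # Reference version for illustration; not the jcvi implementation.
    best = [[] for _ in seq]
    for i, x in enumerate(seq):
        best[i] = [x]
        for j in range(i):
            if seq[j] < x and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [x]
    return max(best, key=len) if seq else []

print(longest_increasing_subsequence([10, 20, 5, 30, 25, 40]))
# [10, 20, 30, 40] -- the out-of-order ranks 5 and 25 are dropped
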
def _parseExportDirectory(self, rva, size, magic = consts.PE32):
"""
Parses the C{IMAGE_EXPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageExportTable}
@return: A new L{ImageExportTable} object.
"""
data = self.getDataAtRva(rva, size)
rd = utils.ReadData(data)
iet = directories.ImageExportTable.parse(rd)
auxFunctionRvaArray = list()
numberOfNames = iet.numberOfNames.value
addressOfNames = iet.addressOfNames.value
addressOfNameOrdinals = iet.addressOfNameOrdinals.value
addressOfFunctions = iet.addressOfFunctions.value
# populate the auxFunctionRvaArray
for i in xrange(iet.numberOfFunctions.value):
auxFunctionRvaArray.append(self.getDwordAtRva(addressOfFunctions).value)
addressOfFunctions += datatypes.DWORD().sizeof()
for i in xrange(numberOfNames):
nameRva = self.getDwordAtRva(addressOfNames).value
nameOrdinal = self.getWordAtRva(addressOfNameOrdinals).value
exportName = self.readStringAtRva(nameRva).value
entry = directories.ExportTableEntry()
ordinal = nameOrdinal + iet.base.value
#print "Ordinal value: %d" % ordinal
entry.ordinal.value = ordinal
            entry.nameOrdinal.value = nameOrdinal
entry.nameRva.value = nameRva
entry.name.value = exportName
entry.functionRva.value = auxFunctionRvaArray[nameOrdinal]
iet.exportTable.append(entry)
addressOfNames += datatypes.DWORD().sizeof()
addressOfNameOrdinals += datatypes.WORD().sizeof()
#print "export table length: %d" % len(iet.exportTable)
#print "auxFunctionRvaArray: %r" % auxFunctionRvaArray
for i in xrange(iet.numberOfFunctions.value):
#print "auxFunctionRvaArray[%d]: %x" % (i, auxFunctionRvaArray[i])
if auxFunctionRvaArray[i] != iet.exportTable[i].functionRva.value:
entry = directories.ExportTableEntry()
entry.functionRva.value = auxFunctionRvaArray[i]
entry.ordinal.value = iet.base.value + i
iet.exportTable.append(entry)
#print "export table length: %d" % len(iet.exportTable)
        iet.exportTable.sort(key=lambda entry: entry.ordinal)
return iet
|
def function[_parseExportDirectory, parameter[self, rva, size, magic]]:
constant[
Parses the C{IMAGE_EXPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageExportTable}
@return: A new L{ImageExportTable} object.
]
variable[data] assign[=] call[name[self].getDataAtRva, parameter[name[rva], name[size]]]
variable[rd] assign[=] call[name[utils].ReadData, parameter[name[data]]]
variable[iet] assign[=] call[name[directories].ImageExportTable.parse, parameter[name[rd]]]
variable[auxFunctionRvaArray] assign[=] call[name[list], parameter[]]
variable[numberOfNames] assign[=] name[iet].numberOfNames.value
variable[addressOfNames] assign[=] name[iet].addressOfNames.value
variable[addressOfNameOrdinals] assign[=] name[iet].addressOfNameOrdinals.value
variable[addressOfFunctions] assign[=] name[iet].addressOfFunctions.value
for taget[name[i]] in starred[call[name[xrange], parameter[name[iet].numberOfFunctions.value]]] begin[:]
call[name[auxFunctionRvaArray].append, parameter[call[name[self].getDwordAtRva, parameter[name[addressOfFunctions]]].value]]
<ast.AugAssign object at 0x7da18dc98370>
for taget[name[i]] in starred[call[name[xrange], parameter[name[numberOfNames]]]] begin[:]
variable[nameRva] assign[=] call[name[self].getDwordAtRva, parameter[name[addressOfNames]]].value
variable[nameOrdinal] assign[=] call[name[self].getWordAtRva, parameter[name[addressOfNameOrdinals]]].value
variable[exportName] assign[=] call[name[self].readStringAtRva, parameter[name[nameRva]]].value
variable[entry] assign[=] call[name[directories].ExportTableEntry, parameter[]]
variable[ordinal] assign[=] binary_operation[name[nameOrdinal] + name[iet].base.value]
name[entry].ordinal.value assign[=] name[ordinal]
name[entry].nameOrdinal.value assign[=] name[nameOrdinal]
name[entry].nameRva.value assign[=] name[nameRva]
name[entry].name.value assign[=] name[exportName]
name[entry].functionRva.value assign[=] call[name[auxFunctionRvaArray]][name[nameOrdinal]]
call[name[iet].exportTable.append, parameter[name[entry]]]
<ast.AugAssign object at 0x7da1b26ae980>
<ast.AugAssign object at 0x7da1b26ac1c0>
for taget[name[i]] in starred[call[name[xrange], parameter[name[iet].numberOfFunctions.value]]] begin[:]
if compare[call[name[auxFunctionRvaArray]][name[i]] not_equal[!=] call[name[iet].exportTable][name[i]].functionRva.value] begin[:]
variable[entry] assign[=] call[name[directories].ExportTableEntry, parameter[]]
name[entry].functionRva.value assign[=] call[name[auxFunctionRvaArray]][name[i]]
name[entry].ordinal.value assign[=] binary_operation[name[iet].base.value + name[i]]
call[name[iet].exportTable.append, parameter[name[entry]]]
call[name[iet].exportTable.sort, parameter[]]
return[name[iet]]
|
keyword[def] identifier[_parseExportDirectory] ( identifier[self] , identifier[rva] , identifier[size] , identifier[magic] = identifier[consts] . identifier[PE32] ):
literal[string]
identifier[data] = identifier[self] . identifier[getDataAtRva] ( identifier[rva] , identifier[size] )
identifier[rd] = identifier[utils] . identifier[ReadData] ( identifier[data] )
identifier[iet] = identifier[directories] . identifier[ImageExportTable] . identifier[parse] ( identifier[rd] )
identifier[auxFunctionRvaArray] = identifier[list] ()
identifier[numberOfNames] = identifier[iet] . identifier[numberOfNames] . identifier[value]
identifier[addressOfNames] = identifier[iet] . identifier[addressOfNames] . identifier[value]
identifier[addressOfNameOrdinals] = identifier[iet] . identifier[addressOfNameOrdinals] . identifier[value]
identifier[addressOfFunctions] = identifier[iet] . identifier[addressOfFunctions] . identifier[value]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[iet] . identifier[numberOfFunctions] . identifier[value] ):
identifier[auxFunctionRvaArray] . identifier[append] ( identifier[self] . identifier[getDwordAtRva] ( identifier[addressOfFunctions] ). identifier[value] )
identifier[addressOfFunctions] += identifier[datatypes] . identifier[DWORD] (). identifier[sizeof] ()
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[numberOfNames] ):
identifier[nameRva] = identifier[self] . identifier[getDwordAtRva] ( identifier[addressOfNames] ). identifier[value]
identifier[nameOrdinal] = identifier[self] . identifier[getWordAtRva] ( identifier[addressOfNameOrdinals] ). identifier[value]
identifier[exportName] = identifier[self] . identifier[readStringAtRva] ( identifier[nameRva] ). identifier[value]
identifier[entry] = identifier[directories] . identifier[ExportTableEntry] ()
identifier[ordinal] = identifier[nameOrdinal] + identifier[iet] . identifier[base] . identifier[value]
identifier[entry] . identifier[ordinal] . identifier[value] = identifier[ordinal]
identifier[entry] . identifier[nameOrdinal] . identifier[value] = identifier[nameOrdinal]
identifier[entry] . identifier[nameRva] . identifier[value] = identifier[nameRva]
identifier[entry] . identifier[name] . identifier[value] = identifier[exportName]
identifier[entry] . identifier[functionRva] . identifier[value] = identifier[auxFunctionRvaArray] [ identifier[nameOrdinal] ]
identifier[iet] . identifier[exportTable] . identifier[append] ( identifier[entry] )
identifier[addressOfNames] += identifier[datatypes] . identifier[DWORD] (). identifier[sizeof] ()
identifier[addressOfNameOrdinals] += identifier[datatypes] . identifier[WORD] (). identifier[sizeof] ()
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[iet] . identifier[numberOfFunctions] . identifier[value] ):
keyword[if] identifier[auxFunctionRvaArray] [ identifier[i] ]!= identifier[iet] . identifier[exportTable] [ identifier[i] ]. identifier[functionRva] . identifier[value] :
identifier[entry] = identifier[directories] . identifier[ExportTableEntry] ()
identifier[entry] . identifier[functionRva] . identifier[value] = identifier[auxFunctionRvaArray] [ identifier[i] ]
identifier[entry] . identifier[ordinal] . identifier[value] = identifier[iet] . identifier[base] . identifier[value] + identifier[i]
identifier[iet] . identifier[exportTable] . identifier[append] ( identifier[entry] )
identifier[iet] . identifier[exportTable] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[entry] : identifier[entry] . identifier[ordinal] )
keyword[return] identifier[iet]
|
def _parseExportDirectory(self, rva, size, magic=consts.PE32):
"""
Parses the C{IMAGE_EXPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageExportTable}
@return: A new L{ImageExportTable} object.
"""
data = self.getDataAtRva(rva, size)
rd = utils.ReadData(data)
iet = directories.ImageExportTable.parse(rd)
auxFunctionRvaArray = list()
numberOfNames = iet.numberOfNames.value
addressOfNames = iet.addressOfNames.value
addressOfNameOrdinals = iet.addressOfNameOrdinals.value
addressOfFunctions = iet.addressOfFunctions.value
# populate the auxFunctionRvaArray
for i in xrange(iet.numberOfFunctions.value):
auxFunctionRvaArray.append(self.getDwordAtRva(addressOfFunctions).value)
addressOfFunctions += datatypes.DWORD().sizeof() # depends on [control=['for'], data=[]]
for i in xrange(numberOfNames):
nameRva = self.getDwordAtRva(addressOfNames).value
nameOrdinal = self.getWordAtRva(addressOfNameOrdinals).value
exportName = self.readStringAtRva(nameRva).value
entry = directories.ExportTableEntry()
ordinal = nameOrdinal + iet.base.value
#print "Ordinal value: %d" % ordinal
entry.ordinal.value = ordinal
        entry.nameOrdinal.value = nameOrdinal
entry.nameRva.value = nameRva
entry.name.value = exportName
entry.functionRva.value = auxFunctionRvaArray[nameOrdinal]
iet.exportTable.append(entry)
addressOfNames += datatypes.DWORD().sizeof()
addressOfNameOrdinals += datatypes.WORD().sizeof() # depends on [control=['for'], data=[]]
#print "export table length: %d" % len(iet.exportTable)
#print "auxFunctionRvaArray: %r" % auxFunctionRvaArray
for i in xrange(iet.numberOfFunctions.value):
#print "auxFunctionRvaArray[%d]: %x" % (i, auxFunctionRvaArray[i])
if auxFunctionRvaArray[i] != iet.exportTable[i].functionRva.value:
entry = directories.ExportTableEntry()
entry.functionRva.value = auxFunctionRvaArray[i]
entry.ordinal.value = iet.base.value + i
iet.exportTable.append(entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
#print "export table length: %d" % len(iet.exportTable)
    iet.exportTable.sort(key=lambda entry: entry.ordinal)
return iet
|
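The ordinal arithmetic is the easy part to get wrong: AddressOfNameOrdinals holds zero-based indices into the function RVA array, and the exported ordinal is that index plus the table's Base. A toy illustration with invented values:

base = 1                                    # iet.base.value
function_rvas = [0x1000, 0x1010, 0x1020]    # auxFunctionRvaArray
name_ordinals = [0, 2]                      # one entry per exported name
for no in name_ordinals:
    print('ordinal', no + base, '-> RVA', hex(function_rvas[no]))
# ordinal 1 -> RVA 0x1000
# ordinal 3 -> RVA 0x1020
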
def cmd_xor(k, i, o):
"""XOR cipher.
Note: XOR is not a 'secure cipher'. If you need strong crypto you must use
algorithms like AES. You can use habu.fernet for that.
Example:
\b
$ habu.xor -k mysecretkey -i /bin/ls > xored
$ habu.xor -k mysecretkey -i xored > uxored
$ sha1sum /bin/ls uxored
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb /bin/ls
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb uxored
"""
o.write(xor(i.read(), k.encode()))
|
def function[cmd_xor, parameter[k, i, o]]:
constant[XOR cipher.
Note: XOR is not a 'secure cipher'. If you need strong crypto you must use
algorithms like AES. You can use habu.fernet for that.
Example:
$ habu.xor -k mysecretkey -i /bin/ls > xored
$ habu.xor -k mysecretkey -i xored > uxored
$ sha1sum /bin/ls uxored
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb /bin/ls
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb uxored
]
call[name[o].write, parameter[call[name[xor], parameter[call[name[i].read, parameter[]], call[name[k].encode, parameter[]]]]]]
|
keyword[def] identifier[cmd_xor] ( identifier[k] , identifier[i] , identifier[o] ):
literal[string]
identifier[o] . identifier[write] ( identifier[xor] ( identifier[i] . identifier[read] (), identifier[k] . identifier[encode] ()))
|
def cmd_xor(k, i, o):
"""XOR cipher.
Note: XOR is not a 'secure cipher'. If you need strong crypto you must use
algorithms like AES. You can use habu.fernet for that.
Example:
\x08
$ habu.xor -k mysecretkey -i /bin/ls > xored
$ habu.xor -k mysecretkey -i xored > uxored
$ sha1sum /bin/ls uxored
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb /bin/ls
$ 6fcf930fcee1395a1c95f87dd38413e02deff4bb uxored
"""
o.write(xor(i.read(), k.encode()))
|
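The xor() helper is defined elsewhere in habu; the sketch below is a plausible reimplementation, assuming the key is cycled over the data, which is what makes the command its own inverse as the docstring's round-trip shows.

from itertools import cycle

def xor(data, key):
    # Hypothetical reimplementation for illustration, not habu's actual code.
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

blob = xor(b'hello', b'mysecretkey')
print(xor(blob, b'mysecretkey'))   # b'hello' -- applying XOR twice round-trips
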
def get_for(self, historics_id, with_estimate=None):
""" Get the historic query for the given ID
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget
:param historics_id: playback id of the query
:type historics_id: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self.get(historics_id, maximum=None, page=None, with_estimate=with_estimate)
|
def function[get_for, parameter[self, historics_id, with_estimate]]:
constant[ Get the historic query for the given ID
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget
:param historics_id: playback id of the query
:type historics_id: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
]
return[call[name[self].get, parameter[name[historics_id]]]]
|
keyword[def] identifier[get_for] ( identifier[self] , identifier[historics_id] , identifier[with_estimate] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( identifier[historics_id] , identifier[maximum] = keyword[None] , identifier[page] = keyword[None] , identifier[with_estimate] = identifier[with_estimate] )
|
def get_for(self, historics_id, with_estimate=None):
""" Get the historic query for the given ID
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget
:param historics_id: playback id of the query
:type historics_id: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self.get(historics_id, maximum=None, page=None, with_estimate=with_estimate)
|
def put_target_state():
"""SDP target State.
Sets the target state
"""
sdp_state = SDPState()
errval, errdict = _check_status(sdp_state)
if errval == "error":
LOG.debug(errdict['reason'])
rdict = dict(
current_state="unknown",
last_updated="unknown",
reason=errdict['reason']
)
else:
try:
LOG.debug('request is of type %s', type(request))
request_data = request.data
LOG.debug('request data is of type %s', type(request_data))
LOG.debug('request is %s', request_data)
target_state = request_data['value'].lower()
sdp_state.update_target_state(target_state)
rdict = dict(message='Target state successfully updated to {}'
.format(target_state))
except ValueError as error:
rdict = dict(error='Failed to set target state',
reason=str(error))
except RuntimeError as error:
            rdict = dict(error='Runtime error',
reason=str(error))
return rdict
|
def function[put_target_state, parameter[]]:
constant[SDP target state.
Sets the target state.
]
variable[sdp_state] assign[=] call[name[SDPState], parameter[]]
<ast.Tuple object at 0x7da20c7962f0> assign[=] call[name[_check_status], parameter[name[sdp_state]]]
if compare[name[errval] equal[==] constant[error]] begin[:]
call[name[LOG].debug, parameter[call[name[errdict]][constant[reason]]]]
variable[rdict] assign[=] call[name[dict], parameter[]]
return[name[rdict]]
|
keyword[def] identifier[put_target_state] ():
literal[string]
identifier[sdp_state] = identifier[SDPState] ()
identifier[errval] , identifier[errdict] = identifier[_check_status] ( identifier[sdp_state] )
keyword[if] identifier[errval] == literal[string] :
identifier[LOG] . identifier[debug] ( identifier[errdict] [ literal[string] ])
identifier[rdict] = identifier[dict] (
identifier[current_state] = literal[string] ,
identifier[last_updated] = literal[string] ,
identifier[reason] = identifier[errdict] [ literal[string] ]
)
keyword[else] :
keyword[try] :
identifier[LOG] . identifier[debug] ( literal[string] , identifier[type] ( identifier[request] ))
identifier[request_data] = identifier[request] . identifier[data]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[type] ( identifier[request_data] ))
identifier[LOG] . identifier[debug] ( literal[string] , identifier[request_data] )
identifier[target_state] = identifier[request_data] [ literal[string] ]. identifier[lower] ()
identifier[sdp_state] . identifier[update_target_state] ( identifier[target_state] )
identifier[rdict] = identifier[dict] ( identifier[message] = literal[string]
. identifier[format] ( identifier[target_state] ))
keyword[except] identifier[ValueError] keyword[as] identifier[error] :
identifier[rdict] = identifier[dict] ( identifier[error] = literal[string] ,
identifier[reason] = identifier[str] ( identifier[error] ))
keyword[except] identifier[RuntimeError] keyword[as] identifier[error] :
identifier[rdict] = identifier[dict] ( identifier[error] = literal[string] ,
identifier[reason] = identifier[str] ( identifier[error] ))
keyword[return] identifier[rdict]
|
def put_target_state():
"""SDP target State.
Sets the target state
"""
sdp_state = SDPState()
(errval, errdict) = _check_status(sdp_state)
if errval == 'error':
LOG.debug(errdict['reason'])
rdict = dict(current_state='unknown', last_updated='unknown', reason=errdict['reason']) # depends on [control=['if'], data=[]]
else:
try:
LOG.debug('request is of type %s', type(request))
request_data = request.data
LOG.debug('request data is of type %s', type(request_data))
LOG.debug('request is %s', request_data)
target_state = request_data['value'].lower()
sdp_state.update_target_state(target_state)
rdict = dict(message='Target state successfully updated to {}'.format(target_state)) # depends on [control=['try'], data=[]]
except ValueError as error:
rdict = dict(error='Failed to set target state', reason=str(error)) # depends on [control=['except'], data=['error']]
except RuntimeError as error:
        rdict = dict(error='Runtime error', reason=str(error)) # depends on [control=['except'], data=['error']]
return rdict
|
def number_aware_alphabetical_cmp(str1, str2):
""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
    instead of foo1, foo10, foo11, foo2
"""
def flatten_tokens(tokens):
l = []
for token in tokens:
if isinstance(token, str):
for char in token:
l.append(char)
else:
assert isinstance(token, float)
l.append(token)
return l
seq1 = flatten_tokens(tokenize_by_number(str1))
seq2 = flatten_tokens(tokenize_by_number(str2))
    l = min(len(seq1), len(seq2))
i = 0
while i < l:
if seq1[i] < seq2[i]:
return -1
elif seq1[i] > seq2[i]:
return 1
i += 1
if len(seq1) < len(seq2):
return -1
elif len(seq1) > len(seq2):
return 1
return 0
|
def function[number_aware_alphabetical_cmp, parameter[str1, str2]]:
constant[ cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10, foo11, foo2
]
def function[flatten_tokens, parameter[tokens]]:
variable[l] assign[=] list[[]]
for taget[name[token]] in starred[name[tokens]] begin[:]
if call[name[isinstance], parameter[name[token], name[str]]] begin[:]
for taget[name[char]] in starred[name[token]] begin[:]
call[name[l].append, parameter[name[char]]]
return[name[l]]
variable[seq1] assign[=] call[name[flatten_tokens], parameter[call[name[tokenize_by_number], parameter[name[str1]]]]]
variable[seq2] assign[=] call[name[flatten_tokens], parameter[call[name[tokenize_by_number], parameter[name[str2]]]]]
variable[l] assign[=] call[name[min], parameter[call[name[len], parameter[name[seq1]]], call[name[len], parameter[name[seq2]]]]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] name[l]] begin[:]
if compare[call[name[seq1]][name[i]] less[<] call[name[seq2]][name[i]]] begin[:]
return[<ast.UnaryOp object at 0x7da1b26ae0b0>]
<ast.AugAssign object at 0x7da1b26acb50>
if compare[call[name[len], parameter[name[seq1]]] less[<] call[name[len], parameter[name[seq2]]]] begin[:]
return[<ast.UnaryOp object at 0x7da1b26adc60>]
return[constant[0]]
|
keyword[def] identifier[number_aware_alphabetical_cmp] ( identifier[str1] , identifier[str2] ):
literal[string]
keyword[def] identifier[flatten_tokens] ( identifier[tokens] ):
identifier[l] =[]
keyword[for] identifier[token] keyword[in] identifier[tokens] :
keyword[if] identifier[isinstance] ( identifier[token] , identifier[str] ):
keyword[for] identifier[char] keyword[in] identifier[token] :
identifier[l] . identifier[append] ( identifier[char] )
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[token] , identifier[float] )
identifier[l] . identifier[append] ( identifier[token] )
keyword[return] identifier[l]
identifier[seq1] = identifier[flatten_tokens] ( identifier[tokenize_by_number] ( identifier[str1] ))
identifier[seq2] = identifier[flatten_tokens] ( identifier[tokenize_by_number] ( identifier[str2] ))
identifier[l] = identifier[min] ( identifier[len] ( identifier[seq1] ), identifier[len] ( identifier[seq2] ))
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[l] :
keyword[if] identifier[seq1] [ identifier[i] ]< identifier[seq2] [ identifier[i] ]:
keyword[return] - literal[int]
keyword[elif] identifier[seq1] [ identifier[i] ]> identifier[seq2] [ identifier[i] ]:
keyword[return] literal[int]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[seq1] )< identifier[len] ( identifier[seq2] ):
keyword[return] - literal[int]
keyword[elif] identifier[len] ( identifier[seq1] )> identifier[len] ( identifier[seq2] ):
keyword[return] literal[int]
keyword[return] literal[int]
|
def number_aware_alphabetical_cmp(str1, str2):
""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
        instead of foo1, foo10, foo11, foo2
"""
def flatten_tokens(tokens):
l = []
for token in tokens:
if isinstance(token, str):
for char in token:
l.append(char) # depends on [control=['for'], data=['char']] # depends on [control=['if'], data=[]]
else:
assert isinstance(token, float)
l.append(token) # depends on [control=['for'], data=['token']]
return l
seq1 = flatten_tokens(tokenize_by_number(str1))
seq2 = flatten_tokens(tokenize_by_number(str2))
l = min(len(seq1), len(seq2))
i = 0
while i < l:
if seq1[i] < seq2[i]:
return -1 # depends on [control=['if'], data=[]]
elif seq1[i] > seq2[i]:
return 1 # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['while'], data=['i']]
if len(seq1) < len(seq2):
return -1 # depends on [control=['if'], data=[]]
elif len(seq1) > len(seq2):
return 1 # depends on [control=['if'], data=[]]
return 0
|
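# Usage sketch for number_aware_alphabetical_cmp above. The helper
# tokenize_by_number is not shown in this excerpt, so a minimal stand-in is
# assumed here: it splits a string into text chunks and float values.
import functools
import re

def tokenize_by_number(s):
    # stand-in tokenizer: runs of digits become floats, everything else stays text
    return [float(tok) if tok.isdigit() else tok for tok in re.findall(r'\d+|\D+', s)]

names = ['foo10', 'foo2', 'foo1', 'foo11']
print(sorted(names, key=functools.cmp_to_key(number_aware_alphabetical_cmp)))
# -> ['foo1', 'foo2', 'foo10', 'foo11']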
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match(r".*:\d+$", host):
host = "%s:%d" % (host, DEFAULT_ZK_PORT)
client = KazooClient(hosts=host)
client.start()
if scheme != "":
client.add_auth(scheme, credential)
return client
|
def function[zk_client, parameter[host, scheme, credential]]:
constant[ returns a connected (and possibly authenticated) ZK client ]
if <ast.UnaryOp object at 0x7da18f00ee60> begin[:]
variable[host] assign[=] binary_operation[constant[%s:%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00c6a0>, <ast.Name object at 0x7da18f00c3a0>]]]
variable[client] assign[=] call[name[KazooClient], parameter[]]
call[name[client].start, parameter[]]
if compare[name[scheme] not_equal[!=] constant[]] begin[:]
call[name[client].add_auth, parameter[name[scheme], name[credential]]]
return[name[client]]
|
keyword[def] identifier[zk_client] ( identifier[host] , identifier[scheme] , identifier[credential] ):
literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[host] ):
identifier[host] = literal[string] %( identifier[host] , identifier[DEFAULT_ZK_PORT] )
identifier[client] = identifier[KazooClient] ( identifier[hosts] = identifier[host] )
identifier[client] . identifier[start] ()
keyword[if] identifier[scheme] != literal[string] :
identifier[client] . identifier[add_auth] ( identifier[scheme] , identifier[credential] )
keyword[return] identifier[client]
|
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match('.*:\\d+$', host):
host = '%s:%d' % (host, DEFAULT_ZK_PORT) # depends on [control=['if'], data=[]]
client = KazooClient(hosts=host)
client.start()
if scheme != '':
client.add_auth(scheme, credential) # depends on [control=['if'], data=['scheme']]
return client
|
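# Hedged usage sketch for zk_client above: DEFAULT_ZK_PORT is referenced but
# not defined in this excerpt, so a conventional value is assumed; a reachable
# ZooKeeper server and the kazoo package are also required.
DEFAULT_ZK_PORT = 2181  # assumed default, not taken from this excerpt
client = zk_client('localhost', 'digest', 'user:secret')
try:
    print(client.get_children('/'))
finally:
    client.stop()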
def get_release_id_data(self, release_id: bytes) -> Tuple[str, str, str]:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
release id, *if* it is available on the current registry.
* Parameters:
* ``release_id``: 32 byte release identifier
"""
self._validate_set_registry()
return self.registry._get_release_data(release_id)
|
def function[get_release_id_data, parameter[self, release_id]]:
constant[
Returns ``(package_name, version, manifest_uri)`` associated with the given
release id, *if* it is available on the current registry.
* Parameters:
* ``release_id``: 32 byte release identifier
]
call[name[self]._validate_set_registry, parameter[]]
return[call[name[self].registry._get_release_data, parameter[name[release_id]]]]
|
keyword[def] identifier[get_release_id_data] ( identifier[self] , identifier[release_id] : identifier[bytes] )-> identifier[Tuple] [ identifier[str] , identifier[str] , identifier[str] ]:
literal[string]
identifier[self] . identifier[_validate_set_registry] ()
keyword[return] identifier[self] . identifier[registry] . identifier[_get_release_data] ( identifier[release_id] )
|
def get_release_id_data(self, release_id: bytes) -> Tuple[str, str, str]:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
release id, *if* it is available on the current registry.
* Parameters:
* ``release_id``: 32 byte release identifier
"""
self._validate_set_registry()
return self.registry._get_release_data(release_id)
|
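# Hedged usage sketch: get_release_id_data is a method on a registry-aware
# package-manager class; the instance name `pm` and the release id below are
# illustrative, not taken from this excerpt.
release_id = bytes(32)  # a 32-byte release identifier
package_name, version, manifest_uri = pm.get_release_id_data(release_id)
print(package_name, version, manifest_uri)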
def build(self, **kw):
"""A null "builder" for directories."""
global MkdirBuilder
if self.builder is not MkdirBuilder:
SCons.Node.Node.build(self, **kw)
|
def function[build, parameter[self]]:
constant[A null "builder" for directories.]
<ast.Global object at 0x7da204347f10>
if compare[name[self].builder is_not name[MkdirBuilder]] begin[:]
call[name[SCons].Node.Node.build, parameter[name[self]]]
|
keyword[def] identifier[build] ( identifier[self] ,** identifier[kw] ):
literal[string]
keyword[global] identifier[MkdirBuilder]
keyword[if] identifier[self] . identifier[builder] keyword[is] keyword[not] identifier[MkdirBuilder] :
identifier[SCons] . identifier[Node] . identifier[Node] . identifier[build] ( identifier[self] ,** identifier[kw] )
|
def build(self, **kw):
"""A null "builder" for directories."""
global MkdirBuilder
if self.builder is not MkdirBuilder:
SCons.Node.Node.build(self, **kw) # depends on [control=['if'], data=[]]
|
def _repeat_length(cls, part):
"""
The length of the repeated portions of ``part``.
:param part: a number
:type part: list of int
:returns: the first index at which part repeats
:rtype: int
If part does not repeat, result is the length of part.
Complexity: O(len(part)^2)
"""
repeat_len = len(part)
if repeat_len == 0:
return repeat_len
first_digit = part[0]
limit = repeat_len // 2 + 1
indices = (i for i in range(1, limit) if part[i] == first_digit)
for index in indices:
(quot, rem) = divmod(repeat_len, index)
if rem == 0:
first_chunk = part[0:index]
if all(first_chunk == part[x:x + index] \
for x in range(index, quot * index, index)):
return index
return repeat_len
|
def function[_repeat_length, parameter[cls, part]]:
constant[
The length of the repeated portions of ``part``.
:param part: a number
:type part: list of int
:returns: the first index at which part repeats
:rtype: int
If part does not repeat, result is the length of part.
Complexity: O(len(part)^2)
]
variable[repeat_len] assign[=] call[name[len], parameter[name[part]]]
if compare[name[repeat_len] equal[==] constant[0]] begin[:]
return[name[repeat_len]]
variable[first_digit] assign[=] call[name[part]][constant[0]]
variable[limit] assign[=] binary_operation[binary_operation[name[repeat_len] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] + constant[1]]
variable[indices] assign[=] <ast.GeneratorExp object at 0x7da1b094baf0>
for taget[name[index]] in starred[name[indices]] begin[:]
<ast.Tuple object at 0x7da1b094a860> assign[=] call[name[divmod], parameter[name[repeat_len], name[index]]]
if compare[name[rem] equal[==] constant[0]] begin[:]
variable[first_chunk] assign[=] call[name[part]][<ast.Slice object at 0x7da1b0949420>]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b09487c0>]] begin[:]
return[name[index]]
return[name[repeat_len]]
|
keyword[def] identifier[_repeat_length] ( identifier[cls] , identifier[part] ):
literal[string]
identifier[repeat_len] = identifier[len] ( identifier[part] )
keyword[if] identifier[repeat_len] == literal[int] :
keyword[return] identifier[repeat_len]
identifier[first_digit] = identifier[part] [ literal[int] ]
identifier[limit] = identifier[repeat_len] // literal[int] + literal[int]
identifier[indices] =( identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[limit] ) keyword[if] identifier[part] [ identifier[i] ]== identifier[first_digit] )
keyword[for] identifier[index] keyword[in] identifier[indices] :
( identifier[quot] , identifier[rem] )= identifier[divmod] ( identifier[repeat_len] , identifier[index] )
keyword[if] identifier[rem] == literal[int] :
identifier[first_chunk] = identifier[part] [ literal[int] : identifier[index] ]
keyword[if] identifier[all] ( identifier[first_chunk] == identifier[part] [ identifier[x] : identifier[x] + identifier[index] ] keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[index] , identifier[quot] * identifier[index] , identifier[index] )):
keyword[return] identifier[index]
keyword[return] identifier[repeat_len]
|
def _repeat_length(cls, part):
"""
The length of the repeated portions of ``part``.
:param part: a number
:type part: list of int
:returns: the first index at which part repeats
:rtype: int
If part does not repeat, result is the length of part.
Complexity: O(len(part)^2)
"""
repeat_len = len(part)
if repeat_len == 0:
return repeat_len # depends on [control=['if'], data=['repeat_len']]
first_digit = part[0]
limit = repeat_len // 2 + 1
indices = (i for i in range(1, limit) if part[i] == first_digit)
for index in indices:
(quot, rem) = divmod(repeat_len, index)
if rem == 0:
first_chunk = part[0:index]
if all((first_chunk == part[x:x + index] for x in range(index, quot * index, index))):
return index # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
return repeat_len
|
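# Worked example for _repeat_length above: the cls argument is unused by the
# body, so None can stand in for it when calling the bare function.
print(_repeat_length(None, [1, 2, 1, 2, 1, 2]))  # 2 (period-2 repeat)
print(_repeat_length(None, [1, 2, 3]))           # 3 (no shorter repeat)
print(_repeat_length(None, []))                  # 0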
def parents(self, id, level=None, featuretype=None, order_by=None,
reverse=False, completely_within=False, limit=None):
"""
Return parents of feature `id`.
{_relation_docstring}
"""
return self._relation(
id, join_on='parent', join_to='child', level=level,
featuretype=featuretype, order_by=order_by, reverse=reverse,
limit=limit, completely_within=completely_within)
|
def function[parents, parameter[self, id, level, featuretype, order_by, reverse, completely_within, limit]]:
constant[
Return parents of feature `id`.
{_relation_docstring}
]
return[call[name[self]._relation, parameter[name[id]]]]
|
keyword[def] identifier[parents] ( identifier[self] , identifier[id] , identifier[level] = keyword[None] , identifier[featuretype] = keyword[None] , identifier[order_by] = keyword[None] ,
identifier[reverse] = keyword[False] , identifier[completely_within] = keyword[False] , identifier[limit] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_relation] (
identifier[id] , identifier[join_on] = literal[string] , identifier[join_to] = literal[string] , identifier[level] = identifier[level] ,
identifier[featuretype] = identifier[featuretype] , identifier[order_by] = identifier[order_by] , identifier[reverse] = identifier[reverse] ,
identifier[limit] = identifier[limit] , identifier[completely_within] = identifier[completely_within] )
|
def parents(self, id, level=None, featuretype=None, order_by=None, reverse=False, completely_within=False, limit=None):
"""
Return parents of feature `id`.
{_relation_docstring}
"""
return self._relation(id, join_on='parent', join_to='child', level=level, featuretype=featuretype, order_by=order_by, reverse=reverse, limit=limit, completely_within=completely_within)
|
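# Hedged usage sketch: parents() matches the shape of gffutils' FeatureDB API;
# `db` and the feature id below are illustrative and assume a populated database.
for gene in db.parents('mRNA00001', level=1, featuretype='gene'):
    print(gene.id)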
def _get_plsr_dac_charge(self, plsr_dac_array, no_offset=False):
'''Takes the PlsrDAC calibration and the stored C-high/C-low mask to calculate the charge from the PlsrDAC array on a pixel basis
'''
charge = np.zeros_like(self.c_low_mask, dtype=np.float16) # charge in electrons
if self.vcal_c0 is not None and self.vcal_c1 is not None and self.c_low is not None and self.c_mid is not None and self.c_high is not None:
voltage = self.vcal_c1 * plsr_dac_array if no_offset else self.vcal_c0 + self.vcal_c1 * plsr_dac_array
charge[np.logical_and(self.c_low_mask, ~self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, ~self.c_high_mask)] * self.c_low / 0.16022
            # index voltage with the same mask as charge so both sides select the same pixels
            charge[np.logical_and(~self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(~self.c_low_mask, self.c_high_mask)] * self.c_mid / 0.16022
            charge[np.logical_and(self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, self.c_high_mask)] * self.c_high / 0.16022
return charge
|
def function[_get_plsr_dac_charge, parameter[self, plsr_dac_array, no_offset]]:
constant[Takes the PlsrDAC calibration and the stored C-high/C-low mask to calculate the charge from the PlsrDAC array on a pixel basis
]
variable[charge] assign[=] call[name[np].zeros_like, parameter[name[self].c_low_mask]]
if <ast.BoolOp object at 0x7da1b11c49d0> begin[:]
variable[voltage] assign[=] <ast.IfExp object at 0x7da1b11c6c80>
call[name[charge]][call[name[np].logical_and, parameter[name[self].c_low_mask, <ast.UnaryOp object at 0x7da1b11c5750>]]] assign[=] binary_operation[binary_operation[call[name[voltage]][call[name[np].logical_and, parameter[name[self].c_low_mask, <ast.UnaryOp object at 0x7da1b11c5030>]]] * name[self].c_low] / constant[0.16022]]
call[name[charge]][call[name[np].logical_and, parameter[<ast.UnaryOp object at 0x7da1b11c5240>, name[self].c_high_mask]]] assign[=] binary_operation[binary_operation[call[name[voltage]][call[name[np].logical_and, parameter[name[self].c_low_mask, <ast.UnaryOp object at 0x7da1b11c5780>]]] * name[self].c_mid] / constant[0.16022]]
call[name[charge]][call[name[np].logical_and, parameter[name[self].c_low_mask, name[self].c_high_mask]]] assign[=] binary_operation[binary_operation[call[name[voltage]][call[name[np].logical_and, parameter[name[self].c_low_mask, <ast.UnaryOp object at 0x7da1b1191de0>]]] * name[self].c_high] / constant[0.16022]]
return[name[charge]]
|
keyword[def] identifier[_get_plsr_dac_charge] ( identifier[self] , identifier[plsr_dac_array] , identifier[no_offset] = keyword[False] ):
literal[string]
identifier[charge] = identifier[np] . identifier[zeros_like] ( identifier[self] . identifier[c_low_mask] , identifier[dtype] = identifier[np] . identifier[float16] )
keyword[if] identifier[self] . identifier[vcal_c0] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[vcal_c1] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[c_low] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[c_mid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[c_high] keyword[is] keyword[not] keyword[None] :
identifier[voltage] = identifier[self] . identifier[vcal_c1] * identifier[plsr_dac_array] keyword[if] identifier[no_offset] keyword[else] identifier[self] . identifier[vcal_c0] + identifier[self] . identifier[vcal_c1] * identifier[plsr_dac_array]
identifier[charge] [ identifier[np] . identifier[logical_and] ( identifier[self] . identifier[c_low_mask] ,~ identifier[self] . identifier[c_high_mask] )]= identifier[voltage] [ identifier[np] . identifier[logical_and] ( identifier[self] . identifier[c_low_mask] ,~ identifier[self] . identifier[c_high_mask] )]* identifier[self] . identifier[c_low] / literal[int]
identifier[charge] [ identifier[np] . identifier[logical_and] (~ identifier[self] . identifier[c_low_mask] , identifier[self] . identifier[c_high_mask] )]= identifier[voltage] [ identifier[np] . identifier[logical_and] ( identifier[self] . identifier[c_low_mask] ,~ identifier[self] . identifier[c_high_mask] )]* identifier[self] . identifier[c_mid] / literal[int]
identifier[charge] [ identifier[np] . identifier[logical_and] ( identifier[self] . identifier[c_low_mask] , identifier[self] . identifier[c_high_mask] )]= identifier[voltage] [ identifier[np] . identifier[logical_and] ( identifier[self] . identifier[c_low_mask] ,~ identifier[self] . identifier[c_high_mask] )]* identifier[self] . identifier[c_high] / literal[int]
keyword[return] identifier[charge]
|
def _get_plsr_dac_charge(self, plsr_dac_array, no_offset=False):
"""Takes the PlsrDAC calibration and the stored C-high/C-low mask to calculate the charge from the PlsrDAC array on a pixel basis
"""
charge = np.zeros_like(self.c_low_mask, dtype=np.float16) # charge in electrons
if self.vcal_c0 is not None and self.vcal_c1 is not None and (self.c_low is not None) and (self.c_mid is not None) and (self.c_high is not None):
voltage = self.vcal_c1 * plsr_dac_array if no_offset else self.vcal_c0 + self.vcal_c1 * plsr_dac_array
charge[np.logical_and(self.c_low_mask, ~self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, ~self.c_high_mask)] * self.c_low / 0.16022
        # index voltage with the same mask as charge so both sides select the same pixels
        charge[np.logical_and(~self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(~self.c_low_mask, self.c_high_mask)] * self.c_mid / 0.16022
        charge[np.logical_and(self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, self.c_high_mask)] * self.c_high / 0.16022 # depends on [control=['if'], data=[]]
return charge
|
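# Plausibility check for the divisor above (an assumption, not stated in the
# code): with capacitance in fF and calibration voltage in mV, the charge is
# C*V*1e-18 coulomb, and dividing by one electron charge (1.6022e-19 C) folds
# into a single factor of 0.16022.
c_fF, v_mV = 1.9, 400.0               # hypothetical C_low and pulse amplitude
electrons = c_fF * v_mV / 0.16022
print(round(electrons))               # ~4743 electrons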
def inference(self):
"""
Returns:
A :class:`TowerTensorHandles`, containing only the inference towers.
"""
handles = [h for h in self._handles if not h.is_training]
return TowerTensorHandles(handles)
|
def function[inference, parameter[self]]:
constant[
Returns:
A :class:`TowerTensorHandles`, containing only the inference towers.
]
variable[handles] assign[=] <ast.ListComp object at 0x7da2041d8f40>
return[call[name[TowerTensorHandles], parameter[name[handles]]]]
|
keyword[def] identifier[inference] ( identifier[self] ):
literal[string]
identifier[handles] =[ identifier[h] keyword[for] identifier[h] keyword[in] identifier[self] . identifier[_handles] keyword[if] keyword[not] identifier[h] . identifier[is_training] ]
keyword[return] identifier[TowerTensorHandles] ( identifier[handles] )
|
def inference(self):
"""
Returns:
A :class:`TowerTensorHandles`, containing only the inference towers.
"""
handles = [h for h in self._handles if not h.is_training]
return TowerTensorHandles(handles)
|
def _get_source_sum(source_hash, file_path, saltenv):
'''
Extract the hash sum, whether it is in a remote hash file, or just a string.
'''
ret = dict()
schemes = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file')
invalid_hash_msg = ("Source hash '{0}' format is invalid. It must be in "
"the format <hash type>=<hash>").format(source_hash)
source_hash = six.text_type(source_hash)
source_hash_scheme = _urlparse(source_hash).scheme
if source_hash_scheme in schemes:
# The source_hash is a file on a server
cached_hash_file = __salt__['cp.cache_file'](source_hash, saltenv)
if not cached_hash_file:
raise CommandExecutionError(('Source hash file {0} not'
' found').format(source_hash))
ret = __salt__['file.extract_hash'](cached_hash_file, '', file_path)
if ret is None:
raise SaltInvocationError(invalid_hash_msg)
else:
# The source_hash is a hash string
items = source_hash.split('=', 1)
if len(items) != 2:
invalid_hash_msg = ('{0}, or it must be a supported protocol'
': {1}').format(invalid_hash_msg,
', '.join(schemes))
raise SaltInvocationError(invalid_hash_msg)
ret['hash_type'], ret['hsum'] = [item.strip().lower() for item in items]
return ret
|
def function[_get_source_sum, parameter[source_hash, file_path, saltenv]]:
constant[
Extract the hash sum, whether it is in a remote hash file, or just a string.
]
variable[ret] assign[=] call[name[dict], parameter[]]
variable[schemes] assign[=] tuple[[<ast.Constant object at 0x7da2044c0a30>, <ast.Constant object at 0x7da2044c2350>, <ast.Constant object at 0x7da2044c0f70>, <ast.Constant object at 0x7da2044c2890>, <ast.Constant object at 0x7da2044c3190>, <ast.Constant object at 0x7da2044c0430>, <ast.Constant object at 0x7da2044c23e0>]]
variable[invalid_hash_msg] assign[=] call[constant[Source hash '{0}' format is invalid. It must be in the format <hash type>=<hash>].format, parameter[name[source_hash]]]
variable[source_hash] assign[=] call[name[six].text_type, parameter[name[source_hash]]]
variable[source_hash_scheme] assign[=] call[name[_urlparse], parameter[name[source_hash]]].scheme
if compare[name[source_hash_scheme] in name[schemes]] begin[:]
variable[cached_hash_file] assign[=] call[call[name[__salt__]][constant[cp.cache_file]], parameter[name[source_hash], name[saltenv]]]
if <ast.UnaryOp object at 0x7da2044c0ee0> begin[:]
<ast.Raise object at 0x7da2044c1270>
variable[ret] assign[=] call[call[name[__salt__]][constant[file.extract_hash]], parameter[name[cached_hash_file], constant[], name[file_path]]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da2044c2ef0>
return[name[ret]]
|
keyword[def] identifier[_get_source_sum] ( identifier[source_hash] , identifier[file_path] , identifier[saltenv] ):
literal[string]
identifier[ret] = identifier[dict] ()
identifier[schemes] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
identifier[invalid_hash_msg] =( literal[string]
literal[string] ). identifier[format] ( identifier[source_hash] )
identifier[source_hash] = identifier[six] . identifier[text_type] ( identifier[source_hash] )
identifier[source_hash_scheme] = identifier[_urlparse] ( identifier[source_hash] ). identifier[scheme]
keyword[if] identifier[source_hash_scheme] keyword[in] identifier[schemes] :
identifier[cached_hash_file] = identifier[__salt__] [ literal[string] ]( identifier[source_hash] , identifier[saltenv] )
keyword[if] keyword[not] identifier[cached_hash_file] :
keyword[raise] identifier[CommandExecutionError] (( literal[string]
literal[string] ). identifier[format] ( identifier[source_hash] ))
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cached_hash_file] , literal[string] , identifier[file_path] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
keyword[raise] identifier[SaltInvocationError] ( identifier[invalid_hash_msg] )
keyword[else] :
identifier[items] = identifier[source_hash] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[items] )!= literal[int] :
identifier[invalid_hash_msg] =( literal[string]
literal[string] ). identifier[format] ( identifier[invalid_hash_msg] ,
literal[string] . identifier[join] ( identifier[schemes] ))
keyword[raise] identifier[SaltInvocationError] ( identifier[invalid_hash_msg] )
identifier[ret] [ literal[string] ], identifier[ret] [ literal[string] ]=[ identifier[item] . identifier[strip] (). identifier[lower] () keyword[for] identifier[item] keyword[in] identifier[items] ]
keyword[return] identifier[ret]
|
def _get_source_sum(source_hash, file_path, saltenv):
"""
Extract the hash sum, whether it is in a remote hash file, or just a string.
"""
ret = dict()
schemes = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file')
invalid_hash_msg = "Source hash '{0}' format is invalid. It must be in the format <hash type>=<hash>".format(source_hash)
source_hash = six.text_type(source_hash)
source_hash_scheme = _urlparse(source_hash).scheme
if source_hash_scheme in schemes:
# The source_hash is a file on a server
cached_hash_file = __salt__['cp.cache_file'](source_hash, saltenv)
if not cached_hash_file:
raise CommandExecutionError('Source hash file {0} not found'.format(source_hash)) # depends on [control=['if'], data=[]]
ret = __salt__['file.extract_hash'](cached_hash_file, '', file_path)
if ret is None:
raise SaltInvocationError(invalid_hash_msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# The source_hash is a hash string
items = source_hash.split('=', 1)
if len(items) != 2:
invalid_hash_msg = '{0}, or it must be a supported protocol: {1}'.format(invalid_hash_msg, ', '.join(schemes))
raise SaltInvocationError(invalid_hash_msg) # depends on [control=['if'], data=[]]
(ret['hash_type'], ret['hsum']) = [item.strip().lower() for item in items]
return ret
|
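# Quick check of the inline "<hash type>=<hash>" branch above; the remote-file
# branch needs a Salt minion context (__salt__) and is not exercised here.
source_hash = 'SHA256=' + 'ab' * 32
hash_type, hsum = [item.strip().lower() for item in source_hash.split('=', 1)]
print(hash_type, hsum[:8])  # sha256 abababab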
def filePostProcess(self):
'''
The real name of this method should be ``reparentFiles``, but to avoid confusion
with what stage this must happen at it is called this instead. After the
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery` method has been called, each
file will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
'''
# directories are already reparented, traverse the children and get a flattened
# list of all directories. previously, all directories should have had their
# names adjusted to remove a potentially leading path separator
nodes_remaining = [d for d in self.dirs]
all_directories = []
while len(nodes_remaining) > 0:
d = nodes_remaining.pop()
all_directories.append(d)
for child in d.children:
if child.kind == "dir":
nodes_remaining.append(child)
all_directories.sort()
for f in self.files:
if not f.location:
sys.stderr.write(utils.critical(
"Cannot reparent file [{0}] because it's location was not discovered.\n".format(
f.name
)
))
continue
elif os.sep not in f.location:
            # top-level file, cannot be parented to a directory
utils.verbose_log(
"### File [{0}] with location [{1}] was identified as being at the top level".format(
f.name, f.location
),
utils.AnsiColors.BOLD_YELLOW
)
continue
dirname = os.path.dirname(f.location)
found = False
for d in all_directories:
if dirname == d.name:
d.children.append(f)
f.parent = d
found = True
break
if not found:
sys.stderr.write(utils.critical(
"Could not find directory parent of file [{0}] with location [{1}].\n".format(
f.name, f.location
)
))
|
def function[filePostProcess, parameter[self]]:
constant[
The real name of this method should be ``reparentFiles``, but to avoid confusion
with what stage this must happen at it is called this instead. After the
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery` method has been called, each
file will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
]
variable[nodes_remaining] assign[=] <ast.ListComp object at 0x7da1b0781630>
variable[all_directories] assign[=] list[[]]
while compare[call[name[len], parameter[name[nodes_remaining]]] greater[>] constant[0]] begin[:]
variable[d] assign[=] call[name[nodes_remaining].pop, parameter[]]
call[name[all_directories].append, parameter[name[d]]]
for taget[name[child]] in starred[name[d].children] begin[:]
if compare[name[child].kind equal[==] constant[dir]] begin[:]
call[name[nodes_remaining].append, parameter[name[child]]]
call[name[all_directories].sort, parameter[]]
for taget[name[f]] in starred[name[self].files] begin[:]
if <ast.UnaryOp object at 0x7da1b0781840> begin[:]
call[name[sys].stderr.write, parameter[call[name[utils].critical, parameter[call[constant[Cannot reparent file [{0}] because it's location was not discovered.
].format, parameter[name[f].name]]]]]]
continue
variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[f].location]]
variable[found] assign[=] constant[False]
for taget[name[d]] in starred[name[all_directories]] begin[:]
if compare[name[dirname] equal[==] name[d].name] begin[:]
call[name[d].children.append, parameter[name[f]]]
name[f].parent assign[=] name[d]
variable[found] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da1b0780a30> begin[:]
call[name[sys].stderr.write, parameter[call[name[utils].critical, parameter[call[constant[Could not find directory parent of file [{0}] with location [{1}].
].format, parameter[name[f].name, name[f].location]]]]]]
|
keyword[def] identifier[filePostProcess] ( identifier[self] ):
literal[string]
identifier[nodes_remaining] =[ identifier[d] keyword[for] identifier[d] keyword[in] identifier[self] . identifier[dirs] ]
identifier[all_directories] =[]
keyword[while] identifier[len] ( identifier[nodes_remaining] )> literal[int] :
identifier[d] = identifier[nodes_remaining] . identifier[pop] ()
identifier[all_directories] . identifier[append] ( identifier[d] )
keyword[for] identifier[child] keyword[in] identifier[d] . identifier[children] :
keyword[if] identifier[child] . identifier[kind] == literal[string] :
identifier[nodes_remaining] . identifier[append] ( identifier[child] )
identifier[all_directories] . identifier[sort] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[files] :
keyword[if] keyword[not] identifier[f] . identifier[location] :
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[utils] . identifier[critical] (
literal[string] . identifier[format] (
identifier[f] . identifier[name]
)
))
keyword[continue]
keyword[elif] identifier[os] . identifier[sep] keyword[not] keyword[in] identifier[f] . identifier[location] :
identifier[utils] . identifier[verbose_log] (
literal[string] . identifier[format] (
identifier[f] . identifier[name] , identifier[f] . identifier[location]
),
identifier[utils] . identifier[AnsiColors] . identifier[BOLD_YELLOW]
)
keyword[continue]
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[f] . identifier[location] )
identifier[found] = keyword[False]
keyword[for] identifier[d] keyword[in] identifier[all_directories] :
keyword[if] identifier[dirname] == identifier[d] . identifier[name] :
identifier[d] . identifier[children] . identifier[append] ( identifier[f] )
identifier[f] . identifier[parent] = identifier[d]
identifier[found] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[found] :
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[utils] . identifier[critical] (
literal[string] . identifier[format] (
identifier[f] . identifier[name] , identifier[f] . identifier[location]
)
))
|
def filePostProcess(self):
"""
The real name of this method should be ``reparentFiles``, but to avoid confusion
with what stage this must happen at it is called this instead. After the
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery` method has been called, each
file will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
"""
# directories are already reparented, traverse the children and get a flattened
# list of all directories. previously, all directories should have had their
# names adjusted to remove a potentially leading path separator
nodes_remaining = [d for d in self.dirs]
all_directories = []
while len(nodes_remaining) > 0:
d = nodes_remaining.pop()
all_directories.append(d)
for child in d.children:
if child.kind == 'dir':
nodes_remaining.append(child) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['while'], data=[]]
all_directories.sort()
for f in self.files:
if not f.location:
sys.stderr.write(utils.critical("Cannot reparent file [{0}] because it's location was not discovered.\n".format(f.name)))
continue # depends on [control=['if'], data=[]]
elif os.sep not in f.location:
            # top-level file, cannot be parented to a directory
utils.verbose_log('### File [{0}] with location [{1}] was identified as being at the top level'.format(f.name, f.location), utils.AnsiColors.BOLD_YELLOW)
continue # depends on [control=['if'], data=[]]
dirname = os.path.dirname(f.location)
found = False
for d in all_directories:
if dirname == d.name:
d.children.append(f)
f.parent = d
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
if not found:
sys.stderr.write(utils.critical('Could not find directory parent of file [{0}] with location [{1}].\n'.format(f.name, f.location))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
|
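# Standalone sketch of the reparenting idea above, using plain dicts instead of
# exhale's node objects (names here are illustrative, not exhale's API):
import os

children = {'include': [], os.path.join('include', 'lib'): []}  # dir name -> files
files = [('a.hpp', os.path.join('include', 'a.hpp')),
         ('b.hpp', os.path.join('include', 'lib', 'b.hpp'))]
for name, location in files:
    parent = os.path.dirname(location)
    if parent in children:
        children[parent].append(name)
print(children)  # {'include': ['a.hpp'], 'include/lib': ['b.hpp']} on POSIX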
def argparser(self):
"""
        Argparser option with search functionality specific to ranges.
"""
core_parser = self.core_parser
core_parser.add_argument('-r', '--range', type=str, help="The range to search for use")
return core_parser
|
def function[argparser, parameter[self]]:
constant[
Argparser option with search functionality specific for ranges.
]
variable[core_parser] assign[=] name[self].core_parser
call[name[core_parser].add_argument, parameter[constant[-r], constant[--range]]]
return[name[core_parser]]
|
keyword[def] identifier[argparser] ( identifier[self] ):
literal[string]
identifier[core_parser] = identifier[self] . identifier[core_parser]
identifier[core_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[str] , identifier[help] = literal[string] )
keyword[return] identifier[core_parser]
|
def argparser(self):
"""
    Argparser option with search functionality specific to ranges.
"""
core_parser = self.core_parser
core_parser.add_argument('-r', '--range', type=str, help='The range to search for use')
return core_parser
|
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials (datetime.datetime)
expiry: expiration time of credentials, (datetime.datetime)
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
        { 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
}
|
def function[createTemporaryCredentials, parameter[clientId, accessToken, start, expiry, scopes, name]]:
constant[ Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials (datetime.datetime)
expiry: expiration time of credentials, (datetime.datetime)
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
{ 'clientId': str, 'accessToken: str, 'certificate': str}
]
for taget[name[scope]] in starred[name[scopes]] begin[:]
if <ast.UnaryOp object at 0x7da1b04038e0> begin[:]
<ast.Raise object at 0x7da1b0403fa0>
if compare[binary_operation[name[expiry] - name[start]] greater[>] call[name[datetime].timedelta, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b0403310>
variable[cert] assign[=] call[name[dict], parameter[]]
if name[name] begin[:]
call[name[cert]][constant[issuer]] assign[=] call[name[utils].toStr, parameter[name[clientId]]]
variable[sig] assign[=] list[[<ast.BinOp object at 0x7da1b04005b0>]]
if name[name] begin[:]
call[name[sig].extend, parameter[list[[<ast.BinOp object at 0x7da1b0400670>, <ast.BinOp object at 0x7da1b0401d50>]]]]
call[name[sig].extend, parameter[binary_operation[list[[<ast.BinOp object at 0x7da1b0403ac0>, <ast.BinOp object at 0x7da1b04011e0>, <ast.BinOp object at 0x7da1b04038b0>, <ast.Constant object at 0x7da1b0403160>]] + name[scopes]]]]
variable[sigStr] assign[=] call[call[constant[
].join, parameter[name[sig]]].encode, parameter[]]
if call[name[isinstance], parameter[name[accessToken], name[six].text_type]] begin[:]
variable[accessToken] assign[=] call[name[accessToken].encode, parameter[]]
variable[sig] assign[=] call[call[name[hmac].new, parameter[name[accessToken], name[sigStr], name[hashlib].sha256]].digest, parameter[]]
call[name[cert]][constant[signature]] assign[=] call[name[utils].encodeStringForB64Header, parameter[name[sig]]]
variable[newToken] assign[=] call[call[name[hmac].new, parameter[name[accessToken], call[name[cert]][constant[seed]], name[hashlib].sha256]].digest, parameter[]]
variable[newToken] assign[=] call[call[name[utils].makeB64UrlSafe, parameter[call[name[utils].encodeStringForB64Header, parameter[name[newToken]]]]].replace, parameter[constant[b'='], constant[b'']]]
return[dictionary[[<ast.Constant object at 0x7da2049603d0>, <ast.Constant object at 0x7da204963dc0>, <ast.Constant object at 0x7da2049605b0>], [<ast.BoolOp object at 0x7da2049629b0>, <ast.Name object at 0x7da204963430>, <ast.Call object at 0x7da204961b70>]]]
|
keyword[def] identifier[createTemporaryCredentials] ( identifier[clientId] , identifier[accessToken] , identifier[start] , identifier[expiry] , identifier[scopes] , identifier[name] = keyword[None] ):
literal[string]
keyword[for] identifier[scope] keyword[in] identifier[scopes] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[scope] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[exceptions] . identifier[TaskclusterFailure] ( literal[string] )
keyword[if] identifier[expiry] - identifier[start] > identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] ):
keyword[raise] identifier[exceptions] . identifier[TaskclusterFailure] ( literal[string] )
identifier[cert] = identifier[dict] (
identifier[version] = literal[int] ,
identifier[scopes] = identifier[scopes] ,
identifier[start] = identifier[calendar] . identifier[timegm] ( identifier[start] . identifier[utctimetuple] ())* literal[int] ,
identifier[expiry] = identifier[calendar] . identifier[timegm] ( identifier[expiry] . identifier[utctimetuple] ())* literal[int] ,
identifier[seed] = identifier[utils] . identifier[slugId] (). identifier[encode] ( literal[string] )+ identifier[utils] . identifier[slugId] (). identifier[encode] ( literal[string] ),
)
keyword[if] identifier[name] :
identifier[cert] [ literal[string] ]= identifier[utils] . identifier[toStr] ( identifier[clientId] )
identifier[sig] =[ literal[string] + identifier[utils] . identifier[toStr] ( identifier[cert] [ literal[string] ])]
keyword[if] identifier[name] :
identifier[sig] . identifier[extend] ([
literal[string] + identifier[utils] . identifier[toStr] ( identifier[name] ),
literal[string] + identifier[utils] . identifier[toStr] ( identifier[clientId] ),
])
identifier[sig] . identifier[extend] ([
literal[string] + identifier[utils] . identifier[toStr] ( identifier[cert] [ literal[string] ]),
literal[string] + identifier[utils] . identifier[toStr] ( identifier[cert] [ literal[string] ]),
literal[string] + identifier[utils] . identifier[toStr] ( identifier[cert] [ literal[string] ]),
literal[string]
]+ identifier[scopes] )
identifier[sigStr] = literal[string] . identifier[join] ( identifier[sig] ). identifier[encode] ()
keyword[if] identifier[isinstance] ( identifier[accessToken] , identifier[six] . identifier[text_type] ):
identifier[accessToken] = identifier[accessToken] . identifier[encode] ()
identifier[sig] = identifier[hmac] . identifier[new] ( identifier[accessToken] , identifier[sigStr] , identifier[hashlib] . identifier[sha256] ). identifier[digest] ()
identifier[cert] [ literal[string] ]= identifier[utils] . identifier[encodeStringForB64Header] ( identifier[sig] )
identifier[newToken] = identifier[hmac] . identifier[new] ( identifier[accessToken] , identifier[cert] [ literal[string] ], identifier[hashlib] . identifier[sha256] ). identifier[digest] ()
identifier[newToken] = identifier[utils] . identifier[makeB64UrlSafe] ( identifier[utils] . identifier[encodeStringForB64Header] ( identifier[newToken] )). identifier[replace] ( literal[string] , literal[string] )
keyword[return] {
literal[string] : identifier[name] keyword[or] identifier[clientId] ,
literal[string] : identifier[newToken] ,
literal[string] : identifier[utils] . identifier[dumpJson] ( identifier[cert] ),
}
|
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials (datetime.datetime)
expiry: expiration time of credentials, (datetime.datetime)
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
    { 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['scope']]
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed') # depends on [control=['if'], data=[]]
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(version=1, scopes=scopes, start=calendar.timegm(start.utctimetuple()) * 1000, expiry=calendar.timegm(expiry.utctimetuple()) * 1000, seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'))
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId) # depends on [control=['if'], data=[]]
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend(['clientId:' + utils.toStr(name), 'issuer:' + utils.toStr(clientId)]) # depends on [control=['if'], data=[]]
sig.extend(['seed:' + utils.toStr(cert['seed']), 'start:' + utils.toStr(cert['start']), 'expiry:' + utils.toStr(cert['expiry']), 'scopes:'] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode() # depends on [control=['if'], data=[]]
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {'clientId': name or clientId, 'accessToken': newToken, 'certificate': utils.dumpJson(cert)}
|
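# Hedged usage sketch for createTemporaryCredentials above; the client id,
# access token, and scope are fabricated, and the module-level imports it
# relies on (hmac, hashlib, calendar, utils, six) are assumed to be present.
import datetime

now = datetime.datetime.utcnow()
creds = createTemporaryCredentials(
    clientId='my-client',
    accessToken='fake-access-token',
    start=now,
    expiry=now + datetime.timedelta(days=1),
    scopes=['queue:create-task:*'],
)
print(sorted(creds))  # ['accessToken', 'certificate', 'clientId']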
def clean(self, tol=None):
"""
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
        :param tol: defines how far the points should be from each other,
            as a fraction of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
"""
poly = self.polydata(False)
cleanPolyData = vtk.vtkCleanPolyData()
cleanPolyData.PointMergingOn()
cleanPolyData.ConvertLinesToPointsOn()
cleanPolyData.ConvertPolysToLinesOn()
cleanPolyData.SetInputData(poly)
if tol:
cleanPolyData.SetTolerance(tol)
cleanPolyData.Update()
return self.updateMesh(cleanPolyData.GetOutput())
|
def function[clean, parameter[self, tol]]:
constant[
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
:param tol: defines how far should be the points from each other in terms of fraction
of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
]
variable[poly] assign[=] call[name[self].polydata, parameter[constant[False]]]
variable[cleanPolyData] assign[=] call[name[vtk].vtkCleanPolyData, parameter[]]
call[name[cleanPolyData].PointMergingOn, parameter[]]
call[name[cleanPolyData].ConvertLinesToPointsOn, parameter[]]
call[name[cleanPolyData].ConvertPolysToLinesOn, parameter[]]
call[name[cleanPolyData].SetInputData, parameter[name[poly]]]
if name[tol] begin[:]
call[name[cleanPolyData].SetTolerance, parameter[name[tol]]]
call[name[cleanPolyData].Update, parameter[]]
return[call[name[self].updateMesh, parameter[call[name[cleanPolyData].GetOutput, parameter[]]]]]
|
keyword[def] identifier[clean] ( identifier[self] , identifier[tol] = keyword[None] ):
literal[string]
identifier[poly] = identifier[self] . identifier[polydata] ( keyword[False] )
identifier[cleanPolyData] = identifier[vtk] . identifier[vtkCleanPolyData] ()
identifier[cleanPolyData] . identifier[PointMergingOn] ()
identifier[cleanPolyData] . identifier[ConvertLinesToPointsOn] ()
identifier[cleanPolyData] . identifier[ConvertPolysToLinesOn] ()
identifier[cleanPolyData] . identifier[SetInputData] ( identifier[poly] )
keyword[if] identifier[tol] :
identifier[cleanPolyData] . identifier[SetTolerance] ( identifier[tol] )
identifier[cleanPolyData] . identifier[Update] ()
keyword[return] identifier[self] . identifier[updateMesh] ( identifier[cleanPolyData] . identifier[GetOutput] ())
|
def clean(self, tol=None):
"""
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
    :param tol: defines how far the points should be from each other,
        as a fraction of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
"""
poly = self.polydata(False)
cleanPolyData = vtk.vtkCleanPolyData()
cleanPolyData.PointMergingOn()
cleanPolyData.ConvertLinesToPointsOn()
cleanPolyData.ConvertPolysToLinesOn()
cleanPolyData.SetInputData(poly)
if tol:
cleanPolyData.SetTolerance(tol) # depends on [control=['if'], data=[]]
cleanPolyData.Update()
return self.updateMesh(cleanPolyData.GetOutput())
|
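# Hedged usage sketch: clean() is a method on what looks like a vtkplotter/vedo
# actor, so an actor instance is assumed; a larger tol also decimates the mesh.
# actor.clean()          # only merge coincident points
# actor.clean(tol=0.01)  # also merge points within 1% of the bounding box length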
def parse_scwrl_out(scwrl_std_out, scwrl_pdb):
"""Parses SCWRL output and returns PDB and SCWRL score.
Parameters
----------
scwrl_std_out : str
Std out from SCWRL.
scwrl_pdb : str
String of packed SCWRL PDB.
Returns
-------
fixed_scwrl_str : str
String of packed SCWRL PDB, with correct PDB format.
score : float
SCWRL Score
"""
score = re.findall(
r'Total minimal energy of the graph = ([-0-9.]+)', scwrl_std_out)[0]
# Add temperature factors to SCWRL out
split_scwrl = scwrl_pdb.splitlines()
fixed_scwrl = []
for line in split_scwrl:
if len(line) < 80:
line += ' ' * (80 - len(line))
if re.search(r'H?E?T?ATO?M\s+\d+.+', line):
front = line[:61]
temp_factor = ' 0.00'
back = line[66:]
fixed_scwrl.append(''.join([front, temp_factor, back]))
else:
fixed_scwrl.append(line)
fixed_scwrl_str = '\n'.join(fixed_scwrl) + '\n'
return fixed_scwrl_str, float(score)
|
def function[parse_scwrl_out, parameter[scwrl_std_out, scwrl_pdb]]:
constant[Parses SCWRL output and returns PDB and SCWRL score.
Parameters
----------
scwrl_std_out : str
Std out from SCWRL.
scwrl_pdb : str
String of packed SCWRL PDB.
Returns
-------
fixed_scwrl_str : str
String of packed SCWRL PDB, with correct PDB format.
score : float
SCWRL Score
]
variable[score] assign[=] call[call[name[re].findall, parameter[constant[Total minimal energy of the graph = ([-0-9.]+)], name[scwrl_std_out]]]][constant[0]]
variable[split_scwrl] assign[=] call[name[scwrl_pdb].splitlines, parameter[]]
variable[fixed_scwrl] assign[=] list[[]]
for taget[name[line]] in starred[name[split_scwrl]] begin[:]
if compare[call[name[len], parameter[name[line]]] less[<] constant[80]] begin[:]
<ast.AugAssign object at 0x7da1b283b700>
if call[name[re].search, parameter[constant[H?E?T?ATO?M\s+\d+.+], name[line]]] begin[:]
variable[front] assign[=] call[name[line]][<ast.Slice object at 0x7da1b28512a0>]
variable[temp_factor] assign[=] constant[ 0.00]
variable[back] assign[=] call[name[line]][<ast.Slice object at 0x7da1b2853cd0>]
call[name[fixed_scwrl].append, parameter[call[constant[].join, parameter[list[[<ast.Name object at 0x7da1b2851a50>, <ast.Name object at 0x7da1b2851ba0>, <ast.Name object at 0x7da1b28522f0>]]]]]]
variable[fixed_scwrl_str] assign[=] binary_operation[call[constant[
].join, parameter[name[fixed_scwrl]]] + constant[
]]
return[tuple[[<ast.Name object at 0x7da1b2853a00>, <ast.Call object at 0x7da1b2850e80>]]]
|
keyword[def] identifier[parse_scwrl_out] ( identifier[scwrl_std_out] , identifier[scwrl_pdb] ):
literal[string]
identifier[score] = identifier[re] . identifier[findall] (
literal[string] , identifier[scwrl_std_out] )[ literal[int] ]
identifier[split_scwrl] = identifier[scwrl_pdb] . identifier[splitlines] ()
identifier[fixed_scwrl] =[]
keyword[for] identifier[line] keyword[in] identifier[split_scwrl] :
keyword[if] identifier[len] ( identifier[line] )< literal[int] :
identifier[line] += literal[string] *( literal[int] - identifier[len] ( identifier[line] ))
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[front] = identifier[line] [: literal[int] ]
identifier[temp_factor] = literal[string]
identifier[back] = identifier[line] [ literal[int] :]
identifier[fixed_scwrl] . identifier[append] ( literal[string] . identifier[join] ([ identifier[front] , identifier[temp_factor] , identifier[back] ]))
keyword[else] :
identifier[fixed_scwrl] . identifier[append] ( identifier[line] )
identifier[fixed_scwrl_str] = literal[string] . identifier[join] ( identifier[fixed_scwrl] )+ literal[string]
keyword[return] identifier[fixed_scwrl_str] , identifier[float] ( identifier[score] )
|
def parse_scwrl_out(scwrl_std_out, scwrl_pdb):
"""Parses SCWRL output and returns PDB and SCWRL score.
Parameters
----------
scwrl_std_out : str
Std out from SCWRL.
scwrl_pdb : str
String of packed SCWRL PDB.
Returns
-------
fixed_scwrl_str : str
String of packed SCWRL PDB, with correct PDB format.
score : float
SCWRL Score
"""
score = re.findall('Total minimal energy of the graph = ([-0-9.]+)', scwrl_std_out)[0]
# Add temperature factors to SCWRL out
split_scwrl = scwrl_pdb.splitlines()
fixed_scwrl = []
for line in split_scwrl:
if len(line) < 80:
line += ' ' * (80 - len(line)) # depends on [control=['if'], data=[]]
if re.search('H?E?T?ATO?M\\s+\\d+.+', line):
front = line[:61]
temp_factor = ' 0.00'
back = line[66:]
fixed_scwrl.append(''.join([front, temp_factor, back])) # depends on [control=['if'], data=[]]
else:
fixed_scwrl.append(line) # depends on [control=['for'], data=['line']]
fixed_scwrl_str = '\n'.join(fixed_scwrl) + '\n'
return (fixed_scwrl_str, float(score))
|
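# Minimal check of the two fix-ups performed above, on fabricated SCWRL-style
# output (the values are illustrative only):
import re

std_out = 'Total minimal energy of the graph = -123.456'
print(re.findall(r'Total minimal energy of the graph = ([-0-9.]+)', std_out)[0])  # -123.456

line = 'ATOM      1  N   ALA A   1      11.104   6.134  -6.504  1.00'
line += ' ' * (80 - len(line))
print(repr(line[61:66]))  # the five-column slice that receives the temperature factor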
def add_commands(parser, functions, namespace=None, namespace_kwargs=None,
func_kwargs=None,
# deprecated args:
title=None, description=None, help=None):
"""
Adds given functions as commands to given parser.
:param parser:
an :class:`argparse.ArgumentParser` instance.
:param functions:
a list of functions. A subparser is created for each of them.
If the function is decorated with :func:`~argh.decorators.arg`, the
arguments are passed to :class:`argparse.ArgumentParser.add_argument`.
See also :func:`~argh.dispatching.dispatch` for requirements
concerning function signatures. The command name is inferred from the
function name. Note that the underscores in the name are replaced with
hyphens, i.e. function name "foo_bar" becomes command name "foo-bar".
:param namespace:
an optional string representing the group of commands. For example, if
a command named "hello" is added without the namespace, it will be
available as "prog.py hello"; if the namespace if specified as "greet",
then the command will be accessible as "prog.py greet hello". The
namespace itself is not callable, so "prog.py greet" will fail and only
display a help message.
:param func_kwargs:
a `dict` of keyword arguments to be passed to each nested ArgumentParser
instance created per command (i.e. per function). Members of this
dictionary have the highest priority, so a function's docstring is
overridden by a `help` in `func_kwargs` (if present).
:param namespace_kwargs:
a `dict` of keyword arguments to be passed to the nested ArgumentParser
instance under given `namespace`.
Deprecated params that should be moved into `namespace_kwargs`:
:param title:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as `title`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
:param description:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as
`description`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
:param help:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as `help`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
.. note::
This function modifies the parser object. Generally side effects are
bad practice but we don't seem to have any choice as ArgumentParser is
pretty opaque.
You may prefer :class:`~argh.helpers.ArghParser.add_commands` for a bit
more predictable API.
.. note::
An attempt to add commands to a parser which already has a default
function (e.g. added with :func:`~argh.assembling.set_default_command`)
results in `AssemblingError`.
"""
# FIXME "namespace" is a correct name but it clashes with the "namespace"
# that represents arguments (argparse.Namespace and our ArghNamespace).
# We should rename the argument here.
if DEST_FUNCTION in parser._defaults:
_require_support_for_default_command_with_subparsers()
namespace_kwargs = namespace_kwargs or {}
# FIXME remove this by 1.0
#
if title:
        warnings.warn('argument `title` is deprecated in add_commands(),'
                      ' use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['description'] = title
if help:
        warnings.warn('argument `help` is deprecated in add_commands(),'
                      ' use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['help'] = help
if description:
        warnings.warn('argument `description` is deprecated in add_commands(),'
                      ' use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['description'] = description
#
# /
subparsers_action = get_subparsers(parser, create=True)
if namespace:
# Make a nested parser and init a deeper _SubParsersAction under it.
# Create a named group of commands. It will be listed along with
# root-level commands in ``app.py --help``; in that context its `title`
# can be used as a short description on the right side of its name.
# Normally `title` is shown above the list of commands
# in ``app.py my-namespace --help``.
subsubparser_kw = {
'help': namespace_kwargs.get('title'),
}
subsubparser = subparsers_action.add_parser(namespace, **subsubparser_kw)
subparsers_action = subsubparser.add_subparsers(**namespace_kwargs)
else:
        assert not namespace_kwargs, ('`namespace_kwargs` only makes sense '
                                      'with `namespace`.')
for func in functions:
cmd_name, func_parser_kwargs = _extract_command_meta_from_func(func)
# override any computed kwargs by manually supplied ones
if func_kwargs:
func_parser_kwargs.update(func_kwargs)
# create and set up the parser for this command
command_parser = subparsers_action.add_parser(cmd_name, **func_parser_kwargs)
set_default_command(command_parser, func)
|
def function[add_commands, parameter[parser, functions, namespace, namespace_kwargs, func_kwargs, title, description, help]]:
constant[
Adds given functions as commands to given parser.
:param parser:
an :class:`argparse.ArgumentParser` instance.
:param functions:
a list of functions. A subparser is created for each of them.
If the function is decorated with :func:`~argh.decorators.arg`, the
arguments are passed to :class:`argparse.ArgumentParser.add_argument`.
See also :func:`~argh.dispatching.dispatch` for requirements
concerning function signatures. The command name is inferred from the
function name. Note that the underscores in the name are replaced with
hyphens, i.e. function name "foo_bar" becomes command name "foo-bar".
:param namespace:
an optional string representing the group of commands. For example, if
a command named "hello" is added without the namespace, it will be
available as "prog.py hello"; if the namespace if specified as "greet",
then the command will be accessible as "prog.py greet hello". The
namespace itself is not callable, so "prog.py greet" will fail and only
display a help message.
:param func_kwargs:
a `dict` of keyword arguments to be passed to each nested ArgumentParser
instance created per command (i.e. per function). Members of this
dictionary have the highest priority, so a function's docstring is
overridden by a `help` in `func_kwargs` (if present).
:param namespace_kwargs:
a `dict` of keyword arguments to be passed to the nested ArgumentParser
instance under given `namespace`.
Deprecated params that should be moved into `namespace_kwargs`:
:param title:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as `title`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
:param description:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as
`description`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
:param help:
passed to :meth:`argparse.ArgumentParser.add_subparsers` as `help`.
.. deprecated:: 0.26.0
Please use `namespace_kwargs` instead.
.. note::
This function modifies the parser object. Generally side effects are
bad practice but we don't seem to have any choice as ArgumentParser is
pretty opaque.
You may prefer :class:`~argh.helpers.ArghParser.add_commands` for a bit
more predictable API.
.. note::
An attempt to add commands to a parser which already has a default
function (e.g. added with :func:`~argh.assembling.set_default_command`)
results in `AssemblingError`.
]
if compare[name[DEST_FUNCTION] in name[parser]._defaults] begin[:]
call[name[_require_support_for_default_command_with_subparsers], parameter[]]
variable[namespace_kwargs] assign[=] <ast.BoolOp object at 0x7da1b0553e50>
if name[title] begin[:]
        call[name[warnings].warn, parameter[constant[argument `title` is deprecated in add_commands(), use `namespace_kwargs` instead], name[DeprecationWarning]]]
call[name[namespace_kwargs]][constant[description]] assign[=] name[title]
if name[help] begin[:]
        call[name[warnings].warn, parameter[constant[argument `help` is deprecated in add_commands(), use `namespace_kwargs` instead], name[DeprecationWarning]]]
call[name[namespace_kwargs]][constant[help]] assign[=] name[help]
if name[description] begin[:]
        call[name[warnings].warn, parameter[constant[argument `description` is deprecated in add_commands(), use `namespace_kwargs` instead], name[DeprecationWarning]]]
call[name[namespace_kwargs]][constant[description]] assign[=] name[description]
variable[subparsers_action] assign[=] call[name[get_subparsers], parameter[name[parser]]]
if name[namespace] begin[:]
variable[subsubparser_kw] assign[=] dictionary[[<ast.Constant object at 0x7da20c9921a0>], [<ast.Call object at 0x7da20c9936d0>]]
variable[subsubparser] assign[=] call[name[subparsers_action].add_parser, parameter[name[namespace]]]
variable[subparsers_action] assign[=] call[name[subsubparser].add_subparsers, parameter[]]
for taget[name[func]] in starred[name[functions]] begin[:]
<ast.Tuple object at 0x7da20c9939d0> assign[=] call[name[_extract_command_meta_from_func], parameter[name[func]]]
if name[func_kwargs] begin[:]
call[name[func_parser_kwargs].update, parameter[name[func_kwargs]]]
variable[command_parser] assign[=] call[name[subparsers_action].add_parser, parameter[name[cmd_name]]]
call[name[set_default_command], parameter[name[command_parser], name[func]]]
|
keyword[def] identifier[add_commands] ( identifier[parser] , identifier[functions] , identifier[namespace] = keyword[None] , identifier[namespace_kwargs] = keyword[None] ,
identifier[func_kwargs] = keyword[None] ,
identifier[title] = keyword[None] , identifier[description] = keyword[None] , identifier[help] = keyword[None] ):
literal[string]
keyword[if] identifier[DEST_FUNCTION] keyword[in] identifier[parser] . identifier[_defaults] :
identifier[_require_support_for_default_command_with_subparsers] ()
identifier[namespace_kwargs] = identifier[namespace_kwargs] keyword[or] {}
keyword[if] identifier[title] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[DeprecationWarning] )
identifier[namespace_kwargs] [ literal[string] ]= identifier[title]
keyword[if] identifier[help] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[DeprecationWarning] )
identifier[namespace_kwargs] [ literal[string] ]= identifier[help]
keyword[if] identifier[description] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[DeprecationWarning] )
identifier[namespace_kwargs] [ literal[string] ]= identifier[description]
identifier[subparsers_action] = identifier[get_subparsers] ( identifier[parser] , identifier[create] = keyword[True] )
keyword[if] identifier[namespace] :
identifier[subsubparser_kw] ={
literal[string] : identifier[namespace_kwargs] . identifier[get] ( literal[string] ),
}
identifier[subsubparser] = identifier[subparsers_action] . identifier[add_parser] ( identifier[namespace] ,** identifier[subsubparser_kw] )
identifier[subparsers_action] = identifier[subsubparser] . identifier[add_subparsers] (** identifier[namespace_kwargs] )
keyword[else] :
keyword[assert] keyword[not] identifier[namespace_kwargs] ,( literal[string]
literal[string] )
keyword[for] identifier[func] keyword[in] identifier[functions] :
identifier[cmd_name] , identifier[func_parser_kwargs] = identifier[_extract_command_meta_from_func] ( identifier[func] )
keyword[if] identifier[func_kwargs] :
identifier[func_parser_kwargs] . identifier[update] ( identifier[func_kwargs] )
identifier[command_parser] = identifier[subparsers_action] . identifier[add_parser] ( identifier[cmd_name] ,** identifier[func_parser_kwargs] )
identifier[set_default_command] ( identifier[command_parser] , identifier[func] )
|
def add_commands(parser, functions, namespace=None, namespace_kwargs=None, func_kwargs=None, title=None, description=None, help=None):
# deprecated args:
'\n Adds given functions as commands to given parser.\n\n :param parser:\n\n an :class:`argparse.ArgumentParser` instance.\n\n :param functions:\n\n a list of functions. A subparser is created for each of them.\n If the function is decorated with :func:`~argh.decorators.arg`, the\n arguments are passed to :class:`argparse.ArgumentParser.add_argument`.\n See also :func:`~argh.dispatching.dispatch` for requirements\n concerning function signatures. The command name is inferred from the\n function name. Note that the underscores in the name are replaced with\n hyphens, i.e. function name "foo_bar" becomes command name "foo-bar".\n\n :param namespace:\n\n an optional string representing the group of commands. For example, if\n a command named "hello" is added without the namespace, it will be\n available as "prog.py hello"; if the namespace if specified as "greet",\n then the command will be accessible as "prog.py greet hello". The\n namespace itself is not callable, so "prog.py greet" will fail and only\n display a help message.\n\n :param func_kwargs:\n\n a `dict` of keyword arguments to be passed to each nested ArgumentParser\n instance created per command (i.e. per function). Members of this\n dictionary have the highest priority, so a function\'s docstring is\n overridden by a `help` in `func_kwargs` (if present).\n\n :param namespace_kwargs:\n\n a `dict` of keyword arguments to be passed to the nested ArgumentParser\n instance under given `namespace`.\n\n Deprecated params that should be moved into `namespace_kwargs`:\n\n :param title:\n\n passed to :meth:`argparse.ArgumentParser.add_subparsers` as `title`.\n\n .. deprecated:: 0.26.0\n\n Please use `namespace_kwargs` instead.\n\n :param description:\n\n passed to :meth:`argparse.ArgumentParser.add_subparsers` as\n `description`.\n\n .. deprecated:: 0.26.0\n\n Please use `namespace_kwargs` instead.\n\n :param help:\n\n passed to :meth:`argparse.ArgumentParser.add_subparsers` as `help`.\n\n .. deprecated:: 0.26.0\n\n Please use `namespace_kwargs` instead.\n\n .. note::\n\n This function modifies the parser object. Generally side effects are\n bad practice but we don\'t seem to have any choice as ArgumentParser is\n pretty opaque.\n You may prefer :class:`~argh.helpers.ArghParser.add_commands` for a bit\n more predictable API.\n\n .. note::\n\n An attempt to add commands to a parser which already has a default\n function (e.g. added with :func:`~argh.assembling.set_default_command`)\n results in `AssemblingError`.\n\n '
# FIXME "namespace" is a correct name but it clashes with the "namespace"
# that represents arguments (argparse.Namespace and our ArghNamespace).
# We should rename the argument here.
if DEST_FUNCTION in parser._defaults:
_require_support_for_default_command_with_subparsers() # depends on [control=['if'], data=[]]
namespace_kwargs = namespace_kwargs or {}
# FIXME remove this by 1.0
#
if title:
        warnings.warn('argument `title` is deprecated in add_commands(), use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['description'] = title # depends on [control=['if'], data=[]]
if help:
        warnings.warn('argument `help` is deprecated in add_commands(), use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['help'] = help # depends on [control=['if'], data=[]]
if description:
        warnings.warn('argument `description` is deprecated in add_commands(), use `namespace_kwargs` instead', DeprecationWarning)
namespace_kwargs['description'] = description # depends on [control=['if'], data=[]]
#
# /
subparsers_action = get_subparsers(parser, create=True)
if namespace:
# Make a nested parser and init a deeper _SubParsersAction under it.
# Create a named group of commands. It will be listed along with
# root-level commands in ``app.py --help``; in that context its `title`
# can be used as a short description on the right side of its name.
# Normally `title` is shown above the list of commands
# in ``app.py my-namespace --help``.
subsubparser_kw = {'help': namespace_kwargs.get('title')}
subsubparser = subparsers_action.add_parser(namespace, **subsubparser_kw)
subparsers_action = subsubparser.add_subparsers(**namespace_kwargs) # depends on [control=['if'], data=[]]
else:
        assert not namespace_kwargs, '`namespace_kwargs` only makes sense with `namespace`.'
for func in functions:
(cmd_name, func_parser_kwargs) = _extract_command_meta_from_func(func)
# override any computed kwargs by manually supplied ones
if func_kwargs:
func_parser_kwargs.update(func_kwargs) # depends on [control=['if'], data=[]]
# create and set up the parser for this command
command_parser = subparsers_action.add_parser(cmd_name, **func_parser_kwargs)
set_default_command(command_parser, func) # depends on [control=['for'], data=['func']]
|
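A minimal usage sketch for the add_commands() above, assuming the real argh package; the hello/goodbye commands and the 'greet' namespace are invented for illustration:

import argparse
import argh

def hello(name='world'):
    return 'Hello, %s!' % name

def goodbye(name='world'):
    return 'Goodbye, %s!' % name

parser = argparse.ArgumentParser()
# registers "prog.py greet hello" and "prog.py greet goodbye"
argh.add_commands(parser, [hello, goodbye], namespace='greet',
                  namespace_kwargs={'title': 'greeting commands'})
argh.dispatch(parser)

Passing namespace_kwargs without namespace would trip the assert shown in the source.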
def add(self, text, name, field, type='term', size=None, params=None):
"""
Set the suggester of given type.
:param text: text
:param name: name of suggest
:param field: field to be used
:param type: type of suggester to add, available types are: completion,
phrase, term
:param size: number of phrases
:param params: dict of additional parameters to pass to the suggester
:return: None
"""
func = None
if type == 'completion':
func = self.add_completion
elif type == 'phrase':
func = self.add_phrase
elif type == 'term':
func = self.add_term
else:
raise InvalidQuery('Invalid type')
func(text=text, name=name, field=field, size=size, params=params)
|
def function[add, parameter[self, text, name, field, type, size, params]]:
constant[
Set the suggester of given type.
:param text: text
:param name: name of suggest
:param field: field to be used
:param type: type of suggester to add, available types are: completion,
phrase, term
:param size: number of phrases
:param params: dict of additional parameters to pass to the suggester
:return: None
]
variable[func] assign[=] constant[None]
if compare[name[type] equal[==] constant[completion]] begin[:]
variable[func] assign[=] name[self].add_completion
call[name[func], parameter[]]
|
keyword[def] identifier[add] ( identifier[self] , identifier[text] , identifier[name] , identifier[field] , identifier[type] = literal[string] , identifier[size] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
identifier[func] = keyword[None]
keyword[if] identifier[type] == literal[string] :
identifier[func] = identifier[self] . identifier[add_completion]
keyword[elif] identifier[type] == literal[string] :
identifier[func] = identifier[self] . identifier[add_phrase]
keyword[elif] identifier[type] == literal[string] :
identifier[func] = identifier[self] . identifier[add_term]
keyword[else] :
keyword[raise] identifier[InvalidQuery] ( literal[string] )
identifier[func] ( identifier[text] = identifier[text] , identifier[name] = identifier[name] , identifier[field] = identifier[field] , identifier[size] = identifier[size] , identifier[params] = identifier[params] )
|
def add(self, text, name, field, type='term', size=None, params=None):
"""
Set the suggester of given type.
:param text: text
:param name: name of suggest
:param field: field to be used
:param type: type of suggester to add, available types are: completion,
phrase, term
:param size: number of phrases
:param params: dict of additional parameters to pass to the suggester
:return: None
"""
func = None
if type == 'completion':
func = self.add_completion # depends on [control=['if'], data=[]]
elif type == 'phrase':
func = self.add_phrase # depends on [control=['if'], data=[]]
elif type == 'term':
func = self.add_term # depends on [control=['if'], data=[]]
else:
raise InvalidQuery('Invalid type')
func(text=text, name=name, field=field, size=size, params=params)
|
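A hedged usage sketch for the add() dispatcher above; only the add() signature and InvalidQuery come from the source, while the Suggest container class, the query text, and the field name are assumptions:

suggester = Suggest()  # assumed owner class of the add() method above
suggester.add(text='pyhton', name='spellcheck', field='title',
              type='term', size=3)
try:
    suggester.add(text='pyhton', name='bad', field='title', type='fuzzy')
except InvalidQuery:
    print("only 'completion', 'phrase' and 'term' are accepted")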
def synctree(src, dst, onexist=None):
"""Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
"""
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError
if dst.exists() and not dst.is_dir():
raise ValueError
if onexist is None:
        def onexist(path):
            # default no-op: the existing destination file is silently kept;
            # it must accept the destination path the docstring promises
            pass
_synctree(src, dst, onexist)
|
def function[synctree, parameter[src, dst, onexist]]:
constant[Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
]
variable[src] assign[=] call[call[name[pathlib].Path, parameter[name[src]]].resolve, parameter[]]
variable[dst] assign[=] call[call[name[pathlib].Path, parameter[name[dst]]].resolve, parameter[]]
if <ast.UnaryOp object at 0x7da18bccaf20> begin[:]
<ast.Raise object at 0x7da18bcc82e0>
if <ast.BoolOp object at 0x7da18bccb2b0> begin[:]
<ast.Raise object at 0x7da18bcc97b0>
if compare[name[onexist] is constant[None]] begin[:]
        def function[onexist, parameter[path]]:
pass
call[name[_synctree], parameter[name[src], name[dst], name[onexist]]]
|
keyword[def] identifier[synctree] ( identifier[src] , identifier[dst] , identifier[onexist] = keyword[None] ):
literal[string]
identifier[src] = identifier[pathlib] . identifier[Path] ( identifier[src] ). identifier[resolve] ()
identifier[dst] = identifier[pathlib] . identifier[Path] ( identifier[dst] ). identifier[resolve] ()
keyword[if] keyword[not] identifier[src] . identifier[is_dir] ():
keyword[raise] identifier[ValueError]
keyword[if] identifier[dst] . identifier[exists] () keyword[and] keyword[not] identifier[dst] . identifier[is_dir] ():
keyword[raise] identifier[ValueError]
keyword[if] identifier[onexist] keyword[is] keyword[None] :
        keyword[def] identifier[onexist] ( identifier[path] ): keyword[pass]
identifier[_synctree] ( identifier[src] , identifier[dst] , identifier[onexist] )
|
def synctree(src, dst, onexist=None):
"""Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
"""
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError # depends on [control=['if'], data=[]]
if dst.exists() and (not dst.is_dir()):
raise ValueError # depends on [control=['if'], data=[]]
if onexist is None:
        def onexist(path):
            pass # depends on [control=['if'], data=[]]
_synctree(src, dst, onexist)
|
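A usage sketch for synctree(); the paths are invented, and the callback shows how onexist turns the default silent skip into an error:

def fail_on_conflict(path):
    # receives the full destination path of the file that already exists
    raise FileExistsError(str(path))

# copy files missing from build/assets; conflicts raise instead of being
# silently kept (the default behaviour when onexist is omitted)
synctree('project/assets', 'build/assets', onexist=fail_on_conflict)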
def export_flow_process_data(params, process):
"""
Creates a new SequenceFlow XML element for given edge parameters and adds it to 'process' element.
:param params: dictionary with edge parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for sequence flows)
"""
output_flow = eTree.SubElement(process, consts.Consts.sequence_flow)
output_flow.set(consts.Consts.id, params[consts.Consts.id])
output_flow.set(consts.Consts.name, params[consts.Consts.name])
output_flow.set(consts.Consts.source_ref, params[consts.Consts.source_ref])
output_flow.set(consts.Consts.target_ref, params[consts.Consts.target_ref])
if consts.Consts.condition_expression in params:
condition_expression_params = params[consts.Consts.condition_expression]
condition_expression = eTree.SubElement(output_flow, consts.Consts.condition_expression)
        condition_expression.set(consts.Consts.id, condition_expression_params[consts.Consts.id])
condition_expression.text = condition_expression_params[consts.Consts.condition_expression]
output_flow.set(consts.Consts.name, condition_expression_params[consts.Consts.condition_expression])
|
def function[export_flow_process_data, parameter[params, process]]:
constant[
Creates a new SequenceFlow XML element for given edge parameters and adds it to 'process' element.
:param params: dictionary with edge parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for sequence flows)
]
variable[output_flow] assign[=] call[name[eTree].SubElement, parameter[name[process], name[consts].Consts.sequence_flow]]
call[name[output_flow].set, parameter[name[consts].Consts.id, call[name[params]][name[consts].Consts.id]]]
call[name[output_flow].set, parameter[name[consts].Consts.name, call[name[params]][name[consts].Consts.name]]]
call[name[output_flow].set, parameter[name[consts].Consts.source_ref, call[name[params]][name[consts].Consts.source_ref]]]
call[name[output_flow].set, parameter[name[consts].Consts.target_ref, call[name[params]][name[consts].Consts.target_ref]]]
if compare[name[consts].Consts.condition_expression in name[params]] begin[:]
variable[condition_expression_params] assign[=] call[name[params]][name[consts].Consts.condition_expression]
variable[condition_expression] assign[=] call[name[eTree].SubElement, parameter[name[output_flow], name[consts].Consts.condition_expression]]
        call[name[condition_expression].set, parameter[name[consts].Consts.id, call[name[condition_expression_params]][name[consts].Consts.id]]]
name[condition_expression].text assign[=] call[name[condition_expression_params]][name[consts].Consts.condition_expression]
call[name[output_flow].set, parameter[name[consts].Consts.name, call[name[condition_expression_params]][name[consts].Consts.condition_expression]]]
|
keyword[def] identifier[export_flow_process_data] ( identifier[params] , identifier[process] ):
literal[string]
identifier[output_flow] = identifier[eTree] . identifier[SubElement] ( identifier[process] , identifier[consts] . identifier[Consts] . identifier[sequence_flow] )
identifier[output_flow] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[id] , identifier[params] [ identifier[consts] . identifier[Consts] . identifier[id] ])
identifier[output_flow] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[name] , identifier[params] [ identifier[consts] . identifier[Consts] . identifier[name] ])
identifier[output_flow] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[source_ref] , identifier[params] [ identifier[consts] . identifier[Consts] . identifier[source_ref] ])
identifier[output_flow] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[target_ref] , identifier[params] [ identifier[consts] . identifier[Consts] . identifier[target_ref] ])
keyword[if] identifier[consts] . identifier[Consts] . identifier[condition_expression] keyword[in] identifier[params] :
identifier[condition_expression_params] = identifier[params] [ identifier[consts] . identifier[Consts] . identifier[condition_expression] ]
identifier[condition_expression] = identifier[eTree] . identifier[SubElement] ( identifier[output_flow] , identifier[consts] . identifier[Consts] . identifier[condition_expression] )
        identifier[condition_expression] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[id] , identifier[condition_expression_params] [ identifier[consts] . identifier[Consts] . identifier[id] ])
identifier[condition_expression] . identifier[text] = identifier[condition_expression_params] [ identifier[consts] . identifier[Consts] . identifier[condition_expression] ]
identifier[output_flow] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[name] , identifier[condition_expression_params] [ identifier[consts] . identifier[Consts] . identifier[condition_expression] ])
|
def export_flow_process_data(params, process):
"""
Creates a new SequenceFlow XML element for given edge parameters and adds it to 'process' element.
:param params: dictionary with edge parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for sequence flows)
"""
output_flow = eTree.SubElement(process, consts.Consts.sequence_flow)
output_flow.set(consts.Consts.id, params[consts.Consts.id])
output_flow.set(consts.Consts.name, params[consts.Consts.name])
output_flow.set(consts.Consts.source_ref, params[consts.Consts.source_ref])
output_flow.set(consts.Consts.target_ref, params[consts.Consts.target_ref])
if consts.Consts.condition_expression in params:
condition_expression_params = params[consts.Consts.condition_expression]
condition_expression = eTree.SubElement(output_flow, consts.Consts.condition_expression)
        condition_expression.set(consts.Consts.id, condition_expression_params[consts.Consts.id])
condition_expression.text = condition_expression_params[consts.Consts.condition_expression]
output_flow.set(consts.Consts.name, condition_expression_params[consts.Consts.condition_expression]) # depends on [control=['if'], data=['params']]
|
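A hedged usage sketch for export_flow_process_data(); it assumes the module's eTree alias and consts import are in scope, and the concrete id/name/ref values are invented:

import xml.etree.ElementTree as eTree  # assumed to match the module's eTree alias

process = eTree.Element('process')
params = {
    consts.Consts.id: 'flow_1',
    consts.Consts.name: 'approved',
    consts.Consts.source_ref: 'task_review',
    consts.Consts.target_ref: 'task_publish',
}
export_flow_process_data(params, process)
print(eTree.tostring(process))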
def depersist(self, key):
"""
Remove ``key`` from dictionary.
:param key: Key to remove from Zookeeper.
:type key: string
"""
self.connection.retry(self.connection.delete, self.__path_of(key))
self.__increment_last_updated()
|
def function[depersist, parameter[self, key]]:
constant[
Remove ``key`` from dictionary.
:param key: Key to remove from Zookeeper.
:type key: string
]
call[name[self].connection.retry, parameter[name[self].connection.delete, call[name[self].__path_of, parameter[name[key]]]]]
call[name[self].__increment_last_updated, parameter[]]
|
keyword[def] identifier[depersist] ( identifier[self] , identifier[key] ):
literal[string]
identifier[self] . identifier[connection] . identifier[retry] ( identifier[self] . identifier[connection] . identifier[delete] , identifier[self] . identifier[__path_of] ( identifier[key] ))
identifier[self] . identifier[__increment_last_updated] ()
|
def depersist(self, key):
"""
Remove ``key`` from dictionary.
:param key: Key to remove from Zookeeper.
:type key: string
"""
self.connection.retry(self.connection.delete, self.__path_of(key))
self.__increment_last_updated()
|
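depersist() leans on a retry-wrapped delete; a standalone sketch of that idiom with a kazoo-style client (the host, the path scheme, and kazoo as the backing library are all assumptions):

from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
# retry() re-issues delete() across transient connection losses
zk.retry(zk.delete, '/config/feature_flags/new_ui')
zk.stop()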
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = dataset.get_dimension_index(dim)
if dim_idx in [0, 1]:
l, b, r, t = dataset.bounds.lbrt()
dim2, dim1 = dataset.data.shape[:2]
xdate, ydate = isinstance(l, util.datetime_types), isinstance(b, util.datetime_types)
if l == r or dim1 == 0:
xlin = np.full((dim1,), l, dtype=('datetime64[us]' if xdate else 'float'))
elif xdate:
xlin = util.date_range(l, r, dim1, dataset._time_unit)
else:
xstep = float(r - l)/dim1
xlin = np.linspace(l+(xstep/2.), r-(xstep/2.), dim1)
if b == t or dim2 == 0:
ylin = np.full((dim2,), b, dtype=('datetime64[us]' if ydate else 'float'))
elif ydate:
ylin = util.date_range(b, t, dim2, dataset._time_unit)
else:
ystep = float(t - b)/dim2
ylin = np.linspace(b+(ystep/2.), t-(ystep/2.), dim2)
if expanded:
values = np.meshgrid(ylin, xlin)[abs(dim_idx-1)]
return values.flatten() if flat else values.T
else:
return ylin if dim_idx else xlin
elif dataset.ndims <= dim_idx < len(dataset.dimensions()):
# Raster arrays are stored with different orientation
# than expanded column format, reorient before expanding
if dataset.data.ndim > 2:
data = dataset.data[:, :, dim_idx-dataset.ndims]
else:
data = dataset.data
data = np.flipud(data)
return data.T.flatten() if flat else data
else:
return None
|
def function[values, parameter[cls, dataset, dim, expanded, flat, compute]]:
constant[
The set of samples available along a particular dimension.
]
variable[dim_idx] assign[=] call[name[dataset].get_dimension_index, parameter[name[dim]]]
if compare[name[dim_idx] in list[[<ast.Constant object at 0x7da18f09f5e0>, <ast.Constant object at 0x7da18f09cdf0>]]] begin[:]
<ast.Tuple object at 0x7da18f09f160> assign[=] call[name[dataset].bounds.lbrt, parameter[]]
<ast.Tuple object at 0x7da18f09cbe0> assign[=] call[name[dataset].data.shape][<ast.Slice object at 0x7da18f09cee0>]
<ast.Tuple object at 0x7da18f09f4f0> assign[=] tuple[[<ast.Call object at 0x7da18f09c6d0>, <ast.Call object at 0x7da18f09e740>]]
if <ast.BoolOp object at 0x7da18f09ca00> begin[:]
variable[xlin] assign[=] call[name[np].full, parameter[tuple[[<ast.Name object at 0x7da18f09d5d0>]], name[l]]]
if <ast.BoolOp object at 0x7da1b2346d10> begin[:]
variable[ylin] assign[=] call[name[np].full, parameter[tuple[[<ast.Name object at 0x7da1b2347430>]], name[b]]]
if name[expanded] begin[:]
variable[values] assign[=] call[call[name[np].meshgrid, parameter[name[ylin], name[xlin]]]][call[name[abs], parameter[binary_operation[name[dim_idx] - constant[1]]]]]
return[<ast.IfExp object at 0x7da1b2346860>]
|
keyword[def] identifier[values] ( identifier[cls] , identifier[dataset] , identifier[dim] , identifier[expanded] = keyword[True] , identifier[flat] = keyword[True] , identifier[compute] = keyword[True] ):
literal[string]
identifier[dim_idx] = identifier[dataset] . identifier[get_dimension_index] ( identifier[dim] )
keyword[if] identifier[dim_idx] keyword[in] [ literal[int] , literal[int] ]:
identifier[l] , identifier[b] , identifier[r] , identifier[t] = identifier[dataset] . identifier[bounds] . identifier[lbrt] ()
identifier[dim2] , identifier[dim1] = identifier[dataset] . identifier[data] . identifier[shape] [: literal[int] ]
identifier[xdate] , identifier[ydate] = identifier[isinstance] ( identifier[l] , identifier[util] . identifier[datetime_types] ), identifier[isinstance] ( identifier[b] , identifier[util] . identifier[datetime_types] )
keyword[if] identifier[l] == identifier[r] keyword[or] identifier[dim1] == literal[int] :
identifier[xlin] = identifier[np] . identifier[full] (( identifier[dim1] ,), identifier[l] , identifier[dtype] =( literal[string] keyword[if] identifier[xdate] keyword[else] literal[string] ))
keyword[elif] identifier[xdate] :
identifier[xlin] = identifier[util] . identifier[date_range] ( identifier[l] , identifier[r] , identifier[dim1] , identifier[dataset] . identifier[_time_unit] )
keyword[else] :
identifier[xstep] = identifier[float] ( identifier[r] - identifier[l] )/ identifier[dim1]
identifier[xlin] = identifier[np] . identifier[linspace] ( identifier[l] +( identifier[xstep] / literal[int] ), identifier[r] -( identifier[xstep] / literal[int] ), identifier[dim1] )
keyword[if] identifier[b] == identifier[t] keyword[or] identifier[dim2] == literal[int] :
identifier[ylin] = identifier[np] . identifier[full] (( identifier[dim2] ,), identifier[b] , identifier[dtype] =( literal[string] keyword[if] identifier[ydate] keyword[else] literal[string] ))
keyword[elif] identifier[ydate] :
identifier[ylin] = identifier[util] . identifier[date_range] ( identifier[b] , identifier[t] , identifier[dim2] , identifier[dataset] . identifier[_time_unit] )
keyword[else] :
identifier[ystep] = identifier[float] ( identifier[t] - identifier[b] )/ identifier[dim2]
identifier[ylin] = identifier[np] . identifier[linspace] ( identifier[b] +( identifier[ystep] / literal[int] ), identifier[t] -( identifier[ystep] / literal[int] ), identifier[dim2] )
keyword[if] identifier[expanded] :
identifier[values] = identifier[np] . identifier[meshgrid] ( identifier[ylin] , identifier[xlin] )[ identifier[abs] ( identifier[dim_idx] - literal[int] )]
keyword[return] identifier[values] . identifier[flatten] () keyword[if] identifier[flat] keyword[else] identifier[values] . identifier[T]
keyword[else] :
keyword[return] identifier[ylin] keyword[if] identifier[dim_idx] keyword[else] identifier[xlin]
keyword[elif] identifier[dataset] . identifier[ndims] <= identifier[dim_idx] < identifier[len] ( identifier[dataset] . identifier[dimensions] ()):
keyword[if] identifier[dataset] . identifier[data] . identifier[ndim] > literal[int] :
identifier[data] = identifier[dataset] . identifier[data] [:,:, identifier[dim_idx] - identifier[dataset] . identifier[ndims] ]
keyword[else] :
identifier[data] = identifier[dataset] . identifier[data]
identifier[data] = identifier[np] . identifier[flipud] ( identifier[data] )
keyword[return] identifier[data] . identifier[T] . identifier[flatten] () keyword[if] identifier[flat] keyword[else] identifier[data]
keyword[else] :
keyword[return] keyword[None]
|
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = dataset.get_dimension_index(dim)
if dim_idx in [0, 1]:
(l, b, r, t) = dataset.bounds.lbrt()
(dim2, dim1) = dataset.data.shape[:2]
(xdate, ydate) = (isinstance(l, util.datetime_types), isinstance(b, util.datetime_types))
if l == r or dim1 == 0:
xlin = np.full((dim1,), l, dtype='datetime64[us]' if xdate else 'float') # depends on [control=['if'], data=[]]
elif xdate:
xlin = util.date_range(l, r, dim1, dataset._time_unit) # depends on [control=['if'], data=[]]
else:
xstep = float(r - l) / dim1
xlin = np.linspace(l + xstep / 2.0, r - xstep / 2.0, dim1)
if b == t or dim2 == 0:
ylin = np.full((dim2,), b, dtype='datetime64[us]' if ydate else 'float') # depends on [control=['if'], data=[]]
elif ydate:
ylin = util.date_range(b, t, dim2, dataset._time_unit) # depends on [control=['if'], data=[]]
else:
ystep = float(t - b) / dim2
ylin = np.linspace(b + ystep / 2.0, t - ystep / 2.0, dim2)
if expanded:
values = np.meshgrid(ylin, xlin)[abs(dim_idx - 1)]
return values.flatten() if flat else values.T # depends on [control=['if'], data=[]]
else:
return ylin if dim_idx else xlin # depends on [control=['if'], data=['dim_idx']]
elif dataset.ndims <= dim_idx < len(dataset.dimensions()):
# Raster arrays are stored with different orientation
# than expanded column format, reorient before expanding
if dataset.data.ndim > 2:
data = dataset.data[:, :, dim_idx - dataset.ndims] # depends on [control=['if'], data=[]]
else:
data = dataset.data
data = np.flipud(data)
return data.T.flatten() if flat else data # depends on [control=['if'], data=['dim_idx']]
else:
return None
|
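The coordinate branch above places samples at cell centres rather than cell edges; a standalone numpy check of that arithmetic:

import numpy as np

l, r, dim1 = 0.0, 10.0, 5
xstep = float(r - l) / dim1
xlin = np.linspace(l + (xstep / 2.), r - (xstep / 2.), dim1)
print(xlin)  # [1. 3. 5. 7. 9.] -- centres of five cells of width 2.0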
def _set_get_mac_address_table(self, v, load=False):
"""
Setter method for get_mac_address_table, mapped from YANG variable /brocade_mac_address_table_rpc/get_mac_address_table (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_mac_address_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_mac_address_table() directly.
    YANG Description: This function returns operational data for a
    given mac entry and the corresponding details of that mac
    entry. The mac entries are fetched similarly to the snmp
    get-next model. When no input is given to this rpc, the first
    set of mac entries will be fetched. If there are more mac
    entries remaining to be fetched, the has-more flag at the
    end of the output will be set to true. To get the next set
    of mac entries, this rpc has to be queried again with the
    last mac entry details of the previous set as the input in
    the get-next-request case. With get-next-request all three
    fields, i.e. last-mac-address, last-vlan-id and last-mac-type,
    need to be passed as input. When the rpc is queried with a
    mac-address as input in the get-request case, the
    corresponding mac entry, if it exists, will be fetched.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_mac_address_table.get_mac_address_table, is_leaf=True, yang_name="get-mac-address-table", rest_name="get-mac-address-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getl2sysmac-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_mac_address_table must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_mac_address_table.get_mac_address_table, is_leaf=True, yang_name="get-mac-address-table", rest_name="get-mac-address-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getl2sysmac-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='rpc', is_config=True)""",
})
self.__get_mac_address_table = t
if hasattr(self, '_set'):
self._set()
|
def function[_set_get_mac_address_table, parameter[self, v, load]]:
constant[
Setter method for get_mac_address_table, mapped from YANG variable /brocade_mac_address_table_rpc/get_mac_address_table (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_mac_address_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_mac_address_table() directly.
    YANG Description: This function returns operational data for a
    given mac entry and the corresponding details of that mac
    entry. The mac entries are fetched similarly to the snmp
    get-next model. When no input is given to this rpc, the first
    set of mac entries will be fetched. If there are more mac
    entries remaining to be fetched, the has-more flag at the
    end of the output will be set to true. To get the next set
    of mac entries, this rpc has to be queried again with the
    last mac entry details of the previous set as the input in
    the get-next-request case. With get-next-request all three
    fields, i.e. last-mac-address, last-vlan-id and last-mac-type,
    need to be passed as input. When the rpc is queried with a
    mac-address as input in the get-request case, the
    corresponding mac entry, if it exists, will be fetched.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bc70190>
name[self].__get_mac_address_table assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_get_mac_address_table] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[get_mac_address_table] . identifier[get_mac_address_table] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[False] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__get_mac_address_table] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_get_mac_address_table(self, v, load=False):
"""
Setter method for get_mac_address_table, mapped from YANG variable /brocade_mac_address_table_rpc/get_mac_address_table (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_mac_address_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_mac_address_table() directly.
    YANG Description: This function returns operational data for a
    given mac entry and the corresponding details of that mac
    entry. The mac entries are fetched similarly to the snmp
    get-next model. When no input is given to this rpc, the first
    set of mac entries will be fetched. If there are more mac
    entries remaining to be fetched, the has-more flag at the
    end of the output will be set to true. To get the next set
    of mac entries, this rpc has to be queried again with the
    last mac entry details of the previous set as the input in
    the get-next-request case. With get-next-request all three
    fields, i.e. last-mac-address, last-vlan-id and last-mac-type,
    need to be passed as input. When the rpc is queried with a
    mac-address as input in the get-request case, the
    corresponding mac entry, if it exists, will be fetched.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=get_mac_address_table.get_mac_address_table, is_leaf=True, yang_name='get-mac-address-table', rest_name='get-mac-address-table', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getl2sysmac-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='rpc', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'get_mac_address_table must be of a type compatible with rpc', 'defined-type': 'rpc', 'generated-type': 'YANGDynClass(base=get_mac_address_table.get_mac_address_table, is_leaf=True, yang_name="get-mac-address-table", rest_name="get-mac-address-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'hidden\': u\'rpccmd\', u\'actionpoint\': u\'getl2sysmac-action-point\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mac-address-table\', defining_module=\'brocade-mac-address-table\', yang_type=\'rpc\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__get_mac_address_table = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def _set_root(self, request):
"""Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
            ResponseFailed: Failed to set the root of the merkle tree
"""
if request.state_root:
root = request.state_root
else:
head = self._get_chain_head()
root = head.state_root_hash
try:
self._tree.set_merkle_root(root)
except KeyError as e:
LOGGER.debug('Unable to find root "%s" in database', e)
raise _ResponseFailed(self._status.NO_ROOT)
return root
|
def function[_set_root, parameter[self, request]]:
constant[Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
    ResponseFailed: Failed to set the root of the merkle tree
]
if name[request].state_root begin[:]
variable[root] assign[=] name[request].state_root
<ast.Try object at 0x7da20c7c8f10>
return[name[root]]
|
keyword[def] identifier[_set_root] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] identifier[request] . identifier[state_root] :
identifier[root] = identifier[request] . identifier[state_root]
keyword[else] :
identifier[head] = identifier[self] . identifier[_get_chain_head] ()
identifier[root] = identifier[head] . identifier[state_root_hash]
keyword[try] :
identifier[self] . identifier[_tree] . identifier[set_merkle_root] ( identifier[root] )
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[e] )
keyword[raise] identifier[_ResponseFailed] ( identifier[self] . identifier[_status] . identifier[NO_ROOT] )
keyword[return] identifier[root]
|
def _set_root(self, request):
"""Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
            ResponseFailed: Failed to set the root of the merkle tree
"""
if request.state_root:
root = request.state_root # depends on [control=['if'], data=[]]
else:
head = self._get_chain_head()
root = head.state_root_hash
try:
self._tree.set_merkle_root(root) # depends on [control=['try'], data=[]]
except KeyError as e:
LOGGER.debug('Unable to find root "%s" in database', e)
raise _ResponseFailed(self._status.NO_ROOT) # depends on [control=['except'], data=['e']]
return root
|
def run_once(function, state={}, errors={}):
"""A memoization decorator, whose purpose is to cache calls."""
@six.wraps(function)
def _wrapper(*args, **kwargs):
if function in errors:
# Deliberate use of LBYL.
six.reraise(*errors[function])
try:
return state[function]
except KeyError:
try:
state[function] = result = function(*args, **kwargs)
return result
except Exception:
errors[function] = sys.exc_info()
raise
return _wrapper
|
def function[run_once, parameter[function, state, errors]]:
constant[A memoization decorator, whose purpose is to cache calls.]
def function[_wrapper, parameter[]]:
if compare[name[function] in name[errors]] begin[:]
call[name[six].reraise, parameter[<ast.Starred object at 0x7da20e74b6a0>]]
<ast.Try object at 0x7da20e74ad70>
return[name[_wrapper]]
|
keyword[def] identifier[run_once] ( identifier[function] , identifier[state] ={}, identifier[errors] ={}):
literal[string]
@ identifier[six] . identifier[wraps] ( identifier[function] )
keyword[def] identifier[_wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[function] keyword[in] identifier[errors] :
identifier[six] . identifier[reraise] (* identifier[errors] [ identifier[function] ])
keyword[try] :
keyword[return] identifier[state] [ identifier[function] ]
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[state] [ identifier[function] ]= identifier[result] = identifier[function] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[result]
keyword[except] identifier[Exception] :
identifier[errors] [ identifier[function] ]= identifier[sys] . identifier[exc_info] ()
keyword[raise]
keyword[return] identifier[_wrapper]
|
def run_once(function, state={}, errors={}):
"""A memoization decorator, whose purpose is to cache calls."""
@six.wraps(function)
def _wrapper(*args, **kwargs):
if function in errors:
# Deliberate use of LBYL.
six.reraise(*errors[function]) # depends on [control=['if'], data=['function', 'errors']]
try:
return state[function] # depends on [control=['try'], data=[]]
except KeyError:
try:
state[function] = result = function(*args, **kwargs)
return result # depends on [control=['try'], data=[]]
except Exception:
errors[function] = sys.exc_info()
raise # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return _wrapper
|
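A usage sketch for run_once(); note that the mutable default dicts act as module-level caches keyed by function object, so every decorated function shares them:

@run_once
def load_config():
    print('loading...')
    return {'debug': True}

load_config()  # prints 'loading...' and returns the dict
load_config()  # returns the same cached dict; the body does not run again

If the first call raises, the exception info is cached as well and re-raised on every later call.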
def format_mentions(text, format_callback=format_mention):
"""Searches the given text for mentions generated by `expand_mention()` and returns a human-readable form.
For example:
"@<bob http://example.org/twtxt.txt>" will result in "@bob"
If you follow a source: source.nick will be bold
If you are the mentioned source: source.nick will be bold and coloured
If nothing from the above is true: nick will be unstyled
If nothing from the above is true and nick is not given: url will be used
"""
def handle_mention(match):
name, url = match.groups()
return format_callback(name, url)
return mention_re.sub(handle_mention, text)
|
def function[format_mentions, parameter[text, format_callback]]:
constant[Searches the given text for mentions generated by `expand_mention()` and returns a human-readable form.
For example:
"@<bob http://example.org/twtxt.txt>" will result in "@bob"
If you follow a source: source.nick will be bold
If you are the mentioned source: source.nick will be bold and coloured
If nothing from the above is true: nick will be unstyled
If nothing from the above is true and nick is not given: url will be used
]
def function[handle_mention, parameter[match]]:
<ast.Tuple object at 0x7da1b008a2c0> assign[=] call[name[match].groups, parameter[]]
return[call[name[format_callback], parameter[name[name], name[url]]]]
return[call[name[mention_re].sub, parameter[name[handle_mention], name[text]]]]
|
keyword[def] identifier[format_mentions] ( identifier[text] , identifier[format_callback] = identifier[format_mention] ):
literal[string]
keyword[def] identifier[handle_mention] ( identifier[match] ):
identifier[name] , identifier[url] = identifier[match] . identifier[groups] ()
keyword[return] identifier[format_callback] ( identifier[name] , identifier[url] )
keyword[return] identifier[mention_re] . identifier[sub] ( identifier[handle_mention] , identifier[text] )
|
def format_mentions(text, format_callback=format_mention):
"""Searches the given text for mentions generated by `expand_mention()` and returns a human-readable form.
For example:
"@<bob http://example.org/twtxt.txt>" will result in "@bob"
If you follow a source: source.nick will be bold
If you are the mentioned source: source.nick will be bold and coloured
If nothing from the above is true: nick will be unstyled
If nothing from the above is true and nick is not given: url will be used
"""
def handle_mention(match):
(name, url) = match.groups()
return format_callback(name, url)
return mention_re.sub(handle_mention, text)
|
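A hedged usage sketch for format_mentions(); it assumes mention_re matches the '@<nick url>' form described in the docstring:

def plain_mention(name, url):
    # name may be None when the mention carries only a URL
    return '@' + (name or url)

text = 'ping @<bob http://example.org/twtxt.txt>!'
print(format_mentions(text, format_callback=plain_mention))
# expected output: 'ping @bob!'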
def eth_getTransactionByBlockHashAndIndex(self, bhash, index=0):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblockhashandindex
:param bhash: Block hash
:type bhash: str
:param index: Index position (optional)
:type index: int
"""
result = yield from self.rpc_call('eth_getTransactionByBlockHashAndIndex',
[bhash, hex(index)])
# TODO: Update result response
return result
|
def function[eth_getTransactionByBlockHashAndIndex, parameter[self, bhash, index]]:
constant[https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblockhashandindex
:param bhash: Block hash
:type bhash: str
:param index: Index position (optional)
:type index: int
]
variable[result] assign[=] <ast.YieldFrom object at 0x7da18eb541f0>
return[name[result]]
|
keyword[def] identifier[eth_getTransactionByBlockHashAndIndex] ( identifier[self] , identifier[bhash] , identifier[index] = literal[int] ):
literal[string]
identifier[result] = keyword[yield] keyword[from] identifier[self] . identifier[rpc_call] ( literal[string] ,
[ identifier[bhash] , identifier[hex] ( identifier[index] )])
keyword[return] identifier[result]
|
def eth_getTransactionByBlockHashAndIndex(self, bhash, index=0):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblockhashandindex
:param bhash: Block hash
:type bhash: str
:param index: Index position (optional)
:type index: int
"""
result = (yield from self.rpc_call('eth_getTransactionByBlockHashAndIndex', [bhash, hex(index)]))
# TODO: Update result response
return result
|
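A hedged usage sketch; `client` stands for an already-connected instance of the asyncio JSON-RPC class the coroutine above belongs to, and the block hash is a placeholder:

import asyncio

bhash = '0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2'
loop = asyncio.get_event_loop()
tx = loop.run_until_complete(
    client.eth_getTransactionByBlockHashAndIndex(bhash, index=0))
print(tx)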
def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data)
|
def function[cancel_id, parameter[cls, id]]:
constant[
Cancels command denoted by this id
Args:
`id`: command id
]
variable[conn] assign[=] call[name[Qubole].agent, parameter[]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b101a830>], [<ast.Constant object at 0x7da1b1019690>]]
return[call[name[conn].put, parameter[call[name[cls].element_path, parameter[name[id]]], name[data]]]]
|
keyword[def] identifier[cancel_id] ( identifier[cls] , identifier[id] ):
literal[string]
identifier[conn] = identifier[Qubole] . identifier[agent] ()
identifier[data] ={ literal[string] : literal[string] }
keyword[return] identifier[conn] . identifier[put] ( identifier[cls] . identifier[element_path] ( identifier[id] ), identifier[data] )
|
def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {'status': 'kill'}
return conn.put(cls.element_path(id), data)
|
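A hedged usage sketch for cancel_id(); HiveCommand stands for a concrete Command subclass in a Qubole-style SDK, and the token and id are placeholders:

Qubole.configure(api_token='YOUR_API_TOKEN')  # one-time agent setup
HiveCommand.cancel_id('123456')  # issues PUT commands/123456 with {"status": "kill"}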
def has(self, key: str) -> bool:
"""
        Returns True if set() has been called for a key. The cached value may be None.
:param key:
:return:
"""
if key in self._services:
return True
return False
|
def function[has, parameter[self, key]]:
constant[
    Returns True if set() has been called for a key. The cached value may be None.
:param key:
:return:
]
if compare[name[key] in name[self]._services] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[has] ( identifier[self] , identifier[key] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_services] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def has(self, key: str) -> bool:
"""
        Returns True if set() has been called for a key. The cached value may be None.
:param key:
:return:
"""
if key in self._services:
return True # depends on [control=['if'], data=[]]
return False
|
def get_visible_scopes(self):
"""Get list of non-internal scopes for token.
:returns: A list of scopes.
"""
return [k for k, s in current_oauth2server.scope_choices()
if k in self.scopes]
|
def function[get_visible_scopes, parameter[self]]:
constant[Get list of non-internal scopes for token.
:returns: A list of scopes.
]
return[<ast.ListComp object at 0x7da1b2524040>]
|
keyword[def] identifier[get_visible_scopes] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[k] keyword[for] identifier[k] , identifier[s] keyword[in] identifier[current_oauth2server] . identifier[scope_choices] ()
keyword[if] identifier[k] keyword[in] identifier[self] . identifier[scopes] ]
|
def get_visible_scopes(self):
"""Get list of non-internal scopes for token.
:returns: A list of scopes.
"""
return [k for (k, s) in current_oauth2server.scope_choices() if k in self.scopes]
|
def toggle_codecompletion(self, checked):
"""Toggle automatic code completion"""
self.shell.set_codecompletion_auto(checked)
self.set_option('codecompletion/auto', checked)
|
def function[toggle_codecompletion, parameter[self, checked]]:
constant[Toggle automatic code completion]
call[name[self].shell.set_codecompletion_auto, parameter[name[checked]]]
call[name[self].set_option, parameter[constant[codecompletion/auto], name[checked]]]
|
keyword[def] identifier[toggle_codecompletion] ( identifier[self] , identifier[checked] ):
literal[string]
identifier[self] . identifier[shell] . identifier[set_codecompletion_auto] ( identifier[checked] )
identifier[self] . identifier[set_option] ( literal[string] , identifier[checked] )
|
def toggle_codecompletion(self, checked):
"""Toggle automatic code completion"""
self.shell.set_codecompletion_auto(checked)
self.set_option('codecompletion/auto', checked)
|
def distance(self, channel=1):
"""
        Returns the distance (0-100) to the beacon on the given channel.
Returns None when beacon is not found.
"""
self._ensure_mode(self.MODE_IR_SEEK)
channel = self._normalize_channel(channel)
ret_value = self.value((channel * 2) + 1)
# The value will be -128 if no beacon is found, return None instead
return None if ret_value == -128 else ret_value
|
def function[distance, parameter[self, channel]]:
constant[
    Returns the distance (0-100) to the beacon on the given channel.
Returns None when beacon is not found.
]
call[name[self]._ensure_mode, parameter[name[self].MODE_IR_SEEK]]
variable[channel] assign[=] call[name[self]._normalize_channel, parameter[name[channel]]]
variable[ret_value] assign[=] call[name[self].value, parameter[binary_operation[binary_operation[name[channel] * constant[2]] + constant[1]]]]
return[<ast.IfExp object at 0x7da1b17cd4b0>]
|
keyword[def] identifier[distance] ( identifier[self] , identifier[channel] = literal[int] ):
literal[string]
identifier[self] . identifier[_ensure_mode] ( identifier[self] . identifier[MODE_IR_SEEK] )
identifier[channel] = identifier[self] . identifier[_normalize_channel] ( identifier[channel] )
identifier[ret_value] = identifier[self] . identifier[value] (( identifier[channel] * literal[int] )+ literal[int] )
keyword[return] keyword[None] keyword[if] identifier[ret_value] ==- literal[int] keyword[else] identifier[ret_value]
|
def distance(self, channel=1):
"""
        Returns the distance (0-100) to the beacon on the given channel.
Returns None when beacon is not found.
"""
self._ensure_mode(self.MODE_IR_SEEK)
channel = self._normalize_channel(channel)
ret_value = self.value(channel * 2 + 1)
# The value will be -128 if no beacon is found, return None instead
return None if ret_value == -128 else ret_value
|
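The value-slot arithmetic above assumes two readings (heading, distance) per channel; a standalone check of the index mapping, where zero-basing is an assumption about _normalize_channel:

for channel in (1, 2, 3, 4):
    zero_based = channel - 1  # assumed _normalize_channel behaviour
    heading_idx = zero_based * 2
    distance_idx = zero_based * 2 + 1
    print(channel, heading_idx, distance_idx)
# channels 1-4 map to distance value indexes 1, 3, 5, 7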
def on_connected(self, headers, body):
"""
Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real
heartbeat numbers (based on what the server sent and what was specified by the client) - if the heartbeats
are not 0, we start up the heartbeat loop accordingly.
:param dict headers: headers in the connection message
:param body: the message body
"""
if 'heart-beat' in headers:
self.heartbeats = utils.calculate_heartbeats(
headers['heart-beat'].replace(' ', '').split(','), self.heartbeats)
if self.heartbeats != (0, 0):
self.send_sleep = self.heartbeats[0] / 1000
# by default, receive gets an additional grace of 50%
# set a different heart-beat-receive-scale when creating the connection to override that
self.receive_sleep = (self.heartbeats[1] / 1000) * self.heart_beat_receive_scale
log.debug("Setting receive_sleep to %s", self.receive_sleep)
# Give grace of receiving the first heartbeat
self.received_heartbeat = monotonic() + self.receive_sleep
self.running = True
if self.heartbeat_thread is None:
self.heartbeat_thread = utils.default_create_thread(
self.__heartbeat_loop)
self.heartbeat_thread.name = "StompHeartbeat%s" % \
getattr(self.heartbeat_thread, "name", "Thread")
|
def function[on_connected, parameter[self, headers, body]]:
constant[
Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real
heartbeat numbers (based on what the server sent and what was specified by the client) - if the heartbeats
are not 0, we start up the heartbeat loop accordingly.
:param dict headers: headers in the connection message
:param body: the message body
]
if compare[constant[heart-beat] in name[headers]] begin[:]
name[self].heartbeats assign[=] call[name[utils].calculate_heartbeats, parameter[call[call[call[name[headers]][constant[heart-beat]].replace, parameter[constant[ ], constant[]]].split, parameter[constant[,]]], name[self].heartbeats]]
if compare[name[self].heartbeats not_equal[!=] tuple[[<ast.Constant object at 0x7da2044c2aa0>, <ast.Constant object at 0x7da2044c2e00>]]] begin[:]
name[self].send_sleep assign[=] binary_operation[call[name[self].heartbeats][constant[0]] / constant[1000]]
name[self].receive_sleep assign[=] binary_operation[binary_operation[call[name[self].heartbeats][constant[1]] / constant[1000]] * name[self].heart_beat_receive_scale]
call[name[log].debug, parameter[constant[Setting receive_sleep to %s], name[self].receive_sleep]]
name[self].received_heartbeat assign[=] binary_operation[call[name[monotonic], parameter[]] + name[self].receive_sleep]
name[self].running assign[=] constant[True]
if compare[name[self].heartbeat_thread is constant[None]] begin[:]
name[self].heartbeat_thread assign[=] call[name[utils].default_create_thread, parameter[name[self].__heartbeat_loop]]
name[self].heartbeat_thread.name assign[=] binary_operation[constant[StompHeartbeat%s] <ast.Mod object at 0x7da2590d6920> call[name[getattr], parameter[name[self].heartbeat_thread, constant[name], constant[Thread]]]]
|
keyword[def] identifier[on_connected] ( identifier[self] , identifier[headers] , identifier[body] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[headers] :
identifier[self] . identifier[heartbeats] = identifier[utils] . identifier[calculate_heartbeats] (
identifier[headers] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] ), identifier[self] . identifier[heartbeats] )
keyword[if] identifier[self] . identifier[heartbeats] !=( literal[int] , literal[int] ):
identifier[self] . identifier[send_sleep] = identifier[self] . identifier[heartbeats] [ literal[int] ]/ literal[int]
identifier[self] . identifier[receive_sleep] =( identifier[self] . identifier[heartbeats] [ literal[int] ]/ literal[int] )* identifier[self] . identifier[heart_beat_receive_scale]
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[receive_sleep] )
identifier[self] . identifier[received_heartbeat] = identifier[monotonic] ()+ identifier[self] . identifier[receive_sleep]
identifier[self] . identifier[running] = keyword[True]
keyword[if] identifier[self] . identifier[heartbeat_thread] keyword[is] keyword[None] :
identifier[self] . identifier[heartbeat_thread] = identifier[utils] . identifier[default_create_thread] (
identifier[self] . identifier[__heartbeat_loop] )
identifier[self] . identifier[heartbeat_thread] . identifier[name] = literal[string] % identifier[getattr] ( identifier[self] . identifier[heartbeat_thread] , literal[string] , literal[string] )
|
def on_connected(self, headers, body):
"""
Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real
heartbeat numbers (based on what the server sent and what was specified by the client) - if the heartbeats
are not 0, we start up the heartbeat loop accordingly.
:param dict headers: headers in the connection message
:param body: the message body
"""
if 'heart-beat' in headers:
self.heartbeats = utils.calculate_heartbeats(headers['heart-beat'].replace(' ', '').split(','), self.heartbeats)
if self.heartbeats != (0, 0):
self.send_sleep = self.heartbeats[0] / 1000
# by default, receive gets an additional grace of 50%
# set a different heart-beat-receive-scale when creating the connection to override that
self.receive_sleep = self.heartbeats[1] / 1000 * self.heart_beat_receive_scale
log.debug('Setting receive_sleep to %s', self.receive_sleep)
# Give grace of receiving the first heartbeat
self.received_heartbeat = monotonic() + self.receive_sleep
self.running = True
if self.heartbeat_thread is None:
self.heartbeat_thread = utils.default_create_thread(self.__heartbeat_loop)
self.heartbeat_thread.name = 'StompHeartbeat%s' % getattr(self.heartbeat_thread, 'name', 'Thread') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['headers']]
|
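As a sketch of what the heart-beat negotiation above computes: under the STOMP 1.1 rules, each direction is active only when both peers want it, at the larger of the two advertised intervals. The helper below is an assumption about that rule, not the library's `utils.calculate_heartbeats` itself, which may differ in detail.

# Sketch of STOMP 1.1 heart-beat negotiation; the real
# utils.calculate_heartbeats may differ in detail.
def negotiate_heartbeats(server_hb, client_hb):
    # server_hb: ("sx", "sy") strings parsed from the CONNECTED frame's
    # 'heart-beat' header; client_hb: (cx, cy) ints offered by the client.
    sx, sy = (int(v) for v in server_hb)
    cx, cy = client_hb
    send = max(cx, sy) if cx and sy else 0      # client -> server, ms
    receive = max(cy, sx) if cy and sx else 0   # server -> client, ms
    return send, receive

# Client offered (4000, 4000); server answered 'heart-beat:2000,6000'.
assert negotiate_heartbeats(("2000", "6000"), (4000, 4000)) == (6000, 4000)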
def select(self, *cluster_ids):
"""Select a list of clusters."""
# HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])`
# This makes it more convenient to select multiple clusters with
# the snippet: `:c 1 2 3` instead of `:c 1,2,3`.
if cluster_ids and isinstance(cluster_ids[0], (tuple, list)):
cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:])
# Remove non-existing clusters from the selection.
cluster_ids = self._keep_existing_clusters(cluster_ids)
# Update the cluster view selection.
self.cluster_view.select(cluster_ids)
|
def function[select, parameter[self]]:
constant[Select a list of clusters.]
if <ast.BoolOp object at 0x7da1b12f2c20> begin[:]
variable[cluster_ids] assign[=] binary_operation[call[name[list], parameter[call[name[cluster_ids]][constant[0]]]] + call[name[list], parameter[call[name[cluster_ids]][<ast.Slice object at 0x7da1b12f3670>]]]]
variable[cluster_ids] assign[=] call[name[self]._keep_existing_clusters, parameter[name[cluster_ids]]]
call[name[self].cluster_view.select, parameter[name[cluster_ids]]]
|
keyword[def] identifier[select] ( identifier[self] ,* identifier[cluster_ids] ):
literal[string]
keyword[if] identifier[cluster_ids] keyword[and] identifier[isinstance] ( identifier[cluster_ids] [ literal[int] ],( identifier[tuple] , identifier[list] )):
identifier[cluster_ids] = identifier[list] ( identifier[cluster_ids] [ literal[int] ])+ identifier[list] ( identifier[cluster_ids] [ literal[int] :])
identifier[cluster_ids] = identifier[self] . identifier[_keep_existing_clusters] ( identifier[cluster_ids] )
identifier[self] . identifier[cluster_view] . identifier[select] ( identifier[cluster_ids] )
|
def select(self, *cluster_ids):
"""Select a list of clusters."""
# HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])`
# This makes it more convenient to select multiple clusters with
# the snippet: `:c 1 2 3` instead of `:c 1,2,3`.
if cluster_ids and isinstance(cluster_ids[0], (tuple, list)):
cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:]) # depends on [control=['if'], data=[]]
# Remove non-existing clusters from the selection.
cluster_ids = self._keep_existing_clusters(cluster_ids)
# Update the cluster view selection.
self.cluster_view.select(cluster_ids)
|
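Because of the hack above, both calling conventions select the same clusters; the `gui` instance below is a hypothetical owner of `select()`.

# Equivalent calls; `gui` stands in for whatever object defines select().
gui.select(1, 2, 3)      # variadic form, as produced by the snippet `:c 1 2 3`
gui.select([1, 2, 3])    # list form, as produced by `:c 1,2,3`
# Non-existing cluster ids are silently dropped before the view updates.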
def parse_message(message, validation_level=None, find_groups=True, message_profile=None, report_file=None,
force_validation=False):
"""
Parse the given ER7-encoded message and return an instance of :class:`Message <hl7apy.core.Message>`.
:type message: ``str``
:param message: the ER7-encoded message to be parsed
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type find_groups: ``bool``
:param find_groups: if ``True``, automatically assign the segments found to the appropriate
:class:`Groups <hl7apy.core.Group>` instances. If ``False``, the segments found are assigned as
children of the :class:`Message <hl7apy.core.Message>` instance
    :type force_validation: ``bool``
    :param force_validation: if ``True``, automatically forces the message validation after the end of the parsing
:return: an instance of :class:`Message <hl7apy.core.Message>`
>>> message = "MSH|^~\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\\rPID|1||" \
"566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET^^ANN ARBOR^MI^^USA||555-555-2004|||M\\r"
>>> m = parse_message(message)
>>> print(m)
<Message OML_O33>
>>> print(m.msh.sending_application.to_er7())
GHH_ADT
>>> print(m.children)
[<Segment MSH>, <Group OML_O33_PATIENT>]
"""
message = message.lstrip()
encoding_chars, message_structure, version = get_message_info(message)
validation_level = _get_validation_level(validation_level)
try:
reference = message_profile[message_structure] if message_profile else None
except KeyError:
raise MessageProfileNotFound()
try:
m = Message(name=message_structure, reference=reference, version=version,
validation_level=validation_level, encoding_chars=encoding_chars)
except InvalidName:
m = Message(version=version, validation_level=validation_level,
encoding_chars=encoding_chars)
try:
children = parse_segments(message, m.version, encoding_chars, validation_level, m.reference, find_groups)
    except AttributeError:  # m.reference can raise AttributeError
children = parse_segments(message, m.version, encoding_chars, validation_level, find_groups=False)
m.children = children
if force_validation:
if message_profile is None:
Validator.validate(m, report_file=report_file)
else:
Validator.validate(m, message_profile[message_structure], report_file=report_file)
return m
|
def function[parse_message, parameter[message, validation_level, find_groups, message_profile, report_file, force_validation]]:
constant[
Parse the given ER7-encoded message and return an instance of :class:`Message <hl7apy.core.Message>`.
:type message: ``str``
:param message: the ER7-encoded message to be parsed
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type find_groups: ``bool``
:param find_groups: if ``True``, automatically assign the segments found to the appropriate
:class:`Groups <hl7apy.core.Group>` instances. If ``False``, the segments found are assigned as
children of the :class:`Message <hl7apy.core.Message>` instance
    :type force_validation: ``bool``
    :param force_validation: if ``True``, automatically forces the message validation after the end of the parsing
:return: an instance of :class:`Message <hl7apy.core.Message>`
>>> message = "MSH|^~\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\rPID|1||" "566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET^^ANN ARBOR^MI^^USA||555-555-2004|||M\r"
>>> m = parse_message(message)
>>> print(m)
<Message OML_O33>
>>> print(m.msh.sending_application.to_er7())
GHH_ADT
>>> print(m.children)
[<Segment MSH>, <Group OML_O33_PATIENT>]
]
variable[message] assign[=] call[name[message].lstrip, parameter[]]
<ast.Tuple object at 0x7da1b0dbfd00> assign[=] call[name[get_message_info], parameter[name[message]]]
variable[validation_level] assign[=] call[name[_get_validation_level], parameter[name[validation_level]]]
<ast.Try object at 0x7da1b0dbfac0>
<ast.Try object at 0x7da1b0dbf820>
<ast.Try object at 0x7da1b0dbf160>
name[m].children assign[=] name[children]
if name[force_validation] begin[:]
if compare[name[message_profile] is constant[None]] begin[:]
call[name[Validator].validate, parameter[name[m]]]
return[name[m]]
|
keyword[def] identifier[parse_message] ( identifier[message] , identifier[validation_level] = keyword[None] , identifier[find_groups] = keyword[True] , identifier[message_profile] = keyword[None] , identifier[report_file] = keyword[None] ,
identifier[force_validation] = keyword[False] ):
literal[string]
identifier[message] = identifier[message] . identifier[lstrip] ()
identifier[encoding_chars] , identifier[message_structure] , identifier[version] = identifier[get_message_info] ( identifier[message] )
identifier[validation_level] = identifier[_get_validation_level] ( identifier[validation_level] )
keyword[try] :
identifier[reference] = identifier[message_profile] [ identifier[message_structure] ] keyword[if] identifier[message_profile] keyword[else] keyword[None]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[MessageProfileNotFound] ()
keyword[try] :
identifier[m] = identifier[Message] ( identifier[name] = identifier[message_structure] , identifier[reference] = identifier[reference] , identifier[version] = identifier[version] ,
identifier[validation_level] = identifier[validation_level] , identifier[encoding_chars] = identifier[encoding_chars] )
keyword[except] identifier[InvalidName] :
identifier[m] = identifier[Message] ( identifier[version] = identifier[version] , identifier[validation_level] = identifier[validation_level] ,
identifier[encoding_chars] = identifier[encoding_chars] )
keyword[try] :
identifier[children] = identifier[parse_segments] ( identifier[message] , identifier[m] . identifier[version] , identifier[encoding_chars] , identifier[validation_level] , identifier[m] . identifier[reference] , identifier[find_groups] )
keyword[except] identifier[AttributeError] :
identifier[children] = identifier[parse_segments] ( identifier[message] , identifier[m] . identifier[version] , identifier[encoding_chars] , identifier[validation_level] , identifier[find_groups] = keyword[False] )
identifier[m] . identifier[children] = identifier[children]
keyword[if] identifier[force_validation] :
keyword[if] identifier[message_profile] keyword[is] keyword[None] :
identifier[Validator] . identifier[validate] ( identifier[m] , identifier[report_file] = identifier[report_file] )
keyword[else] :
identifier[Validator] . identifier[validate] ( identifier[m] , identifier[message_profile] [ identifier[message_structure] ], identifier[report_file] = identifier[report_file] )
keyword[return] identifier[m]
|
def parse_message(message, validation_level=None, find_groups=True, message_profile=None, report_file=None, force_validation=False):
"""
Parse the given ER7-encoded message and return an instance of :class:`Message <hl7apy.core.Message>`.
:type message: ``str``
:param message: the ER7-encoded message to be parsed
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type find_groups: ``bool``
:param find_groups: if ``True``, automatically assign the segments found to the appropriate
:class:`Groups <hl7apy.core.Group>` instances. If ``False``, the segments found are assigned as
children of the :class:`Message <hl7apy.core.Message>` instance
    :type force_validation: ``bool``
    :param force_validation: if ``True``, automatically forces the message validation after the end of the parsing
:return: an instance of :class:`Message <hl7apy.core.Message>`
>>> message = "MSH|^~\\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\\rPID|1||" "566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET^^ANN ARBOR^MI^^USA||555-555-2004|||M\\r"
>>> m = parse_message(message)
>>> print(m)
<Message OML_O33>
>>> print(m.msh.sending_application.to_er7())
GHH_ADT
>>> print(m.children)
[<Segment MSH>, <Group OML_O33_PATIENT>]
"""
message = message.lstrip()
(encoding_chars, message_structure, version) = get_message_info(message)
validation_level = _get_validation_level(validation_level)
try:
reference = message_profile[message_structure] if message_profile else None # depends on [control=['try'], data=[]]
except KeyError:
raise MessageProfileNotFound() # depends on [control=['except'], data=[]]
try:
m = Message(name=message_structure, reference=reference, version=version, validation_level=validation_level, encoding_chars=encoding_chars) # depends on [control=['try'], data=[]]
except InvalidName:
m = Message(version=version, validation_level=validation_level, encoding_chars=encoding_chars) # depends on [control=['except'], data=[]]
try:
children = parse_segments(message, m.version, encoding_chars, validation_level, m.reference, find_groups) # depends on [control=['try'], data=[]]
    except AttributeError: # m.reference can raise AttributeError
children = parse_segments(message, m.version, encoding_chars, validation_level, find_groups=False) # depends on [control=['except'], data=[]]
m.children = children
if force_validation:
if message_profile is None:
Validator.validate(m, report_file=report_file) # depends on [control=['if'], data=[]]
else:
Validator.validate(m, message_profile[message_structure], report_file=report_file) # depends on [control=['if'], data=[]]
return m
|
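A hedged sketch of the optional knobs beyond the doctest above; the import paths and exception name are assumed from hl7apy's package layout, and strict parsing/validation may behave differently on other messages.

# Sketch only; paths and constants assumed from hl7apy's layout.
from hl7apy.consts import VALIDATION_LEVEL
from hl7apy.parser import parse_message
from hl7apy.exceptions import ValidationError

er7 = ("MSH|^~\\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\r"
       "PID|1||566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET"
       "^^ANN ARBOR^MI^^USA||555-555-2004|||M\r")

# find_groups=False keeps segments as direct children of the Message.
flat = parse_message(er7, find_groups=False)
print(flat.children)  # segments only, no OML_O33_PATIENT group

# force_validation=True runs Validator.validate right after parsing and
# may raise if the message does not conform to the reference structure.
try:
    m = parse_message(er7, validation_level=VALIDATION_LEVEL.STRICT,
                      force_validation=True)
except ValidationError:
    m = parse_message(er7)  # fall back to the default tolerant parse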