code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def check_dataset(dataset, mode):
    """Validate that a dataset has the schema the pipeline expects.

    Args:
        dataset: object exposing a ``schema`` attribute — an iterable of
            dicts with 'name' and 'type' keys.
        mode: 'train' requires exactly the "image_url" and "label"
            columns; any other mode also accepts "image_url" alone.

    Raises:
        ValueError: if the column names or types do not match.
    """
    names = set(col['name'] for col in dataset.schema)
    all_strings = all(col['type'] == 'STRING' for col in dataset.schema)
    if mode == 'train':
        if names != {'image_url', 'label'} or not all_strings:
            raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
    else:
        if names not in ({'image_url'}, {'image_url', 'label'}) or not all_strings:
            raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
                             'STRING columns.')
constant[Validate we have a good dataset.]
variable[names] assign[=] <ast.ListComp object at 0x7da1b113f6d0>
variable[types] assign[=] <ast.ListComp object at 0x7da1b113fb20>
if compare[name[mode] equal[==] constant[train]] begin[:]
if <ast.BoolOp object at 0x7da1b113e800> begin[:]
<ast.Raise object at 0x7da1b113fd90> | keyword[def] identifier[check_dataset] ( identifier[dataset] , identifier[mode] ):
literal[string]
identifier[names] =[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[dataset] . identifier[schema] ]
identifier[types] =[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[dataset] . identifier[schema] ]
keyword[if] identifier[mode] == literal[string] :
keyword[if] ( identifier[set] ([ literal[string] , literal[string] ])!= identifier[set] ( identifier[names] ) keyword[or] identifier[any] ( identifier[t] != literal[string] keyword[for] identifier[t] keyword[in] identifier[types] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] ( identifier[set] ([ literal[string] ])!= identifier[set] ( identifier[names] ) keyword[and] identifier[set] ([ literal[string] , literal[string] ])!= identifier[set] ( identifier[names] )) keyword[or] identifier[any] ( identifier[t] != literal[string] keyword[for] identifier[t] keyword[in] identifier[types] ):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] ) | def check_dataset(dataset, mode):
"""Validate we have a good dataset."""
names = [x['name'] for x in dataset.schema]
types = [x['type'] for x in dataset.schema]
if mode == 'train':
if set(['image_url', 'label']) != set(names) or any((t != 'STRING' for t in types)):
raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif set(['image_url']) != set(names) and set(['image_url', 'label']) != set(names) or any((t != 'STRING' for t in types)):
raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' + 'STRING columns.') # depends on [control=['if'], data=[]] |
def set_dtreat_dfit(self, dfit=None):
    """ Set the fitting dictionnary
    A dict contaning all parameters for fitting the data
    Valid dict content includes:
        - 'type': str
            'fft': A fourier filtering
            'svd': A svd filtering
    """
    # Fitting is not implemented yet: whatever the caller passed is
    # discarded and the stored value is always None.
    warnings.warn("Not implemented yet !, dfit forced to None")
    dfit = None
    # Validation kept for when fitting is implemented; currently dead
    # code because dfit was just forced to None above.
    if dfit is not None:
        assert isinstance(dfit, dict)
        assert 'type' in dfit
        assert dfit['type'] in ('svd', 'fft')
    self._dtreat['dfit'] = dfit
    # Stored data must be recomputed after the treatment spec changed.
    self._ddata['uptodate'] = False
constant[ Set the fitting dictionnary
A dict contaning all parameters for fitting the data
Valid dict content includes:
- 'type': str
'fft': A fourier filtering
'svd': A svd filtering
]
call[name[warnings].warn, parameter[constant[Not implemented yet !, dfit forced to None]]]
variable[dfit] assign[=] constant[None]
assert[<ast.BoolOp object at 0x7da18bcca740>]
if call[name[isinstance], parameter[name[dfit], name[dict]]] begin[:]
assert[compare[constant[type] in call[name[dfit].keys, parameter[]]]]
assert[compare[call[name[dfit]][constant[type]] in list[[<ast.Constant object at 0x7da18bcc9990>, <ast.Constant object at 0x7da18bcc9510>]]]]
call[name[self]._dtreat][constant[dfit]] assign[=] name[dfit]
call[name[self]._ddata][constant[uptodate]] assign[=] constant[False] | keyword[def] identifier[set_dtreat_dfit] ( identifier[self] , identifier[dfit] = keyword[None] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[dfit] = keyword[None]
keyword[assert] identifier[dfit] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[dfit] , identifier[dict] )
keyword[if] identifier[isinstance] ( identifier[dfit] , identifier[dict] ):
keyword[assert] literal[string] keyword[in] identifier[dfit] . identifier[keys] ()
keyword[assert] identifier[dfit] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]
identifier[self] . identifier[_dtreat] [ literal[string] ]= identifier[dfit]
identifier[self] . identifier[_ddata] [ literal[string] ]= keyword[False] | def set_dtreat_dfit(self, dfit=None):
""" Set the fitting dictionnary
A dict contaning all parameters for fitting the data
Valid dict content includes:
- 'type': str
'fft': A fourier filtering
'svd': A svd filtering
"""
warnings.warn('Not implemented yet !, dfit forced to None')
dfit = None
assert dfit is None or isinstance(dfit, dict)
if isinstance(dfit, dict):
assert 'type' in dfit.keys()
assert dfit['type'] in ['svd', 'fft'] # depends on [control=['if'], data=[]]
self._dtreat['dfit'] = dfit
self._ddata['uptodate'] = False |
def expanded_indexer(key, ndim):
    """Given a key for indexing an ndarray, return an equivalent key which is a
    tuple with length equal to the number of dimensions.
    The expansion is done by replacing all `Ellipsis` items with the right
    number of full slices and then padding the key with full slices so that it
    reaches the appropriate dimensionality.
    """
    if not isinstance(key, tuple):
        # numpy treats non-tuple keys equivalent to tuples of length 1
        key = (key,)
    expanded = []
    # Only the first Ellipsis expands to multiple full slices; any later
    # one behaves like a single ':'. See:
    # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
    seen_ellipsis = False
    for item in key:
        if item is not Ellipsis:
            expanded.append(item)
        elif seen_ellipsis:
            expanded.append(slice(None))
        else:
            seen_ellipsis = True
            expanded.extend([slice(None)] * (ndim + 1 - len(key)))
    if len(expanded) > ndim:
        raise IndexError('too many indices')
    # Pad trailing dimensions with full slices.
    expanded.extend([slice(None)] * (ndim - len(expanded)))
    return tuple(expanded)
constant[Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
]
if <ast.UnaryOp object at 0x7da20c6c5150> begin[:]
variable[key] assign[=] tuple[[<ast.Name object at 0x7da20c6c4b20>]]
variable[new_key] assign[=] list[[]]
variable[found_ellipsis] assign[=] constant[False]
for taget[name[k]] in starred[name[key]] begin[:]
if compare[name[k] is name[Ellipsis]] begin[:]
if <ast.UnaryOp object at 0x7da20c6c6290> begin[:]
call[name[new_key].extend, parameter[binary_operation[binary_operation[binary_operation[name[ndim] + constant[1]] - call[name[len], parameter[name[key]]]] * list[[<ast.Call object at 0x7da20c6c5570>]]]]]
variable[found_ellipsis] assign[=] constant[True]
if compare[call[name[len], parameter[name[new_key]]] greater[>] name[ndim]] begin[:]
<ast.Raise object at 0x7da20e957760>
call[name[new_key].extend, parameter[binary_operation[binary_operation[name[ndim] - call[name[len], parameter[name[new_key]]]] * list[[<ast.Call object at 0x7da20c6c6170>]]]]]
return[call[name[tuple], parameter[name[new_key]]]] | keyword[def] identifier[expanded_indexer] ( identifier[key] , identifier[ndim] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[tuple] ):
identifier[key] =( identifier[key] ,)
identifier[new_key] =[]
identifier[found_ellipsis] = keyword[False]
keyword[for] identifier[k] keyword[in] identifier[key] :
keyword[if] identifier[k] keyword[is] identifier[Ellipsis] :
keyword[if] keyword[not] identifier[found_ellipsis] :
identifier[new_key] . identifier[extend] (( identifier[ndim] + literal[int] - identifier[len] ( identifier[key] ))*[ identifier[slice] ( keyword[None] )])
identifier[found_ellipsis] = keyword[True]
keyword[else] :
identifier[new_key] . identifier[append] ( identifier[slice] ( keyword[None] ))
keyword[else] :
identifier[new_key] . identifier[append] ( identifier[k] )
keyword[if] identifier[len] ( identifier[new_key] )> identifier[ndim] :
keyword[raise] identifier[IndexError] ( literal[string] )
identifier[new_key] . identifier[extend] (( identifier[ndim] - identifier[len] ( identifier[new_key] ))*[ identifier[slice] ( keyword[None] )])
keyword[return] identifier[tuple] ( identifier[new_key] ) | def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,) # depends on [control=['if'], data=[]]
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True # depends on [control=['if'], data=[]]
else:
new_key.append(slice(None)) # depends on [control=['if'], data=[]]
else:
new_key.append(k) # depends on [control=['for'], data=['k']]
if len(new_key) > ndim:
raise IndexError('too many indices') # depends on [control=['if'], data=[]]
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key) |
def _set_version():
    """Set check50 __version__"""
    global __version__
    from pkg_resources import get_distribution, DistributionNotFound
    import os
    # https://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
    try:
        dist = get_distribution("check50")
    except DistributionNotFound:
        dist = None
    if dist is not None:
        # Normalize both paths so the prefix test works across OSes.
        dist_loc = os.path.normcase(dist.location)
        here = os.path.normcase(__file__)
        if not here.startswith(os.path.join(dist_loc, "check50")):
            # Another version is installed, but not the one being imported.
            dist = None
    if dist is None:
        __version__ = "locally installed, no version information available"
    else:
        __version__ = dist.version
constant[Set check50 __version__]
<ast.Global object at 0x7da207f99900>
from relative_module[pkg_resources] import module[get_distribution], module[DistributionNotFound]
import module[os]
<ast.Try object at 0x7da207f9bca0> | keyword[def] identifier[_set_version] ():
literal[string]
keyword[global] identifier[__version__]
keyword[from] identifier[pkg_resources] keyword[import] identifier[get_distribution] , identifier[DistributionNotFound]
keyword[import] identifier[os]
keyword[try] :
identifier[dist] = identifier[get_distribution] ( literal[string] )
identifier[dist_loc] = identifier[os] . identifier[path] . identifier[normcase] ( identifier[dist] . identifier[location] )
identifier[here] = identifier[os] . identifier[path] . identifier[normcase] ( identifier[__file__] )
keyword[if] keyword[not] identifier[here] . identifier[startswith] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dist_loc] , literal[string] )):
keyword[raise] identifier[DistributionNotFound]
keyword[except] identifier[DistributionNotFound] :
identifier[__version__] = literal[string]
keyword[else] :
identifier[__version__] = identifier[dist] . identifier[version] | def _set_version():
"""Set check50 __version__"""
global __version__
from pkg_resources import get_distribution, DistributionNotFound
import os
# https://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
try:
dist = get_distribution('check50')
# Normalize path for cross-OS compatibility.
dist_loc = os.path.normcase(dist.location)
here = os.path.normcase(__file__)
if not here.startswith(os.path.join(dist_loc, 'check50')):
# This version is not installed, but another version is.
raise DistributionNotFound # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except DistributionNotFound:
__version__ = 'locally installed, no version information available' # depends on [control=['except'], data=[]]
else:
__version__ = dist.version |
def save_project(self, project, filename=''):
    r"""
    Saves given Project to a 'pnm' file
    This will include all of associated objects, including algorithms.
    Parameters
    ----------
    project : OpenPNM Project
        The project to save.
    filename : string, optional
        If no filename is given, the given project name is used. See Notes
        for more information.
    See Also
    --------
    save_workspace
    Notes
    -----
    The filename can be a string such as 'saved_file.pnm'. The string can
    include absolute path such as 'C:\networks\saved_file.pnm', or can
    be a relative path such as '..\..\saved_file.pnm', which will look
    2 directories above the current working directory. Can also be a
    path object object such as that produced by ``pathlib`` or
    ``os.path`` in the Python standard library.
    """
    # Fall back to the project's own name when no filename was supplied.
    target = project.name if filename == '' else filename
    target = self._parse_filename(filename=target, ext='pnm')
    # The project is pickled inside a one-entry dict keyed by its name.
    with open(target, 'wb') as fobj:
        pickle.dump({project.name: project}, fobj)
constant[
Saves given Project to a 'pnm' file
This will include all of associated objects, including algorithms.
Parameters
----------
project : OpenPNM Project
The project to save.
filename : string, optional
If no filename is given, the given project name is used. See Notes
for more information.
See Also
--------
save_workspace
Notes
-----
The filename can be a string such as 'saved_file.pnm'. The string can
include absolute path such as 'C:\networks\saved_file.pnm', or can
be a relative path such as '..\..\saved_file.pnm', which will look
2 directories above the current working directory. Can also be a
path object object such as that produced by ``pathlib`` or
``os.path`` in the Python standard library.
]
if compare[name[filename] equal[==] constant[]] begin[:]
variable[filename] assign[=] name[project].name
variable[filename] assign[=] call[name[self]._parse_filename, parameter[]]
variable[d] assign[=] dictionary[[<ast.Attribute object at 0x7da18c4cc550>], [<ast.Name object at 0x7da18c4ce8f0>]]
with call[name[open], parameter[name[filename], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[d], name[f]]] | keyword[def] identifier[save_project] ( identifier[self] , identifier[project] , identifier[filename] = literal[string] ):
literal[string]
keyword[if] identifier[filename] == literal[string] :
identifier[filename] = identifier[project] . identifier[name]
identifier[filename] = identifier[self] . identifier[_parse_filename] ( identifier[filename] = identifier[filename] , identifier[ext] = literal[string] )
identifier[d] ={ identifier[project] . identifier[name] : identifier[project] }
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[pickle] . identifier[dump] ( identifier[d] , identifier[f] ) | def save_project(self, project, filename=''):
"""
Saves given Project to a 'pnm' file
This will include all of associated objects, including algorithms.
Parameters
----------
project : OpenPNM Project
The project to save.
filename : string, optional
If no filename is given, the given project name is used. See Notes
for more information.
See Also
--------
save_workspace
Notes
-----
The filename can be a string such as 'saved_file.pnm'. The string can
include absolute path such as 'C:\\networks\\saved_file.pnm', or can
be a relative path such as '..\\..\\saved_file.pnm', which will look
2 directories above the current working directory. Can also be a
path object object such as that produced by ``pathlib`` or
``os.path`` in the Python standard library.
"""
if filename == '':
filename = project.name # depends on [control=['if'], data=['filename']]
filename = self._parse_filename(filename=filename, ext='pnm')
# Save dictionary as pickle
d = {project.name: project}
with open(filename, 'wb') as f:
pickle.dump(d, f) # depends on [control=['with'], data=['f']] |
def policy_set_definitions(self):
    """Instance depends on the API version:
    * 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
    * 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
    * 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
    """
    api_version = self._get_api_version('policy_set_definitions')
    # Each supported version lazily imports its own operations class.
    if api_version == '2018-05-01':
        from .v2018_05_01.operations import PolicySetDefinitionsOperations as OperationClass
    elif api_version == '2018-03-01':
        from .v2018_03_01.operations import PolicySetDefinitionsOperations as OperationClass
    elif api_version == '2017-06-01-preview':
        from .v2017_06_01_preview.operations import PolicySetDefinitionsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
constant[Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
]
variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[policy_set_definitions]]]
if compare[name[api_version] equal[==] constant[2017-06-01-preview]] begin[:]
from relative_module[v2017_06_01_preview.operations] import module[PolicySetDefinitionsOperations]
return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]] | keyword[def] identifier[policy_set_definitions] ( identifier[self] ):
literal[string]
identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] )
keyword[if] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2017_06_01_preview] . identifier[operations] keyword[import] identifier[PolicySetDefinitionsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_03_01] . identifier[operations] keyword[import] identifier[PolicySetDefinitionsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_05_01] . identifier[operations] keyword[import] identifier[PolicySetDefinitionsOperations] keyword[as] identifier[OperationClass]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] ))
keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] ))) | def policy_set_definitions(self):
"""Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
"""
api_version = self._get_api_version('policy_set_definitions')
if api_version == '2017-06-01-preview':
from .v2017_06_01_preview.operations import PolicySetDefinitionsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-03-01':
from .v2018_03_01.operations import PolicySetDefinitionsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-05-01':
from .v2018_05_01.operations import PolicySetDefinitionsOperations as OperationClass # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('APIVersion {} is not available'.format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    found = _find_all_simple(dir)
    if dir != os.curdir:
        return list(found)
    # For the current directory, report paths relative to it.
    rel = functools.partial(os.path.relpath, start=dir)
    return [rel(path) for path in found]
constant[
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
]
variable[files] assign[=] call[name[_find_all_simple], parameter[name[dir]]]
if compare[name[dir] equal[==] name[os].curdir] begin[:]
variable[make_rel] assign[=] call[name[functools].partial, parameter[name[os].path.relpath]]
variable[files] assign[=] call[name[map], parameter[name[make_rel], name[files]]]
return[call[name[list], parameter[name[files]]]] | keyword[def] identifier[findall] ( identifier[dir] = identifier[os] . identifier[curdir] ):
literal[string]
identifier[files] = identifier[_find_all_simple] ( identifier[dir] )
keyword[if] identifier[dir] == identifier[os] . identifier[curdir] :
identifier[make_rel] = identifier[functools] . identifier[partial] ( identifier[os] . identifier[path] . identifier[relpath] , identifier[start] = identifier[dir] )
identifier[files] = identifier[map] ( identifier[make_rel] , identifier[files] )
keyword[return] identifier[list] ( identifier[files] ) | def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files) # depends on [control=['if'], data=['dir']]
return list(files) |
def clgrad(obj, exe, arg, delta=DELTA):
    """
    Returns numerical gradient function of given class method
    with respect to a class attribute
    Input: obj, general object
           exe (str), name of object method
           arg (str), name of object atribute
           delta(float, optional), finite difference step
    Output: gradient function object
    """
    func, attr = get_method_and_copy_of_attribute(obj, exe, arg)

    def _gradient(*args, **kwargs):
        # Central finite differences: perturb one element of the attribute
        # copy at a time — in place, through the nditer readwrite view —
        # and re-evaluate the method around it.
        out = numpy.zeros(attr.shape)
        walker = numpy.nditer(attr, op_flags=['readwrite'], flags=['multi_index'])
        for element in walker:
            index = walker.multi_index
            element += delta / 2
            forward = func(*args, **kwargs)
            element -= delta
            backward = func(*args, **kwargs)
            element += delta / 2  # restore the original value
            out[index] = (forward - backward) / delta
        return out

    return _gradient
constant[
Returns numerical gradient function of given class method
with respect to a class attribute
Input: obj, general object
exe (str), name of object method
arg (str), name of object atribute
delta(float, optional), finite difference step
Output: gradient function object
]
<ast.Tuple object at 0x7da1b26afd90> assign[=] call[name[get_method_and_copy_of_attribute], parameter[name[obj], name[exe], name[arg]]]
def function[grad_f, parameter[]]:
variable[grad_val] assign[=] call[name[numpy].zeros, parameter[name[x].shape]]
variable[it] assign[=] call[name[numpy].nditer, parameter[name[x]]]
for taget[name[xi]] in starred[name[it]] begin[:]
variable[i] assign[=] name[it].multi_index
<ast.AugAssign object at 0x7da1b26adb70>
variable[fp] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da1b26ad5d0>]]
<ast.AugAssign object at 0x7da1b26ae200>
variable[fm] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da1b26ad5a0>]]
<ast.AugAssign object at 0x7da18fe90220>
call[name[grad_val]][name[i]] assign[=] binary_operation[binary_operation[name[fp] - name[fm]] / name[delta]]
return[name[grad_val]]
return[name[grad_f]] | keyword[def] identifier[clgrad] ( identifier[obj] , identifier[exe] , identifier[arg] , identifier[delta] = identifier[DELTA] ):
literal[string]
identifier[f] , identifier[x] = identifier[get_method_and_copy_of_attribute] ( identifier[obj] , identifier[exe] , identifier[arg] )
keyword[def] identifier[grad_f] (* identifier[args] ,** identifier[kwargs] ):
identifier[grad_val] = identifier[numpy] . identifier[zeros] ( identifier[x] . identifier[shape] )
identifier[it] = identifier[numpy] . identifier[nditer] ( identifier[x] , identifier[op_flags] =[ literal[string] ], identifier[flags] =[ literal[string] ])
keyword[for] identifier[xi] keyword[in] identifier[it] :
identifier[i] = identifier[it] . identifier[multi_index]
identifier[xi] += identifier[delta] / literal[int]
identifier[fp] = identifier[f] (* identifier[args] ,** identifier[kwargs] )
identifier[xi] -= identifier[delta]
identifier[fm] = identifier[f] (* identifier[args] ,** identifier[kwargs] )
identifier[xi] += identifier[delta] / literal[int]
identifier[grad_val] [ identifier[i] ]=( identifier[fp] - identifier[fm] )/ identifier[delta]
keyword[return] identifier[grad_val]
keyword[return] identifier[grad_f] | def clgrad(obj, exe, arg, delta=DELTA):
"""
Returns numerical gradient function of given class method
with respect to a class attribute
Input: obj, general object
exe (str), name of object method
arg (str), name of object atribute
delta(float, optional), finite difference step
Output: gradient function object
"""
(f, x) = get_method_and_copy_of_attribute(obj, exe, arg)
def grad_f(*args, **kwargs):
grad_val = numpy.zeros(x.shape)
it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xi in it:
i = it.multi_index
xi += delta / 2
fp = f(*args, **kwargs)
xi -= delta
fm = f(*args, **kwargs)
xi += delta / 2
grad_val[i] = (fp - fm) / delta # depends on [control=['for'], data=['xi']]
return grad_val
return grad_f |
def options(self):
    """A :class:`dict` of all config options."""
    try:
        twtxt_items = self.cfg.items("twtxt")
    except configparser.NoSectionError as e:
        # No [twtxt] section configured yet: behave as if empty.
        logger.debug(e)
        return {}
    return dict(twtxt_items)
constant[A :class:`dict` of all config options.]
<ast.Try object at 0x7da1b009d210> | keyword[def] identifier[options] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[dict] ( identifier[self] . identifier[cfg] . identifier[items] ( literal[string] ))
keyword[except] identifier[configparser] . identifier[NoSectionError] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( identifier[e] )
keyword[return] {} | def options(self):
"""A :class:`dict` of all config options."""
try:
return dict(self.cfg.items('twtxt')) # depends on [control=['try'], data=[]]
except configparser.NoSectionError as e:
logger.debug(e)
return {} # depends on [control=['except'], data=['e']] |
def iter_variants(self):
    """Iterate over marker information."""
    if not self.has_index:
        raise NotImplementedError("Not implemented when IMPUTE2 file is "
                                  "not indexed (see genipe)")
    impute2 = self._impute2_file
    for _, row in self._impute2_index.iterrows():
        # Jump straight to this variant's record in the IMPUTE2 file.
        impute2.seek(int(row.seek))
        chrom, marker, pos, a1, a2 = impute2.read(1024).split(" ")[:5]
        yield Variant(marker, CHROM_STR_ENCODE.get(chrom, chrom), int(pos),
                      [a1, a2])
constant[Iterate over marker information.]
if <ast.UnaryOp object at 0x7da1b24e17b0> begin[:]
<ast.Raise object at 0x7da1b24e2200>
for taget[tuple[[<ast.Name object at 0x7da1b24e1150>, <ast.Name object at 0x7da1b24e08b0>]]] in starred[call[name[self]._impute2_index.iterrows, parameter[]]] begin[:]
variable[f] assign[=] name[self]._impute2_file
call[name[f].seek, parameter[call[name[int], parameter[name[row].seek]]]]
<ast.Tuple object at 0x7da1b24e2110> assign[=] call[call[call[name[f].read, parameter[constant[1024]]].split, parameter[constant[ ]]]][<ast.Slice object at 0x7da1b24e0f40>]
variable[pos] assign[=] call[name[int], parameter[name[pos]]]
<ast.Yield object at 0x7da1b24e26e0> | keyword[def] identifier[iter_variants] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_index] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] )
keyword[for] identifier[name] , identifier[row] keyword[in] identifier[self] . identifier[_impute2_index] . identifier[iterrows] ():
identifier[f] = identifier[self] . identifier[_impute2_file]
identifier[f] . identifier[seek] ( identifier[int] ( identifier[row] . identifier[seek] ))
identifier[chrom] , identifier[name] , identifier[pos] , identifier[a1] , identifier[a2] = identifier[f] . identifier[read] ( literal[int] ). identifier[split] ( literal[string] )[: literal[int] ]
identifier[pos] = identifier[int] ( identifier[pos] )
keyword[yield] identifier[Variant] ( identifier[name] , identifier[CHROM_STR_ENCODE] . identifier[get] ( identifier[chrom] , identifier[chrom] ), identifier[pos] ,
[ identifier[a1] , identifier[a2] ]) | def iter_variants(self):
"""Iterate over marker information."""
if not self.has_index:
raise NotImplementedError('Not implemented when IMPUTE2 file is not indexed (see genipe)') # depends on [control=['if'], data=[]]
for (name, row) in self._impute2_index.iterrows():
# Seeking to the right place in the file
f = self._impute2_file
f.seek(int(row.seek))
(chrom, name, pos, a1, a2) = f.read(1024).split(' ')[:5]
pos = int(pos)
yield Variant(name, CHROM_STR_ENCODE.get(chrom, chrom), pos, [a1, a2]) # depends on [control=['for'], data=[]] |
def build_static(self):
    """ Build static files """
    # exist_ok=True makes this a no-op when the directory already exists.
    os.makedirs(self.build_static_dir, exist_ok=True)
    copy_tree(self.static_dir, self.build_static_dir)
    if self.webassets_cmd:
        self.webassets_cmd.build()
constant[ Build static files ]
if <ast.UnaryOp object at 0x7da18f722680> begin[:]
call[name[os].makedirs, parameter[name[self].build_static_dir]]
call[name[copy_tree], parameter[name[self].static_dir, name[self].build_static_dir]]
if name[self].webassets_cmd begin[:]
call[name[self].webassets_cmd.build, parameter[]] | keyword[def] identifier[build_static] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[build_static_dir] ):
identifier[os] . identifier[makedirs] ( identifier[self] . identifier[build_static_dir] )
identifier[copy_tree] ( identifier[self] . identifier[static_dir] , identifier[self] . identifier[build_static_dir] )
keyword[if] identifier[self] . identifier[webassets_cmd] :
identifier[self] . identifier[webassets_cmd] . identifier[build] () | def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir) # depends on [control=['if'], data=[]]
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build() # depends on [control=['if'], data=[]] |
def setBoundary(self, viewID, xmin, ymin, xmax, ymax):
    """setBoundary(string, double, double, double, double) -> None
    Set the current boundary for the given view (see getBoundary()).
    """
    # payload: one type byte followed by four 8-byte doubles
    payload_length = 1 + 8 + 8 + 8 + 8
    self._connection._beginMessage(
        tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_BOUNDARY, viewID, payload_length)
    packed = struct.pack("!Bdddd",
                         tc.TYPE_BOUNDINGBOX, xmin, ymin, xmax, ymax)
    self._connection._string += packed
    self._connection._sendExact()
constant[setBoundary(string, double, double, double, double) -> None
Set the current boundary for the given view (see getBoundary()).
]
call[name[self]._connection._beginMessage, parameter[name[tc].CMD_SET_GUI_VARIABLE, name[tc].VAR_VIEW_BOUNDARY, name[viewID], binary_operation[binary_operation[binary_operation[binary_operation[constant[1] + constant[8]] + constant[8]] + constant[8]] + constant[8]]]]
<ast.AugAssign object at 0x7da1b0831f30>
call[name[self]._connection._sendExact, parameter[]] | keyword[def] identifier[setBoundary] ( identifier[self] , identifier[viewID] , identifier[xmin] , identifier[ymin] , identifier[xmax] , identifier[ymax] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_beginMessage] (
identifier[tc] . identifier[CMD_SET_GUI_VARIABLE] , identifier[tc] . identifier[VAR_VIEW_BOUNDARY] , identifier[viewID] , literal[int] + literal[int] + literal[int] + literal[int] + literal[int] )
identifier[self] . identifier[_connection] . identifier[_string] += identifier[struct] . identifier[pack] ( literal[string] ,
identifier[tc] . identifier[TYPE_BOUNDINGBOX] , identifier[xmin] , identifier[ymin] , identifier[xmax] , identifier[ymax] )
identifier[self] . identifier[_connection] . identifier[_sendExact] () | def setBoundary(self, viewID, xmin, ymin, xmax, ymax):
"""setBoundary(string, double, double, double, double) -> None
Set the current boundary for the given view (see getBoundary()).
"""
self._connection._beginMessage(tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_BOUNDARY, viewID, 1 + 8 + 8 + 8 + 8)
self._connection._string += struct.pack('!Bdddd', tc.TYPE_BOUNDINGBOX, xmin, ymin, xmax, ymax)
self._connection._sendExact() |
def add(self, pkgs):
    """Add packages to the queue if they are not already queued.

    Packages that exist in the SBo repository and are not yet queued are
    appended to the queue file and printed in green; everything else is
    printed in red.
    """
    queue_list = self.packages()
    # de-duplicate while preserving the caller's order
    pkgs = list(OrderedDict.fromkeys(pkgs))
    print("\nAdd packages in the queue:\n")
    # The with-statement closes the file on exit; the old explicit
    # queue.close() inside the block was redundant and has been removed.
    with open(self.queue_list, "a") as queue:
        for pkg in pkgs:
            find = sbo_search_pkg(pkg)
            if pkg not in queue_list and find is not None:
                print("{0}{1}{2}".format(self.meta.color["GREEN"], pkg,
                                         self.meta.color["ENDC"]))
                queue.write(pkg + "\n")
            else:
                print("{0}{1}{2}".format(self.meta.color["RED"], pkg,
                                         self.meta.color["ENDC"]))
            # both branches set the flag, so it is hoisted out of the if/else
            self.quit = True
    if self.quit:
        print("")
constant[Add packages in queue if not exist
]
variable[queue_list] assign[=] call[name[self].packages, parameter[]]
variable[pkgs] assign[=] call[name[list], parameter[call[name[OrderedDict].fromkeys, parameter[name[pkgs]]]]]
call[name[print], parameter[constant[
Add packages in the queue:
]]]
with call[name[open], parameter[name[self].queue_list, constant[a]]] begin[:]
for taget[name[pkg]] in starred[name[pkgs]] begin[:]
variable[find] assign[=] call[name[sbo_search_pkg], parameter[name[pkg]]]
if <ast.BoolOp object at 0x7da204962d40> begin[:]
call[name[print], parameter[call[constant[{0}{1}{2}].format, parameter[call[name[self].meta.color][constant[GREEN]], name[pkg], call[name[self].meta.color][constant[ENDC]]]]]]
call[name[queue].write, parameter[binary_operation[name[pkg] + constant[
]]]]
name[self].quit assign[=] constant[True]
call[name[queue].close, parameter[]]
if name[self].quit begin[:]
call[name[print], parameter[constant[]]] | keyword[def] identifier[add] ( identifier[self] , identifier[pkgs] ):
literal[string]
identifier[queue_list] = identifier[self] . identifier[packages] ()
identifier[pkgs] = identifier[list] ( identifier[OrderedDict] . identifier[fromkeys] ( identifier[pkgs] ))
identifier[print] ( literal[string] )
keyword[with] identifier[open] ( identifier[self] . identifier[queue_list] , literal[string] ) keyword[as] identifier[queue] :
keyword[for] identifier[pkg] keyword[in] identifier[pkgs] :
identifier[find] = identifier[sbo_search_pkg] ( identifier[pkg] )
keyword[if] identifier[pkg] keyword[not] keyword[in] identifier[queue_list] keyword[and] identifier[find] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[pkg] ,
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ]))
identifier[queue] . identifier[write] ( identifier[pkg] + literal[string] )
identifier[self] . identifier[quit] = keyword[True]
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[pkg] ,
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ]))
identifier[self] . identifier[quit] = keyword[True]
identifier[queue] . identifier[close] ()
keyword[if] identifier[self] . identifier[quit] :
identifier[print] ( literal[string] ) | def add(self, pkgs):
"""Add packages in queue if not exist
"""
queue_list = self.packages()
pkgs = list(OrderedDict.fromkeys(pkgs))
print('\nAdd packages in the queue:\n')
with open(self.queue_list, 'a') as queue:
for pkg in pkgs:
find = sbo_search_pkg(pkg)
if pkg not in queue_list and find is not None:
print('{0}{1}{2}'.format(self.meta.color['GREEN'], pkg, self.meta.color['ENDC']))
queue.write(pkg + '\n')
self.quit = True # depends on [control=['if'], data=[]]
else:
print('{0}{1}{2}'.format(self.meta.color['RED'], pkg, self.meta.color['ENDC']))
self.quit = True # depends on [control=['for'], data=['pkg']]
queue.close() # depends on [control=['with'], data=['queue']]
if self.quit:
print('') # depends on [control=['if'], data=[]] |
def transitions_to(self, dst):
    '''
    returns enumerable of (prevstate, t) tuples
    this is super slow and needs to be sped up
    '''
    by_transition = self._transitions_to.get(dst)
    if by_transition is None:
        return
    for transition in by_transition:
        for source_state in by_transition[transition]:
            yield (source_state, transition)
constant[
returns enumerable of (prevstate, t) tuples
this is super slow and needs to be sped up
]
if compare[name[dst] in name[self]._transitions_to] begin[:]
for taget[name[t]] in starred[call[name[self]._transitions_to][name[dst]]] begin[:]
for taget[name[s]] in starred[call[call[name[self]._transitions_to][name[dst]]][name[t]]] begin[:]
<ast.Yield object at 0x7da20e9b34c0> | keyword[def] identifier[transitions_to] ( identifier[self] , identifier[dst] ):
literal[string]
keyword[if] identifier[dst] keyword[in] identifier[self] . identifier[_transitions_to] :
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_transitions_to] [ identifier[dst] ]:
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[_transitions_to] [ identifier[dst] ][ identifier[t] ]:
keyword[yield] ( identifier[s] , identifier[t] ) | def transitions_to(self, dst):
"""
returns enumerable of (prevstate, t) tuples
this is super slow and needs to be sped up
"""
if dst in self._transitions_to:
for t in self._transitions_to[dst]:
for s in self._transitions_to[dst][t]:
yield (s, t) # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=['dst']] |
def run_ppm_server(pdb_file, outfile, force_rerun=False):
    """Run the PPM server from OPM to predict transmembrane residues.

    Args:
        pdb_file (str): Path to PDB file
        outfile (str): Path to output HTML results file
        force_rerun (bool): Flag to rerun PPM if HTML results file already exists

    Returns:
        dict: Dictionary of information from the PPM run, including a link to
            download the membrane protein file
    """
    if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
        url = 'http://sunshine.phar.umich.edu/upload_file.php'
        # Context manager closes the uploaded file handle when the request
        # finishes (the handle was previously leaked).
        with open(pdb_file, 'rb') as pdb_handle:
            r = requests.post(url, files={'userfile': pdb_handle})
        info = r.text
        # Save results in raw HTML format
        with open(outfile, 'w') as f:
            f.write(info)
    else:
        # Utilize existing saved results
        with open(outfile, 'r') as f:
            info = f.read()

    # Strip newlines/carriage returns/tabs so BeautifulSoup sees one clean line
    cleaned = info.replace('\n', '').replace('\r', '').replace('\t', '')
    soup = BeautifulSoup(cleaned, "lxml")

    # Find all tables in the HTML code
    tables = soup.find_all("table", attrs={"class": "data"})

    info_dict = {}
    # enumerate replaces the manual table_index/data_index counters and no
    # longer shadows the cleaned-string variable with the loop variable `t`.
    for table_index, table in enumerate(tables):
        # "row1" contains data
        for data_index, data in enumerate(table.find_all('tr', attrs={"class": "row1"})):
            data_list = list(data.strings)
            if table_index == 0:
                info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
                info_dict['deltaG_transfer'] = data_list[2]
                info_dict['Tilt Angle'] = data_list[3]
            elif table_index == 1 and data_index == 0:
                info_dict['Embedded_residues_Tilt'] = data_list[0]
                info_dict['Embedded_residues'] = data_list[1]
            elif table_index == 1 and data_index == 1:
                info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
                info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
            elif table_index == 2:
                info_dict['Output Messages'] = data_list[1]
            elif table_index == 3:
                # Resolve the relative download link against the server root
                baseurl = 'http://sunshine.phar.umich.edu/'
                a = data.find('a', href=True)
                download_url = baseurl + a['href'].replace('./', '')
                info_dict['Output file download link'] = download_url

    return info_dict
constant[Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
]
if call[name[ssbio].utils.force_rerun, parameter[]] begin[:]
variable[url] assign[=] constant[http://sunshine.phar.umich.edu/upload_file.php]
variable[files] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e825f0>], [<ast.Call object at 0x7da1b0e81cc0>]]
variable[r] assign[=] call[name[requests].post, parameter[name[url]]]
variable[info] assign[=] name[r].text
with call[name[open], parameter[name[outfile], constant[w]]] begin[:]
call[name[f].write, parameter[name[info]]]
variable[t] assign[=] call[name[info].replace, parameter[constant[
], constant[]]]
variable[tt] assign[=] call[name[t].replace, parameter[constant[
], constant[]]]
variable[ttt] assign[=] call[name[tt].replace, parameter[constant[ ], constant[]]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[ttt], constant[lxml]]]
variable[tables] assign[=] call[name[soup].find_all, parameter[constant[table]]]
variable[info_dict] assign[=] dictionary[[], []]
variable[table_index] assign[=] constant[0]
for taget[name[t]] in starred[name[tables]] begin[:]
variable[data_index] assign[=] constant[0]
for taget[name[data]] in starred[call[name[t].find_all, parameter[constant[tr]]]] begin[:]
variable[data_list] assign[=] call[name[list], parameter[name[data].strings]]
if compare[name[table_index] equal[==] constant[0]] begin[:]
call[name[info_dict]][constant[Depth/Hydrophobic Thickness]] assign[=] call[name[data_list]][constant[0]]
call[name[info_dict]][constant[deltaG_transfer]] assign[=] call[name[data_list]][constant[2]]
call[name[info_dict]][constant[Tilt Angle]] assign[=] call[name[data_list]][constant[3]]
if <ast.BoolOp object at 0x7da1b0e81870> begin[:]
call[name[info_dict]][constant[Embedded_residues_Tilt]] assign[=] call[name[data_list]][constant[0]]
call[name[info_dict]][constant[Embedded_residues]] assign[=] call[name[data_list]][constant[1]]
if <ast.BoolOp object at 0x7da1b0e823b0> begin[:]
call[name[info_dict]][constant[Transmembrane_secondary_structure_segments_Tilt]] assign[=] call[name[data_list]][constant[0]]
call[name[info_dict]][constant[Transmembrane_secondary_structure_segments]] assign[=] call[name[data_list]][constant[1]]
if compare[name[table_index] equal[==] constant[2]] begin[:]
call[name[info_dict]][constant[Output Messages]] assign[=] call[name[data_list]][constant[1]]
if compare[name[table_index] equal[==] constant[3]] begin[:]
variable[baseurl] assign[=] constant[http://sunshine.phar.umich.edu/]
variable[a] assign[=] call[name[data].find, parameter[constant[a]]]
variable[download_url] assign[=] binary_operation[name[baseurl] + call[call[name[a]][constant[href]].replace, parameter[constant[./], constant[]]]]
call[name[info_dict]][constant[Output file download link]] assign[=] name[download_url]
<ast.AugAssign object at 0x7da1b0e83040>
<ast.AugAssign object at 0x7da1b0e831f0>
return[name[info_dict]] | keyword[def] identifier[run_ppm_server] ( identifier[pdb_file] , identifier[outfile] , identifier[force_rerun] = keyword[False] ):
literal[string]
keyword[if] identifier[ssbio] . identifier[utils] . identifier[force_rerun] ( identifier[outfile] = identifier[outfile] , identifier[flag] = identifier[force_rerun] ):
identifier[url] = literal[string]
identifier[files] ={ literal[string] : identifier[open] ( identifier[pdb_file] , literal[string] )}
identifier[r] = identifier[requests] . identifier[post] ( identifier[url] , identifier[files] = identifier[files] )
identifier[info] = identifier[r] . identifier[text]
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[info] )
keyword[else] :
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[f] :
identifier[info] = identifier[f] . identifier[read] ()
identifier[t] = identifier[info] . identifier[replace] ( literal[string] , literal[string] )
identifier[tt] = identifier[t] . identifier[replace] ( literal[string] , literal[string] )
identifier[ttt] = identifier[tt] . identifier[replace] ( literal[string] , literal[string] )
identifier[soup] = identifier[BeautifulSoup] ( identifier[ttt] , literal[string] )
identifier[tables] = identifier[soup] . identifier[find_all] ( literal[string] , identifier[attrs] ={ literal[string] : literal[string] })
identifier[info_dict] ={}
identifier[table_index] = literal[int]
keyword[for] identifier[t] keyword[in] identifier[tables] :
identifier[data_index] = literal[int]
keyword[for] identifier[data] keyword[in] identifier[t] . identifier[find_all] ( literal[string] , identifier[attrs] ={ literal[string] : literal[string] }):
identifier[data_list] = identifier[list] ( identifier[data] . identifier[strings] )
keyword[if] identifier[table_index] == literal[int] :
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
keyword[if] identifier[table_index] == literal[int] keyword[and] identifier[data_index] == literal[int] :
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
keyword[if] identifier[table_index] == literal[int] keyword[and] identifier[data_index] == literal[int] :
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
keyword[if] identifier[table_index] == literal[int] :
identifier[info_dict] [ literal[string] ]= identifier[data_list] [ literal[int] ]
keyword[if] identifier[table_index] == literal[int] :
identifier[baseurl] = literal[string]
identifier[a] = identifier[data] . identifier[find] ( literal[string] , identifier[href] = keyword[True] )
identifier[download_url] = identifier[baseurl] + identifier[a] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[info_dict] [ literal[string] ]= identifier[download_url]
identifier[data_index] += literal[int]
identifier[table_index] += literal[int]
keyword[return] identifier[info_dict] | def run_ppm_server(pdb_file, outfile, force_rerun=False):
"""Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
url = 'http://sunshine.phar.umich.edu/upload_file.php'
files = {'userfile': open(pdb_file, 'rb')}
r = requests.post(url, files=files)
info = r.text
# Save results in raw HTML format
with open(outfile, 'w') as f:
f.write(info) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
# Utilize existing saved results
with open(outfile, 'r') as f:
info = f.read() # depends on [control=['with'], data=['f']]
# Clean up the HTML stuff
t = info.replace('\n', '')
tt = t.replace('\r', '')
ttt = tt.replace('\t', '')
soup = BeautifulSoup(ttt, 'lxml')
# Find all tables in the HTML code
tables = soup.find_all('table', attrs={'class': 'data'})
info_dict = {}
# There are multiple tables with information
table_index = 0
for t in tables:
data_index = 0
# "row1" contains data
for data in t.find_all('tr', attrs={'class': 'row1'}):
data_list = list(data.strings)
if table_index == 0:
info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
info_dict['deltaG_transfer'] = data_list[2]
info_dict['Tilt Angle'] = data_list[3] # depends on [control=['if'], data=[]]
if table_index == 1 and data_index == 0:
info_dict['Embedded_residues_Tilt'] = data_list[0]
info_dict['Embedded_residues'] = data_list[1] # depends on [control=['if'], data=[]]
if table_index == 1 and data_index == 1:
info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
info_dict['Transmembrane_secondary_structure_segments'] = data_list[1] # depends on [control=['if'], data=[]]
if table_index == 2:
info_dict['Output Messages'] = data_list[1] # depends on [control=['if'], data=[]]
if table_index == 3:
baseurl = 'http://sunshine.phar.umich.edu/'
a = data.find('a', href=True)
download_url = baseurl + a['href'].replace('./', '')
info_dict['Output file download link'] = download_url # depends on [control=['if'], data=[]]
data_index += 1 # depends on [control=['for'], data=['data']]
table_index += 1 # depends on [control=['for'], data=['t']]
return info_dict |
def has_any_role(*items):
    r"""A :func:`.check` that is added that checks if the member invoking the
    command has **any** of the roles specified. This means that if they have
    one out of the three roles specified, then this check will return `True`.
    Similar to :func:`.has_role`\, the names or IDs passed in must be exact.
    This check raises one of two special exceptions, :exc:`.MissingAnyRole` if the user
    is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
    Both inherit from :exc:`.CheckFailure`.
    .. versionchanged:: 1.1.0
        Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    Parameters
    -----------
    items: List[Union[:class:`str`, :class:`int`]]
        An argument list of names or IDs to check that the member has roles wise.
    Example
    --------
    .. code-block:: python3
        @bot.command()
        @commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
        async def cool(ctx):
            await ctx.send('You are cool indeed')
    """
    def predicate(ctx):
        # role checks only make sense inside a guild channel
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        get_role = functools.partial(discord.utils.get, ctx.author.roles)
        for item in items:
            # integers are treated as role IDs, everything else as names
            if isinstance(item, int):
                found = get_role(id=item)
            else:
                found = get_role(name=item)
            if found is not None:
                return True
        raise MissingAnyRole(items)
    return check(predicate)
constant[A :func:`.check` that is added that checks if the member invoking the
command has **any** of the roles specified. This means that if they have
one out of the three roles specified, then this check will return `True`.
Similar to :func:`.has_role`\, the names or IDs passed in must be exact.
This check raises one of two special exceptions, :exc:`.MissingAnyRole` if the user
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
items: List[Union[:class:`str`, :class:`int`]]
An argument list of names or IDs to check that the member has roles wise.
Example
--------
.. code-block:: python3
@bot.command()
@commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
async def cool(ctx):
await ctx.send('You are cool indeed')
]
def function[predicate, parameter[ctx]]:
if <ast.UnaryOp object at 0x7da1b1fe5a80> begin[:]
<ast.Raise object at 0x7da1b1fe5450>
variable[getter] assign[=] call[name[functools].partial, parameter[name[discord].utils.get, name[ctx].author.roles]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1fe5f00>]] begin[:]
return[constant[True]]
<ast.Raise object at 0x7da1b1fe5900>
return[call[name[check], parameter[name[predicate]]]] | keyword[def] identifier[has_any_role] (* identifier[items] ):
literal[string]
keyword[def] identifier[predicate] ( identifier[ctx] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[ctx] . identifier[channel] , identifier[discord] . identifier[abc] . identifier[GuildChannel] ):
keyword[raise] identifier[NoPrivateMessage] ()
identifier[getter] = identifier[functools] . identifier[partial] ( identifier[discord] . identifier[utils] . identifier[get] , identifier[ctx] . identifier[author] . identifier[roles] )
keyword[if] identifier[any] ( identifier[getter] ( identifier[id] = identifier[item] ) keyword[is] keyword[not] keyword[None] keyword[if] identifier[isinstance] ( identifier[item] , identifier[int] ) keyword[else] identifier[getter] ( identifier[name] = identifier[item] ) keyword[is] keyword[not] keyword[None] keyword[for] identifier[item] keyword[in] identifier[items] ):
keyword[return] keyword[True]
keyword[raise] identifier[MissingAnyRole] ( identifier[items] )
keyword[return] identifier[check] ( identifier[predicate] ) | def has_any_role(*items):
"""A :func:`.check` that is added that checks if the member invoking the
command has **any** of the roles specified. This means that if they have
one out of the three roles specified, then this check will return `True`.
Similar to :func:`.has_role`\\, the names or IDs passed in must be exact.
This check raises one of two special exceptions, :exc:`.MissingAnyRole` if the user
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
items: List[Union[:class:`str`, :class:`int`]]
An argument list of names or IDs to check that the member has roles wise.
Example
--------
.. code-block:: python3
@bot.command()
@commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
async def cool(ctx):
await ctx.send('You are cool indeed')
"""
def predicate(ctx):
if not isinstance(ctx.channel, discord.abc.GuildChannel):
raise NoPrivateMessage() # depends on [control=['if'], data=[]]
getter = functools.partial(discord.utils.get, ctx.author.roles)
if any((getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items)):
return True # depends on [control=['if'], data=[]]
raise MissingAnyRole(items)
return check(predicate) |
def forward(self, data_batch, is_train=None, carry_state=True):
    """Forward computation. States from previous forward computation are carried
    to the current iteration if `carry_state` is set to `True`.
    """
    if carry_state:
        # propagate states from the previous iteration; scalars are passed
        # as a fill value, anything else as explicit state arrays
        previous = self._next_states
        if isinstance(previous, (int, float)):
            self._module.set_states(value=previous)
        else:
            self._module.set_states(states=previous)
    self._module.forward(data_batch, is_train=is_train)
    outputs = self._module.get_outputs(merge_multi_context=False)
    # keep everything except the last output as next iteration's states
    self._next_states = outputs[:-1]
constant[Forward computation. States from previous forward computation are carried
to the current iteration if `carry_state` is set to `True`.
]
if name[carry_state] begin[:]
if call[name[isinstance], parameter[name[self]._next_states, tuple[[<ast.Name object at 0x7da1b1f21240>, <ast.Name object at 0x7da1b1f21210>]]]] begin[:]
call[name[self]._module.set_states, parameter[]]
call[name[self]._module.forward, parameter[name[data_batch]]]
variable[outputs] assign[=] call[name[self]._module.get_outputs, parameter[]]
name[self]._next_states assign[=] call[name[outputs]][<ast.Slice object at 0x7da1b1f20070>] | keyword[def] identifier[forward] ( identifier[self] , identifier[data_batch] , identifier[is_train] = keyword[None] , identifier[carry_state] = keyword[True] ):
literal[string]
keyword[if] identifier[carry_state] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_next_states] ,( identifier[int] , identifier[float] )):
identifier[self] . identifier[_module] . identifier[set_states] ( identifier[value] = identifier[self] . identifier[_next_states] )
keyword[else] :
identifier[self] . identifier[_module] . identifier[set_states] ( identifier[states] = identifier[self] . identifier[_next_states] )
identifier[self] . identifier[_module] . identifier[forward] ( identifier[data_batch] , identifier[is_train] = identifier[is_train] )
identifier[outputs] = identifier[self] . identifier[_module] . identifier[get_outputs] ( identifier[merge_multi_context] = keyword[False] )
identifier[self] . identifier[_next_states] = identifier[outputs] [:- literal[int] ] | def forward(self, data_batch, is_train=None, carry_state=True):
"""Forward computation. States from previous forward computation are carried
to the current iteration if `carry_state` is set to `True`.
"""
# propagate states from the previous iteration
if carry_state:
if isinstance(self._next_states, (int, float)):
self._module.set_states(value=self._next_states) # depends on [control=['if'], data=[]]
else:
self._module.set_states(states=self._next_states) # depends on [control=['if'], data=[]]
self._module.forward(data_batch, is_train=is_train)
outputs = self._module.get_outputs(merge_multi_context=False)
self._next_states = outputs[:-1] |
def get_parameter_p_value_too_high_warning(
    model_type, model_params, parameter, p_value, maximum_p_value
):
    """ Return an empty list or a single warning wrapped in a list indicating
    whether model parameter p-value is too high.
    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    model_params : :any:`dict`
        Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
    parameter : :any:`str`
        The name of the parameter, e.g., ``'intercept'``.
    p_value : :any:`float`
        The p-value of the parameter.
    maximum_p_value : :any:`float`
        The maximum allowable p-value of the parameter.
    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list or list of single warning.
    """
    # guard clause: nothing to report when the p-value is acceptable
    if p_value <= maximum_p_value:
        return []
    data = {
        "{}_p_value".format(parameter): p_value,
        "{}_maximum_p_value".format(parameter): maximum_p_value,
    }
    data.update(model_params)
    qualified_name = (
        "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
            model_type=model_type, parameter=parameter
        )
    )
    description = (
        "Model fit {parameter} p-value is too high. Candidate model rejected.".format(
            parameter=parameter
        )
    )
    return [
        EEMeterWarning(
            qualified_name=qualified_name, description=description, data=data
        )
    ]
constant[ Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
]
variable[warnings] assign[=] list[[]]
if compare[name[p_value] greater[>] name[maximum_p_value]] begin[:]
variable[data] assign[=] dictionary[[<ast.Call object at 0x7da18c4ce9b0>, <ast.Call object at 0x7da18c4cf0d0>], [<ast.Name object at 0x7da18c4ce7d0>, <ast.Name object at 0x7da18c4ce2f0>]]
call[name[data].update, parameter[name[model_params]]]
call[name[warnings].append, parameter[call[name[EEMeterWarning], parameter[]]]]
return[name[warnings]] | keyword[def] identifier[get_parameter_p_value_too_high_warning] (
identifier[model_type] , identifier[model_params] , identifier[parameter] , identifier[p_value] , identifier[maximum_p_value]
):
literal[string]
identifier[warnings] =[]
keyword[if] identifier[p_value] > identifier[maximum_p_value] :
identifier[data] ={
literal[string] . identifier[format] ( identifier[parameter] ): identifier[p_value] ,
literal[string] . identifier[format] ( identifier[parameter] ): identifier[maximum_p_value] ,
}
identifier[data] . identifier[update] ( identifier[model_params] )
identifier[warnings] . identifier[append] (
identifier[EEMeterWarning] (
identifier[qualified_name] =(
literal[string] . identifier[format] (
identifier[model_type] = identifier[model_type] , identifier[parameter] = identifier[parameter]
)
),
identifier[description] =(
literal[string] . identifier[format] (
identifier[parameter] = identifier[parameter]
)
),
identifier[data] = identifier[data] ,
)
)
keyword[return] identifier[warnings] | def get_parameter_p_value_too_high_warning(model_type, model_params, parameter, p_value, maximum_p_value):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {'{}_p_value'.format(parameter): p_value, '{}_maximum_p_value'.format(parameter): maximum_p_value}
data.update(model_params)
warnings.append(EEMeterWarning(qualified_name='eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high'.format(model_type=model_type, parameter=parameter), description='Model fit {parameter} p-value is too high. Candidate model rejected.'.format(parameter=parameter), data=data)) # depends on [control=['if'], data=['p_value', 'maximum_p_value']]
return warnings |
def info(self, *args):
    """Log an informational message. Used for normal operation."""
    # bail out early when INFO logging is disabled for this category
    if _canShortcutLogging(self.logCategory, INFO):
        return
    formatted_args = self.logFunction(*args)
    infoObject(self.logObjectName(), self.logCategory, *formatted_args)
constant[Log an informational message. Used for normal operation.]
if call[name[_canShortcutLogging], parameter[name[self].logCategory, name[INFO]]] begin[:]
return[None]
call[name[infoObject], parameter[call[name[self].logObjectName, parameter[]], name[self].logCategory, <ast.Starred object at 0x7da1b0aba950>]] | keyword[def] identifier[info] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[_canShortcutLogging] ( identifier[self] . identifier[logCategory] , identifier[INFO] ):
keyword[return]
identifier[infoObject] ( identifier[self] . identifier[logObjectName] (), identifier[self] . identifier[logCategory] ,
* identifier[self] . identifier[logFunction] (* identifier[args] )) | def info(self, *args):
"""Log an informational message. Used for normal operation."""
if _canShortcutLogging(self.logCategory, INFO):
return # depends on [control=['if'], data=[]]
infoObject(self.logObjectName(), self.logCategory, *self.logFunction(*args)) |
def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **kwargs):
    '''
    Add new media to multiple users.

    .. versionadded:: 2016.3.0

    :param userids: ID of the user that uses the media
    :param active: Whether the media is enabled (0 enabled, 1 disabled)
    :param mediatypeid: ID of the media type used by the media
    :param period: Time when the notifications can be sent as a time period
    :param sendto: Address, user name or other identifier of the recipient
    :param severity: Trigger severities to send notifications about
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    :return: IDs of the created media.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.user_addmedia 4 active=0 mediatypeid=1 period='1-7,00:00-24:00' sendto='support2@example.com'
        severity=63
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if not conn_args:
            # No connection -> reuse the KeyError path to return the
            # (empty) ``ret`` below, matching this module's convention.
            raise KeyError
        # Accept a single user id as well as a list of ids.
        user_list = userids if isinstance(userids, list) else [userids]
        params = {
            "users": [{"userid": uid} for uid in user_list],
            "medias": [{"active": active, "mediatypeid": mediatypeid,
                        "period": period, "sendto": sendto,
                        "severity": severity}],
        }
        ret = _query('user.addmedia', params, conn_args['url'], conn_args['auth'])
        return ret['result']['mediaids']
    except KeyError:
        return ret
constant[
Add new media to multiple users.
.. versionadded:: 2016.3.0
:param userids: ID of the user that uses the media
:param active: Whether the media is enabled (0 enabled, 1 disabled)
:param mediatypeid: ID of the media type used by the media
:param period: Time when the notifications can be sent as a time period
:param sendto: Address, user name or other identifier of the recipient
:param severity: Trigger severities to send notifications about
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: IDs of the created media.
CLI Example:
.. code-block:: bash
salt '*' zabbix.user_addmedia 4 active=0 mediatypeid=1 period='1-7,00:00-24:00' sendto='support2@example.com'
severity=63
]
variable[conn_args] assign[=] call[name[_login], parameter[]]
variable[ret] assign[=] dictionary[[], []]
<ast.Try object at 0x7da18dc06350> | keyword[def] identifier[user_addmedia] ( identifier[userids] , identifier[active] , identifier[mediatypeid] , identifier[period] , identifier[sendto] , identifier[severity] ,** identifier[kwargs] ):
literal[string]
identifier[conn_args] = identifier[_login] (** identifier[kwargs] )
identifier[ret] ={}
keyword[try] :
keyword[if] identifier[conn_args] :
identifier[method] = literal[string]
identifier[params] ={ literal[string] :[]}
keyword[if] keyword[not] identifier[isinstance] ( identifier[userids] , identifier[list] ):
identifier[userids] =[ identifier[userids] ]
keyword[for] identifier[user] keyword[in] identifier[userids] :
identifier[params] [ literal[string] ]. identifier[append] ({ literal[string] : identifier[user] })
identifier[params] [ literal[string] ]=[{ literal[string] : identifier[active] , literal[string] : identifier[mediatypeid] , literal[string] : identifier[period] ,
literal[string] : identifier[sendto] , literal[string] : identifier[severity] },]
identifier[ret] = identifier[_query] ( identifier[method] , identifier[params] , identifier[conn_args] [ literal[string] ], identifier[conn_args] [ literal[string] ])
keyword[return] identifier[ret] [ literal[string] ][ literal[string] ]
keyword[else] :
keyword[raise] identifier[KeyError]
keyword[except] identifier[KeyError] :
keyword[return] identifier[ret] | def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **kwargs):
"""
Add new media to multiple users.
.. versionadded:: 2016.3.0
:param userids: ID of the user that uses the media
:param active: Whether the media is enabled (0 enabled, 1 disabled)
:param mediatypeid: ID of the media type used by the media
:param period: Time when the notifications can be sent as a time period
:param sendto: Address, user name or other identifier of the recipient
:param severity: Trigger severities to send notifications about
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: IDs of the created media.
CLI Example:
.. code-block:: bash
salt '*' zabbix.user_addmedia 4 active=0 mediatypeid=1 period='1-7,00:00-24:00' sendto='support2@example.com'
severity=63
"""
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'user.addmedia'
params = {'users': []}
# Users
if not isinstance(userids, list):
userids = [userids] # depends on [control=['if'], data=[]]
for user in userids:
params['users'].append({'userid': user}) # depends on [control=['for'], data=['user']]
# Medias
params['medias'] = [{'active': active, 'mediatypeid': mediatypeid, 'period': period, 'sendto': sendto, 'severity': severity}]
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['mediaids'] # depends on [control=['if'], data=[]]
else:
raise KeyError # depends on [control=['try'], data=[]]
except KeyError:
return ret # depends on [control=['except'], data=[]] |
def set_wizard_step_description(self):
    """Set the text for description.

    Builds the multi-classification label text from the hazard
    subcategory and field selected in earlier wizard steps, picking a
    template based on layer type (raster vs. vector) and layer mode
    (continuous vs. classified), then writes it to the label widget.
    """
    # Hazard subcategory chosen in the preceding wizard step.
    subcategory = self.parent.step_kw_subcategory.selected_subcategory()
    # NOTE(review): only used by the vector templates below; despite the
    # singular name it comes from selected_fields() -- confirm intent.
    field = self.parent.step_kw_field.selected_fields()
    is_raster = is_raster_layer(self.parent.layer)
    if is_raster:
        if self.layer_mode == layer_mode_continuous:
            text_label = multiple_continuous_hazard_classifications_raster
        else:
            text_label = multiple_classified_hazard_classifications_raster
        # noinspection PyAugmentAssignment
        # Raster templates interpolate (subcategory name, layer purpose).
        text_label = text_label % (
            subcategory['name'], self.layer_purpose['name'])
    else:
        if self.layer_mode == layer_mode_continuous:
            text_label = multiple_continuous_hazard_classifications_vector
        else:
            text_label = multiple_classified_hazard_classifications_vector
        # noinspection PyAugmentAssignment
        # Vector templates additionally interpolate the selected field.
        text_label = text_label % (
            subcategory['name'], self.layer_purpose['name'], field)
    self.multi_classifications_label.setText(text_label)
constant[Set the text for description.]
variable[subcategory] assign[=] call[name[self].parent.step_kw_subcategory.selected_subcategory, parameter[]]
variable[field] assign[=] call[name[self].parent.step_kw_field.selected_fields, parameter[]]
variable[is_raster] assign[=] call[name[is_raster_layer], parameter[name[self].parent.layer]]
if name[is_raster] begin[:]
if compare[name[self].layer_mode equal[==] name[layer_mode_continuous]] begin[:]
variable[text_label] assign[=] name[multiple_continuous_hazard_classifications_raster]
variable[text_label] assign[=] binary_operation[name[text_label] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da2044c3640>, <ast.Subscript object at 0x7da2044c18d0>]]]
call[name[self].multi_classifications_label.setText, parameter[name[text_label]]] | keyword[def] identifier[set_wizard_step_description] ( identifier[self] ):
literal[string]
identifier[subcategory] = identifier[self] . identifier[parent] . identifier[step_kw_subcategory] . identifier[selected_subcategory] ()
identifier[field] = identifier[self] . identifier[parent] . identifier[step_kw_field] . identifier[selected_fields] ()
identifier[is_raster] = identifier[is_raster_layer] ( identifier[self] . identifier[parent] . identifier[layer] )
keyword[if] identifier[is_raster] :
keyword[if] identifier[self] . identifier[layer_mode] == identifier[layer_mode_continuous] :
identifier[text_label] = identifier[multiple_continuous_hazard_classifications_raster]
keyword[else] :
identifier[text_label] = identifier[multiple_classified_hazard_classifications_raster]
identifier[text_label] = identifier[text_label] %(
identifier[subcategory] [ literal[string] ], identifier[self] . identifier[layer_purpose] [ literal[string] ])
keyword[else] :
keyword[if] identifier[self] . identifier[layer_mode] == identifier[layer_mode_continuous] :
identifier[text_label] = identifier[multiple_continuous_hazard_classifications_vector]
keyword[else] :
identifier[text_label] = identifier[multiple_classified_hazard_classifications_vector]
identifier[text_label] = identifier[text_label] %(
identifier[subcategory] [ literal[string] ], identifier[self] . identifier[layer_purpose] [ literal[string] ], identifier[field] )
identifier[self] . identifier[multi_classifications_label] . identifier[setText] ( identifier[text_label] ) | def set_wizard_step_description(self):
"""Set the text for description."""
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
field = self.parent.step_kw_field.selected_fields()
is_raster = is_raster_layer(self.parent.layer)
if is_raster:
if self.layer_mode == layer_mode_continuous:
text_label = multiple_continuous_hazard_classifications_raster # depends on [control=['if'], data=[]]
else:
text_label = multiple_classified_hazard_classifications_raster
# noinspection PyAugmentAssignment
text_label = text_label % (subcategory['name'], self.layer_purpose['name']) # depends on [control=['if'], data=[]]
else:
if self.layer_mode == layer_mode_continuous:
text_label = multiple_continuous_hazard_classifications_vector # depends on [control=['if'], data=[]]
else:
text_label = multiple_classified_hazard_classifications_vector
# noinspection PyAugmentAssignment
text_label = text_label % (subcategory['name'], self.layer_purpose['name'], field)
self.multi_classifications_label.setText(text_label) |
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate the pattern against both sanity checks before delegating.
    checks = (
        (_CHECK_RECURSIVE_GLOB,
         'invalid glob %r: recursive glob "**" must be used alone'),
        (_CHECK_MISMATCH_SET,
         "invalid glob %r: mismatching set marker '{' or '}'"),
    )
    for pattern, message in checks:
        if pattern.search(path_glob):
            raise ValueError(message % path_glob)
    return _iglob(path_glob)
constant[Extended globbing function that supports ** and {opt1,opt2,opt3}.]
if call[name[_CHECK_RECURSIVE_GLOB].search, parameter[name[path_glob]]] begin[:]
variable[msg] assign[=] constant[invalid glob %r: recursive glob "**" must be used alone]
<ast.Raise object at 0x7da1b1f815d0>
if call[name[_CHECK_MISMATCH_SET].search, parameter[name[path_glob]]] begin[:]
variable[msg] assign[=] constant[invalid glob %r: mismatching set marker '{' or '}']
<ast.Raise object at 0x7da1b1f80a00>
return[call[name[_iglob], parameter[name[path_glob]]]] | keyword[def] identifier[iglob] ( identifier[path_glob] ):
literal[string]
keyword[if] identifier[_CHECK_RECURSIVE_GLOB] . identifier[search] ( identifier[path_glob] ):
identifier[msg] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[msg] % identifier[path_glob] )
keyword[if] identifier[_CHECK_MISMATCH_SET] . identifier[search] ( identifier[path_glob] ):
identifier[msg] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[msg] % identifier[path_glob] )
keyword[return] identifier[_iglob] ( identifier[path_glob] ) | def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = 'invalid glob %r: recursive glob "**" must be used alone'
raise ValueError(msg % path_glob) # depends on [control=['if'], data=[]]
if _CHECK_MISMATCH_SET.search(path_glob):
msg = "invalid glob %r: mismatching set marker '{' or '}'"
raise ValueError(msg % path_glob) # depends on [control=['if'], data=[]]
return _iglob(path_glob) |
def datadir_init(name,
                 auth='password',
                 user=None,
                 password=None,
                 encoding='UTF8',
                 locale=None,
                 waldir=None,
                 checksums=False,
                 runas=None):
    '''
    .. versionadded:: 2016.3.0

    Initializes a postgres data directory

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.datadir_init '/var/lib/pgsql/data'

    name
        The name of the directory to initialize

    auth
        The default authentication method for local connections

    password
        The password to set for the postgres user

    user
        The database superuser name

    encoding
        The default encoding for new databases

    locale
        The default locale for new databases

    waldir
        The transaction log (WAL) directory (default is to keep WAL
        inside the data directory)

        .. versionadded:: 2019.2.0

    checksums
        If True, the cluster will be created with data page checksums.

        .. note:: Data page checksums are supported since PostgreSQL 9.3.

        .. versionadded:: 2019.2.0

    runas
        The system user the operation should be performed on behalf of
    '''
    if datadir_exists(name):
        log.info('%s already exists', name)
        return False

    # Forward *all* accepted options to initdb.  ``waldir`` and
    # ``checksums`` were previously accepted (and documented) but
    # silently dropped, so those options had no effect.
    ret = _run_initdb(
        name,
        auth=auth,
        user=user,
        password=password,
        encoding=encoding,
        locale=locale,
        waldir=waldir,
        checksums=checksums,
        runas=runas)
    return ret['retcode'] == 0
constant[
.. versionadded:: 2016.3.0
Initializes a postgres data directory
CLI Example:
.. code-block:: bash
salt '*' postgres.datadir_init '/var/lib/pgsql/data'
name
The name of the directory to initialize
auth
The default authentication method for local connections
password
The password to set for the postgres user
user
The database superuser name
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of
]
if call[name[datadir_exists], parameter[name[name]]] begin[:]
call[name[log].info, parameter[constant[%s already exists], name[name]]]
return[constant[False]]
variable[ret] assign[=] call[name[_run_initdb], parameter[name[name]]]
return[compare[call[name[ret]][constant[retcode]] equal[==] constant[0]]] | keyword[def] identifier[datadir_init] ( identifier[name] ,
identifier[auth] = literal[string] ,
identifier[user] = keyword[None] ,
identifier[password] = keyword[None] ,
identifier[encoding] = literal[string] ,
identifier[locale] = keyword[None] ,
identifier[waldir] = keyword[None] ,
identifier[checksums] = keyword[False] ,
identifier[runas] = keyword[None] ):
literal[string]
keyword[if] identifier[datadir_exists] ( identifier[name] ):
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
keyword[return] keyword[False]
identifier[ret] = identifier[_run_initdb] (
identifier[name] ,
identifier[auth] = identifier[auth] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[encoding] = identifier[encoding] ,
identifier[locale] = identifier[locale] ,
identifier[runas] = identifier[runas] )
keyword[return] identifier[ret] [ literal[string] ]== literal[int] | def datadir_init(name, auth='password', user=None, password=None, encoding='UTF8', locale=None, waldir=None, checksums=False, runas=None):
"""
.. versionadded:: 2016.3.0
Initializes a postgres data directory
CLI Example:
.. code-block:: bash
salt '*' postgres.datadir_init '/var/lib/pgsql/data'
name
The name of the directory to initialize
auth
The default authentication method for local connections
password
The password to set for the postgres user
user
The database superuser name
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of
"""
if datadir_exists(name):
log.info('%s already exists', name)
return False # depends on [control=['if'], data=[]]
ret = _run_initdb(name, auth=auth, user=user, password=password, encoding=encoding, locale=locale, runas=runas)
return ret['retcode'] == 0 |
def bytes_to_string(raw):
    """Convert bytes to string.

    Decodes *raw* as UTF-8 with C-string semantics: everything from the
    first NUL (0x00) byte onward is discarded.  *raw* may be ``bytes``,
    a ``bytearray`` or any iterable of byte values.

    The previous implementation rebuilt a ``bytes`` object one byte at a
    time (quadratic); a single split + decode is O(n).
    """
    # bytes() accepts bytes/bytearray unchanged and also an iterable of
    # ints, matching the original per-byte iteration.
    return bytes(raw).split(b"\x00", 1)[0].decode("utf-8")
constant[Convert bytes to string.]
variable[ret] assign[=] call[name[bytes], parameter[]]
for taget[name[byte]] in starred[name[raw]] begin[:]
if compare[name[byte] equal[==] constant[0]] begin[:]
return[call[name[ret].decode, parameter[constant[utf-8]]]]
<ast.AugAssign object at 0x7da1b26ad960>
return[call[name[ret].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[bytes_to_string] ( identifier[raw] ):
literal[string]
identifier[ret] = identifier[bytes] ()
keyword[for] identifier[byte] keyword[in] identifier[raw] :
keyword[if] identifier[byte] == literal[int] :
keyword[return] identifier[ret] . identifier[decode] ( literal[string] )
identifier[ret] += identifier[bytes] ([ identifier[byte] ])
keyword[return] identifier[ret] . identifier[decode] ( literal[string] ) | def bytes_to_string(raw):
"""Convert bytes to string."""
ret = bytes()
for byte in raw:
if byte == 0:
return ret.decode('utf-8') # depends on [control=['if'], data=[]]
ret += bytes([byte]) # depends on [control=['for'], data=['byte']]
return ret.decode('utf-8') |
def init(self, n=0, ftype="real", colfac=1.0e-8, lmfac=1.0e-3, fid=0):
    """Set selected properties of the fitserver instance.

    Like in the constructor, the number of unknowns to be solved for;
    the number of simultaneous solutions; the ftype and the collinearity
    and Levenberg-Marquardt factor can be specified. Individual values can
    be overwritten with the :meth:`set` function.

    :param n: number of unknowns
    :param ftype: type of solution
                  Allowed: real, complex, separable, asreal, conjugate
    :param colfac: collinearity factor
    :param lmfac: Levenberg-Marquardt factor
    :param fid: the id of a sub-fitter
    :returns: True on success, False if the underlying proxy failed to
        initialize.  (Previously the success path fell through and
        returned None, which is falsy and therefore indistinguishable
        from failure for callers testing the result.)
    """
    ftype = self._gettype(ftype)
    state = self._fitids[fid]
    # Reset all cached per-fitter status flags before re-initializing.
    state["stat"] = False
    state["solved"] = False
    state["haserr"] = False
    state["fit"] = False
    state["looped"] = False
    if not self._fitproxy.init(fid, n, ftype, colfac, lmfac):
        return False
    state["stat"] = self._getstate(fid)
    return True
constant[Set selected properties of the fitserver instance.
Like in the constructor, the number of unknowns to be solved for;
the number of simultaneous solutions; the ftype and the collinearity
and Levenberg-Marquardt factor can be specified. Individual values can
be overwritten with the :meth:`set` function.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter
]
variable[ftype] assign[=] call[name[self]._gettype, parameter[name[ftype]]]
call[call[name[self]._fitids][name[fid]]][constant[stat]] assign[=] constant[False]
call[call[name[self]._fitids][name[fid]]][constant[solved]] assign[=] constant[False]
call[call[name[self]._fitids][name[fid]]][constant[haserr]] assign[=] constant[False]
call[call[name[self]._fitids][name[fid]]][constant[fit]] assign[=] constant[False]
call[call[name[self]._fitids][name[fid]]][constant[looped]] assign[=] constant[False]
if call[name[self]._fitproxy.init, parameter[name[fid], name[n], name[ftype], name[colfac], name[lmfac]]] begin[:]
call[call[name[self]._fitids][name[fid]]][constant[stat]] assign[=] call[name[self]._getstate, parameter[name[fid]]] | keyword[def] identifier[init] ( identifier[self] , identifier[n] = literal[int] , identifier[ftype] = literal[string] , identifier[colfac] = literal[int] , identifier[lmfac] = literal[int] , identifier[fid] = literal[int] ):
literal[string]
identifier[ftype] = identifier[self] . identifier[_gettype] ( identifier[ftype] )
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= keyword[False]
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= keyword[False]
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= keyword[False]
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= keyword[False]
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= keyword[False]
keyword[if] identifier[self] . identifier[_fitproxy] . identifier[init] ( identifier[fid] , identifier[n] , identifier[ftype] , identifier[colfac] , identifier[lmfac] ):
identifier[self] . identifier[_fitids] [ identifier[fid] ][ literal[string] ]= identifier[self] . identifier[_getstate] ( identifier[fid] )
keyword[else] :
keyword[return] keyword[False] | def init(self, n=0, ftype='real', colfac=1e-08, lmfac=0.001, fid=0):
"""Set selected properties of the fitserver instance.
Like in the constructor, the number of unknowns to be solved for;
the number of simultaneous solutions; the ftype and the collinearity
and Levenberg-Marquardt factor can be specified. Individual values can
be overwritten with the :meth:`set` function.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter
"""
ftype = self._gettype(ftype)
self._fitids[fid]['stat'] = False
self._fitids[fid]['solved'] = False
self._fitids[fid]['haserr'] = False
self._fitids[fid]['fit'] = False
self._fitids[fid]['looped'] = False
if self._fitproxy.init(fid, n, ftype, colfac, lmfac):
self._fitids[fid]['stat'] = self._getstate(fid) # depends on [control=['if'], data=[]]
else:
return False |
def xml_findall(xpath):
    """Find a list of XML elements via xpath."""
    def _finder(element):
        # Reject anything that is not an ElementTree element first.
        validate(ET.iselement, element)
        return element.findall(xpath)
    return transform(_finder)
constant[Find a list of XML elements via xpath.]
def function[xpath_findall, parameter[value]]:
call[name[validate], parameter[name[ET].iselement, name[value]]]
return[call[name[value].findall, parameter[name[xpath]]]]
return[call[name[transform], parameter[name[xpath_findall]]]] | keyword[def] identifier[xml_findall] ( identifier[xpath] ):
literal[string]
keyword[def] identifier[xpath_findall] ( identifier[value] ):
identifier[validate] ( identifier[ET] . identifier[iselement] , identifier[value] )
keyword[return] identifier[value] . identifier[findall] ( identifier[xpath] )
keyword[return] identifier[transform] ( identifier[xpath_findall] ) | def xml_findall(xpath):
"""Find a list of XML elements via xpath."""
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall) |
def process_pub_date(year, mon, day):
    """Create pub_date from what Pubmed provides in Journal PubDate entry

    If *mon* is an alphabetic month abbreviation (e.g. "Jan"), the date
    is parsed and normalized to YYYY-MM-DD; otherwise the components are
    joined verbatim.  Returns None when *year* is falsy.
    """
    if not year:
        return None
    if re.match("[a-zA-Z]+", mon):
        parsed = datetime.datetime.strptime(f"{year}-{mon}-{day}", "%Y-%b-%d")
        return parsed.strftime("%Y-%m-%d")
    return f"{year}-{mon}-{day}"
constant[Create pub_date from what Pubmed provides in Journal PubDate entry
]
variable[pub_date] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b19cc0a0> begin[:]
variable[pub_date] assign[=] call[call[name[datetime].datetime.strptime, parameter[<ast.JoinedStr object at 0x7da1b19cd600>, constant[%Y-%b-%d]]].strftime, parameter[constant[%Y-%m-%d]]]
return[name[pub_date]] | keyword[def] identifier[process_pub_date] ( identifier[year] , identifier[mon] , identifier[day] ):
literal[string]
identifier[pub_date] = keyword[None]
keyword[if] identifier[year] keyword[and] identifier[re] . identifier[match] ( literal[string] , identifier[mon] ):
identifier[pub_date] = identifier[datetime] . identifier[datetime] . identifier[strptime] (
literal[string] , literal[string]
). identifier[strftime] ( literal[string] )
keyword[elif] identifier[year] :
identifier[pub_date] = literal[string]
keyword[return] identifier[pub_date] | def process_pub_date(year, mon, day):
"""Create pub_date from what Pubmed provides in Journal PubDate entry
"""
pub_date = None
if year and re.match('[a-zA-Z]+', mon):
pub_date = datetime.datetime.strptime(f'{year}-{mon}-{day}', '%Y-%b-%d').strftime('%Y-%m-%d') # depends on [control=['if'], data=[]]
elif year:
pub_date = f'{year}-{mon}-{day}' # depends on [control=['if'], data=[]]
return pub_date |
def hold_available(self):
    """Available holdings: net amount per code, sorted by code.

    Sums ``history_table.amount`` grouped by ``code``; codes whose net
    position is exactly zero (fully closed) are dropped.
    """
    net_amount = self.history_table.groupby('code').amount.sum()
    # Zero net amount means the position is fully closed -- remove it.
    open_positions = net_amount.replace(0, np.nan).dropna()
    return open_positions.sort_index()
constant[可用持仓
]
return[call[call[call[call[call[name[self].history_table.groupby, parameter[constant[code]]].amount.sum, parameter[]].replace, parameter[constant[0], name[np].nan]].dropna, parameter[]].sort_index, parameter[]]] | keyword[def] identifier[hold_available] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[history_table] . identifier[groupby] ( literal[string] ). identifier[amount] . identifier[sum] (). identifier[replace] (
literal[int] ,
identifier[np] . identifier[nan]
). identifier[dropna] (). identifier[sort_index] () | def hold_available(self):
"""可用持仓
"""
return self.history_table.groupby('code').amount.sum().replace(0, np.nan).dropna().sort_index() |
def _get_or_add(self, prop_name):
"""
Return element returned by 'get_or_add_' method for *prop_name*.
"""
get_or_add_method_name = 'get_or_add_%s' % prop_name
get_or_add_method = getattr(self, get_or_add_method_name)
element = get_or_add_method()
return element | def function[_get_or_add, parameter[self, prop_name]]:
constant[
Return element returned by 'get_or_add_' method for *prop_name*.
]
variable[get_or_add_method_name] assign[=] binary_operation[constant[get_or_add_%s] <ast.Mod object at 0x7da2590d6920> name[prop_name]]
variable[get_or_add_method] assign[=] call[name[getattr], parameter[name[self], name[get_or_add_method_name]]]
variable[element] assign[=] call[name[get_or_add_method], parameter[]]
return[name[element]] | keyword[def] identifier[_get_or_add] ( identifier[self] , identifier[prop_name] ):
literal[string]
identifier[get_or_add_method_name] = literal[string] % identifier[prop_name]
identifier[get_or_add_method] = identifier[getattr] ( identifier[self] , identifier[get_or_add_method_name] )
identifier[element] = identifier[get_or_add_method] ()
keyword[return] identifier[element] | def _get_or_add(self, prop_name):
"""
Return element returned by 'get_or_add_' method for *prop_name*.
"""
get_or_add_method_name = 'get_or_add_%s' % prop_name
get_or_add_method = getattr(self, get_or_add_method_name)
element = get_or_add_method()
return element |
def header(
        self,
        text,
        level):
    """*convert plain-text to MMD header*

    **Key Arguments:**
        - ``text`` -- the text to convert to MMD header
        - ``level`` -- the header level to convert the text to

    **Return:**
        - ``header`` -- the MMD header

    **Usage:**

        To convert a text MMD header:

        .. code-block:: python

            header = md.header(" This is my header ", level=3)
            print header

            # OUTPUT:
            # ### This is my header
            #
    """
    # self.reWS splits the text into (leading-ws, core, trailing-ws);
    # only the whitespace-stripped core is used.  The unused prefix/
    # suffix locals in the previous version were dead code.
    m = self.reWS.match(text)
    text = m.group(2)
    return "#" * level + " %(text)s \n" % locals()
constant[*convert plain-text to MMD header*
**Key Arguments:**
- ``text`` -- the text to convert to MMD header
- ``level`` -- the header level to convert the text to
**Return:**
- ``header`` -- the MMD header
**Usage:**
To convert a text MMD header:
.. code-block:: python
header = md.header(" This is my header ", level=3)
print header
# OUTPUT:
# ### This is my header
#
]
variable[m] assign[=] call[name[self].reWS.match, parameter[name[text]]]
variable[prefix] assign[=] call[name[m].group, parameter[constant[1]]]
variable[text] assign[=] call[name[m].group, parameter[constant[2]]]
variable[suffix] assign[=] call[name[m].group, parameter[constant[3]]]
return[binary_operation[binary_operation[constant[#] * name[level]] + binary_operation[constant[ %(text)s
] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]]] | keyword[def] identifier[header] (
identifier[self] ,
identifier[text] ,
identifier[level] ):
literal[string]
identifier[m] = identifier[self] . identifier[reWS] . identifier[match] ( identifier[text] )
identifier[prefix] = identifier[m] . identifier[group] ( literal[int] )
identifier[text] = identifier[m] . identifier[group] ( literal[int] )
identifier[suffix] = identifier[m] . identifier[group] ( literal[int] )
keyword[return] literal[string] * identifier[level] + literal[string] % identifier[locals] () | def header(self, text, level):
"""*convert plain-text to MMD header*
**Key Arguments:**
- ``text`` -- the text to convert to MMD header
- ``level`` -- the header level to convert the text to
**Return:**
- ``header`` -- the MMD header
**Usage:**
To convert a text MMD header:
.. code-block:: python
header = md.header(" This is my header ", level=3)
print header
# OUTPUT:
# ### This is my header
#
"""
m = self.reWS.match(text)
prefix = m.group(1)
text = m.group(2)
suffix = m.group(3)
return '#' * level + ' %(text)s \n' % locals() |
def register(linter):
    '''
    Required method to auto register this checker
    '''
    # Instantiate and register every checker this plugin provides.
    checker_classes = (
        ResourceLeakageChecker,
        BlacklistedImportsChecker,
        MovedTestCaseClassChecker,
        BlacklistedLoaderModulesUsageChecker,
        BlacklistedFunctionsChecker,
    )
    for checker_cls in checker_classes:
        linter.register_checker(checker_cls(linter))
constant[
Required method to auto register this checker
]
call[name[linter].register_checker, parameter[call[name[ResourceLeakageChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[BlacklistedImportsChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[MovedTestCaseClassChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[BlacklistedLoaderModulesUsageChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[BlacklistedFunctionsChecker], parameter[name[linter]]]]] | keyword[def] identifier[register] ( identifier[linter] ):
literal[string]
identifier[linter] . identifier[register_checker] ( identifier[ResourceLeakageChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[BlacklistedImportsChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[MovedTestCaseClassChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[BlacklistedLoaderModulesUsageChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[BlacklistedFunctionsChecker] ( identifier[linter] )) | def register(linter):
"""
Required method to auto register this checker
"""
linter.register_checker(ResourceLeakageChecker(linter))
linter.register_checker(BlacklistedImportsChecker(linter))
linter.register_checker(MovedTestCaseClassChecker(linter))
linter.register_checker(BlacklistedLoaderModulesUsageChecker(linter))
linter.register_checker(BlacklistedFunctionsChecker(linter)) |
def converted_gate_set(circuit: circuits.Circuit,
                       no_clifford_gates: bool = False,
                       atol: float = 1e-8,
                       ) -> circuits.Circuit:
    """Return a new circuit equivalent to *circuit* over the gate set
    {SingleQubitCliffordGate, CZ/PauliInteractionGate, PauliStringPhasor}.

    Args:
        circuit: The circuit to convert; it is copied, not mutated.
        no_clifford_gates: When True, single-qubit Clifford gates are not
            kept as-is during the Pauli-string-phasor conversion.
        atol: Absolute tolerance forwarded to the phasor conversion.
    """
    # Work on a copy so the caller's circuit is left untouched.
    result = circuits.Circuit(circuit)
    # The conversion is a fixed pipeline of in-place optimization passes.
    passes = (
        optimizers.ConvertToCzAndSingleGates(),
        optimizers.MergeSingleQubitGates(),
        ConvertToPauliStringPhasors(ignore_failures=True,
                                    keep_clifford=not no_clifford_gates,
                                    atol=atol),
        optimizers.DropEmptyMoments(),
    )
    for conversion_pass in passes:
        conversion_pass.optimize_circuit(result)
    return result
constant[Returns a new, equivalent circuit using the gate set
{SingleQubitCliffordGate,
CZ/PauliInteractionGate, PauliStringPhasor}.
]
variable[conv_circuit] assign[=] call[name[circuits].Circuit, parameter[name[circuit]]]
call[call[name[optimizers].ConvertToCzAndSingleGates, parameter[]].optimize_circuit, parameter[name[conv_circuit]]]
call[call[name[optimizers].MergeSingleQubitGates, parameter[]].optimize_circuit, parameter[name[conv_circuit]]]
call[call[name[ConvertToPauliStringPhasors], parameter[]].optimize_circuit, parameter[name[conv_circuit]]]
call[call[name[optimizers].DropEmptyMoments, parameter[]].optimize_circuit, parameter[name[conv_circuit]]]
return[name[conv_circuit]] | keyword[def] identifier[converted_gate_set] ( identifier[circuit] : identifier[circuits] . identifier[Circuit] ,
identifier[no_clifford_gates] : identifier[bool] = keyword[False] ,
identifier[atol] : identifier[float] = literal[int] ,
)-> identifier[circuits] . identifier[Circuit] :
literal[string]
identifier[conv_circuit] = identifier[circuits] . identifier[Circuit] ( identifier[circuit] )
identifier[optimizers] . identifier[ConvertToCzAndSingleGates] (). identifier[optimize_circuit] ( identifier[conv_circuit] )
identifier[optimizers] . identifier[MergeSingleQubitGates] (). identifier[optimize_circuit] ( identifier[conv_circuit] )
identifier[ConvertToPauliStringPhasors] ( identifier[ignore_failures] = keyword[True] ,
identifier[keep_clifford] = keyword[not] identifier[no_clifford_gates] ,
identifier[atol] = identifier[atol] ,
). identifier[optimize_circuit] ( identifier[conv_circuit] )
identifier[optimizers] . identifier[DropEmptyMoments] (). identifier[optimize_circuit] ( identifier[conv_circuit] )
keyword[return] identifier[conv_circuit] | def converted_gate_set(circuit: circuits.Circuit, no_clifford_gates: bool=False, atol: float=1e-08) -> circuits.Circuit:
"""Returns a new, equivalent circuit using the gate set
{SingleQubitCliffordGate,
CZ/PauliInteractionGate, PauliStringPhasor}.
"""
conv_circuit = circuits.Circuit(circuit)
optimizers.ConvertToCzAndSingleGates().optimize_circuit(conv_circuit)
optimizers.MergeSingleQubitGates().optimize_circuit(conv_circuit)
ConvertToPauliStringPhasors(ignore_failures=True, keep_clifford=not no_clifford_gates, atol=atol).optimize_circuit(conv_circuit)
optimizers.DropEmptyMoments().optimize_circuit(conv_circuit)
return conv_circuit |
def del_cells(self, name):
        """Implementation of cells deletion.

        Handles both ``del space.name`` where ``name`` is a cells, and
        ``del space.cells['name']``.

        Raises:
            KeyError: If no cells named *name* exists.
        """
        # Reject unknown names up front.
        if name not in self.cells and name not in self.dynamic_spaces:
            raise KeyError("Cells '%s' does not exist" % name)
        if name in self.cells:
            # Regular cells: remove it and propagate the change through
            # inheritance and the space graph.
            removed = self.cells[name]
            self.cells.del_item(name)
            self.inherit()
            self.model.spacegraph.update_subspaces(self)
        else:
            # Dynamically created cells: pop and mark the container dirty.
            removed = self.dynamic_spaces.pop(name)
            self.dynamic_spaces.set_update()
        NullImpl(removed)
constant[Implementation of cells deletion
``del space.name`` where name is a cells, or
``del space.cells['name']``
]
if compare[name[name] in name[self].cells] begin[:]
variable[cells] assign[=] call[name[self].cells][name[name]]
call[name[self].cells.del_item, parameter[name[name]]]
call[name[self].inherit, parameter[]]
call[name[self].model.spacegraph.update_subspaces, parameter[name[self]]]
call[name[NullImpl], parameter[name[cells]]] | keyword[def] identifier[del_cells] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[cells] :
identifier[cells] = identifier[self] . identifier[cells] [ identifier[name] ]
identifier[self] . identifier[cells] . identifier[del_item] ( identifier[name] )
identifier[self] . identifier[inherit] ()
identifier[self] . identifier[model] . identifier[spacegraph] . identifier[update_subspaces] ( identifier[self] )
keyword[elif] identifier[name] keyword[in] identifier[self] . identifier[dynamic_spaces] :
identifier[cells] = identifier[self] . identifier[dynamic_spaces] . identifier[pop] ( identifier[name] )
identifier[self] . identifier[dynamic_spaces] . identifier[set_update] ()
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] % identifier[name] )
identifier[NullImpl] ( identifier[cells] ) | def del_cells(self, name):
"""Implementation of cells deletion
``del space.name`` where name is a cells, or
``del space.cells['name']``
"""
if name in self.cells:
cells = self.cells[name]
self.cells.del_item(name)
self.inherit()
self.model.spacegraph.update_subspaces(self) # depends on [control=['if'], data=['name']]
elif name in self.dynamic_spaces:
cells = self.dynamic_spaces.pop(name)
self.dynamic_spaces.set_update() # depends on [control=['if'], data=['name']]
else:
raise KeyError("Cells '%s' does not exist" % name)
NullImpl(cells) |
def create_module(module, target):
    """Create a module directory structure inside the target directory.

    For a dotted *module* name (e.g. ``"a.b.c"``), creates the nested
    directories under *target*, dropping an ``__init__.py`` into each level
    that does not already have one.

    Returns:
        The relative path of the deepest package directory created.
    """
    cur_path = ''
    for part in module.split('.'):
        cur_path = os.path.join(cur_path, part)
        # Absolute location of this package level under the target tree.
        pkg_dir = os.path.join(target, cur_path)
        if not os.path.isdir(pkg_dir):
            os.mkdir(pkg_dir)
        init_file = os.path.join(pkg_dir, '__init__.py')
        if not os.path.exists(init_file):
            touch(init_file)
    return cur_path
literal[string]
identifier[module_x] = identifier[module] . identifier[split] ( literal[string] )
identifier[cur_path] = literal[string]
keyword[for] identifier[path] keyword[in] identifier[module_x] :
identifier[cur_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[cur_path] , identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] , identifier[cur_path] )):
identifier[os] . identifier[mkdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] , identifier[cur_path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] , identifier[cur_path] , literal[string] )):
identifier[touch] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] , identifier[cur_path] , literal[string] ))
keyword[return] identifier[cur_path] | def create_module(module, target):
""" Create a module directory structure into the target directory. """
module_x = module.split('.')
cur_path = ''
for path in module_x:
cur_path = os.path.join(cur_path, path)
if not os.path.isdir(os.path.join(target, cur_path)):
os.mkdir(os.path.join(target, cur_path)) # depends on [control=['if'], data=[]]
if not os.path.exists(os.path.join(target, cur_path, '__init__.py')):
touch(os.path.join(target, cur_path, '__init__.py')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
return cur_path |
def _load_config(self):
        """Load the workflow stage config from the database.

        Reads the 'workflow_stages' field of this processing block's hash
        entry, parses it from its stored Python-literal string form, and
        returns the entry for this stage's index.
        """
        pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id)
        stages = DB.get_hash_value(pb_key, 'workflow_stages')
        # The value is stored as a Python literal string; literal_eval parses
        # it without executing arbitrary code (unlike eval).
        stages = ast.literal_eval(stages)
        return stages[self._index]
constant[Load the workflow stage config from the database.]
variable[pb_key] assign[=] call[name[SchedulingObject].get_key, parameter[name[PB_KEY], name[self]._pb_id]]
variable[stages] assign[=] call[name[DB].get_hash_value, parameter[name[pb_key], constant[workflow_stages]]]
variable[stages] assign[=] call[name[ast].literal_eval, parameter[name[stages]]]
return[call[name[stages]][name[self]._index]] | keyword[def] identifier[_load_config] ( identifier[self] ):
literal[string]
identifier[pb_key] = identifier[SchedulingObject] . identifier[get_key] ( identifier[PB_KEY] , identifier[self] . identifier[_pb_id] )
identifier[stages] = identifier[DB] . identifier[get_hash_value] ( identifier[pb_key] , literal[string] )
identifier[stages] = identifier[ast] . identifier[literal_eval] ( identifier[stages] )
keyword[return] identifier[stages] [ identifier[self] . identifier[_index] ] | def _load_config(self):
"""Load the workflow stage config from the database."""
pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id)
stages = DB.get_hash_value(pb_key, 'workflow_stages')
stages = ast.literal_eval(stages)
return stages[self._index] |
def connect_post_namespaced_pod_exec(self, name, namespace, **kwargs):  # noqa: E501
        """connect_post_namespaced_pod_exec  # noqa: E501

        connect POST requests to exec of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_pod_exec(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str name: name of the PodExecOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str command: Command is the remote command to execute. argv array. Not executed within a shell.
        :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
        :param bool stderr: Redirect the standard error stream of the pod for this call. Defaults to true.
        :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
        :param bool stdout: Redirect the standard output stream of the pod for this call. Defaults to true.
        :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This convenience wrapper only ever exposes the response body, never
        # the full (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Asynchronous call: hand the request thread straight back.
            return self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs)  # noqa: E501
        # Synchronous call: block until the response data is available.
        return self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs)  # noqa: E501
constant[connect_post_namespaced_pod_exec # noqa: E501
connect POST requests to exec of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_exec(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodExecOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str command: Command is the remote command to execute. argv array. Not executed within a shell.
:param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
:param bool stderr: Redirect the standard error stream of the pod for this call. Defaults to true.
:param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
:param bool stdout: Redirect the standard output stream of the pod for this call. Defaults to true.
:param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
:return: str
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].connect_post_namespaced_pod_exec_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[connect_post_namespaced_pod_exec] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[connect_post_namespaced_pod_exec_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[connect_post_namespaced_pod_exec_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def connect_post_namespaced_pod_exec(self, name, namespace, **kwargs): # noqa: E501
'connect_post_namespaced_pod_exec # noqa: E501\n\n connect POST requests to exec of Pod # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.connect_post_namespaced_pod_exec(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the PodExecOptions (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str command: Command is the remote command to execute. argv array. Not executed within a shell.\n :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.\n :param bool stderr: Redirect the standard error stream of the pod for this call. Defaults to true.\n :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.\n :param bool stdout: Redirect the standard output stream of the pod for this call. Defaults to true.\n :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def clinvar_submission_lines(submission_objs, submission_header):
    """Build the CSV lines of a ClinVar submission file.

    Args:
        submission_objs(list): objects (variants or casedata) to include,
            one per output line.
        submission_header(dict): as in constants CLINVAR_HEADER and
            CASEDATA_HEADER, restricted to the required fields; its keys
            (in order) define the CSV columns.

    Returns:
        list: one comma-joined string per submission object, with each field
        wrapped in double quotes and missing fields rendered as ``""``.
    """
    lines = []
    for obj in submission_objs:
        # One quoted field per header column; absent keys become empty.
        fields = ['"' + obj[key] + '"' if key in obj else '""'
                  for key in submission_header]
        lines.append(','.join(fields))
    return lines
constant[Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only
Returns:
submission_lines(list) a list of strings, each string represents a line of the clinvar csv file to be doenloaded
]
variable[submission_lines] assign[=] list[[]]
for taget[name[submission_obj]] in starred[name[submission_objs]] begin[:]
variable[csv_line] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18fe91930>, <ast.Name object at 0x7da18fe918a0>]]] in starred[call[name[submission_header].items, parameter[]]] begin[:]
if compare[name[header_key] in name[submission_obj]] begin[:]
call[name[csv_line].append, parameter[binary_operation[binary_operation[constant["] + call[name[submission_obj].get, parameter[name[header_key]]]] + constant["]]]]
call[name[submission_lines].append, parameter[call[constant[,].join, parameter[name[csv_line]]]]]
return[name[submission_lines]] | keyword[def] identifier[clinvar_submission_lines] ( identifier[submission_objs] , identifier[submission_header] ):
literal[string]
identifier[submission_lines] =[]
keyword[for] identifier[submission_obj] keyword[in] identifier[submission_objs] :
identifier[csv_line] =[]
keyword[for] identifier[header_key] , identifier[header_value] keyword[in] identifier[submission_header] . identifier[items] ():
keyword[if] identifier[header_key] keyword[in] identifier[submission_obj] :
identifier[csv_line] . identifier[append] ( literal[string] + identifier[submission_obj] . identifier[get] ( identifier[header_key] )+ literal[string] )
keyword[else] :
identifier[csv_line] . identifier[append] ( literal[string] )
identifier[submission_lines] . identifier[append] ( literal[string] . identifier[join] ( identifier[csv_line] ))
keyword[return] identifier[submission_lines] | def clinvar_submission_lines(submission_objs, submission_header):
"""Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only
Returns:
submission_lines(list) a list of strings, each string represents a line of the clinvar csv file to be doenloaded
"""
submission_lines = []
for submission_obj in submission_objs: # Loop over the submission objects. Each of these is a line
csv_line = []
for (header_key, header_value) in submission_header.items(): # header_keys are the same keys as in submission_objs
if header_key in submission_obj: # The field is filled in for this variant/casedata object
csv_line.append('"' + submission_obj.get(header_key) + '"') # depends on [control=['if'], data=['header_key', 'submission_obj']]
else: # Empty field for this this variant/casedata object
csv_line.append('""') # depends on [control=['for'], data=[]]
submission_lines.append(','.join(csv_line)) # depends on [control=['for'], data=['submission_obj']]
return submission_lines |
def check_permission(permission, hidden=True):
    """Check if permission is allowed.

    If the permission check fails the request is aborted.

    :param permission: The permission to check; ``None`` always passes.
    :param hidden: Determine if a 404 error (``True``) or 401/403 error
        (``False``) should be returned if the permission is rejected (i.e.
        hide or reveal the existence of a particular object).
    """
    # Granted (or no permission object at all): nothing to do.
    if permission is None or permission.can():
        return
    if hidden:
        # Pretend the object does not exist.
        abort(404)
    else:
        if current_user.is_authenticated:
            # Known user, insufficient rights.
            abort(403,
                  'You do not have a permission for this action')
        # Anonymous user: ask them to authenticate.
        abort(401)
abort(401) | def function[check_permission, parameter[permission, hidden]]:
constant[Check if permission is allowed.
If permission fails then the connection is aborted.
:param permission: The permission to check.
:param hidden: Determine if a 404 error (``True``) or 401/403 error
(``False``) should be returned if the permission is rejected (i.e.
hide or reveal the existence of a particular object).
]
if <ast.BoolOp object at 0x7da1b1942080> begin[:]
if name[hidden] begin[:]
call[name[abort], parameter[constant[404]]] | keyword[def] identifier[check_permission] ( identifier[permission] , identifier[hidden] = keyword[True] ):
literal[string]
keyword[if] identifier[permission] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[permission] . identifier[can] ():
keyword[if] identifier[hidden] :
identifier[abort] ( literal[int] )
keyword[else] :
keyword[if] identifier[current_user] . identifier[is_authenticated] :
identifier[abort] ( literal[int] ,
literal[string] )
identifier[abort] ( literal[int] ) | def check_permission(permission, hidden=True):
"""Check if permission is allowed.
If permission fails then the connection is aborted.
:param permission: The permission to check.
:param hidden: Determine if a 404 error (``True``) or 401/403 error
(``False``) should be returned if the permission is rejected (i.e.
hide or reveal the existence of a particular object).
"""
if permission is not None and (not permission.can()):
if hidden:
abort(404) # depends on [control=['if'], data=[]]
else:
if current_user.is_authenticated:
abort(403, 'You do not have a permission for this action') # depends on [control=['if'], data=[]]
abort(401) # depends on [control=['if'], data=[]] |
def check(self, context, version=None):
        """
        Check off requirements that are met by name/version.

        :param str|Bump|Requirement context: Either a package name, a
                                             requirement string, or a
                                             :class:`Bump`,
                                             :class:`BumpRequirement`, or
                                             :class:`pkg_resources.Requirement`
                                             instance.
        :param str version: Optional exact version; paired with a plain
                            package name passed in *context*.
        :return: True if any requirement was satisfied by context
        """
        # Textual form of *context*; only set when no exact '==' version is
        # available, and then compared against each stored requirement's str().
        req_str = None
        # Keep an audit trail of every check performed.
        self.checked.append((context, version))
        # A bare requirement string (no explicit version) is parsed so its
        # specs can be inspected below.
        if isinstance(context, str) and not version:
            context = BumpRequirement.parse(context)
        # Normalize *context* into either (name, version) or (name, req_str).
        if isinstance(context, Bump):
            name = context.name
            if context.new_version and context.new_version[0] == '==':
                version = context.new_version[1]
            else:
                req_str = str(context)
        elif isinstance(context, (pkg_resources.Requirement, BumpRequirement)):
            name = context.project_name
            if context.specs and context.specs[0][0] == '==':
                version = context.specs[0][1]
            else:
                req_str = str(context)
        else:
            name = context
        if name in self:
            self.matched_name = True
            for req in self[name]:
                # A requirement is satisfied either by an exact version that
                # falls within it, or by an identical requirement string.
                if req.required and (version and version in req or req_str == str(req)):
                    req.required = False
                    return True
        return False
return False | def function[check, parameter[self, context, version]]:
constant[
Check off requirements that are met by name/version.
:param str|Bump|Requirement context: Either package name, requirement string, :class:`Bump`,
:class:`BumpRequirement`, or
:class:`pkg_resources.Requirement instance
:return: True if any requirement was satisified by context
]
variable[req_str] assign[=] constant[None]
call[name[self].checked.append, parameter[tuple[[<ast.Name object at 0x7da1afe3bc70>, <ast.Name object at 0x7da1afe3ba90>]]]]
if <ast.BoolOp object at 0x7da1afe3b9a0> begin[:]
variable[context] assign[=] call[name[BumpRequirement].parse, parameter[name[context]]]
if call[name[isinstance], parameter[name[context], name[Bump]]] begin[:]
variable[name] assign[=] name[context].name
if <ast.BoolOp object at 0x7da1afe3a200> begin[:]
variable[version] assign[=] call[name[context].new_version][constant[1]]
if compare[name[name] in name[self]] begin[:]
name[self].matched_name assign[=] constant[True]
for taget[name[req]] in starred[call[name[self]][name[name]]] begin[:]
if <ast.BoolOp object at 0x7da1afe3bfd0> begin[:]
name[req].required assign[=] constant[False]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[check] ( identifier[self] , identifier[context] , identifier[version] = keyword[None] ):
literal[string]
identifier[req_str] = keyword[None]
identifier[self] . identifier[checked] . identifier[append] (( identifier[context] , identifier[version] ))
keyword[if] identifier[isinstance] ( identifier[context] , identifier[str] ) keyword[and] keyword[not] identifier[version] :
identifier[context] = identifier[BumpRequirement] . identifier[parse] ( identifier[context] )
keyword[if] identifier[isinstance] ( identifier[context] , identifier[Bump] ):
identifier[name] = identifier[context] . identifier[name]
keyword[if] identifier[context] . identifier[new_version] keyword[and] identifier[context] . identifier[new_version] [ literal[int] ]== literal[string] :
identifier[version] = identifier[context] . identifier[new_version] [ literal[int] ]
keyword[else] :
identifier[req_str] = identifier[str] ( identifier[context] )
keyword[elif] identifier[isinstance] ( identifier[context] ,( identifier[pkg_resources] . identifier[Requirement] , identifier[BumpRequirement] )):
identifier[name] = identifier[context] . identifier[project_name]
keyword[if] identifier[context] . identifier[specs] keyword[and] identifier[context] . identifier[specs] [ literal[int] ][ literal[int] ]== literal[string] :
identifier[version] = identifier[context] . identifier[specs] [ literal[int] ][ literal[int] ]
keyword[else] :
identifier[req_str] = identifier[str] ( identifier[context] )
keyword[else] :
identifier[name] = identifier[context]
keyword[if] identifier[name] keyword[in] identifier[self] :
identifier[self] . identifier[matched_name] = keyword[True]
keyword[for] identifier[req] keyword[in] identifier[self] [ identifier[name] ]:
keyword[if] identifier[req] . identifier[required] keyword[and] ( identifier[version] keyword[and] identifier[version] keyword[in] identifier[req] keyword[or] identifier[req_str] == identifier[str] ( identifier[req] )):
identifier[req] . identifier[required] = keyword[False]
keyword[return] keyword[True]
keyword[return] keyword[False] | def check(self, context, version=None):
"""
Check off requirements that are met by name/version.
:param str|Bump|Requirement context: Either package name, requirement string, :class:`Bump`,
:class:`BumpRequirement`, or
:class:`pkg_resources.Requirement instance
:return: True if any requirement was satisified by context
"""
req_str = None
self.checked.append((context, version))
if isinstance(context, str) and (not version):
context = BumpRequirement.parse(context) # depends on [control=['if'], data=[]]
if isinstance(context, Bump):
name = context.name
if context.new_version and context.new_version[0] == '==':
version = context.new_version[1] # depends on [control=['if'], data=[]]
else:
req_str = str(context) # depends on [control=['if'], data=[]]
elif isinstance(context, (pkg_resources.Requirement, BumpRequirement)):
name = context.project_name
if context.specs and context.specs[0][0] == '==':
version = context.specs[0][1] # depends on [control=['if'], data=[]]
else:
req_str = str(context) # depends on [control=['if'], data=[]]
else:
name = context
if name in self:
self.matched_name = True
for req in self[name]:
if req.required and (version and version in req or req_str == str(req)):
req.required = False
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['req']] # depends on [control=['if'], data=['name', 'self']]
return False |
def get_path(url):
    """
    Return the path portion of *url*, including the querystring when present.

    Args:
        url (str)

    Returns:
        str: e.g. ``"/a/b?x=1"`` for ``"http://host/a/b?x=1"``.
    """
    parts = urlsplit(url)
    if parts.query:
        # Re-attach the querystring after its separator.
        return "{}?{}".format(parts.path, parts.query)
    return parts.path
constant[
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str
]
variable[url] assign[=] call[name[urlsplit], parameter[name[url]]]
variable[path] assign[=] name[url].path
if name[url].query begin[:]
<ast.AugAssign object at 0x7da2047e8100>
return[name[path]] | keyword[def] identifier[get_path] ( identifier[url] ):
literal[string]
identifier[url] = identifier[urlsplit] ( identifier[url] )
identifier[path] = identifier[url] . identifier[path]
keyword[if] identifier[url] . identifier[query] :
identifier[path] += literal[string] . identifier[format] ( identifier[url] . identifier[query] )
keyword[return] identifier[path] | def get_path(url):
"""
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str
"""
url = urlsplit(url)
path = url.path
if url.query:
path += '?{}'.format(url.query) # depends on [control=['if'], data=[]]
return path |
def _parse_mirteFile(path, logger=None):
    """ Open and parse the mirteFile at <path>.

    The parsed result is cached next to the file in msgpack format: when a
    cache file exists and is at least as new as <path>, it is loaded instead
    of re-parsing the YAML.  Writing the cache is best-effort — a permission
    error is logged and ignored; any other IOError propagates.
    """
    l = logging.getLogger('_parse_mirteFile') if logger is None else logger
    cache_path = os.path.join(os.path.dirname(path),
                              CACHE_FILENAME_TEMPLATE % os.path.basename(path))
    # Use the cache only when it is not older than the source file.
    if (os.path.exists(cache_path) and
            os.path.getmtime(cache_path) >= os.path.getmtime(path)):
        # msgpack is a binary format: the cache must be read in binary mode
        # (text mode fails on Python 3).
        with open(cache_path, 'rb') as f:
            return msgpack.unpack(f)
    with open(path) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; consider yaml.safe_load if the mirte
        # files use no custom YAML tags.
        ret = yaml.load(f)
    try:
        # Binary mode to match msgpack's bytes output.
        with open(cache_path, 'wb') as f:
            msgpack.pack(ret, f)
    except IOError as e:
        if e.errno == errno.EACCES:
            # Log the file we actually failed to write: the cache, not the
            # source.
            l.warning('Not allowed to write %s', cache_path)
        else:
            raise
    return ret
constant[ Open and parses the mirteFile at <path>. ]
variable[l] assign[=] <ast.IfExp object at 0x7da1b15b6800>
variable[cache_path] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[path]]], binary_operation[name[CACHE_FILENAME_TEMPLATE] <ast.Mod object at 0x7da2590d6920> call[name[os].path.basename, parameter[name[path]]]]]]
if <ast.BoolOp object at 0x7da1b15b4160> begin[:]
with call[name[open], parameter[name[cache_path]]] begin[:]
return[call[name[msgpack].unpack, parameter[name[f]]]]
with call[name[open], parameter[name[path]]] begin[:]
variable[ret] assign[=] call[name[yaml].load, parameter[name[f]]]
<ast.Try object at 0x7da1b130b8e0>
return[name[ret]] | keyword[def] identifier[_parse_mirteFile] ( identifier[path] , identifier[logger] = keyword[None] ):
literal[string]
identifier[l] = identifier[logging] . identifier[getLogger] ( literal[string] ) keyword[if] identifier[logger] keyword[is] keyword[None] keyword[else] identifier[logger]
identifier[cache_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] ),
identifier[CACHE_FILENAME_TEMPLATE] % identifier[os] . identifier[path] . identifier[basename] ( identifier[path] ))
keyword[if] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[cache_path] ) keyword[and]
identifier[os] . identifier[path] . identifier[getmtime] ( identifier[cache_path] )>= identifier[os] . identifier[path] . identifier[getmtime] ( identifier[path] )):
keyword[with] identifier[open] ( identifier[cache_path] ) keyword[as] identifier[f] :
keyword[return] identifier[msgpack] . identifier[unpack] ( identifier[f] )
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[ret] = identifier[yaml] . identifier[load] ( identifier[f] )
keyword[try] :
keyword[with] identifier[open] ( identifier[cache_path] , literal[string] ) keyword[as] identifier[f] :
identifier[msgpack] . identifier[pack] ( identifier[ret] , identifier[f] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EACCES] :
identifier[l] . identifier[warn] ( literal[string] , identifier[path] )
keyword[else] :
keyword[raise]
keyword[return] identifier[ret] | def _parse_mirteFile(path, logger=None):
""" Open and parses the mirteFile at <path>. """
l = logging.getLogger('_parse_mirteFile') if logger is None else logger
cache_path = os.path.join(os.path.dirname(path), CACHE_FILENAME_TEMPLATE % os.path.basename(path))
if os.path.exists(cache_path) and os.path.getmtime(cache_path) >= os.path.getmtime(path):
with open(cache_path) as f:
return msgpack.unpack(f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
with open(path) as f:
ret = yaml.load(f) # depends on [control=['with'], data=['f']]
try:
with open(cache_path, 'w') as f:
msgpack.pack(ret, f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError as e:
if e.errno == errno.EACCES:
l.warn('Not allowed to write %s', path) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
return ret |
def pkg_blacklist(self):
    """List, add or remove blacklisted packages, driven by ``self.args``.

    Recognised forms (first argument must be ``-b`` / ``--blacklist``):
      * ``<opt> list``            -- print current blacklist
      * ``<opt> <pkgs> --add``    -- add packages to the blacklist
      * ``<opt> ALL --remove``    -- clear the whole blacklist
      * ``<opt> <pkgs> --remove`` -- remove selected packages
    Anything else falls through to the usage message.
    """
    blacklist = BlackList()
    opts = ("-b", "--blacklist")
    add_flag = "--add"
    remove_flag = "--remove"
    args = self.args  # alias: in-place removes below must mutate self.args
    if len(args) == 2 and args[0] in opts and args[1] == "list":
        blacklist.listed()
    elif len(args) > 2 and args[0] in opts and add_flag in args:
        args.remove(add_flag)
        blacklist.add(args[1:])
    elif (len(args) == 3 and args[0] in opts and "ALL" in args and
            remove_flag in args):
        args.remove(remove_flag)
        blacklist.remove(blacklist.get_black())
    elif len(args) > 2 and args[0] in opts and remove_flag in args:
        args.remove(remove_flag)
        blacklist.remove(args[1:])
    else:
        usage("")
constant[Manage blacklist packages
]
variable[blacklist] assign[=] call[name[BlackList], parameter[]]
variable[options] assign[=] list[[<ast.Constant object at 0x7da18f722fb0>, <ast.Constant object at 0x7da18f720250>]]
variable[flag] assign[=] list[[<ast.Constant object at 0x7da18f7230a0>, <ast.Constant object at 0x7da18f7201c0>]]
variable[command] assign[=] list[[<ast.Constant object at 0x7da18f723d30>]]
if <ast.BoolOp object at 0x7da18f723700> begin[:]
call[name[blacklist].listed, parameter[]] | keyword[def] identifier[pkg_blacklist] ( identifier[self] ):
literal[string]
identifier[blacklist] = identifier[BlackList] ()
identifier[options] =[
literal[string] ,
literal[string]
]
identifier[flag] =[
literal[string] ,
literal[string]
]
identifier[command] =[ literal[string] ]
keyword[if] ( identifier[len] ( identifier[self] . identifier[args] )== literal[int] keyword[and] identifier[self] . identifier[args] [ literal[int] ] keyword[in] identifier[options] keyword[and]
identifier[self] . identifier[args] [ literal[int] ]== identifier[command] [ literal[int] ]):
identifier[blacklist] . identifier[listed] ()
keyword[elif] ( identifier[len] ( identifier[self] . identifier[args] )> literal[int] keyword[and] identifier[self] . identifier[args] [ literal[int] ] keyword[in] identifier[options] keyword[and]
identifier[flag] [ literal[int] ] keyword[in] identifier[self] . identifier[args] ):
identifier[self] . identifier[args] . identifier[remove] ( identifier[flag] [ literal[int] ])
identifier[blacklist] . identifier[add] ( identifier[self] . identifier[args] [ literal[int] :])
keyword[elif] ( identifier[len] ( identifier[self] . identifier[args] )== literal[int] keyword[and] identifier[self] . identifier[args] [ literal[int] ] keyword[in] identifier[options] keyword[and]
literal[string] keyword[in] identifier[self] . identifier[args] keyword[and] identifier[flag] [ literal[int] ] keyword[in] identifier[self] . identifier[args] ):
identifier[self] . identifier[args] . identifier[remove] ( identifier[flag] [ literal[int] ])
identifier[blacklist] . identifier[remove] ( identifier[blacklist] . identifier[get_black] ())
keyword[elif] ( identifier[len] ( identifier[self] . identifier[args] )> literal[int] keyword[and] identifier[self] . identifier[args] [ literal[int] ] keyword[in] identifier[options] keyword[and]
identifier[flag] [ literal[int] ] keyword[in] identifier[self] . identifier[args] ):
identifier[self] . identifier[args] . identifier[remove] ( identifier[flag] [ literal[int] ])
identifier[blacklist] . identifier[remove] ( identifier[self] . identifier[args] [ literal[int] :])
keyword[else] :
identifier[usage] ( literal[string] ) | def pkg_blacklist(self):
"""Manage blacklist packages
"""
blacklist = BlackList()
options = ['-b', '--blacklist']
flag = ['--add', '--remove']
command = ['list']
if len(self.args) == 2 and self.args[0] in options and (self.args[1] == command[0]):
blacklist.listed() # depends on [control=['if'], data=[]]
elif len(self.args) > 2 and self.args[0] in options and (flag[0] in self.args):
self.args.remove(flag[0])
blacklist.add(self.args[1:]) # depends on [control=['if'], data=[]]
elif len(self.args) == 3 and self.args[0] in options and ('ALL' in self.args) and (flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(blacklist.get_black()) # depends on [control=['if'], data=[]]
elif len(self.args) > 2 and self.args[0] in options and (flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(self.args[1:]) # depends on [control=['if'], data=[]]
else:
usage('') |
def _list_libraries_cached(self, newer_than_secs=-1):
"""
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
"""
_ = self._conn # Ensures the connection exists and cache is initialized with it.
cache_data = self._cache.get('list_libraries', newer_than_secs)
if cache_data:
logger.debug('Library names are in cache.')
return cache_data
return self._list_libraries() | def function[_list_libraries_cached, parameter[self, newer_than_secs]]:
constant[
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
]
variable[_] assign[=] name[self]._conn
variable[cache_data] assign[=] call[name[self]._cache.get, parameter[constant[list_libraries], name[newer_than_secs]]]
if name[cache_data] begin[:]
call[name[logger].debug, parameter[constant[Library names are in cache.]]]
return[name[cache_data]]
return[call[name[self]._list_libraries, parameter[]]] | keyword[def] identifier[_list_libraries_cached] ( identifier[self] , identifier[newer_than_secs] =- literal[int] ):
literal[string]
identifier[_] = identifier[self] . identifier[_conn]
identifier[cache_data] = identifier[self] . identifier[_cache] . identifier[get] ( literal[string] , identifier[newer_than_secs] )
keyword[if] identifier[cache_data] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[cache_data]
keyword[return] identifier[self] . identifier[_list_libraries] () | def _list_libraries_cached(self, newer_than_secs=-1):
"""
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
"""
_ = self._conn # Ensures the connection exists and cache is initialized with it.
cache_data = self._cache.get('list_libraries', newer_than_secs)
if cache_data:
logger.debug('Library names are in cache.')
return cache_data # depends on [control=['if'], data=[]]
return self._list_libraries() |
def _read_offset_value(self, f, offset, size):
'''
Reads an integer value from file "f" at location "offset".
'''
f.seek(offset, 0)
if (size == 8):
return int.from_bytes(f.read(8), 'big', signed=True)
else:
return int.from_bytes(f.read(4), 'big', signed=True) | def function[_read_offset_value, parameter[self, f, offset, size]]:
constant[
Reads an integer value from file "f" at location "offset".
]
call[name[f].seek, parameter[name[offset], constant[0]]]
if compare[name[size] equal[==] constant[8]] begin[:]
return[call[name[int].from_bytes, parameter[call[name[f].read, parameter[constant[8]]], constant[big]]]] | keyword[def] identifier[_read_offset_value] ( identifier[self] , identifier[f] , identifier[offset] , identifier[size] ):
literal[string]
identifier[f] . identifier[seek] ( identifier[offset] , literal[int] )
keyword[if] ( identifier[size] == literal[int] ):
keyword[return] identifier[int] . identifier[from_bytes] ( identifier[f] . identifier[read] ( literal[int] ), literal[string] , identifier[signed] = keyword[True] )
keyword[else] :
keyword[return] identifier[int] . identifier[from_bytes] ( identifier[f] . identifier[read] ( literal[int] ), literal[string] , identifier[signed] = keyword[True] ) | def _read_offset_value(self, f, offset, size):
"""
Reads an integer value from file "f" at location "offset".
"""
f.seek(offset, 0)
if size == 8:
return int.from_bytes(f.read(8), 'big', signed=True) # depends on [control=['if'], data=[]]
else:
return int.from_bytes(f.read(4), 'big', signed=True) |
def summarize(self, file: Optional[TextIO] = None) -> None:
    """Write a summary of the graph to *file* (stdout when None)."""
    summary = self.summary_str()
    print(summary, file=file)
constant[Print a summary of the graph.]
call[name[print], parameter[call[name[self].summary_str, parameter[]]]] | keyword[def] identifier[summarize] ( identifier[self] , identifier[file] : identifier[Optional] [ identifier[TextIO] ]= keyword[None] )-> keyword[None] :
literal[string]
identifier[print] ( identifier[self] . identifier[summary_str] (), identifier[file] = identifier[file] ) | def summarize(self, file: Optional[TextIO]=None) -> None:
"""Print a summary of the graph."""
print(self.summary_str(), file=file) |
def get_long_docs(*filenames):
    """Build an rst description by concatenating a set of files.

    Parameters
    ----------
    *filenames : str
        Paths of text files to read, in order.

    Returns
    -------
    str
        The file contents joined with blank lines; empty string for no files.
    """
    docs = []
    for filename in filenames:
        # Explicit encoding: rst docs are UTF-8; the previous implicit
        # locale encoding could mis-decode them on some platforms (PEP 597).
        with open(filename, 'r', encoding='utf-8') as f:
            docs.append(f.read())
    return "\n\n".join(docs)
constant[Build rst description from a set of files.]
variable[docs] assign[=] list[[]]
for taget[name[filename]] in starred[name[filenames]] begin[:]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
call[name[docs].append, parameter[call[name[f].read, parameter[]]]]
return[call[constant[
].join, parameter[name[docs]]]] | keyword[def] identifier[get_long_docs] (* identifier[filenames] ):
literal[string]
identifier[docs] =[]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[docs] . identifier[append] ( identifier[f] . identifier[read] ())
keyword[return] literal[string] . identifier[join] ( identifier[docs] ) | def get_long_docs(*filenames):
"""Build rst description from a set of files."""
docs = []
for filename in filenames:
with open(filename, 'r') as f:
docs.append(f.read()) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['filename']]
return '\n\n'.join(docs) |
def _generate_arg_types(coordlist_length, shape_name):
    """Find coordinate types based on shape name and coordlist length.

    Builds the list of coordinate types for a shape, expanding the
    repeatable section of the shape's argument definition as many times
    as the coordinate count requires.

    Parameters
    ----------
    coordlist_length : int
        The number of coordinates or arguments used to define the shape.
    shape_name : str
        One of the names in `pyregion.ds9_shape_defs`.

    Returns
    -------
    arg_types : list
        A list of objects from `pyregion.region_numbers` with a length
        equal to coordlist_length.
    """
    from .ds9_region_parser import ds9_shape_defs
    from .ds9_attr_parser import ds9_shape_in_comment_defs

    if shape_name in ds9_shape_defs:
        shape_def = ds9_shape_defs[shape_name]
    else:
        shape_def = ds9_shape_in_comment_defs[shape_name]

    base_types = shape_def.args_list
    repeat_range = shape_def.args_repeat
    if repeat_range is None:
        return base_types

    # Arguments between indices start..stop repeat to fill the coordlist.
    start, stop = repeat_range
    repeated_len = coordlist_length - (len(base_types) - stop)
    repeat_count = (repeated_len - start) // (stop - start)

    result = list(base_types[:start])
    result.extend(repeat_count * base_types[start:stop])
    result.extend(base_types[stop:])
    return result
constant[Find coordinate types based on shape name and coordlist length
This function returns a list of coordinate types based on which
coordinates can be repeated for a given type of shap
Parameters
----------
coordlist_length : int
The number of coordinates or arguments used to define the shape.
shape_name : str
One of the names in `pyregion.ds9_shape_defs`.
Returns
-------
arg_types : list
A list of objects from `pyregion.region_numbers` with a length equal to
coordlist_length.
]
from relative_module[ds9_region_parser] import module[ds9_shape_defs]
from relative_module[ds9_attr_parser] import module[ds9_shape_in_comment_defs]
if compare[name[shape_name] in name[ds9_shape_defs]] begin[:]
variable[shape_def] assign[=] call[name[ds9_shape_defs]][name[shape_name]]
variable[initial_arg_types] assign[=] name[shape_def].args_list
variable[arg_repeats] assign[=] name[shape_def].args_repeat
if compare[name[arg_repeats] is constant[None]] begin[:]
return[name[initial_arg_types]]
<ast.Tuple object at 0x7da18f7209a0> assign[=] name[arg_repeats]
variable[arg_types] assign[=] call[name[list], parameter[call[name[initial_arg_types]][<ast.Slice object at 0x7da18f722860>]]]
variable[num_of_repeats] assign[=] binary_operation[name[coordlist_length] - binary_operation[call[name[len], parameter[name[initial_arg_types]]] - name[n2]]]
call[name[arg_types].extend, parameter[binary_operation[binary_operation[binary_operation[name[num_of_repeats] - name[n1]] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[name[n2] - name[n1]]] * call[name[initial_arg_types]][<ast.Slice object at 0x7da18f722a70>]]]]
call[name[arg_types].extend, parameter[call[name[initial_arg_types]][<ast.Slice object at 0x7da18f722dd0>]]]
return[name[arg_types]] | keyword[def] identifier[_generate_arg_types] ( identifier[coordlist_length] , identifier[shape_name] ):
literal[string]
keyword[from] . identifier[ds9_region_parser] keyword[import] identifier[ds9_shape_defs]
keyword[from] . identifier[ds9_attr_parser] keyword[import] identifier[ds9_shape_in_comment_defs]
keyword[if] identifier[shape_name] keyword[in] identifier[ds9_shape_defs] :
identifier[shape_def] = identifier[ds9_shape_defs] [ identifier[shape_name] ]
keyword[else] :
identifier[shape_def] = identifier[ds9_shape_in_comment_defs] [ identifier[shape_name] ]
identifier[initial_arg_types] = identifier[shape_def] . identifier[args_list]
identifier[arg_repeats] = identifier[shape_def] . identifier[args_repeat]
keyword[if] identifier[arg_repeats] keyword[is] keyword[None] :
keyword[return] identifier[initial_arg_types]
identifier[n1] , identifier[n2] = identifier[arg_repeats]
identifier[arg_types] = identifier[list] ( identifier[initial_arg_types] [: identifier[n1] ])
identifier[num_of_repeats] = identifier[coordlist_length] -( identifier[len] ( identifier[initial_arg_types] )- identifier[n2] )
identifier[arg_types] . identifier[extend] (( identifier[num_of_repeats] - identifier[n1] )//
( identifier[n2] - identifier[n1] )* identifier[initial_arg_types] [ identifier[n1] : identifier[n2] ])
identifier[arg_types] . identifier[extend] ( identifier[initial_arg_types] [ identifier[n2] :])
keyword[return] identifier[arg_types] | def _generate_arg_types(coordlist_length, shape_name):
"""Find coordinate types based on shape name and coordlist length
This function returns a list of coordinate types based on which
coordinates can be repeated for a given type of shap
Parameters
----------
coordlist_length : int
The number of coordinates or arguments used to define the shape.
shape_name : str
One of the names in `pyregion.ds9_shape_defs`.
Returns
-------
arg_types : list
A list of objects from `pyregion.region_numbers` with a length equal to
coordlist_length.
"""
from .ds9_region_parser import ds9_shape_defs
from .ds9_attr_parser import ds9_shape_in_comment_defs
if shape_name in ds9_shape_defs:
shape_def = ds9_shape_defs[shape_name] # depends on [control=['if'], data=['shape_name', 'ds9_shape_defs']]
else:
shape_def = ds9_shape_in_comment_defs[shape_name]
initial_arg_types = shape_def.args_list
arg_repeats = shape_def.args_repeat
if arg_repeats is None:
return initial_arg_types # depends on [control=['if'], data=[]]
# repeat args between n1 and n2
(n1, n2) = arg_repeats
arg_types = list(initial_arg_types[:n1])
num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
arg_types.extend((num_of_repeats - n1) // (n2 - n1) * initial_arg_types[n1:n2])
arg_types.extend(initial_arg_types[n2:])
return arg_types |
def monitor_counters(mc, output, counters, detailed, f):
    """Monitor the counters on a specified machine, writing a CSV snapshot
    of the counter deltas to *output* every time the generator 'f' yields.

    When *detailed* is set, one row per (x, y) chip position is emitted;
    otherwise counters are summed machine-wide into a single row.
    """
    # CSV header: per-chip columns only in detailed mode.
    prefix = "x,y," if detailed else ""
    output.write("time,{}{}\n".format(prefix, ",".join(counters)))

    system_info = mc.get_system_info()
    # Baseline sample: subsequent rows report changes relative to this.
    previous = sample_counters(mc, system_info)
    start = time.time()

    for _ in f():
        current = sample_counters(mc, system_info)
        delta = deltas(previous, current)
        previous = current
        elapsed = time.time() - start

        if detailed:
            for x, y in sorted(system_info):
                values = ",".join(str(getattr(delta[(x, y)], c))
                                  for c in counters)
                output.write("{:0.1f},{},{},{}\n".format(elapsed, x, y,
                                                         values))
        else:
            totals = [0] * len(counters)
            for xy in sorted(system_info):
                for index, counter in enumerate(counters):
                    totals[index] += getattr(delta[xy], counter)
            output.write("{:0.1f},{}\n".format(
                elapsed, ",".join(map(str, totals))))
constant[Monitor the counters on a specified machine, taking a snap-shot every
time the generator 'f' yields.]
call[name[output].write, parameter[call[constant[time,{}{}
].format, parameter[<ast.IfExp object at 0x7da1b1971e10>, call[constant[,].join, parameter[name[counters]]]]]]]
variable[system_info] assign[=] call[name[mc].get_system_info, parameter[]]
variable[last_counter_values] assign[=] call[name[sample_counters], parameter[name[mc], name[system_info]]]
variable[start_time] assign[=] call[name[time].time, parameter[]]
for taget[name[_]] in starred[call[name[f], parameter[]]] begin[:]
variable[counter_values] assign[=] call[name[sample_counters], parameter[name[mc], name[system_info]]]
variable[delta] assign[=] call[name[deltas], parameter[name[last_counter_values], name[counter_values]]]
variable[last_counter_values] assign[=] name[counter_values]
variable[now] assign[=] binary_operation[call[name[time].time, parameter[]] - name[start_time]]
if name[detailed] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1972290>, <ast.Name object at 0x7da1b1971900>]]] in starred[call[name[sorted], parameter[name[system_info]]]] begin[:]
call[name[output].write, parameter[call[constant[{:0.1f},{},{},{}
].format, parameter[name[now], name[x], name[y], call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da1b1970e50>]]]]]] | keyword[def] identifier[monitor_counters] ( identifier[mc] , identifier[output] , identifier[counters] , identifier[detailed] , identifier[f] ):
literal[string]
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( literal[string] keyword[if] identifier[detailed] keyword[else] literal[string] ,
literal[string] . identifier[join] ( identifier[counters] )))
identifier[system_info] = identifier[mc] . identifier[get_system_info] ()
identifier[last_counter_values] = identifier[sample_counters] ( identifier[mc] , identifier[system_info] )
identifier[start_time] = identifier[time] . identifier[time] ()
keyword[for] identifier[_] keyword[in] identifier[f] ():
identifier[counter_values] = identifier[sample_counters] ( identifier[mc] , identifier[system_info] )
identifier[delta] = identifier[deltas] ( identifier[last_counter_values] , identifier[counter_values] )
identifier[last_counter_values] = identifier[counter_values]
identifier[now] = identifier[time] . identifier[time] ()- identifier[start_time]
keyword[if] identifier[detailed] :
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[sorted] ( identifier[system_info] ):
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[now] , identifier[x] , identifier[y] ,
literal[string] . identifier[join] ( identifier[str] ( identifier[getattr] ( identifier[delta] [( identifier[x] , identifier[y] )], identifier[c] ))
keyword[for] identifier[c] keyword[in] identifier[counters] )))
keyword[else] :
identifier[totals] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[counters] ]
keyword[for] identifier[xy] keyword[in] identifier[sorted] ( identifier[system_info] ):
keyword[for] identifier[i] , identifier[counter] keyword[in] identifier[enumerate] ( identifier[counters] ):
identifier[totals] [ identifier[i] ]+= identifier[getattr] ( identifier[delta] [ identifier[xy] ], identifier[counter] )
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[now] , literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[totals] )))) | def monitor_counters(mc, output, counters, detailed, f):
"""Monitor the counters on a specified machine, taking a snap-shot every
time the generator 'f' yields."""
# Print CSV header
output.write('time,{}{}\n'.format('x,y,' if detailed else '', ','.join(counters)))
system_info = mc.get_system_info()
# Make an initial sample of the counters
last_counter_values = sample_counters(mc, system_info)
start_time = time.time()
for _ in f():
# Snapshot the change in counter values
counter_values = sample_counters(mc, system_info)
delta = deltas(last_counter_values, counter_values)
last_counter_values = counter_values
now = time.time() - start_time
# Output the changes
if detailed:
for (x, y) in sorted(system_info):
output.write('{:0.1f},{},{},{}\n'.format(now, x, y, ','.join((str(getattr(delta[x, y], c)) for c in counters)))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
totals = [0 for _ in counters]
for xy in sorted(system_info):
for (i, counter) in enumerate(counters):
totals[i] += getattr(delta[xy], counter) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['xy']]
output.write('{:0.1f},{}\n'.format(now, ','.join(map(str, totals)))) # depends on [control=['for'], data=['_']] |
def chdir(self,
          path,
          timeout=shutit_global.shutit_global_object.default_timeout,
          note=None,
          loglevel=logging.DEBUG):
    """Change the working directory; the mechanism depends on whether the
    delivery mode is bash/dockerfile (send a cd to the session) or docker
    (chdir in this process).
    @param path:    Path to change directory to.
    @param timeout: Timeout on response
    @param note:    See send()
    """
    shutit = self.shutit
    shutit.handle_note(note, 'Changing to path: ' + path)
    shutit.log('Changing directory to path: "' + path + '"',
               level=logging.DEBUG)
    delivery = shutit.build['delivery']
    if delivery in ('bash', 'dockerfile'):
        spec = ShutItSendSpec(self,
                              send=' command cd "' + path + '"',
                              timeout=timeout,
                              echo=False,
                              loglevel=loglevel)
        self.send(spec)
    elif delivery in ('docker',):
        os.chdir(path)
    else:
        shutit.fail('chdir not supported for delivery method: ' + str(delivery))  # pragma: no cover
    shutit.handle_note_after(note=note)
    return True
constant[How to change directory will depend on whether we are in delivery mode bash or docker.
@param path: Path to send file to.
@param timeout: Timeout on response
@param note: See send()
]
variable[shutit] assign[=] name[self].shutit
call[name[shutit].handle_note, parameter[name[note], binary_operation[constant[Changing to path: ] + name[path]]]]
call[name[shutit].log, parameter[binary_operation[binary_operation[constant[Changing directory to path: "] + name[path]] + constant["]]]]
if compare[call[name[shutit].build][constant[delivery]] in tuple[[<ast.Constant object at 0x7da18f09d2a0>, <ast.Constant object at 0x7da18f09ecb0>]]] begin[:]
call[name[self].send, parameter[call[name[ShutItSendSpec], parameter[name[self]]]]]
call[name[shutit].handle_note_after, parameter[]]
return[constant[True]] | keyword[def] identifier[chdir] ( identifier[self] ,
identifier[path] ,
identifier[timeout] = identifier[shutit_global] . identifier[shutit_global_object] . identifier[default_timeout] ,
identifier[note] = keyword[None] ,
identifier[loglevel] = identifier[logging] . identifier[DEBUG] ):
literal[string]
identifier[shutit] = identifier[self] . identifier[shutit]
identifier[shutit] . identifier[handle_note] ( identifier[note] , literal[string] + identifier[path] )
identifier[shutit] . identifier[log] ( literal[string] + identifier[path] + literal[string] , identifier[level] = identifier[logging] . identifier[DEBUG] )
keyword[if] identifier[shutit] . identifier[build] [ literal[string] ] keyword[in] ( literal[string] , literal[string] ):
identifier[self] . identifier[send] ( identifier[ShutItSendSpec] ( identifier[self] ,
identifier[send] = literal[string] + identifier[path] + literal[string] ,
identifier[timeout] = identifier[timeout] ,
identifier[echo] = keyword[False] ,
identifier[loglevel] = identifier[loglevel] ))
keyword[elif] identifier[shutit] . identifier[build] [ literal[string] ] keyword[in] ( literal[string] ,):
identifier[os] . identifier[chdir] ( identifier[path] )
keyword[else] :
identifier[shutit] . identifier[fail] ( literal[string] + identifier[str] ( identifier[shutit] . identifier[build] [ literal[string] ]))
identifier[shutit] . identifier[handle_note_after] ( identifier[note] = identifier[note] )
keyword[return] keyword[True] | def chdir(self, path, timeout=shutit_global.shutit_global_object.default_timeout, note=None, loglevel=logging.DEBUG):
"""How to change directory will depend on whether we are in delivery mode bash or docker.
@param path: Path to send file to.
@param timeout: Timeout on response
@param note: See send()
"""
shutit = self.shutit
shutit.handle_note(note, 'Changing to path: ' + path)
shutit.log('Changing directory to path: "' + path + '"', level=logging.DEBUG)
if shutit.build['delivery'] in ('bash', 'dockerfile'):
self.send(ShutItSendSpec(self, send=' command cd "' + path + '"', timeout=timeout, echo=False, loglevel=loglevel)) # depends on [control=['if'], data=[]]
elif shutit.build['delivery'] in ('docker',):
os.chdir(path) # depends on [control=['if'], data=[]]
else:
shutit.fail('chdir not supported for delivery method: ' + str(shutit.build['delivery'])) # pragma: no cover
shutit.handle_note_after(note=note)
return True |
def get_next_run(self):
    """
    Return the next (id, config, budget) triple to run, or None.

    Called from HB_master; don't call this from your script.
    None is returned when this SH run is finished, or when pending jobs
    must complete before the next stage can start. If the current stage
    still has unfilled slots (never the case in vanilla SH), a fresh
    configuration is sampled and scheduled.
    """
    if self.is_finished:
        return None
    # Hand out the first queued configuration, if any.
    for config_id, datum in self.data.items():
        if datum.status != 'QUEUED':
            continue
        assert datum.budget == self.budgets[self.stage], 'Configuration budget does not align with current stage!'
        datum.status = 'RUNNING'
        self.num_running += 1
        return (config_id, datum.config, datum.budget)
    # Fill any remaining slot in the current stage and retry.
    if self.actual_num_configs[self.stage] < self.num_configs[self.stage]:
        self.add_configuration()
        return self.get_next_run()
    # Nothing queued and nothing running: the stage is complete.
    if self.num_running == 0:
        self.process_results()
        return self.get_next_run()
    return None
constant[
function to return the next configuration and budget to run.
This function is called from HB_master, don't call this from
your script.
It returns None if this run of SH is finished or there are
pending jobs that need to finish to progress to the next stage.
If there are empty slots to be filled in the current SH stage
(which never happens in the original SH version), a new
configuration will be sampled and scheduled to run next.
]
if name[self].is_finished begin[:]
return[constant[None]]
for taget[tuple[[<ast.Name object at 0x7da1b194c610>, <ast.Name object at 0x7da1b194c790>]]] in starred[call[name[self].data.items, parameter[]]] begin[:]
if compare[name[v].status equal[==] constant[QUEUED]] begin[:]
assert[compare[name[v].budget equal[==] call[name[self].budgets][name[self].stage]]]
name[v].status assign[=] constant[RUNNING]
<ast.AugAssign object at 0x7da1b194c820>
return[tuple[[<ast.Name object at 0x7da1b194c9a0>, <ast.Attribute object at 0x7da1b194dde0>, <ast.Attribute object at 0x7da1b194d2a0>]]]
if compare[call[name[self].actual_num_configs][name[self].stage] less[<] call[name[self].num_configs][name[self].stage]] begin[:]
call[name[self].add_configuration, parameter[]]
return[call[name[self].get_next_run, parameter[]]]
if compare[name[self].num_running equal[==] constant[0]] begin[:]
call[name[self].process_results, parameter[]]
return[call[name[self].get_next_run, parameter[]]]
return[constant[None]] | keyword[def] identifier[get_next_run] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_finished] :
keyword[return] ( keyword[None] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[data] . identifier[items] ():
keyword[if] identifier[v] . identifier[status] == literal[string] :
keyword[assert] identifier[v] . identifier[budget] == identifier[self] . identifier[budgets] [ identifier[self] . identifier[stage] ], literal[string]
identifier[v] . identifier[status] = literal[string]
identifier[self] . identifier[num_running] += literal[int]
keyword[return] ( identifier[k] , identifier[v] . identifier[config] , identifier[v] . identifier[budget] )
keyword[if] ( identifier[self] . identifier[actual_num_configs] [ identifier[self] . identifier[stage] ]< identifier[self] . identifier[num_configs] [ identifier[self] . identifier[stage] ]):
identifier[self] . identifier[add_configuration] ()
keyword[return] ( identifier[self] . identifier[get_next_run] ())
keyword[if] identifier[self] . identifier[num_running] == literal[int] :
identifier[self] . identifier[process_results] ()
keyword[return] ( identifier[self] . identifier[get_next_run] ())
keyword[return] ( keyword[None] ) | def get_next_run(self):
"""
function to return the next configuration and budget to run.
This function is called from HB_master, don't call this from
your script.
It returns None if this run of SH is finished or there are
pending jobs that need to finish to progress to the next stage.
If there are empty slots to be filled in the current SH stage
(which never happens in the original SH version), a new
configuration will be sampled and scheduled to run next.
"""
if self.is_finished:
return None # depends on [control=['if'], data=[]]
for (k, v) in self.data.items():
if v.status == 'QUEUED':
assert v.budget == self.budgets[self.stage], 'Configuration budget does not align with current stage!'
v.status = 'RUNNING'
self.num_running += 1
return (k, v.config, v.budget) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # check if there are still slots to fill in the current stage and return that
if self.actual_num_configs[self.stage] < self.num_configs[self.stage]:
self.add_configuration()
return self.get_next_run() # depends on [control=['if'], data=[]]
if self.num_running == 0: # at this point a stage is completed
self.process_results()
return self.get_next_run() # depends on [control=['if'], data=[]]
return None |
def type(self):
    """Certificate type.
    :return: The type of the certificate.
    :rtype: CertificateType
    """
    # Developer device mode always implies a developer certificate.
    if self._device_mode == 1:
        return CertificateType.developer
    if self._type == CertificateType.developer:
        return CertificateType.developer
    if self._type == CertificateType.bootstrap:
        return CertificateType.bootstrap
    return CertificateType.lwm2m
constant[Certificate type.
:return: The type of the certificate.
:rtype: CertificateType
]
if <ast.BoolOp object at 0x7da1b04b0190> begin[:]
return[name[CertificateType].developer] | keyword[def] identifier[type] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_device_mode] == literal[int] keyword[or] identifier[self] . identifier[_type] == identifier[CertificateType] . identifier[developer] :
keyword[return] identifier[CertificateType] . identifier[developer]
keyword[elif] identifier[self] . identifier[_type] == identifier[CertificateType] . identifier[bootstrap] :
keyword[return] identifier[CertificateType] . identifier[bootstrap]
keyword[else] :
keyword[return] identifier[CertificateType] . identifier[lwm2m] | def type(self):
"""Certificate type.
:return: The type of the certificate.
:rtype: CertificateType
"""
if self._device_mode == 1 or self._type == CertificateType.developer:
return CertificateType.developer # depends on [control=['if'], data=[]]
elif self._type == CertificateType.bootstrap:
return CertificateType.bootstrap # depends on [control=['if'], data=[]]
else:
return CertificateType.lwm2m |
def get_operation_cost(self, up, low):
    """Return the cost of the elementary transduction up->low.

    Parameters
    ----------
    up, low : string
        The two sides of the elementary transduction.

    Returns
    -------
    cost : float
        Cost of the elementary transduction up->low, or np.inf when no
        such elementary transduction exists.
    """
    costs_for_up = self.operation_costs.get(up)
    if costs_for_up is None:
        # Unknown upper element: no transduction is possible at all.
        return np.inf
    return costs_for_up.get(low, np.inf)
constant[
Возвращает стоимость элементарной трансдукции up->low
или np.inf, если такой элементарной трансдукции нет
Аргументы:
----------
up, low : string
элементы элементарной трансдукции
Возвращает:
-----------
cost : float
стоимость элементарной трансдукции up->low
(np.inf, если такая трансдукция отсутствует)
]
variable[up_costs] assign[=] call[name[self].operation_costs.get, parameter[name[up], constant[None]]]
if compare[name[up_costs] is constant[None]] begin[:]
return[name[np].inf]
variable[cost] assign[=] call[name[up_costs].get, parameter[name[low], name[np].inf]]
return[name[cost]] | keyword[def] identifier[get_operation_cost] ( identifier[self] , identifier[up] , identifier[low] ):
literal[string]
identifier[up_costs] = identifier[self] . identifier[operation_costs] . identifier[get] ( identifier[up] , keyword[None] )
keyword[if] identifier[up_costs] keyword[is] keyword[None] :
keyword[return] identifier[np] . identifier[inf]
identifier[cost] = identifier[up_costs] . identifier[get] ( identifier[low] , identifier[np] . identifier[inf] )
keyword[return] identifier[cost] | def get_operation_cost(self, up, low):
"""
Возвращает стоимость элементарной трансдукции up->low
или np.inf, если такой элементарной трансдукции нет
Аргументы:
----------
up, low : string
элементы элементарной трансдукции
Возвращает:
-----------
cost : float
стоимость элементарной трансдукции up->low
(np.inf, если такая трансдукция отсутствует)
"""
up_costs = self.operation_costs.get(up, None)
if up_costs is None:
return np.inf # depends on [control=['if'], data=[]]
cost = up_costs.get(low, np.inf)
return cost |
def start_file_logger(filename, name='database_manager', level=logging.DEBUG, format_string=None):
    """Configure and return a named logger that writes to a file.

    Also stores the logger in the module-level ``logger`` variable.

    Parameters
    ----------
    filename: string
        Name of the file to write logs to. Required.
    name: string
        Logger name. Default="database_manager".
    level: logging.LEVEL
        Set the logging level. Default=logging.DEBUG
    format_string: string
        Format string for log records; a standard timestamped format is
        used when None.

    Returns
    -------
    logging.Logger
        The configured logger.
    """
    if format_string is None:
        format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
    # NOTE(review): every call appends a fresh FileHandler to the named
    # logger; calling this twice with the same name duplicates log lines —
    # confirm callers invoke it only once per logger name.
    global logger
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
constant[Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="parsl.executors.interchange"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
- format_string (string): Set the format string
format_string: string
Format string to use.
Returns
-------
None.
]
if compare[name[format_string] is constant[None]] begin[:]
variable[format_string] assign[=] constant[%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s]
<ast.Global object at 0x7da1b0198940>
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[name]]]
call[name[logger].setLevel, parameter[name[level]]]
variable[handler] assign[=] call[name[logging].FileHandler, parameter[name[filename]]]
call[name[handler].setLevel, parameter[name[level]]]
variable[formatter] assign[=] call[name[logging].Formatter, parameter[name[format_string]]]
call[name[handler].setFormatter, parameter[name[formatter]]]
call[name[logger].addHandler, parameter[name[handler]]]
return[name[logger]] | keyword[def] identifier[start_file_logger] ( identifier[filename] , identifier[name] = literal[string] , identifier[level] = identifier[logging] . identifier[DEBUG] , identifier[format_string] = keyword[None] ):
literal[string]
keyword[if] identifier[format_string] keyword[is] keyword[None] :
identifier[format_string] = literal[string]
keyword[global] identifier[logger]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[name] )
identifier[logger] . identifier[setLevel] ( identifier[level] )
identifier[handler] = identifier[logging] . identifier[FileHandler] ( identifier[filename] )
identifier[handler] . identifier[setLevel] ( identifier[level] )
identifier[formatter] = identifier[logging] . identifier[Formatter] ( identifier[format_string] , identifier[datefmt] = literal[string] )
identifier[handler] . identifier[setFormatter] ( identifier[formatter] )
identifier[logger] . identifier[addHandler] ( identifier[handler] )
keyword[return] identifier[logger] | def start_file_logger(filename, name='database_manager', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="parsl.executors.interchange"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
- format_string (string): Set the format string
format_string: string
Format string to use.
Returns
-------
None.
"""
if format_string is None:
format_string = '%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s' # depends on [control=['if'], data=['format_string']]
global logger
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger |
def rename_rater(self, name, new_name):
    """Rename every rater element called *name* to *new_name*, then save."""
    for rater_elem in self.root.iterfind('rater'):
        if rater_elem.get('name') != name:
            continue
        rater_elem.set('name', new_name)
    self.save()
constant[Rename event type.]
for taget[name[rater]] in starred[call[name[self].root.iterfind, parameter[constant[rater]]]] begin[:]
if compare[call[name[rater].get, parameter[constant[name]]] equal[==] name[name]] begin[:]
call[name[rater].set, parameter[constant[name], name[new_name]]]
call[name[self].save, parameter[]] | keyword[def] identifier[rename_rater] ( identifier[self] , identifier[name] , identifier[new_name] ):
literal[string]
keyword[for] identifier[rater] keyword[in] identifier[self] . identifier[root] . identifier[iterfind] ( literal[string] ):
keyword[if] identifier[rater] . identifier[get] ( literal[string] )== identifier[name] :
identifier[rater] . identifier[set] ( literal[string] , identifier[new_name] )
identifier[self] . identifier[save] () | def rename_rater(self, name, new_name):
"""Rename event type."""
for rater in self.root.iterfind('rater'):
if rater.get('name') == name:
rater.set('name', new_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rater']]
self.save() |
def divide(elements, by, translate=False, sep=' '):
    """Group *elements* into the bins defined by *by*.

    All elements are grouped into N bins, where N is the number of
    elements in the *by* list.

    Parameters
    ----------
    elements: list of dict
        Elements to be grouped into bins.
    by: list of dict
        Elements defining the bins.
    translate: bool (default: False)
        When dividing, also translate start and end positions of elements.
    sep: str (default ' ')
        Text separator for multispans; required to tag correct start and
        end positions of elements.
    """
    bin_spans = list(map(spans, by))
    return divide_by_spans(elements, bin_spans, translate=translate, sep=sep)
constant[Divide lists `elements` and `by`.
All elements are grouped into N bins, where N denotes the elements in `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
]
variable[outer_spans] assign[=] <ast.ListComp object at 0x7da18f09e800>
return[call[name[divide_by_spans], parameter[name[elements], name[outer_spans]]]] | keyword[def] identifier[divide] ( identifier[elements] , identifier[by] , identifier[translate] = keyword[False] , identifier[sep] = literal[string] ):
literal[string]
identifier[outer_spans] =[ identifier[spans] ( identifier[elem] ) keyword[for] identifier[elem] keyword[in] identifier[by] ]
keyword[return] identifier[divide_by_spans] ( identifier[elements] , identifier[outer_spans] , identifier[translate] = identifier[translate] , identifier[sep] = identifier[sep] ) | def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
All elements are grouped into N bins, where N denotes the elements in `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
"""
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep) |
def get(self, attr):
    """
    Get the attribute dict given the attribute set by the symbol.

    Parameters
    ----------
    attr : dict of string to string
        The attribute passed in by user during symbol creation.

    Returns
    -------
    attr : dict of string to string
        Updated attributes to add other scope related attributes.
    """
    if not self._attr:
        # No scope attributes recorded: hand back the caller's dict,
        # or an empty dict when none was given.
        return attr if attr else {}
    merged = self._attr.copy()
    if attr:
        # Caller-supplied attributes take precedence over scope ones.
        merged.update(attr)
    return merged
constant[
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
]
if name[self]._attr begin[:]
variable[ret] assign[=] call[name[self]._attr.copy, parameter[]]
if name[attr] begin[:]
call[name[ret].update, parameter[name[attr]]]
return[name[ret]] | keyword[def] identifier[get] ( identifier[self] , identifier[attr] ):
literal[string]
keyword[if] identifier[self] . identifier[_attr] :
identifier[ret] = identifier[self] . identifier[_attr] . identifier[copy] ()
keyword[if] identifier[attr] :
identifier[ret] . identifier[update] ( identifier[attr] )
keyword[return] identifier[ret]
keyword[else] :
keyword[return] identifier[attr] keyword[if] identifier[attr] keyword[else] {} | def get(self, attr):
"""
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
"""
if self._attr:
ret = self._attr.copy()
if attr:
ret.update(attr) # depends on [control=['if'], data=[]]
return ret # depends on [control=['if'], data=[]]
else:
return attr if attr else {} |
def Size(self):
    """
    Get the total size in bytes of the object.

    Returns:
        int: size when there is at least one hash; implicitly None for an
        empty hash list (preserved original behavior).
    """
    if len(self.Hashes) > 0:
        hashes = self.Hashes
        if not isinstance(hashes[0], UInt256):
            # Hashes arrived as hex strings; convert them to UInt256 first.
            hashes = [UInt256(data=binascii.unhexlify(h)) for h in hashes]
        # Fix: the original always returned GetVarSize(corrected_hashes),
        # but `corrected_hashes` was only bound in the conversion branch,
        # so already-UInt256 hashes raised NameError here.
        return s.uint8 + GetVarSize(hashes)
constant[
Get the total size in bytes of the object.
Returns:
int: size.
]
if compare[call[name[len], parameter[name[self].Hashes]] greater[>] constant[0]] begin[:]
if <ast.UnaryOp object at 0x7da20e9b28f0> begin[:]
variable[corrected_hashes] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20e9b0070>, name[self].Hashes]]]]
return[binary_operation[name[s].uint8 + call[name[GetVarSize], parameter[name[corrected_hashes]]]]] | keyword[def] identifier[Size] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[Hashes] )> literal[int] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[Hashes] [ literal[int] ], identifier[UInt256] ):
identifier[corrected_hashes] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[i] : identifier[UInt256] ( identifier[data] = identifier[binascii] . identifier[unhexlify] ( identifier[i] )), identifier[self] . identifier[Hashes] ))
keyword[return] identifier[s] . identifier[uint8] + identifier[GetVarSize] ( identifier[corrected_hashes] ) | def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
if len(self.Hashes) > 0:
if not isinstance(self.Hashes[0], UInt256):
corrected_hashes = list(map(lambda i: UInt256(data=binascii.unhexlify(i)), self.Hashes)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return s.uint8 + GetVarSize(corrected_hashes) |
def postprocess(trun):
    """Perform postprocessing of the given test run."""
    # Collect (label, success) pairs for the run, each suite, and each case.
    plog = [("trun", process_trun(trun))]
    for tsuite in trun["testsuites"]:
        plog.append(("tsuite", process_tsuite(tsuite)))
        for tcase in tsuite["testcases"]:
            plog.append(("tcase", process_tcase(tcase)))
    for task, ok in plog:
        if not ok:
            cij.err("rprtr::postprocess: FAILED for %r" % task)
    # Return the number of successful processing steps.
    return sum(ok for _, ok in plog)
constant[Perform postprocessing of the given test run]
variable[plog] assign[=] list[[]]
call[name[plog].append, parameter[tuple[[<ast.Constant object at 0x7da1b0192530>, <ast.Call object at 0x7da1b0192560>]]]]
for taget[name[tsuite]] in starred[call[name[trun]][constant[testsuites]]] begin[:]
call[name[plog].append, parameter[tuple[[<ast.Constant object at 0x7da1b0192800>, <ast.Call object at 0x7da1b0191780>]]]]
for taget[name[tcase]] in starred[call[name[tsuite]][constant[testcases]]] begin[:]
call[name[plog].append, parameter[tuple[[<ast.Constant object at 0x7da1b0190af0>, <ast.Call object at 0x7da1b0190be0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0190a90>, <ast.Name object at 0x7da1b0190cd0>]]] in starred[name[plog]] begin[:]
if <ast.UnaryOp object at 0x7da1b0190c40> begin[:]
call[name[cij].err, parameter[binary_operation[constant[rprtr::postprocess: FAILED for %r] <ast.Mod object at 0x7da2590d6920> name[task]]]]
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b01e6020>]]] | keyword[def] identifier[postprocess] ( identifier[trun] ):
literal[string]
identifier[plog] =[]
identifier[plog] . identifier[append] (( literal[string] , identifier[process_trun] ( identifier[trun] )))
keyword[for] identifier[tsuite] keyword[in] identifier[trun] [ literal[string] ]:
identifier[plog] . identifier[append] (( literal[string] , identifier[process_tsuite] ( identifier[tsuite] )))
keyword[for] identifier[tcase] keyword[in] identifier[tsuite] [ literal[string] ]:
identifier[plog] . identifier[append] (( literal[string] , identifier[process_tcase] ( identifier[tcase] )))
keyword[for] identifier[task] , identifier[success] keyword[in] identifier[plog] :
keyword[if] keyword[not] identifier[success] :
identifier[cij] . identifier[err] ( literal[string] % identifier[task] )
keyword[return] identifier[sum] (( identifier[success] keyword[for] identifier[task] , identifier[success] keyword[in] identifier[plog] )) | def postprocess(trun):
"""Perform postprocessing of the given test run"""
plog = []
plog.append(('trun', process_trun(trun)))
for tsuite in trun['testsuites']:
plog.append(('tsuite', process_tsuite(tsuite)))
for tcase in tsuite['testcases']:
plog.append(('tcase', process_tcase(tcase))) # depends on [control=['for'], data=['tcase']] # depends on [control=['for'], data=['tsuite']]
for (task, success) in plog:
if not success:
cij.err('rprtr::postprocess: FAILED for %r' % task) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return sum((success for (task, success) in plog)) |
def _tpl_possibilities(self):
    """
    Construct a list of possible paths to templates.
    """
    # The literal template path comes first, then per-directory candidates
    # with the ".tpl" and ".py" extensions, in directory order.
    candidates = [os.path.realpath(self.tpl)]
    for directory in self.tpl_dirs:
        for ext in ("tpl", "py"):
            candidate = os.path.join(directory, "{0}.{1}".format(self.tpl, ext))
            candidates.append(os.path.realpath(candidate))
    return candidates
constant[
Construct a list of possible paths to templates.
]
variable[tpl_possibilities] assign[=] list[[<ast.Call object at 0x7da1b1d5ca60>]]
for taget[name[tpl_dir]] in starred[name[self].tpl_dirs] begin[:]
call[name[tpl_possibilities].append, parameter[call[name[os].path.realpath, parameter[call[name[os].path.join, parameter[name[tpl_dir], call[constant[{0}.tpl].format, parameter[name[self].tpl]]]]]]]]
call[name[tpl_possibilities].append, parameter[call[name[os].path.realpath, parameter[call[name[os].path.join, parameter[name[tpl_dir], call[constant[{0}.py].format, parameter[name[self].tpl]]]]]]]]
return[name[tpl_possibilities]] | keyword[def] identifier[_tpl_possibilities] ( identifier[self] ):
literal[string]
identifier[tpl_possibilities] =[
identifier[os] . identifier[path] . identifier[realpath] ( identifier[self] . identifier[tpl] )
]
keyword[for] identifier[tpl_dir] keyword[in] identifier[self] . identifier[tpl_dirs] :
identifier[tpl_possibilities] . identifier[append] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tpl_dir] , literal[string] . identifier[format] ( identifier[self] . identifier[tpl] ))))
identifier[tpl_possibilities] . identifier[append] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tpl_dir] , literal[string] . identifier[format] ( identifier[self] . identifier[tpl] ))))
keyword[return] identifier[tpl_possibilities] | def _tpl_possibilities(self):
"""
Construct a list of possible paths to templates.
"""
tpl_possibilities = [os.path.realpath(self.tpl)]
for tpl_dir in self.tpl_dirs:
tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, '{0}.tpl'.format(self.tpl))))
tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, '{0}.py'.format(self.tpl)))) # depends on [control=['for'], data=['tpl_dir']]
return tpl_possibilities |
def RGB(self, val):
    """Set the color using an Nx3 array of RGB uint8 values"""
    # Convert 0-255 integer channels to normalized float32 in [0, 1].
    channels = np.atleast_1d(val).astype(np.float32)
    self.rgba = channels / 255.
constant[Set the color using an Nx3 array of RGB uint8 values]
variable[val] assign[=] binary_operation[call[call[name[np].atleast_1d, parameter[name[val]]].astype, parameter[name[np].float32]] / constant[255.0]]
name[self].rgba assign[=] name[val] | keyword[def] identifier[RGB] ( identifier[self] , identifier[val] ):
literal[string]
identifier[val] = identifier[np] . identifier[atleast_1d] ( identifier[val] ). identifier[astype] ( identifier[np] . identifier[float32] )/ literal[int]
identifier[self] . identifier[rgba] = identifier[val] | def RGB(self, val):
"""Set the color using an Nx3 array of RGB uint8 values"""
# need to convert to normalized float
val = np.atleast_1d(val).astype(np.float32) / 255.0
self.rgba = val |
def deserialize_from_http_generics(cls, body_bytes, headers):
    # type: (Optional[Union[AnyStr, IO]], Mapping) -> Any
    """Deserialize from HTTP response.

    Use bytes and headers to NOT use any requests/aiohttp or whatever
    specific implementation.
    Headers will tested for "content-type"
    """
    # NOTE(review): lookup assumes a lower-cased 'content-type' key —
    # confirm the headers mapping is case-normalized by the caller.
    if 'content-type' in headers:
        # Strip any parameters (e.g. "; charset=utf-8") from the media type.
        content_type = headers['content-type'].split(";")[0].strip().lower()
    else:
        # Server did not declare a content type; assume JSON. An empty body
        # is also treated as valid (returns None below).
        content_type = "application/json"
    if not body_bytes:
        return None
    return cls.deserialize_from_text(body_bytes, content_type)
constant[Deserialize from HTTP response.
Use bytes and headers to NOT use any requests/aiohttp or whatever
specific implementation.
Headers will tested for "content-type"
]
variable[content_type] assign[=] constant[None]
if compare[constant[content-type] in name[headers]] begin[:]
variable[content_type] assign[=] call[call[call[call[call[name[headers]][constant[content-type]].split, parameter[constant[;]]]][constant[0]].strip, parameter[]].lower, parameter[]]
if name[body_bytes] begin[:]
return[call[name[cls].deserialize_from_text, parameter[name[body_bytes], name[content_type]]]]
return[constant[None]] | keyword[def] identifier[deserialize_from_http_generics] ( identifier[cls] , identifier[body_bytes] , identifier[headers] ):
literal[string]
identifier[content_type] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[headers] :
identifier[content_type] = identifier[headers] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] (). identifier[lower] ()
keyword[else] :
identifier[content_type] = literal[string]
keyword[if] identifier[body_bytes] :
keyword[return] identifier[cls] . identifier[deserialize_from_text] ( identifier[body_bytes] , identifier[content_type] )
keyword[return] keyword[None] | def deserialize_from_http_generics(cls, body_bytes, headers):
# type: (Optional[Union[AnyStr, IO]], Mapping) -> Any
'Deserialize from HTTP response.\n\n Use bytes and headers to NOT use any requests/aiohttp or whatever\n specific implementation.\n Headers will tested for "content-type"\n '
# Try to use content-type from headers if available
content_type = None
if 'content-type' in headers:
content_type = headers['content-type'].split(';')[0].strip().lower() # depends on [control=['if'], data=['headers']]
else:
# Ouch, this server did not declare what it sent...
# Let's guess it's JSON...
# Also, since Autorest was considering that an empty body was a valid JSON,
# need that test as well....
content_type = 'application/json'
if body_bytes:
return cls.deserialize_from_text(body_bytes, content_type) # depends on [control=['if'], data=[]]
return None |
def get_cnot_control_positions(k):
    r"""Return the control positions for the CNOTs used when decomposing
    uniformly controlled rotations, as outlined in arXiv:quant-ph/0407010.

    Referencing Fig. 2 of that paper and counting up from the target
    qubit, the control qubits are labelled :math:`1, 2, \ldots, k`. The
    returned list gives, from left to right, which control qubit drives
    each successive CNOT.

    :param int k: the number of control qubits
    :return: the list of positions of the controls
    :rtype: list
    """
    positions = [1, 1]
    for control in range(2, k + 1):
        # Replace the last control with the newly added qubit, then
        # repeat the whole sequence twice.
        positions = (positions[:-1] + [control]) * 2
    return positions
return rotation_cnots | def function[get_cnot_control_positions, parameter[k]]:
constant[
Returns a list of positions for the controls of the CNOTs used when
decomposing uniformly controlled rotations, as outlined in
arXiv:quant-ph/0407010.
Referencing Fig. 2 in the aforementioned paper, this method
uses the convention that, going up from the target qubit,
the control qubits are labelled :math:`1, 2, \ldots, k`,
where :math:`k` is the number of control qubits.
The returned list provides the qubit that controls
each successive CNOT, in order from left to right.
:param int k: the number of control qubits
:return: the list of positions of the controls
:rtype: list
]
variable[rotation_cnots] assign[=] list[[<ast.Constant object at 0x7da20c76dba0>, <ast.Constant object at 0x7da20c76f250>]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], binary_operation[name[k] + constant[1]]]]] begin[:]
call[name[rotation_cnots]][<ast.UnaryOp object at 0x7da20c76c190>] assign[=] name[i]
variable[rotation_cnots] assign[=] binary_operation[name[rotation_cnots] + name[rotation_cnots]]
return[name[rotation_cnots]] | keyword[def] identifier[get_cnot_control_positions] ( identifier[k] ):
literal[string]
identifier[rotation_cnots] =[ literal[int] , literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[k] + literal[int] ):
identifier[rotation_cnots] [- literal[int] ]= identifier[i]
identifier[rotation_cnots] = identifier[rotation_cnots] + identifier[rotation_cnots]
keyword[return] identifier[rotation_cnots] | def get_cnot_control_positions(k):
"""
Returns a list of positions for the controls of the CNOTs used when
decomposing uniformly controlled rotations, as outlined in
arXiv:quant-ph/0407010.
Referencing Fig. 2 in the aforementioned paper, this method
uses the convention that, going up from the target qubit,
the control qubits are labelled :math:`1, 2, \\ldots, k`,
where :math:`k` is the number of control qubits.
The returned list provides the qubit that controls
each successive CNOT, in order from left to right.
:param int k: the number of control qubits
:return: the list of positions of the controls
:rtype: list
"""
rotation_cnots = [1, 1]
for i in range(2, k + 1):
# algorithm described is to replace the last control
# with a control to the new qubit
# and then repeat the sequence twice
rotation_cnots[-1] = i
rotation_cnots = rotation_cnots + rotation_cnots # depends on [control=['for'], data=['i']]
return rotation_cnots |
def swagger(app, prefix=None, process_doc=_sanitize,
            from_file_keyword=None, template=None):
    """
    Call this from an @app.route method like this
    @app.route('/spec.json')
    def spec():
        return jsonify(swagger(app))
    We go through all endpoints of the app searching for swagger endpoints
    We provide the minimum required data according to swagger specs
    Callers can and should add and override at will
    Arguments:
    app -- the flask app to inspect
    Keyword arguments:
    prefix -- only rules whose path starts with this prefix are included
    process_doc -- text sanitization method, the default simply replaces \n with <br>
    from_file_keyword -- how to specify a file to load doc from
    template -- The spec to start with and update as flask-swagger finds paths.
    """
    # Minimal valid Swagger 2.0 skeleton; a provided template overrides it.
    output = {
        "swagger": "2.0",
        "info": {
            "version": "0.0.0",
            "title": "Cool product name",
        }
    }
    paths = defaultdict(dict)
    definitions = defaultdict(dict)
    if template is not None:
        output.update(template)
        # check for template provided paths and definitions
        for k, v in output.get('paths', {}).items():
            paths[k] = v
        for k, v in output.get('definitions', {}).items():
            definitions[k] = v
    output["paths"] = paths
    output["definitions"] = definitions
    ignore_verbs = {"HEAD", "OPTIONS"}
    # technically only responses is non-optional
    optional_fields = ['tags', 'consumes', 'produces', 'schemes', 'security',
                       'deprecated', 'operationId', 'externalDocs']
    for rule in app.url_map.iter_rules():
        if prefix and rule.rule[:len(prefix)] != prefix:
            continue
        endpoint = app.view_functions[rule.endpoint]
        methods = dict()
        for verb in rule.methods.difference(ignore_verbs):
            verb = verb.lower()
            # For class-based (MethodView-style) endpoints, document the
            # per-verb method; otherwise fall back to the view function.
            if hasattr(endpoint, 'methods') \
                    and verb in map(lambda m: m.lower(), endpoint.methods) \
                    and hasattr(endpoint.view_class, verb):
                methods[verb] = getattr(endpoint.view_class, verb)
            else:
                methods[verb] = endpoint
        operations = dict()
        for verb, method in methods.items():
            summary, description, swag = _parse_docstring(method, process_doc,
                                                          from_file_keyword)
            if swag is not None:  # we only add endpoints with swagger data in the docstrings
                defs = swag.get('definitions', [])
                defs = _extract_definitions(defs)
                params = swag.get('parameters', [])
                defs += _extract_definitions(params)
                responses = swag.get('responses', {})
                # Response codes may parse as ints; Swagger keys are strings.
                responses = {
                    str(key): value
                    for key, value in responses.items()
                }
                if responses is not None:
                    defs = defs + _extract_definitions(responses.values())
                for definition in defs:
                    def_id = definition.pop('id')
                    if def_id is not None:
                        definitions[def_id].update(definition)
                operation = dict(
                    summary=summary,
                    description=description,
                    responses=responses
                )
                # parameters - swagger ui dislikes empty parameter lists
                if len(params) > 0:
                    operation['parameters'] = params
                # other optionals
                for key in optional_fields:
                    if key in swag:
                        operation[key] = swag.get(key)
                operations[verb] = operation
        if len(operations):
            rule = str(rule)
            # Convert Flask converters like <int:id> to Swagger's {id}.
            for arg in re.findall('(<([^<>]*:)?([^<>]*)>)', rule):
                rule = rule.replace(arg[0], '{%s}' % arg[2])
            paths[rule].update(operations)
return output | def function[swagger, parameter[app, prefix, process_doc, from_file_keyword, template]]:
constant[
Call this from an @app.route method like this
@app.route('/spec.json')
def spec():
return jsonify(swagger(app))
We go through all endpoints of the app searching for swagger endpoints
We provide the minimum required data according to swagger specs
Callers can and should add and override at will
Arguments:
app -- the flask app to inspect
Keyword arguments:
process_doc -- text sanitization method, the default simply replaces
with <br>
from_file_keyword -- how to specify a file to load doc from
template -- The spec to start with and update as flask-swagger finds paths.
]
variable[output] assign[=] dictionary[[<ast.Constant object at 0x7da1b15195a0>, <ast.Constant object at 0x7da1b151a890>], [<ast.Constant object at 0x7da1b151a830>, <ast.Dict object at 0x7da1b1519660>]]
variable[paths] assign[=] call[name[defaultdict], parameter[name[dict]]]
variable[definitions] assign[=] call[name[defaultdict], parameter[name[dict]]]
if compare[name[template] is_not constant[None]] begin[:]
call[name[output].update, parameter[name[template]]]
for taget[tuple[[<ast.Name object at 0x7da1b151a9e0>, <ast.Name object at 0x7da1b1519900>]]] in starred[call[call[name[output].get, parameter[constant[paths], dictionary[[], []]]].items, parameter[]]] begin[:]
call[name[paths]][name[k]] assign[=] name[v]
for taget[tuple[[<ast.Name object at 0x7da1b1519360>, <ast.Name object at 0x7da1b1518bb0>]]] in starred[call[call[name[output].get, parameter[constant[definitions], dictionary[[], []]]].items, parameter[]]] begin[:]
call[name[definitions]][name[k]] assign[=] name[v]
call[name[output]][constant[paths]] assign[=] name[paths]
call[name[output]][constant[definitions]] assign[=] name[definitions]
variable[ignore_verbs] assign[=] <ast.Set object at 0x7da204344df0>
variable[optional_fields] assign[=] list[[<ast.Constant object at 0x7da204347970>, <ast.Constant object at 0x7da2043459f0>, <ast.Constant object at 0x7da204346470>, <ast.Constant object at 0x7da2043466e0>, <ast.Constant object at 0x7da204345ed0>, <ast.Constant object at 0x7da204345990>, <ast.Constant object at 0x7da2043447c0>, <ast.Constant object at 0x7da2043441c0>]]
for taget[name[rule]] in starred[call[name[app].url_map.iter_rules, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da204344970> begin[:]
continue
variable[endpoint] assign[=] call[name[app].view_functions][name[rule].endpoint]
variable[methods] assign[=] call[name[dict], parameter[]]
for taget[name[verb]] in starred[call[name[rule].methods.difference, parameter[name[ignore_verbs]]]] begin[:]
variable[verb] assign[=] call[name[verb].lower, parameter[]]
if <ast.BoolOp object at 0x7da204347d00> begin[:]
call[name[methods]][name[verb]] assign[=] call[name[getattr], parameter[name[endpoint].view_class, name[verb]]]
variable[operations] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2043458d0>, <ast.Name object at 0x7da204346cb0>]]] in starred[call[name[methods].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da204347dc0> assign[=] call[name[_parse_docstring], parameter[name[method], name[process_doc], name[from_file_keyword]]]
if compare[name[swag] is_not constant[None]] begin[:]
variable[defs] assign[=] call[name[swag].get, parameter[constant[definitions], list[[]]]]
variable[defs] assign[=] call[name[_extract_definitions], parameter[name[defs]]]
variable[params] assign[=] call[name[swag].get, parameter[constant[parameters], list[[]]]]
<ast.AugAssign object at 0x7da204345210>
variable[responses] assign[=] call[name[swag].get, parameter[constant[responses], dictionary[[], []]]]
variable[responses] assign[=] <ast.DictComp object at 0x7da204345660>
if compare[name[responses] is_not constant[None]] begin[:]
variable[defs] assign[=] binary_operation[name[defs] + call[name[_extract_definitions], parameter[call[name[responses].values, parameter[]]]]]
for taget[name[definition]] in starred[name[defs]] begin[:]
variable[def_id] assign[=] call[name[definition].pop, parameter[constant[id]]]
if compare[name[def_id] is_not constant[None]] begin[:]
call[call[name[definitions]][name[def_id]].update, parameter[name[definition]]]
variable[operation] assign[=] call[name[dict], parameter[]]
if compare[call[name[len], parameter[name[params]]] greater[>] constant[0]] begin[:]
call[name[operation]][constant[parameters]] assign[=] name[params]
for taget[name[key]] in starred[name[optional_fields]] begin[:]
if compare[name[key] in name[swag]] begin[:]
call[name[operation]][name[key]] assign[=] call[name[swag].get, parameter[name[key]]]
call[name[operations]][name[verb]] assign[=] name[operation]
if call[name[len], parameter[name[operations]]] begin[:]
variable[rule] assign[=] call[name[str], parameter[name[rule]]]
for taget[name[arg]] in starred[call[name[re].findall, parameter[constant[(<([^<>]*:)?([^<>]*)>)], name[rule]]]] begin[:]
variable[rule] assign[=] call[name[rule].replace, parameter[call[name[arg]][constant[0]], binary_operation[constant[{%s}] <ast.Mod object at 0x7da2590d6920> call[name[arg]][constant[2]]]]]
call[call[name[paths]][name[rule]].update, parameter[name[operations]]]
return[name[output]] | keyword[def] identifier[swagger] ( identifier[app] , identifier[prefix] = keyword[None] , identifier[process_doc] = identifier[_sanitize] ,
identifier[from_file_keyword] = keyword[None] , identifier[template] = keyword[None] ):
literal[string]
identifier[output] ={
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
}
identifier[paths] = identifier[defaultdict] ( identifier[dict] )
identifier[definitions] = identifier[defaultdict] ( identifier[dict] )
keyword[if] identifier[template] keyword[is] keyword[not] keyword[None] :
identifier[output] . identifier[update] ( identifier[template] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[output] . identifier[get] ( literal[string] ,{}). identifier[items] ():
identifier[paths] [ identifier[k] ]= identifier[v]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[output] . identifier[get] ( literal[string] ,{}). identifier[items] ():
identifier[definitions] [ identifier[k] ]= identifier[v]
identifier[output] [ literal[string] ]= identifier[paths]
identifier[output] [ literal[string] ]= identifier[definitions]
identifier[ignore_verbs] ={ literal[string] , literal[string] }
identifier[optional_fields] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[rule] keyword[in] identifier[app] . identifier[url_map] . identifier[iter_rules] ():
keyword[if] identifier[prefix] keyword[and] identifier[rule] . identifier[rule] [: identifier[len] ( identifier[prefix] )]!= identifier[prefix] :
keyword[continue]
identifier[endpoint] = identifier[app] . identifier[view_functions] [ identifier[rule] . identifier[endpoint] ]
identifier[methods] = identifier[dict] ()
keyword[for] identifier[verb] keyword[in] identifier[rule] . identifier[methods] . identifier[difference] ( identifier[ignore_verbs] ):
identifier[verb] = identifier[verb] . identifier[lower] ()
keyword[if] identifier[hasattr] ( identifier[endpoint] , literal[string] ) keyword[and] identifier[verb] keyword[in] identifier[map] ( keyword[lambda] identifier[m] : identifier[m] . identifier[lower] (), identifier[endpoint] . identifier[methods] ) keyword[and] identifier[hasattr] ( identifier[endpoint] . identifier[view_class] , identifier[verb] ):
identifier[methods] [ identifier[verb] ]= identifier[getattr] ( identifier[endpoint] . identifier[view_class] , identifier[verb] )
keyword[else] :
identifier[methods] [ identifier[verb] ]= identifier[endpoint]
identifier[operations] = identifier[dict] ()
keyword[for] identifier[verb] , identifier[method] keyword[in] identifier[methods] . identifier[items] ():
identifier[summary] , identifier[description] , identifier[swag] = identifier[_parse_docstring] ( identifier[method] , identifier[process_doc] ,
identifier[from_file_keyword] )
keyword[if] identifier[swag] keyword[is] keyword[not] keyword[None] :
identifier[defs] = identifier[swag] . identifier[get] ( literal[string] ,[])
identifier[defs] = identifier[_extract_definitions] ( identifier[defs] )
identifier[params] = identifier[swag] . identifier[get] ( literal[string] ,[])
identifier[defs] += identifier[_extract_definitions] ( identifier[params] )
identifier[responses] = identifier[swag] . identifier[get] ( literal[string] ,{})
identifier[responses] ={
identifier[str] ( identifier[key] ): identifier[value]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[responses] . identifier[items] ()
}
keyword[if] identifier[responses] keyword[is] keyword[not] keyword[None] :
identifier[defs] = identifier[defs] + identifier[_extract_definitions] ( identifier[responses] . identifier[values] ())
keyword[for] identifier[definition] keyword[in] identifier[defs] :
identifier[def_id] = identifier[definition] . identifier[pop] ( literal[string] )
keyword[if] identifier[def_id] keyword[is] keyword[not] keyword[None] :
identifier[definitions] [ identifier[def_id] ]. identifier[update] ( identifier[definition] )
identifier[operation] = identifier[dict] (
identifier[summary] = identifier[summary] ,
identifier[description] = identifier[description] ,
identifier[responses] = identifier[responses]
)
keyword[if] identifier[len] ( identifier[params] )> literal[int] :
identifier[operation] [ literal[string] ]= identifier[params]
keyword[for] identifier[key] keyword[in] identifier[optional_fields] :
keyword[if] identifier[key] keyword[in] identifier[swag] :
identifier[operation] [ identifier[key] ]= identifier[swag] . identifier[get] ( identifier[key] )
identifier[operations] [ identifier[verb] ]= identifier[operation]
keyword[if] identifier[len] ( identifier[operations] ):
identifier[rule] = identifier[str] ( identifier[rule] )
keyword[for] identifier[arg] keyword[in] identifier[re] . identifier[findall] ( literal[string] , identifier[rule] ):
identifier[rule] = identifier[rule] . identifier[replace] ( identifier[arg] [ literal[int] ], literal[string] % identifier[arg] [ literal[int] ])
identifier[paths] [ identifier[rule] ]. identifier[update] ( identifier[operations] )
keyword[return] identifier[output] | def swagger(app, prefix=None, process_doc=_sanitize, from_file_keyword=None, template=None):
"""
Call this from an @app.route method like this
@app.route('/spec.json')
def spec():
return jsonify(swagger(app))
We go through all endpoints of the app searching for swagger endpoints
We provide the minimum required data according to swagger specs
Callers can and should add and override at will
Arguments:
app -- the flask app to inspect
Keyword arguments:
process_doc -- text sanitization method, the default simply replaces
with <br>
from_file_keyword -- how to specify a file to load doc from
template -- The spec to start with and update as flask-swagger finds paths.
"""
output = {'swagger': '2.0', 'info': {'version': '0.0.0', 'title': 'Cool product name'}}
paths = defaultdict(dict)
definitions = defaultdict(dict)
if template is not None:
output.update(template)
# check for template provided paths and definitions
for (k, v) in output.get('paths', {}).items():
paths[k] = v # depends on [control=['for'], data=[]]
for (k, v) in output.get('definitions', {}).items():
definitions[k] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['template']]
output['paths'] = paths
output['definitions'] = definitions
ignore_verbs = {'HEAD', 'OPTIONS'}
# technically only responses is non-optional
optional_fields = ['tags', 'consumes', 'produces', 'schemes', 'security', 'deprecated', 'operationId', 'externalDocs']
for rule in app.url_map.iter_rules():
if prefix and rule.rule[:len(prefix)] != prefix:
continue # depends on [control=['if'], data=[]]
endpoint = app.view_functions[rule.endpoint]
methods = dict()
for verb in rule.methods.difference(ignore_verbs):
verb = verb.lower()
if hasattr(endpoint, 'methods') and verb in map(lambda m: m.lower(), endpoint.methods) and hasattr(endpoint.view_class, verb):
methods[verb] = getattr(endpoint.view_class, verb) # depends on [control=['if'], data=[]]
else:
methods[verb] = endpoint # depends on [control=['for'], data=['verb']]
operations = dict()
for (verb, method) in methods.items():
(summary, description, swag) = _parse_docstring(method, process_doc, from_file_keyword)
if swag is not None: # we only add endpoints with swagger data in the docstrings
defs = swag.get('definitions', [])
defs = _extract_definitions(defs)
params = swag.get('parameters', [])
defs += _extract_definitions(params)
responses = swag.get('responses', {})
responses = {str(key): value for (key, value) in responses.items()}
if responses is not None:
defs = defs + _extract_definitions(responses.values()) # depends on [control=['if'], data=['responses']]
for definition in defs:
def_id = definition.pop('id')
if def_id is not None:
definitions[def_id].update(definition) # depends on [control=['if'], data=['def_id']] # depends on [control=['for'], data=['definition']]
operation = dict(summary=summary, description=description, responses=responses)
# parameters - swagger ui dislikes empty parameter lists
if len(params) > 0:
operation['parameters'] = params # depends on [control=['if'], data=[]]
# other optionals
for key in optional_fields:
if key in swag:
operation[key] = swag.get(key) # depends on [control=['if'], data=['key', 'swag']] # depends on [control=['for'], data=['key']]
operations[verb] = operation # depends on [control=['if'], data=['swag']] # depends on [control=['for'], data=[]]
if len(operations):
rule = str(rule)
for arg in re.findall('(<([^<>]*:)?([^<>]*)>)', rule):
rule = rule.replace(arg[0], '{%s}' % arg[2]) # depends on [control=['for'], data=['arg']]
paths[rule].update(operations) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
return output |
def _mapreduce_job_counters_metrics(self, running_jobs, auth, ssl_verify, addl_tags):
    """
    Get custom metrics specified for each counter.

    For every running job that has counters configured (either general
    counters or job-specific ones), fetch the job's counters from the
    tracking URL and emit a metric for each counter name requested by the
    configuration.

    Parameters
    ----------
    running_jobs : dict
        Job metadata dicts; each value must contain 'job_name',
        'tracking_url', 'app_name' and 'user_name'.
    auth : object
        Auth object forwarded to the REST request.
    ssl_verify : bool
        Whether to verify SSL certificates on the REST request.
    addl_tags : list
        Extra tags appended to every emitted metric.
    """
    for job_metrics in itervalues(running_jobs):
        job_name = job_metrics['job_name']

        # Skip jobs for which no counters were configured at all.
        if not (self.general_counters or job_name in self.job_specific_counters):
            continue

        job_specific_metrics = self.job_specific_counters.get(job_name)
        metrics_json = self._rest_request_to_json(
            job_metrics['tracking_url'],
            auth,
            ssl_verify,
            'counters',
            self.MAPREDUCE_SERVICE_CHECK,
            tags=addl_tags,
        )

        # Guard against a missing/empty 'jobCounters' payload or group list.
        job_counters = metrics_json.get('jobCounters') or {}
        for counter_group in job_counters.get('counterGroup') or []:
            group_name = counter_group.get('counterGroupName')
            if not group_name:
                continue

            # Union of the job-specific and general counters wanted for this group.
            counter_metrics = set()
            if job_specific_metrics and group_name in job_specific_metrics:
                counter_metrics.update(job_specific_metrics[group_name])
            if group_name in self.general_counters:
                counter_metrics.update(self.general_counters[group_name])
            if not counter_metrics:
                continue

            # Cycle through the counters in this group, emitting only the
            # ones explicitly requested for this group name.
            for counter in counter_group.get('counter') or []:
                counter_name = counter.get('name')
                if not counter_name or counter_name not in counter_metrics:
                    continue
                tags = [
                    'app_name:' + job_metrics.get('app_name'),
                    'user_name:' + job_metrics.get('user_name'),
                    'job_name:' + job_name,
                    'counter_name:' + str(counter_name).lower(),
                ]
                tags.extend(addl_tags)
                self._set_metrics_from_json(
                    counter, self.MAPREDUCE_JOB_COUNTER_METRICS, tags
                )
constant[
Get custom metrics specified for each counter
]
for taget[name[job_metrics]] in starred[call[name[itervalues], parameter[name[running_jobs]]]] begin[:]
variable[job_name] assign[=] call[name[job_metrics]][constant[job_name]]
if <ast.BoolOp object at 0x7da20e955300> begin[:]
variable[job_specific_metrics] assign[=] call[name[self].job_specific_counters.get, parameter[name[job_name]]]
variable[metrics_json] assign[=] call[name[self]._rest_request_to_json, parameter[call[name[job_metrics]][constant[tracking_url]], name[auth], name[ssl_verify], constant[counters], name[self].MAPREDUCE_SERVICE_CHECK]]
if call[name[metrics_json].get, parameter[constant[jobCounters]]] begin[:]
if call[call[name[metrics_json]][constant[jobCounters]].get, parameter[constant[counterGroup]]] begin[:]
for taget[name[counter_group]] in starred[call[call[name[metrics_json]][constant[jobCounters]]][constant[counterGroup]]] begin[:]
variable[group_name] assign[=] call[name[counter_group].get, parameter[constant[counterGroupName]]]
if name[group_name] begin[:]
variable[counter_metrics] assign[=] call[name[set], parameter[list[[]]]]
if <ast.BoolOp object at 0x7da20e957790> begin[:]
variable[counter_metrics] assign[=] call[name[counter_metrics].union, parameter[call[name[job_specific_metrics]][name[group_name]]]]
if compare[name[group_name] in name[self].general_counters] begin[:]
variable[counter_metrics] assign[=] call[name[counter_metrics].union, parameter[call[name[self].general_counters][name[group_name]]]]
if name[counter_metrics] begin[:]
if call[name[counter_group].get, parameter[constant[counter]]] begin[:]
for taget[name[counter]] in starred[call[name[counter_group]][constant[counter]]] begin[:]
variable[counter_name] assign[=] call[name[counter].get, parameter[constant[name]]]
if <ast.BoolOp object at 0x7da20e9557b0> begin[:]
variable[tags] assign[=] list[[<ast.BinOp object at 0x7da20e9562c0>, <ast.BinOp object at 0x7da20e954730>, <ast.BinOp object at 0x7da20e955f00>, <ast.BinOp object at 0x7da20c7cab30>]]
call[name[tags].extend, parameter[name[addl_tags]]]
call[name[self]._set_metrics_from_json, parameter[name[counter], name[self].MAPREDUCE_JOB_COUNTER_METRICS, name[tags]]] | keyword[def] identifier[_mapreduce_job_counters_metrics] ( identifier[self] , identifier[running_jobs] , identifier[auth] , identifier[ssl_verify] , identifier[addl_tags] ):
literal[string]
keyword[for] identifier[job_metrics] keyword[in] identifier[itervalues] ( identifier[running_jobs] ):
identifier[job_name] = identifier[job_metrics] [ literal[string] ]
keyword[if] identifier[self] . identifier[general_counters] keyword[or] ( identifier[job_name] keyword[in] identifier[self] . identifier[job_specific_counters] ):
identifier[job_specific_metrics] = identifier[self] . identifier[job_specific_counters] . identifier[get] ( identifier[job_name] )
identifier[metrics_json] = identifier[self] . identifier[_rest_request_to_json] (
identifier[job_metrics] [ literal[string] ],
identifier[auth] ,
identifier[ssl_verify] ,
literal[string] ,
identifier[self] . identifier[MAPREDUCE_SERVICE_CHECK] ,
identifier[tags] = identifier[addl_tags] ,
)
keyword[if] identifier[metrics_json] . identifier[get] ( literal[string] ):
keyword[if] identifier[metrics_json] [ literal[string] ]. identifier[get] ( literal[string] ):
keyword[for] identifier[counter_group] keyword[in] identifier[metrics_json] [ literal[string] ][ literal[string] ]:
identifier[group_name] = identifier[counter_group] . identifier[get] ( literal[string] )
keyword[if] identifier[group_name] :
identifier[counter_metrics] = identifier[set] ([])
keyword[if] identifier[job_specific_metrics] keyword[and] identifier[group_name] keyword[in] identifier[job_specific_metrics] :
identifier[counter_metrics] = identifier[counter_metrics] . identifier[union] ( identifier[job_specific_metrics] [ identifier[group_name] ])
keyword[if] identifier[group_name] keyword[in] identifier[self] . identifier[general_counters] :
identifier[counter_metrics] = identifier[counter_metrics] . identifier[union] ( identifier[self] . identifier[general_counters] [ identifier[group_name] ])
keyword[if] identifier[counter_metrics] :
keyword[if] identifier[counter_group] . identifier[get] ( literal[string] ):
keyword[for] identifier[counter] keyword[in] identifier[counter_group] [ literal[string] ]:
identifier[counter_name] = identifier[counter] . identifier[get] ( literal[string] )
keyword[if] identifier[counter_name] keyword[and] identifier[counter_name] keyword[in] identifier[counter_metrics] :
identifier[tags] =[
literal[string] + identifier[job_metrics] . identifier[get] ( literal[string] ),
literal[string] + identifier[job_metrics] . identifier[get] ( literal[string] ),
literal[string] + identifier[job_name] ,
literal[string] + identifier[str] ( identifier[counter_name] ). identifier[lower] (),
]
identifier[tags] . identifier[extend] ( identifier[addl_tags] )
identifier[self] . identifier[_set_metrics_from_json] (
identifier[counter] , identifier[self] . identifier[MAPREDUCE_JOB_COUNTER_METRICS] , identifier[tags]
) | def _mapreduce_job_counters_metrics(self, running_jobs, auth, ssl_verify, addl_tags):
"""
Get custom metrics specified for each counter
"""
for job_metrics in itervalues(running_jobs):
job_name = job_metrics['job_name']
# Check if the job_name exist in the custom metrics
if self.general_counters or job_name in self.job_specific_counters:
job_specific_metrics = self.job_specific_counters.get(job_name)
metrics_json = self._rest_request_to_json(job_metrics['tracking_url'], auth, ssl_verify, 'counters', self.MAPREDUCE_SERVICE_CHECK, tags=addl_tags)
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
# Cycle through all the counter groups for this job
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
counter_metrics = set([])
# Add any counters in the job specific metrics
if job_specific_metrics and group_name in job_specific_metrics:
counter_metrics = counter_metrics.union(job_specific_metrics[group_name]) # depends on [control=['if'], data=[]]
# Add any counters in the general metrics
if group_name in self.general_counters:
counter_metrics = counter_metrics.union(self.general_counters[group_name]) # depends on [control=['if'], data=['group_name']]
if counter_metrics:
# Cycle through all the counters in this counter group
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
# Check if the counter name is in the custom metrics for this group name
if counter_name and counter_name in counter_metrics:
tags = ['app_name:' + job_metrics.get('app_name'), 'user_name:' + job_metrics.get('user_name'), 'job_name:' + job_name, 'counter_name:' + str(counter_name).lower()]
tags.extend(addl_tags)
self._set_metrics_from_json(counter, self.MAPREDUCE_JOB_COUNTER_METRICS, tags) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['counter']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['counter_group']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['job_metrics']] |
def get_all(cls):
    """Return every Todo item, ordered by creation timestamp.

    The query is eventually consistent, which avoids the need for an
    extra datastore index.
    """
    request = datastore.RunQueryRequest()
    query = request.query
    set_kind(query, kind='Todo')
    add_property_orders(query, 'created')
    response = datastore.run_query(request)
    return [Todo.from_proto(result.entity) for result in response.batch.entity_results]
constant[Query for all Todo items ordered by creation date.
This method is eventually consistent to avoid the need for an extra index.
]
variable[req] assign[=] call[name[datastore].RunQueryRequest, parameter[]]
variable[q] assign[=] name[req].query
call[name[set_kind], parameter[name[q]]]
call[name[add_property_orders], parameter[name[q], constant[created]]]
variable[resp] assign[=] call[name[datastore].run_query, parameter[name[req]]]
variable[todos] assign[=] <ast.ListComp object at 0x7da1b0651e10>
return[name[todos]] | keyword[def] identifier[get_all] ( identifier[cls] ):
literal[string]
identifier[req] = identifier[datastore] . identifier[RunQueryRequest] ()
identifier[q] = identifier[req] . identifier[query]
identifier[set_kind] ( identifier[q] , identifier[kind] = literal[string] )
identifier[add_property_orders] ( identifier[q] , literal[string] )
identifier[resp] = identifier[datastore] . identifier[run_query] ( identifier[req] )
identifier[todos] =[ identifier[Todo] . identifier[from_proto] ( identifier[r] . identifier[entity] ) keyword[for] identifier[r] keyword[in] identifier[resp] . identifier[batch] . identifier[entity_results] ]
keyword[return] identifier[todos] | def get_all(cls):
"""Query for all Todo items ordered by creation date.
This method is eventually consistent to avoid the need for an extra index.
"""
req = datastore.RunQueryRequest()
q = req.query
set_kind(q, kind='Todo')
add_property_orders(q, 'created')
resp = datastore.run_query(req)
todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results]
return todos |
def copy_reference(resource, doc, env, *args, **kwargs):
    """Row generator that re-emits rows from a same-named reference.

    Looks up the reference whose name matches the resource term and yields
    its rows unchanged. This lets an upstream package be copied and modified
    by this package while still being formally declared as a dependency.
    """
    for row in doc.reference(resource.name):
        yield row
constant[A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency
The function will generate rows from a reference that has the same name as the resource term
]
<ast.YieldFrom object at 0x7da1b1858e80> | keyword[def] identifier[copy_reference] ( identifier[resource] , identifier[doc] , identifier[env] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[yield] keyword[from] identifier[doc] . identifier[reference] ( identifier[resource] . identifier[name] ) | def copy_reference(resource, doc, env, *args, **kwargs):
"""A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency
The function will generate rows from a reference that has the same name as the resource term
"""
yield from doc.reference(resource.name) |
def lowess(x, y, f=2.0 / 3.0, iterations=3):
    """Robust locally weighted regression (LOWESS) smoother.

    Fits a nonparametric regression curve to the scatterplot defined by the
    equal-length arrays ``x`` and ``y``; each pair ``(x[i], y[i])`` is one
    data point. Returns the estimated (smooth) values of ``y``.

    Parameters
    ----------
    x, y : array-like
        Coordinates of the data points; must have equal length.
    f : float, optional
        Smoothing span as a fraction of the data used per local fit; a
        larger value yields a smoother curve.
    iterations : int, optional
        Number of robustifying iterations; fewer iterations run faster.

    Returns
    -------
    numpy.ndarray
        The smoothed estimate of ``y`` at each ``x``.
    """
    n = len(x)
    r = int(ceil(f * n))
    # Per-point bandwidth: distance to the r-th nearest neighbour.
    pairwise = np.abs(x[:, None] - x[None, :])
    bandwidth = np.sort(pairwise, axis=1)[:, r]
    # Tricube weights, clipped to the local bandwidth.
    w = np.clip(pairwise / bandwidth, 0.0, 1.0)
    w = (1 - w ** 3) ** 3
    smoothed = np.zeros(n)
    robust_w = np.ones(n)
    for _ in range(iterations):
        for idx in range(n):
            wts = robust_w * w[:, idx]
            # Weighted least-squares fit of a line through the local window.
            s_wy = np.sum(wts * y)
            s_wyx = np.sum(wts * y * x)
            s_w = np.sum(wts)
            s_wx = np.sum(wts * x)
            s_wxx = np.sum(wts * x * x)
            b = np.array([s_wy, s_wyx])
            A = np.array([[s_w, s_wx], [s_wx, s_wxx]])
            # I think it is safe to assume this.
            # pylint: disable=unexpected-keyword-arg
            beta = linalg.solve(A, b, assume_a="pos", check_finite=False)
            smoothed[idx] = beta[0] + beta[1] * x[idx]
        # Down-weight points with large residuals before the next pass.
        resid = y - smoothed
        scale = np.median(np.abs(resid))
        robust_w = np.clip(resid / (6.0 * scale), -1, 1)
        robust_w = (1 - robust_w ** 2) ** 2
    return smoothed
constant[lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
]
variable[n] assign[=] call[name[len], parameter[name[x]]]
variable[r] assign[=] call[name[int], parameter[call[name[ceil], parameter[binary_operation[name[f] * name[n]]]]]]
variable[h] assign[=] <ast.ListComp object at 0x7da18f09f6d0>
variable[w] assign[=] call[name[np].clip, parameter[call[name[np].abs, parameter[binary_operation[binary_operation[call[name[x]][tuple[[<ast.Slice object at 0x7da18f09f250>, <ast.Constant object at 0x7da18f09fb80>]]] - call[name[x]][tuple[[<ast.Constant object at 0x7da18f09d390>, <ast.Slice object at 0x7da18f09ef80>]]]] / name[h]]]], constant[0.0], constant[1.0]]]
variable[w] assign[=] binary_operation[binary_operation[constant[1] - binary_operation[name[w] ** constant[3]]] ** constant[3]]
variable[yest] assign[=] call[name[np].zeros, parameter[name[n]]]
variable[delta] assign[=] call[name[np].ones, parameter[name[n]]]
for taget[name[_]] in starred[call[name[range], parameter[name[iterations]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
variable[weights] assign[=] binary_operation[name[delta] * call[name[w]][tuple[[<ast.Slice object at 0x7da18f09faf0>, <ast.Name object at 0x7da18f09cc10>]]]]
variable[b] assign[=] call[name[np].array, parameter[list[[<ast.Call object at 0x7da18f09d8d0>, <ast.Call object at 0x7da18f09f700>]]]]
variable[A] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da18f09c520>, <ast.List object at 0x7da18f09de10>]]]]
variable[beta] assign[=] call[name[linalg].solve, parameter[name[A], name[b]]]
call[name[yest]][name[i]] assign[=] binary_operation[call[name[beta]][constant[0]] + binary_operation[call[name[beta]][constant[1]] * call[name[x]][name[i]]]]
variable[residuals] assign[=] binary_operation[name[y] - name[yest]]
variable[s] assign[=] call[name[np].median, parameter[call[name[np].abs, parameter[name[residuals]]]]]
variable[delta] assign[=] call[name[np].clip, parameter[binary_operation[name[residuals] / binary_operation[constant[6.0] * name[s]]], <ast.UnaryOp object at 0x7da20c76fee0>, constant[1]]]
variable[delta] assign[=] binary_operation[binary_operation[constant[1] - binary_operation[name[delta] ** constant[2]]] ** constant[2]]
return[name[yest]] | keyword[def] identifier[lowess] ( identifier[x] , identifier[y] , identifier[f] = literal[int] / literal[int] , identifier[iterations] = literal[int] ):
literal[string]
identifier[n] = identifier[len] ( identifier[x] )
identifier[r] = identifier[int] ( identifier[ceil] ( identifier[f] * identifier[n] ))
identifier[h] =[ identifier[np] . identifier[sort] ( identifier[np] . identifier[abs] ( identifier[x] - identifier[x] [ identifier[i] ]))[ identifier[r] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] )]
identifier[w] = identifier[np] . identifier[clip] ( identifier[np] . identifier[abs] (( identifier[x] [:, keyword[None] ]- identifier[x] [ keyword[None] ,:])/ identifier[h] ), literal[int] , literal[int] )
identifier[w] =( literal[int] - identifier[w] ** literal[int] )** literal[int]
identifier[yest] = identifier[np] . identifier[zeros] ( identifier[n] )
identifier[delta] = identifier[np] . identifier[ones] ( identifier[n] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[iterations] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[weights] = identifier[delta] * identifier[w] [:, identifier[i] ]
identifier[b] = identifier[np] . identifier[array] ([ identifier[np] . identifier[sum] ( identifier[weights] * identifier[y] ), identifier[np] . identifier[sum] ( identifier[weights] * identifier[y] * identifier[x] )])
identifier[A] = identifier[np] . identifier[array] ([[ identifier[np] . identifier[sum] ( identifier[weights] ), identifier[np] . identifier[sum] ( identifier[weights] * identifier[x] )],[ identifier[np] . identifier[sum] ( identifier[weights] * identifier[x] ), identifier[np] . identifier[sum] ( identifier[weights] * identifier[x] * identifier[x] )]])
identifier[beta] = identifier[linalg] . identifier[solve] ( identifier[A] , identifier[b] , identifier[assume_a] = literal[string] , identifier[check_finite] = keyword[False] )
identifier[yest] [ identifier[i] ]= identifier[beta] [ literal[int] ]+ identifier[beta] [ literal[int] ]* identifier[x] [ identifier[i] ]
identifier[residuals] = identifier[y] - identifier[yest]
identifier[s] = identifier[np] . identifier[median] ( identifier[np] . identifier[abs] ( identifier[residuals] ))
identifier[delta] = identifier[np] . identifier[clip] ( identifier[residuals] /( literal[int] * identifier[s] ),- literal[int] , literal[int] )
identifier[delta] =( literal[int] - identifier[delta] ** literal[int] )** literal[int]
keyword[return] identifier[yest] | def lowess(x, y, f=2.0 / 3.0, iterations=3):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
"""
n = len(x)
r = int(ceil(f * n))
h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = (1 - w ** 3) ** 3
yest = np.zeros(n)
delta = np.ones(n)
for _ in range(iterations):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
A = np.array([[np.sum(weights), np.sum(weights * x)], [np.sum(weights * x), np.sum(weights * x * x)]])
# I think it is safe to assume this.
# pylint: disable=unexpected-keyword-arg
beta = linalg.solve(A, b, assume_a='pos', check_finite=False)
yest[i] = beta[0] + beta[1] * x[i] # depends on [control=['for'], data=['i']]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta ** 2) ** 2 # depends on [control=['for'], data=[]]
return yest |
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on failure.
    Parameters
    ----------
    sub : str
        Substring being searched.
    start : int
        Left edge index.
    end : int
        Right edge index.
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``.
    Returns
    -------
    Series or Index
        Indexes where substring is found.
    """
    if not isinstance(sub, str):
        raise TypeError(
            'expected a string object, not {0}'.format(type(sub).__name__))
    # Map the requested side onto the matching str search method name.
    side_methods = {'left': 'find', 'right': 'rfind'}
    if side not in side_methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = side_methods[side]
    if end is None:
        searcher = lambda x: getattr(x, method)(sub, start)
    else:
        searcher = lambda x: getattr(x, method)(sub, start, end)
    return _na_map(searcher, arr, dtype=int)
constant[
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
]
if <ast.UnaryOp object at 0x7da1b1ec7ac0> begin[:]
variable[msg] assign[=] constant[expected a string object, not {0}]
<ast.Raise object at 0x7da1b1ec6fb0>
if compare[name[side] equal[==] constant[left]] begin[:]
variable[method] assign[=] constant[find]
if compare[name[end] is constant[None]] begin[:]
variable[f] assign[=] <ast.Lambda object at 0x7da1b1ec4e80>
return[call[name[_na_map], parameter[name[f], name[arr]]]] | keyword[def] identifier[str_find] ( identifier[arr] , identifier[sub] , identifier[start] = literal[int] , identifier[end] = keyword[None] , identifier[side] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[sub] , identifier[str] ):
identifier[msg] = literal[string]
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( identifier[type] ( identifier[sub] ). identifier[__name__] ))
keyword[if] identifier[side] == literal[string] :
identifier[method] = literal[string]
keyword[elif] identifier[side] == literal[string] :
identifier[method] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[end] keyword[is] keyword[None] :
identifier[f] = keyword[lambda] identifier[x] : identifier[getattr] ( identifier[x] , identifier[method] )( identifier[sub] , identifier[start] )
keyword[else] :
identifier[f] = keyword[lambda] identifier[x] : identifier[getattr] ( identifier[x] , identifier[method] )( identifier[sub] , identifier[start] , identifier[end] )
keyword[return] identifier[_na_map] ( identifier[f] , identifier[arr] , identifier[dtype] = identifier[int] ) | def str_find(arr, sub, start=0, end=None, side='left'):
"""
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
"""
if not isinstance(sub, str):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__)) # depends on [control=['if'], data=[]]
if side == 'left':
method = 'find' # depends on [control=['if'], data=[]]
elif side == 'right':
method = 'rfind' # depends on [control=['if'], data=[]]
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start) # depends on [control=['if'], data=[]]
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int) |
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
    """
    List uncollated submission versions.

    Gives a paginated, uncollated list of submission versions for all
    matching submissions in the context. The SubmissionVersion objects
    will not include the +new_grade+ or +previous_grade+ keys, only the
    +grade+; same for +graded_at+ and +grader+.

    :param course_id: (required, path) The id of the contextual course
        for this API call.
    :param assignment_id: (optional) Restrict to submissions of this
        assignment; if absent, any assignment in the course is included.
    :param user_id: (optional) Restrict to submissions by this user; if
        absent, any user in the course is included.
    :param ascending: (optional) Return versions in ascending date order
        (oldest first); if absent, descending (newest first).
    """
    path = {"course_id": course_id}
    data = {}
    params = {}
    # Only send optional query parameters that were actually supplied.
    for key, value in (("assignment_id", assignment_id),
                       ("user_id", user_id),
                       ("ascending", ascending)):
        if value is not None:
            params[key] = value
    self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True)
constant[
List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
submissions in the context. This SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[The id of the contextual course for this API call]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[The ID of the assignment for which you want to see submissions. If
absent, versions of submissions from any assignment in the course are
included.]
if compare[name[assignment_id] is_not constant[None]] begin[:]
call[name[params]][constant[assignment_id]] assign[=] name[assignment_id]
constant[The ID of the user for which you want to see submissions. If absent,
versions of submissions from any user in the course are included.]
if compare[name[user_id] is_not constant[None]] begin[:]
call[name[params]][constant[user_id]] assign[=] name[user_id]
constant[Returns submission versions in ascending date order (oldest first). If
absent, returns submission versions in descending date order (newest
first).]
if compare[name[ascending] is_not constant[None]] begin[:]
call[name[params]][constant[ascending]] assign[=] name[ascending]
call[name[self].logger.debug, parameter[call[constant[GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[GET], call[constant[/api/v1/courses/{course_id}/gradebook_history/feed].format, parameter[]]]]] | keyword[def] identifier[list_uncollated_submission_versions] ( identifier[self] , identifier[course_id] , identifier[ascending] = keyword[None] , identifier[assignment_id] = keyword[None] , identifier[user_id] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
keyword[if] identifier[assignment_id] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[assignment_id]
literal[string]
keyword[if] identifier[user_id] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[user_id]
literal[string]
keyword[if] identifier[ascending] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[ascending]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[all_pages] = keyword[True] ) | def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
"""
List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
submissions in the context. This SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'The id of the contextual course for this API call'
path['course_id'] = course_id # OPTIONAL - assignment_id
'The ID of the assignment for which you want to see submissions. If\n absent, versions of submissions from any assignment in the course are\n included.'
if assignment_id is not None:
params['assignment_id'] = assignment_id # depends on [control=['if'], data=['assignment_id']] # OPTIONAL - user_id
'The ID of the user for which you want to see submissions. If absent,\n versions of submissions from any user in the course are included.'
if user_id is not None:
params['user_id'] = user_id # depends on [control=['if'], data=['user_id']] # OPTIONAL - ascending
'Returns submission versions in ascending date order (oldest first). If\n absent, returns submission versions in descending date order (newest\n first).'
if ascending is not None:
params['ascending'] = ascending # depends on [control=['if'], data=['ascending']]
self.logger.debug('GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('GET', '/api/v1/courses/{course_id}/gradebook_history/feed'.format(**path), data=data, params=params, all_pages=True) |
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.

    If something's wrong, discard the cache entry and return an empty
    list. On success, stores ``(size, mtime, lines, fullname)`` in the
    module-level ``cache`` dict keyed by *filename*.
    """
    if filename in cache:
        del cache[filename]
    # Ignore empty names and pseudo-filenames such as "<stdin>".
    if not filename or (filename.startswith('<') and filename.endswith('>')):
        return []
    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename
        # Try for a PEP 302 __loader__, if available.
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)
            if name and get_source:
                try:
                    data = get_source(name)
                except (ImportError, IOError):
                    pass
                else:
                    if data is None:
                        # No luck, the PEP 302 loader cannot find the source
                        # for this module.
                        return []
                    # Loader-sourced entries have no mtime; use None.
                    cache[filename] = (
                        len(data), None,
                        [line + '\n' for line in data.splitlines()], fullname
                    )
                    return cache[filename][2]
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []
        for dirname in sys.path:
            # When using imputil, sys.path may contain things other than
            # strings; ignore them when it happens.
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except OSError:
                pass
        else:
            return []
    try:
        # Mode 'r' already provides universal-newline translation on
        # Python 3; the old 'rU' mode is deprecated and was removed in
        # Python 3.11 (it raised ValueError there).
        with open(fullname, 'r') as fp:
            lines = fp.readlines()
    except IOError:
        return []
    # Normalize the final line so joined output is well-formed.
    if lines and not lines[-1].endswith('\n'):
        lines[-1] += '\n'
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
constant[Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list.]
if compare[name[filename] in name[cache]] begin[:]
<ast.Delete object at 0x7da1b002a920>
if <ast.BoolOp object at 0x7da1b002bd90> begin[:]
return[list[[]]]
variable[fullname] assign[=] name[filename]
<ast.Try object at 0x7da1b002bf40>
<ast.Try object at 0x7da18c4ccca0>
if <ast.BoolOp object at 0x7da18c4cd270> begin[:]
<ast.AugAssign object at 0x7da18c4cfee0>
<ast.Tuple object at 0x7da207f00280> assign[=] tuple[[<ast.Attribute object at 0x7da207f01720>, <ast.Attribute object at 0x7da207f03b20>]]
call[name[cache]][name[filename]] assign[=] tuple[[<ast.Name object at 0x7da207f01ba0>, <ast.Name object at 0x7da207f00490>, <ast.Name object at 0x7da207f02260>, <ast.Name object at 0x7da207f018d0>]]
return[name[lines]] | keyword[def] identifier[updatecache] ( identifier[filename] , identifier[module_globals] = keyword[None] ):
literal[string]
keyword[if] identifier[filename] keyword[in] identifier[cache] :
keyword[del] identifier[cache] [ identifier[filename] ]
keyword[if] keyword[not] identifier[filename] keyword[or] ( identifier[filename] . identifier[startswith] ( literal[string] ) keyword[and] identifier[filename] . identifier[endswith] ( literal[string] )):
keyword[return] []
identifier[fullname] = identifier[filename]
keyword[try] :
identifier[stat] = identifier[os] . identifier[stat] ( identifier[fullname] )
keyword[except] identifier[OSError] :
identifier[basename] = identifier[filename]
keyword[if] identifier[module_globals] keyword[and] literal[string] keyword[in] identifier[module_globals] :
identifier[name] = identifier[module_globals] . identifier[get] ( literal[string] )
identifier[loader] = identifier[module_globals] [ literal[string] ]
identifier[get_source] = identifier[getattr] ( identifier[loader] , literal[string] , keyword[None] )
keyword[if] identifier[name] keyword[and] identifier[get_source] :
keyword[try] :
identifier[data] = identifier[get_source] ( identifier[name] )
keyword[except] ( identifier[ImportError] , identifier[IOError] ):
keyword[pass]
keyword[else] :
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] []
identifier[cache] [ identifier[filename] ]=(
identifier[len] ( identifier[data] ), keyword[None] ,
[ identifier[line] + literal[string] keyword[for] identifier[line] keyword[in] identifier[data] . identifier[splitlines] ()], identifier[fullname]
)
keyword[return] identifier[cache] [ identifier[filename] ][ literal[int] ]
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ):
keyword[return] []
keyword[for] identifier[dirname] keyword[in] identifier[sys] . identifier[path] :
keyword[try] :
identifier[fullname] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[basename] )
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
keyword[continue]
keyword[try] :
identifier[stat] = identifier[os] . identifier[stat] ( identifier[fullname] )
keyword[break]
keyword[except] identifier[os] . identifier[error] :
keyword[pass]
keyword[else] :
keyword[return] []
keyword[try] :
keyword[with] identifier[open] ( identifier[fullname] , literal[string] ) keyword[as] identifier[fp] :
identifier[lines] = identifier[fp] . identifier[readlines] ()
keyword[except] identifier[IOError] :
keyword[return] []
keyword[if] identifier[lines] keyword[and] keyword[not] identifier[lines] [- literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[lines] [- literal[int] ]+= literal[string]
identifier[size] , identifier[mtime] = identifier[stat] . identifier[st_size] , identifier[stat] . identifier[st_mtime]
identifier[cache] [ identifier[filename] ]= identifier[size] , identifier[mtime] , identifier[lines] , identifier[fullname]
keyword[return] identifier[lines] | def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename] # depends on [control=['if'], data=['filename', 'cache']]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return [] # depends on [control=['if'], data=[]]
fullname = filename
try:
stat = os.stat(fullname) # depends on [control=['try'], data=[]]
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name) # depends on [control=['try'], data=[]]
except (ImportError, IOError):
pass # depends on [control=['except'], data=[]]
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return [] # depends on [control=['if'], data=[]]
cache[filename] = (len(data), None, [line + '\n' for line in data.splitlines()], fullname)
return cache[filename][2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return [] # depends on [control=['if'], data=[]]
for dirname in sys.path:
# When using imputil, sys.path may contain things other than
# strings; ignore them when it happens.
try:
fullname = os.path.join(dirname, basename) # depends on [control=['try'], data=[]]
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue # depends on [control=['except'], data=[]]
try:
stat = os.stat(fullname)
break # depends on [control=['try'], data=[]]
except os.error:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['dirname']]
else:
return [] # depends on [control=['except'], data=[]]
try:
with open(fullname, 'rU') as fp:
lines = fp.readlines() # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]]
except IOError:
return [] # depends on [control=['except'], data=[]]
if lines and (not lines[-1].endswith('\n')):
lines[-1] += '\n' # depends on [control=['if'], data=[]]
(size, mtime) = (stat.st_size, stat.st_mtime)
cache[filename] = (size, mtime, lines, fullname)
return lines |
def setLoggingFromOptions(options):
    """Configure logging from parsed command-line options.
    """
    # An explicit numeric/string log level is applied first; the boolean
    # convenience flags below can then override it.
    if options.logLevel is not None:
        setLogLevel(options.logLevel)
    # First matching flag wins, mirroring an if/elif chain.
    for flag_attr, level in (("logOff", "OFF"),
                             ("logInfo", "INFO"),
                             ("logDebug", "DEBUG")):
        if getattr(options, flag_attr):
            setLogLevel(level)
            break
    logger.info("Logging set at level: %s" % logLevelString)
    if options.logFile is not None:
        addLoggingFileHandler(options.logFile, options.logRotating)
        logger.info("Logging to file: %s" % options.logFile)
constant[Sets the logging from a dictionary of name/value options.
]
if compare[name[options].logLevel is_not constant[None]] begin[:]
call[name[setLogLevel], parameter[name[options].logLevel]]
if name[options].logOff begin[:]
call[name[setLogLevel], parameter[constant[OFF]]]
call[name[logger].info, parameter[binary_operation[constant[Logging set at level: %s] <ast.Mod object at 0x7da2590d6920> name[logLevelString]]]]
if compare[name[options].logFile is_not constant[None]] begin[:]
call[name[addLoggingFileHandler], parameter[name[options].logFile, name[options].logRotating]]
call[name[logger].info, parameter[binary_operation[constant[Logging to file: %s] <ast.Mod object at 0x7da2590d6920> name[options].logFile]]] | keyword[def] identifier[setLoggingFromOptions] ( identifier[options] ):
literal[string]
keyword[if] identifier[options] . identifier[logLevel] keyword[is] keyword[not] keyword[None] :
identifier[setLogLevel] ( identifier[options] . identifier[logLevel] )
keyword[if] identifier[options] . identifier[logOff] :
identifier[setLogLevel] ( literal[string] )
keyword[elif] identifier[options] . identifier[logInfo] :
identifier[setLogLevel] ( literal[string] )
keyword[elif] identifier[options] . identifier[logDebug] :
identifier[setLogLevel] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] % identifier[logLevelString] )
keyword[if] identifier[options] . identifier[logFile] keyword[is] keyword[not] keyword[None] :
identifier[addLoggingFileHandler] ( identifier[options] . identifier[logFile] , identifier[options] . identifier[logRotating] )
identifier[logger] . identifier[info] ( literal[string] % identifier[options] . identifier[logFile] ) | def setLoggingFromOptions(options):
"""Sets the logging from a dictionary of name/value options.
"""
#We can now set up the logging info.
if options.logLevel is not None:
setLogLevel(options.logLevel) #Use log level, unless flags are set.. # depends on [control=['if'], data=[]]
if options.logOff:
setLogLevel('OFF') # depends on [control=['if'], data=[]]
elif options.logInfo:
setLogLevel('INFO') # depends on [control=['if'], data=[]]
elif options.logDebug:
setLogLevel('DEBUG') # depends on [control=['if'], data=[]]
logger.info('Logging set at level: %s' % logLevelString)
if options.logFile is not None:
addLoggingFileHandler(options.logFile, options.logRotating) # depends on [control=['if'], data=[]]
logger.info('Logging to file: %s' % options.logFile) |
def isSurrounded(self):
    """ Returns if the object is separating and applying to
    a malefic considering bad aspects.
    """
    # Bad aspects: conjunction (0), square (90), opposition (180).
    return self.__sepApp([const.MARS, const.SATURN],
                         aspList=[0, 90, 180])
constant[ Returns if the object is separating and applying to
a malefic considering bad aspects.
]
variable[malefics] assign[=] list[[<ast.Attribute object at 0x7da1b11a64d0>, <ast.Attribute object at 0x7da1b11a77f0>]]
return[call[name[self].__sepApp, parameter[name[malefics]]]] | keyword[def] identifier[isSurrounded] ( identifier[self] ):
literal[string]
identifier[malefics] =[ identifier[const] . identifier[MARS] , identifier[const] . identifier[SATURN] ]
keyword[return] identifier[self] . identifier[__sepApp] ( identifier[malefics] , identifier[aspList] =[ literal[int] , literal[int] , literal[int] ]) | def isSurrounded(self):
""" Returns if the object is separating and applying to
a malefic considering bad aspects.
"""
malefics = [const.MARS, const.SATURN]
return self.__sepApp(malefics, aspList=[0, 90, 180]) |
def getLockByID(self, lockid):
    """Convert a Lock identifier into an actual Lock instance.
    @param lockid: a locks.MasterLock or locks.WorkerLock instance
    @return: a locks.RealMasterLock or locks.RealWorkerLock instance
    """
    assert isinstance(lockid, (locks.MasterLock, locks.WorkerLock))
    try:
        return self.locks[lockid]
    except KeyError:
        # Lazily create and memoize the real lock. If the master.cfg file
        # has changed maxCount= on the lock, the next time a build is
        # started they'll get a new RealLock instance. Note that this
        # requires that MasterLock and WorkerLock (marker) instances be
        # hashable and that they should compare properly.
        real_lock = lockid.lockClass(lockid)
        self.locks[lockid] = real_lock
        return real_lock
constant[Convert a Lock identifier into an actual Lock instance.
@param lockid: a locks.MasterLock or locks.WorkerLock instance
@return: a locks.RealMasterLock or locks.RealWorkerLock instance
]
assert[call[name[isinstance], parameter[name[lockid], tuple[[<ast.Attribute object at 0x7da18f8110c0>, <ast.Attribute object at 0x7da18f8137f0>]]]]]
if compare[name[lockid] <ast.NotIn object at 0x7da2590d7190> name[self].locks] begin[:]
call[name[self].locks][name[lockid]] assign[=] call[name[lockid].lockClass, parameter[name[lockid]]]
return[call[name[self].locks][name[lockid]]] | keyword[def] identifier[getLockByID] ( identifier[self] , identifier[lockid] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[lockid] ,( identifier[locks] . identifier[MasterLock] , identifier[locks] . identifier[WorkerLock] ))
keyword[if] identifier[lockid] keyword[not] keyword[in] identifier[self] . identifier[locks] :
identifier[self] . identifier[locks] [ identifier[lockid] ]= identifier[lockid] . identifier[lockClass] ( identifier[lockid] )
keyword[return] identifier[self] . identifier[locks] [ identifier[lockid] ] | def getLockByID(self, lockid):
"""Convert a Lock identifier into an actual Lock instance.
@param lockid: a locks.MasterLock or locks.WorkerLock instance
@return: a locks.RealMasterLock or locks.RealWorkerLock instance
"""
assert isinstance(lockid, (locks.MasterLock, locks.WorkerLock))
if lockid not in self.locks:
self.locks[lockid] = lockid.lockClass(lockid) # depends on [control=['if'], data=['lockid']]
# if the master.cfg file has changed maxCount= on the lock, the next
# time a build is started, they'll get a new RealLock instance. Note
# that this requires that MasterLock and WorkerLock (marker) instances
# be hashable and that they should compare properly.
return self.locks[lockid] |
def FromResponse(cls, response):
    """Create a DeviceFlowInfo from a server response.

    The response should be a dict containing entries as described here:
    http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
    """
    # device_code and user_code are required; a KeyError propagates if
    # either is missing, just as before.
    device_code = response['device_code']
    user_code = response['user_code']
    # The response may list the verification address as either
    # verification_url or verification_uri, so we check for both.
    verification_url = response.get(
        'verification_url', response.get('verification_uri'))
    if verification_url is None:
        raise OAuth2DeviceCodeError(
            'No verification_url provided in server response')
    # expires_in is optional; when present, convert it to an absolute
    # expiry timestamp.
    user_code_expiry = None
    if 'expires_in' in response:
        user_code_expiry = (
            _UTCNOW() +
            datetime.timedelta(seconds=int(response['expires_in'])))
    return cls(
        device_code=device_code,
        user_code=user_code,
        verification_url=verification_url,
        interval=response.get('interval'),
        user_code_expiry=user_code_expiry,
    )
constant[Create a DeviceFlowInfo from a server response.
The response should be a dict containing entries as described here:
http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b01fd930>, <ast.Constant object at 0x7da1b01bac80>], [<ast.Subscript object at 0x7da1b01bb9a0>, <ast.Subscript object at 0x7da1b01b9600>]]
variable[verification_url] assign[=] call[name[response].get, parameter[constant[verification_url], call[name[response].get, parameter[constant[verification_uri]]]]]
if compare[name[verification_url] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b01ba800>
call[name[kwargs]][constant[verification_url]] assign[=] name[verification_url]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b01bb280>, <ast.Constant object at 0x7da1b01bac50>], [<ast.Call object at 0x7da1b01b9960>, <ast.Constant object at 0x7da1b01b9ab0>]]]]
if compare[constant[expires_in] in name[response]] begin[:]
call[name[kwargs]][constant[user_code_expiry]] assign[=] binary_operation[call[name[_UTCNOW], parameter[]] + call[name[datetime].timedelta, parameter[]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[FromResponse] ( identifier[cls] , identifier[response] ):
literal[string]
identifier[kwargs] ={
literal[string] : identifier[response] [ literal[string] ],
literal[string] : identifier[response] [ literal[string] ],
}
identifier[verification_url] = identifier[response] . identifier[get] (
literal[string] , identifier[response] . identifier[get] ( literal[string] ))
keyword[if] identifier[verification_url] keyword[is] keyword[None] :
keyword[raise] identifier[OAuth2DeviceCodeError] (
literal[string] )
identifier[kwargs] [ literal[string] ]= identifier[verification_url]
identifier[kwargs] . identifier[update] ({
literal[string] : identifier[response] . identifier[get] ( literal[string] ),
literal[string] : keyword[None] ,
})
keyword[if] literal[string] keyword[in] identifier[response] :
identifier[kwargs] [ literal[string] ]=(
identifier[_UTCNOW] ()+
identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[int] ( identifier[response] [ literal[string] ])))
keyword[return] identifier[cls] (** identifier[kwargs] ) | def FromResponse(cls, response):
"""Create a DeviceFlowInfo from a server response.
The response should be a dict containing entries as described here:
http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
"""
# device_code, user_code, and verification_url are required.
kwargs = {'device_code': response['device_code'], 'user_code': response['user_code']}
# The response may list the verification address as either
# verification_url or verification_uri, so we check for both.
verification_url = response.get('verification_url', response.get('verification_uri'))
if verification_url is None:
raise OAuth2DeviceCodeError('No verification_url provided in server response') # depends on [control=['if'], data=[]]
kwargs['verification_url'] = verification_url
# expires_in and interval are optional.
kwargs.update({'interval': response.get('interval'), 'user_code_expiry': None})
if 'expires_in' in response:
kwargs['user_code_expiry'] = _UTCNOW() + datetime.timedelta(seconds=int(response['expires_in'])) # depends on [control=['if'], data=['response']]
return cls(**kwargs) |
def add_payload(self, payload):
    """Add new the stanza payload. Fails if there is already some
    payload element attached (<iq/> stanza can contain only one payload
    element)

    Marks the stanza dirty.

    :Parameters:
        - `payload`: XML element or stanza payload object to add
    :Types:
        - `payload`: :etree:`ElementTree.Element` or
          `interfaces.StanzaPayload`
    """
    # Make sure any wire-format payload has been decoded first.
    if self._payload is None:
        self.decode_payload()
    # An <iq/> stanza may carry at most one payload element.
    if self._payload:
        raise ValueError("Cannot add more payload to Iq stanza")
    return Stanza.add_payload(self, payload)
constant[Add new the stanza payload. Fails if there is already some
payload element attached (<iq/> stanza can contain only one payload
element)
Marks the stanza dirty.
:Parameters:
- `payload`: XML element or stanza payload object to add
:Types:
- `payload`: :etree:`ElementTree.Element` or
`interfaces.StanzaPayload`
]
if compare[name[self]._payload is constant[None]] begin[:]
call[name[self].decode_payload, parameter[]]
if compare[call[name[len], parameter[name[self]._payload]] greater_or_equal[>=] constant[1]] begin[:]
<ast.Raise object at 0x7da20cabc880>
return[call[name[Stanza].add_payload, parameter[name[self], name[payload]]]] | keyword[def] identifier[add_payload] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[if] identifier[self] . identifier[_payload] keyword[is] keyword[None] :
identifier[self] . identifier[decode_payload] ()
keyword[if] identifier[len] ( identifier[self] . identifier[_payload] )>= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[Stanza] . identifier[add_payload] ( identifier[self] , identifier[payload] ) | def add_payload(self, payload):
"""Add new the stanza payload. Fails if there is already some
payload element attached (<iq/> stanza can contain only one payload
element)
Marks the stanza dirty.
:Parameters:
- `payload`: XML element or stanza payload object to add
:Types:
- `payload`: :etree:`ElementTree.Element` or
`interfaces.StanzaPayload`
"""
if self._payload is None:
self.decode_payload() # depends on [control=['if'], data=[]]
if len(self._payload) >= 1:
raise ValueError('Cannot add more payload to Iq stanza') # depends on [control=['if'], data=[]]
return Stanza.add_payload(self, payload) |
def extract_pp_helices(in_pdb):
    """Uses DSSP to find polyproline helices in a pdb file.

    Runs the external ``dssp`` binary (path taken from
    ``global_settings['dssp']['path']``) on the PDB file, collects residues
    whose backbone dihedrals fall inside the polyproline window, and then
    gathers the CA atom records for each run of such residues.

    Returns a list of helices; each helix is a list of tuples built from
    fields 8-10 of the parsed CA atom lines (presumably the x/y/z
    coordinates -- depends on the layout produced by ``split_pdb_lines``;
    TODO confirm).

    Parameters
    ----------
    in_pdb : string
        Path to a PDB file.
    """
    # Polyproline region of dihedral space: phi = -75 +/- 29 degrees,
    # psi = 145 +/- 29 degrees.
    t_phi = -75.0
    t_phi_d = 29.0
    t_psi = 145.0
    t_psi_d = 29.0
    pph_dssp = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb])
    dssp_residues = []
    go = False
    # Parse the fixed-width DSSP output. The residue table starts on the
    # line after the header row whose third character is '#'.
    # NOTE(review): on Python 3 check_output returns bytes, so the string
    # comparisons/slicing below assume Python 2 str output -- confirm.
    for line in pph_dssp.splitlines():
        if go:
            # Fixed DSSP columns: residue number, chain id, secondary
            # structure code, then the phi/psi dihedral angles.
            res_num = int(line[:5].strip())
            chain = line[11:13].strip()
            ss_type = line[16]
            phi = float(line[103:109].strip())
            psi = float(line[109:116].strip())
            dssp_residues.append((res_num, ss_type, chain, phi, psi))
        else:
            if line[2] == '#':
                go = True
            pass
    # Group consecutive residues that DSSP left unassigned (' ') and whose
    # phi/psi both lie inside the polyproline window.
    # NOTE(review): a matching run that extends to the very last residue is
    # never flushed into pp_chains (ch_on stays True after the loop) --
    # possible bug, confirm whether trailing helices should be kept.
    pp_chains = []
    chain = []
    ch_on = False
    for item in dssp_residues:
        if (item[1] == ' ') and (
                t_phi - t_phi_d < item[3] < t_phi + t_phi_d) and (
                t_psi - t_psi_d < item[4] < t_psi + t_psi_d):
            chain.append(item)
            ch_on = True
        else:
            if ch_on:
                pp_chains.append(chain)
                chain = []
                ch_on = False
    # A single matching residue is not a helix; keep runs of length >= 2.
    pp_chains = [x for x in pp_chains if len(x) > 1]
    pp_helices = []
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    # Collect the CA atom of every residue belonging to each run. Indices
    # into ``atom`` follow split_pdb_lines: [2]=atom name, [5]=chain id,
    # [6]=residue number -- inferred from the comparisons below.
    for pp_helix in pp_chains:
        chain_id = pp_helix[0][2]
        res_range = [x[0] for x in pp_helix]
        helix = []
        for atom in pdb_atoms:
            if (atom[2] == "CA") and (
                    atom[5] == chain_id) and (
                    atom[6] in res_range):
                helix.append(tuple(atom[8:11]))
        pp_helices.append(helix)
return pp_helices | def function[extract_pp_helices, parameter[in_pdb]]:
constant[Uses DSSP to find polyproline helices in a pdb file.
Returns a length 3 list with a helix id, the chain id and a dict
containing the coordinates of each residues CA.
Parameters
----------
in_pdb : string
Path to a PDB file.
]
variable[t_phi] assign[=] <ast.UnaryOp object at 0x7da1b2679120>
variable[t_phi_d] assign[=] constant[29.0]
variable[t_psi] assign[=] constant[145.0]
variable[t_psi_d] assign[=] constant[29.0]
variable[pph_dssp] assign[=] call[name[subprocess].check_output, parameter[list[[<ast.Subscript object at 0x7da1b267a590>, <ast.Name object at 0x7da1b267a6e0>]]]]
variable[dssp_residues] assign[=] list[[]]
variable[go] assign[=] constant[False]
for taget[name[line]] in starred[call[name[pph_dssp].splitlines, parameter[]]] begin[:]
if name[go] begin[:]
variable[res_num] assign[=] call[name[int], parameter[call[call[name[line]][<ast.Slice object at 0x7da1b2678bb0>].strip, parameter[]]]]
variable[chain] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b267bc70>].strip, parameter[]]
variable[ss_type] assign[=] call[name[line]][constant[16]]
variable[phi] assign[=] call[name[float], parameter[call[call[name[line]][<ast.Slice object at 0x7da1b2678490>].strip, parameter[]]]]
variable[psi] assign[=] call[name[float], parameter[call[call[name[line]][<ast.Slice object at 0x7da1b26793c0>].strip, parameter[]]]]
call[name[dssp_residues].append, parameter[tuple[[<ast.Name object at 0x7da1b267a800>, <ast.Name object at 0x7da1b267a050>, <ast.Name object at 0x7da1b26786a0>, <ast.Name object at 0x7da1b26788e0>, <ast.Name object at 0x7da1b267a1a0>]]]]
variable[pp_chains] assign[=] list[[]]
variable[chain] assign[=] list[[]]
variable[ch_on] assign[=] constant[False]
for taget[name[item]] in starred[name[dssp_residues]] begin[:]
if <ast.BoolOp object at 0x7da1b267bc40> begin[:]
call[name[chain].append, parameter[name[item]]]
variable[ch_on] assign[=] constant[True]
variable[pp_chains] assign[=] <ast.ListComp object at 0x7da1b262a890>
variable[pp_helices] assign[=] list[[]]
with call[name[open], parameter[name[in_pdb], constant[r]]] begin[:]
variable[pdb_atoms] assign[=] call[name[split_pdb_lines], parameter[call[name[pdb].read, parameter[]]]]
for taget[name[pp_helix]] in starred[name[pp_chains]] begin[:]
variable[chain_id] assign[=] call[call[name[pp_helix]][constant[0]]][constant[2]]
variable[res_range] assign[=] <ast.ListComp object at 0x7da1b2625780>
variable[helix] assign[=] list[[]]
for taget[name[atom]] in starred[name[pdb_atoms]] begin[:]
if <ast.BoolOp object at 0x7da1b2624d90> begin[:]
call[name[helix].append, parameter[call[name[tuple], parameter[call[name[atom]][<ast.Slice object at 0x7da1b2624a90>]]]]]
call[name[pp_helices].append, parameter[name[helix]]]
return[name[pp_helices]] | keyword[def] identifier[extract_pp_helices] ( identifier[in_pdb] ):
literal[string]
identifier[t_phi] =- literal[int]
identifier[t_phi_d] = literal[int]
identifier[t_psi] = literal[int]
identifier[t_psi_d] = literal[int]
identifier[pph_dssp] = identifier[subprocess] . identifier[check_output] (
[ identifier[global_settings] [ literal[string] ][ literal[string] ], identifier[in_pdb] ])
identifier[dssp_residues] =[]
identifier[go] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[pph_dssp] . identifier[splitlines] ():
keyword[if] identifier[go] :
identifier[res_num] = identifier[int] ( identifier[line] [: literal[int] ]. identifier[strip] ())
identifier[chain] = identifier[line] [ literal[int] : literal[int] ]. identifier[strip] ()
identifier[ss_type] = identifier[line] [ literal[int] ]
identifier[phi] = identifier[float] ( identifier[line] [ literal[int] : literal[int] ]. identifier[strip] ())
identifier[psi] = identifier[float] ( identifier[line] [ literal[int] : literal[int] ]. identifier[strip] ())
identifier[dssp_residues] . identifier[append] (( identifier[res_num] , identifier[ss_type] , identifier[chain] , identifier[phi] , identifier[psi] ))
keyword[else] :
keyword[if] identifier[line] [ literal[int] ]== literal[string] :
identifier[go] = keyword[True]
keyword[pass]
identifier[pp_chains] =[]
identifier[chain] =[]
identifier[ch_on] = keyword[False]
keyword[for] identifier[item] keyword[in] identifier[dssp_residues] :
keyword[if] ( identifier[item] [ literal[int] ]== literal[string] ) keyword[and] (
identifier[t_phi] - identifier[t_phi_d] < identifier[item] [ literal[int] ]< identifier[t_phi] + identifier[t_phi_d] ) keyword[and] (
identifier[t_psi] - identifier[t_psi_d] < identifier[item] [ literal[int] ]< identifier[t_psi] + identifier[t_psi_d] ):
identifier[chain] . identifier[append] ( identifier[item] )
identifier[ch_on] = keyword[True]
keyword[else] :
keyword[if] identifier[ch_on] :
identifier[pp_chains] . identifier[append] ( identifier[chain] )
identifier[chain] =[]
identifier[ch_on] = keyword[False]
identifier[pp_chains] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[pp_chains] keyword[if] identifier[len] ( identifier[x] )> literal[int] ]
identifier[pp_helices] =[]
keyword[with] identifier[open] ( identifier[in_pdb] , literal[string] ) keyword[as] identifier[pdb] :
identifier[pdb_atoms] = identifier[split_pdb_lines] ( identifier[pdb] . identifier[read] ())
keyword[for] identifier[pp_helix] keyword[in] identifier[pp_chains] :
identifier[chain_id] = identifier[pp_helix] [ literal[int] ][ literal[int] ]
identifier[res_range] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[pp_helix] ]
identifier[helix] =[]
keyword[for] identifier[atom] keyword[in] identifier[pdb_atoms] :
keyword[if] ( identifier[atom] [ literal[int] ]== literal[string] ) keyword[and] (
identifier[atom] [ literal[int] ]== identifier[chain_id] ) keyword[and] (
identifier[atom] [ literal[int] ] keyword[in] identifier[res_range] ):
identifier[helix] . identifier[append] ( identifier[tuple] ( identifier[atom] [ literal[int] : literal[int] ]))
identifier[pp_helices] . identifier[append] ( identifier[helix] )
keyword[return] identifier[pp_helices] | def extract_pp_helices(in_pdb):
"""Uses DSSP to find polyproline helices in a pdb file.
Returns a length 3 list with a helix id, the chain id and a dict
containing the coordinates of each residues CA.
Parameters
----------
in_pdb : string
Path to a PDB file.
"""
t_phi = -75.0
t_phi_d = 29.0
t_psi = 145.0
t_psi_d = 29.0
pph_dssp = subprocess.check_output([global_settings['dssp']['path'], in_pdb])
dssp_residues = []
go = False
for line in pph_dssp.splitlines():
if go:
res_num = int(line[:5].strip())
chain = line[11:13].strip()
ss_type = line[16]
phi = float(line[103:109].strip())
psi = float(line[109:116].strip())
dssp_residues.append((res_num, ss_type, chain, phi, psi)) # depends on [control=['if'], data=[]]
else:
if line[2] == '#':
go = True # depends on [control=['if'], data=[]]
pass # depends on [control=['for'], data=['line']]
pp_chains = []
chain = []
ch_on = False
for item in dssp_residues:
if item[1] == ' ' and t_phi - t_phi_d < item[3] < t_phi + t_phi_d and (t_psi - t_psi_d < item[4] < t_psi + t_psi_d):
chain.append(item)
ch_on = True # depends on [control=['if'], data=[]]
elif ch_on:
pp_chains.append(chain)
chain = []
ch_on = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
pp_chains = [x for x in pp_chains if len(x) > 1]
pp_helices = []
with open(in_pdb, 'r') as pdb:
pdb_atoms = split_pdb_lines(pdb.read()) # depends on [control=['with'], data=['pdb']]
for pp_helix in pp_chains:
chain_id = pp_helix[0][2]
res_range = [x[0] for x in pp_helix]
helix = []
for atom in pdb_atoms:
if atom[2] == 'CA' and atom[5] == chain_id and (atom[6] in res_range):
helix.append(tuple(atom[8:11])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['atom']]
pp_helices.append(helix) # depends on [control=['for'], data=['pp_helix']]
return pp_helices |
def create_reply(reply, message=None, render=False):
    """
    Build a reply object from ``reply`` in a single call.

    Falsy input yields an ``EmptyReply``; an existing ``BaseReply`` is
    reused (re-targeted at ``message`` when one is given); a string becomes
    a ``TextReply``; a tuple/list becomes an ``ArticlesReply`` (at most 10
    articles). Any other truthy value yields ``None``. When ``render`` is
    true the rendered form is returned instead of the reply object.
    """
    if not reply:
        r = EmptyReply()
    elif isinstance(reply, BaseReply):
        # Reuse the reply as-is, swapping source/target to answer `message`.
        r = reply
        if message:
            r.source = message.target
            r.target = message.source
    elif isinstance(reply, six.string_types):
        r = TextReply(message=message, content=reply)
    elif isinstance(reply, (tuple, list)):
        if len(reply) > 10:
            raise AttributeError(
                "Can't add more than 10 articles in an ArticlesReply")
        r = ArticlesReply(message=message, articles=reply)
    else:
        r = None
    if render and r:
        return r.render()
    return r
constant[
Create a reply quickly
]
variable[r] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da204960640> begin[:]
variable[r] assign[=] call[name[EmptyReply], parameter[]]
if <ast.BoolOp object at 0x7da204961930> begin[:]
return[call[name[r].render, parameter[]]]
return[name[r]] | keyword[def] identifier[create_reply] ( identifier[reply] , identifier[message] = keyword[None] , identifier[render] = keyword[False] ):
literal[string]
identifier[r] = keyword[None]
keyword[if] keyword[not] identifier[reply] :
identifier[r] = identifier[EmptyReply] ()
keyword[elif] identifier[isinstance] ( identifier[reply] , identifier[BaseReply] ):
identifier[r] = identifier[reply]
keyword[if] identifier[message] :
identifier[r] . identifier[source] = identifier[message] . identifier[target]
identifier[r] . identifier[target] = identifier[message] . identifier[source]
keyword[elif] identifier[isinstance] ( identifier[reply] , identifier[six] . identifier[string_types] ):
identifier[r] = identifier[TextReply] (
identifier[message] = identifier[message] ,
identifier[content] = identifier[reply]
)
keyword[elif] identifier[isinstance] ( identifier[reply] ,( identifier[tuple] , identifier[list] )):
keyword[if] identifier[len] ( identifier[reply] )> literal[int] :
keyword[raise] identifier[AttributeError] ( literal[string]
literal[string] )
identifier[r] = identifier[ArticlesReply] (
identifier[message] = identifier[message] ,
identifier[articles] = identifier[reply]
)
keyword[if] identifier[r] keyword[and] identifier[render] :
keyword[return] identifier[r] . identifier[render] ()
keyword[return] identifier[r] | def create_reply(reply, message=None, render=False):
"""
Create a reply quickly
"""
r = None
if not reply:
r = EmptyReply() # depends on [control=['if'], data=[]]
elif isinstance(reply, BaseReply):
r = reply
if message:
r.source = message.target
r.target = message.source # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(reply, six.string_types):
r = TextReply(message=message, content=reply) # depends on [control=['if'], data=[]]
elif isinstance(reply, (tuple, list)):
if len(reply) > 10:
raise AttributeError("Can't add more than 10 articles in an ArticlesReply") # depends on [control=['if'], data=[]]
r = ArticlesReply(message=message, articles=reply) # depends on [control=['if'], data=[]]
if r and render:
return r.render() # depends on [control=['if'], data=[]]
return r |
def SetValue(self, identifier, value):
    """Stores a value under a case-insensitive identifier.

    Args:
      identifier (str): case insensitive unique identifier for the value.
      value (object): value to store.

    Raises:
      TypeError: if the identifier is not a string type.
    """
    if isinstance(identifier, py2to3.STRING_TYPES):
        # Identifiers are case insensitive: store them lowercased.
        self._values[identifier.lower()] = value
    else:
        raise TypeError('Identifier not a string type.')
constant[Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
]
if <ast.UnaryOp object at 0x7da20cabc310> begin[:]
<ast.Raise object at 0x7da20cabe590>
variable[identifier] assign[=] call[name[identifier].lower, parameter[]]
call[name[self]._values][name[identifier]] assign[=] name[value] | keyword[def] identifier[SetValue] ( identifier[self] , identifier[identifier] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[identifier] , identifier[py2to3] . identifier[STRING_TYPES] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[identifier] = identifier[identifier] . identifier[lower] ()
identifier[self] . identifier[_values] [ identifier[identifier] ]= identifier[value] | def SetValue(self, identifier, value):
"""Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, py2to3.STRING_TYPES):
raise TypeError('Identifier not a string type.') # depends on [control=['if'], data=[]]
identifier = identifier.lower()
self._values[identifier] = value |
def _upgrade_broker(broker):
    """
    Extract the poller state from Broker and replace it with the industrial
    strength poller for this OS. Must run on the Broker thread.
    """
    # This function is deadly! The act of calling start_receive() generates log
    # messages which must be silenced as the upgrade progresses, otherwise the
    # poller state will change as it is copied, resulting in write fds that are
    # lost. (Due to LogHandler->Router->Stream->Broker->Poller, where Stream
    # only calls start_transmit() when transitioning from empty to non-empty
    # buffer. If the start_transmit() is lost, writes from the child hang
    # permanently).
    root = logging.getLogger()
    old_level = root.level
    root.setLevel(logging.CRITICAL)
    try:
        old = broker.poller
        new = PREFERRED_POLLER()
        # Copy every registered reader/writer into the replacement poller.
        for fd, data in old.readers:
            new.start_receive(fd, data)
        for fd, data in old.writers:
            new.start_transmit(fd, data)
        old.close()
        broker.poller = new
    finally:
        # Restore logging even if the poller swap raises; previously an
        # exception here left the root logger stuck at CRITICAL, silencing
        # all subsequent log output.
        root.setLevel(old_level)
    LOG.debug('replaced %r with %r (new: %d readers, %d writers; '
              'old: %d readers, %d writers)', old, new,
              len(new.readers), len(new.writers),
              len(old.readers), len(old.writers))
constant[
Extract the poller state from Broker and replace it with the industrial
strength poller for this OS. Must run on the Broker thread.
]
variable[root] assign[=] call[name[logging].getLogger, parameter[]]
variable[old_level] assign[=] name[root].level
call[name[root].setLevel, parameter[name[logging].CRITICAL]]
variable[old] assign[=] name[broker].poller
variable[new] assign[=] call[name[PREFERRED_POLLER], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d05030>, <ast.Name object at 0x7da1b1d04370>]]] in starred[name[old].readers] begin[:]
call[name[new].start_receive, parameter[name[fd], name[data]]]
for taget[tuple[[<ast.Name object at 0x7da1b1d06980>, <ast.Name object at 0x7da1b1d061a0>]]] in starred[name[old].writers] begin[:]
call[name[new].start_transmit, parameter[name[fd], name[data]]]
call[name[old].close, parameter[]]
name[broker].poller assign[=] name[new]
call[name[root].setLevel, parameter[name[old_level]]]
call[name[LOG].debug, parameter[constant[replaced %r with %r (new: %d readers, %d writers; old: %d readers, %d writers)], name[old], name[new], call[name[len], parameter[name[new].readers]], call[name[len], parameter[name[new].writers]], call[name[len], parameter[name[old].readers]], call[name[len], parameter[name[old].writers]]]] | keyword[def] identifier[_upgrade_broker] ( identifier[broker] ):
literal[string]
identifier[root] = identifier[logging] . identifier[getLogger] ()
identifier[old_level] = identifier[root] . identifier[level]
identifier[root] . identifier[setLevel] ( identifier[logging] . identifier[CRITICAL] )
identifier[old] = identifier[broker] . identifier[poller]
identifier[new] = identifier[PREFERRED_POLLER] ()
keyword[for] identifier[fd] , identifier[data] keyword[in] identifier[old] . identifier[readers] :
identifier[new] . identifier[start_receive] ( identifier[fd] , identifier[data] )
keyword[for] identifier[fd] , identifier[data] keyword[in] identifier[old] . identifier[writers] :
identifier[new] . identifier[start_transmit] ( identifier[fd] , identifier[data] )
identifier[old] . identifier[close] ()
identifier[broker] . identifier[poller] = identifier[new]
identifier[root] . identifier[setLevel] ( identifier[old_level] )
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] , identifier[old] , identifier[new] ,
identifier[len] ( identifier[new] . identifier[readers] ), identifier[len] ( identifier[new] . identifier[writers] ),
identifier[len] ( identifier[old] . identifier[readers] ), identifier[len] ( identifier[old] . identifier[writers] )) | def _upgrade_broker(broker):
"""
Extract the poller state from Broker and replace it with the industrial
strength poller for this OS. Must run on the Broker thread.
"""
# This function is deadly! The act of calling start_receive() generates log
# messages which must be silenced as the upgrade progresses, otherwise the
# poller state will change as it is copied, resulting in write fds that are
# lost. (Due to LogHandler->Router->Stream->Broker->Poller, where Stream
# only calls start_transmit() when transitioning from empty to non-empty
# buffer. If the start_transmit() is lost, writes from the child hang
# permanently).
root = logging.getLogger()
old_level = root.level
root.setLevel(logging.CRITICAL)
old = broker.poller
new = PREFERRED_POLLER()
for (fd, data) in old.readers:
new.start_receive(fd, data) # depends on [control=['for'], data=[]]
for (fd, data) in old.writers:
new.start_transmit(fd, data) # depends on [control=['for'], data=[]]
old.close()
broker.poller = new
root.setLevel(old_level)
LOG.debug('replaced %r with %r (new: %d readers, %d writers; old: %d readers, %d writers)', old, new, len(new.readers), len(new.writers), len(old.readers), len(old.writers)) |
def create(self, data, fields=None, models=None):
    '''
    Populate model attributes from a raw ``data`` dict.

    Every name in ``fields`` is first initialised to None so the instance
    always exposes the full attribute set, even for partial payloads.
    Values whose key appears in ``models`` are recursively wrapped in the
    mapped sub-model class; a dict value containing lists produces a list
    of sub-models instead.

    :param data: source mapping; falsy input returns None
    :param fields: attribute names to initialise; defaults to ``self.fields``
    :param models: mapping of key -> sub-model class; defaults to
        ``self.models`` when the instance defines it
    :returns: ``self`` on success, ``None`` when ``data`` is falsy
    '''
    # Defaults are None rather than the original mutable [] / {} literals;
    # a mutable literal default is shared between calls (classic Python
    # pitfall). Behavior is unchanged: falsy fields/models fall back to
    # the instance attributes exactly as before.
    if not fields:
        fields = self.fields
    if not models and hasattr(self, 'models'):
        models = self.models
    for field in fields:
        setattr(self, field, None)
    if not data:
        return None
    for k, v in data.iteritems():
        if type(v) in (str, unicode):
            v = v.strip()
        if models and k in models:
            if type(v) == dict:
                # Lists nested inside the dict become lists of sub-models.
                lists = []
                for k2, v2 in v.iteritems():
                    if type(v2) == list:
                        for d in v2:
                            model = models[k]()
                            lists.append(model.create(d))
                if not lists:
                    # No nested lists: wrap the whole dict in one sub-model.
                    model = models[k]()
                    v = model.create(v)
                else:
                    v = lists
            else:
                model = models[k]()
                v = model.create(v)
        setattr(self, k, v)
    return self
constant[
Create model attributes
]
if <ast.UnaryOp object at 0x7da1b253ee30> begin[:]
variable[fields] assign[=] name[self].fields
if <ast.BoolOp object at 0x7da1b253d390> begin[:]
variable[models] assign[=] name[self].models
for taget[name[field]] in starred[name[fields]] begin[:]
call[name[setattr], parameter[name[self], name[field], constant[None]]]
if <ast.UnaryOp object at 0x7da1b253c850> begin[:]
return[constant[None]]
for taget[tuple[[<ast.Name object at 0x7da1b253dc30>, <ast.Name object at 0x7da1b253e950>]]] in starred[call[name[data].iteritems, parameter[]]] begin[:]
if compare[call[name[type], parameter[name[v]]] in tuple[[<ast.Name object at 0x7da1b253c730>, <ast.Name object at 0x7da1b253d960>]]] begin[:]
variable[v] assign[=] call[name[v].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b253c610> begin[:]
if compare[call[name[type], parameter[name[v]]] equal[==] name[dict]] begin[:]
variable[lists] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b253f130>, <ast.Name object at 0x7da1b253e7a0>]]] in starred[call[name[v].iteritems, parameter[]]] begin[:]
if compare[call[name[type], parameter[name[v2]]] equal[==] name[list]] begin[:]
for taget[name[d]] in starred[name[v2]] begin[:]
variable[model] assign[=] call[call[name[models]][name[k]], parameter[]]
call[name[lists].append, parameter[call[name[model].create, parameter[name[d]]]]]
if <ast.UnaryOp object at 0x7da1b253edd0> begin[:]
variable[model] assign[=] call[call[name[models]][name[k]], parameter[]]
variable[v] assign[=] call[name[model].create, parameter[name[v]]]
call[name[setattr], parameter[name[self], name[k], name[v]]]
return[name[self]] | keyword[def] identifier[create] ( identifier[self] , identifier[data] , identifier[fields] =[], identifier[models] ={}):
literal[string]
keyword[if] keyword[not] identifier[fields] : identifier[fields] = identifier[self] . identifier[fields]
keyword[if] keyword[not] identifier[models] keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[models] = identifier[self] . identifier[models]
keyword[for] identifier[field] keyword[in] identifier[fields] :
identifier[setattr] ( identifier[self] , identifier[field] , keyword[None] )
keyword[if] keyword[not] identifier[data] : keyword[return] keyword[None]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[iteritems] ():
keyword[if] identifier[type] ( identifier[v] ) keyword[in] ( identifier[str] , identifier[unicode] ):
identifier[v] = identifier[v] . identifier[strip] ()
keyword[if] identifier[models] keyword[and] identifier[k] keyword[in] identifier[models] :
keyword[if] identifier[type] ( identifier[v] )== identifier[dict] :
identifier[lists] =[]
keyword[for] identifier[k2] , identifier[v2] keyword[in] identifier[v] . identifier[iteritems] ():
keyword[if] identifier[type] ( identifier[v2] )== identifier[list] :
keyword[for] identifier[d] keyword[in] identifier[v2] :
identifier[model] = identifier[models] [ identifier[k] ]()
identifier[lists] . identifier[append] ( identifier[model] . identifier[create] ( identifier[d] ))
keyword[if] keyword[not] identifier[lists] :
identifier[model] = identifier[models] [ identifier[k] ]()
identifier[v] = identifier[model] . identifier[create] ( identifier[v] )
keyword[else] :
identifier[v] = identifier[lists]
keyword[else] :
identifier[model] = identifier[models] [ identifier[k] ]()
identifier[v] = identifier[model] . identifier[create] ( identifier[v] )
identifier[setattr] ( identifier[self] , identifier[k] , identifier[v] )
keyword[return] identifier[self] | def create(self, data, fields=[], models={}):
"""
Create model attributes
"""
if not fields:
fields = self.fields # depends on [control=['if'], data=[]]
if not models and hasattr(self, 'models'):
models = self.models # depends on [control=['if'], data=[]]
for field in fields:
setattr(self, field, None) # depends on [control=['for'], data=['field']]
if not data:
return None # depends on [control=['if'], data=[]]
for (k, v) in data.iteritems():
if type(v) in (str, unicode):
v = v.strip() # depends on [control=['if'], data=[]]
if models and k in models:
if type(v) == dict:
lists = []
for (k2, v2) in v.iteritems():
if type(v2) == list:
for d in v2:
model = models[k]()
lists.append(model.create(d)) # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not lists:
model = models[k]()
v = model.create(v) # depends on [control=['if'], data=[]]
else:
v = lists # depends on [control=['if'], data=[]]
else:
model = models[k]()
v = model.create(v) # depends on [control=['if'], data=[]]
setattr(self, k, v) # depends on [control=['for'], data=[]]
return self |
def is_ip_address(value, **kwargs):
    """Indicate whether ``value`` is a valid IP address (version 4 or version 6).

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
      keyword parameters passed to the underlying validator
    """
    try:
        validators.ip_address(value, **kwargs)
    except SyntaxError:
        # Configuration problems must surface to the caller unchanged.
        raise
    except Exception:
        # Any other failure simply means the value is not a valid address.
        return False
    return True
constant[Indicate whether ``value`` is a valid IP address (version 4 or version 6).
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
]
<ast.Try object at 0x7da1b06f1930>
return[constant[True]] | keyword[def] identifier[is_ip_address] ( identifier[value] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[value] = identifier[validators] . identifier[ip_address] ( identifier[value] ,** identifier[kwargs] )
keyword[except] identifier[SyntaxError] keyword[as] identifier[error] :
keyword[raise] identifier[error]
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_ip_address(value, **kwargs):
"""Indicate whether ``value`` is a valid IP address (version 4 or version 6).
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.ip_address(value, **kwargs) # depends on [control=['try'], data=[]]
except SyntaxError as error:
raise error # depends on [control=['except'], data=['error']]
except Exception:
return False # depends on [control=['except'], data=[]]
return True |
def norm_locale(loc):
    """Reduce a locale name to its bare language code.

    The input is first canonicalised via ``locale.normalize`` and then
    stripped of its modifier ('@'), encoding ('.') and territory ('_')
    components, in that order.
    """
    loc = locale.normalize(loc)
    for sep in ('@', '.', '_'):
        # Keep only the part before the first occurrence of the separator.
        loc = loc.split(sep, 1)[0]
    return loc
constant[Normalize a locale.]
variable[loc] assign[=] call[name[locale].normalize, parameter[name[loc]]]
variable[pos] assign[=] call[name[loc].find, parameter[constant[@]]]
if compare[name[pos] greater_or_equal[>=] constant[0]] begin[:]
variable[loc] assign[=] call[name[loc]][<ast.Slice object at 0x7da1b2345f30>]
variable[pos] assign[=] call[name[loc].find, parameter[constant[.]]]
if compare[name[pos] greater_or_equal[>=] constant[0]] begin[:]
variable[loc] assign[=] call[name[loc]][<ast.Slice object at 0x7da1b2347cd0>]
variable[pos] assign[=] call[name[loc].find, parameter[constant[_]]]
if compare[name[pos] greater_or_equal[>=] constant[0]] begin[:]
variable[loc] assign[=] call[name[loc]][<ast.Slice object at 0x7da1b2344a30>]
return[name[loc]] | keyword[def] identifier[norm_locale] ( identifier[loc] ):
literal[string]
identifier[loc] = identifier[locale] . identifier[normalize] ( identifier[loc] )
identifier[pos] = identifier[loc] . identifier[find] ( literal[string] )
keyword[if] identifier[pos] >= literal[int] :
identifier[loc] = identifier[loc] [: identifier[pos] ]
identifier[pos] = identifier[loc] . identifier[find] ( literal[string] )
keyword[if] identifier[pos] >= literal[int] :
identifier[loc] = identifier[loc] [: identifier[pos] ]
identifier[pos] = identifier[loc] . identifier[find] ( literal[string] )
keyword[if] identifier[pos] >= literal[int] :
identifier[loc] = identifier[loc] [: identifier[pos] ]
keyword[return] identifier[loc] | def norm_locale(loc):
"""Normalize a locale."""
loc = locale.normalize(loc)
# split up the locale into its base components
pos = loc.find('@')
if pos >= 0:
loc = loc[:pos] # depends on [control=['if'], data=['pos']]
pos = loc.find('.')
if pos >= 0:
loc = loc[:pos] # depends on [control=['if'], data=['pos']]
pos = loc.find('_')
if pos >= 0:
loc = loc[:pos] # depends on [control=['if'], data=['pos']]
return loc |
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
                   dtype=None):
    """
    Fill missing values along ``axis`` of a (possibly 1-d) ndarray.

    A 1-d input is temporarily viewed as a single-row 2-d array, the data
    is transposed when ``axis`` is 1 so the fill always runs in one
    orientation, filled in place via ``pad_2d``/``backfill_2d``, and the
    result is returned.
    """
    # Work on a transposed view when filling along axis 1 so the 2-d fill
    # helpers only ever have to handle one orientation.
    if axis == 0:
        def transf(x):
            return x
    else:
        def transf(x):
            return x.T

    ndim = values.ndim
    if ndim == 1:
        if axis != 0:  # pragma: no cover
            raise AssertionError("cannot interpolate on a ndim == 1 with "
                                 "axis != 0")
        # Promote to a single-row 2-d array for the fill helpers.
        values = values.reshape(tuple((1,) + values.shape))

    if fill_value is None:
        mask = None
    else:
        # todo create faster fill func without masking
        mask = mask_missing(transf(values), fill_value)

    method = clean_fill_method(method)
    filler = pad_2d if method == 'pad' else backfill_2d
    values = transf(filler(transf(values), limit=limit, mask=mask, dtype=dtype))

    # Undo the temporary 2-d promotion before returning.
    if ndim == 1:
        values = values[0]
    return values
constant[
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
]
variable[transf] assign[=] <ast.IfExp object at 0x7da18fe91de0>
variable[ndim] assign[=] name[values].ndim
if compare[name[values].ndim equal[==] constant[1]] begin[:]
if compare[name[axis] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18fe90f10>
variable[values] assign[=] call[name[values].reshape, parameter[call[name[tuple], parameter[binary_operation[tuple[[<ast.Constant object at 0x7da18fe933a0>]] + name[values].shape]]]]]
if compare[name[fill_value] is constant[None]] begin[:]
variable[mask] assign[=] constant[None]
variable[method] assign[=] call[name[clean_fill_method], parameter[name[method]]]
if compare[name[method] equal[==] constant[pad]] begin[:]
variable[values] assign[=] call[name[transf], parameter[call[name[pad_2d], parameter[call[name[transf], parameter[name[values]]]]]]]
if compare[name[ndim] equal[==] constant[1]] begin[:]
variable[values] assign[=] call[name[values]][constant[0]]
return[name[values]] | keyword[def] identifier[interpolate_2d] ( identifier[values] , identifier[method] = literal[string] , identifier[axis] = literal[int] , identifier[limit] = keyword[None] , identifier[fill_value] = keyword[None] ,
identifier[dtype] = keyword[None] ):
literal[string]
identifier[transf] =( keyword[lambda] identifier[x] : identifier[x] ) keyword[if] identifier[axis] == literal[int] keyword[else] ( keyword[lambda] identifier[x] : identifier[x] . identifier[T] )
identifier[ndim] = identifier[values] . identifier[ndim]
keyword[if] identifier[values] . identifier[ndim] == literal[int] :
keyword[if] identifier[axis] != literal[int] :
keyword[raise] identifier[AssertionError] ( literal[string]
literal[string] )
identifier[values] = identifier[values] . identifier[reshape] ( identifier[tuple] (( literal[int] ,)+ identifier[values] . identifier[shape] ))
keyword[if] identifier[fill_value] keyword[is] keyword[None] :
identifier[mask] = keyword[None]
keyword[else] :
identifier[mask] = identifier[mask_missing] ( identifier[transf] ( identifier[values] ), identifier[fill_value] )
identifier[method] = identifier[clean_fill_method] ( identifier[method] )
keyword[if] identifier[method] == literal[string] :
identifier[values] = identifier[transf] ( identifier[pad_2d] (
identifier[transf] ( identifier[values] ), identifier[limit] = identifier[limit] , identifier[mask] = identifier[mask] , identifier[dtype] = identifier[dtype] ))
keyword[else] :
identifier[values] = identifier[transf] ( identifier[backfill_2d] (
identifier[transf] ( identifier[values] ), identifier[limit] = identifier[limit] , identifier[mask] = identifier[mask] , identifier[dtype] = identifier[dtype] ))
keyword[if] identifier[ndim] == literal[int] :
identifier[values] = identifier[values] [ literal[int] ]
keyword[return] identifier[values] | def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
"""
transf = (lambda x: x) if axis == 0 else lambda x: x.T
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError('cannot interpolate on a ndim == 1 with axis != 0') # depends on [control=['if'], data=[]]
values = values.reshape(tuple((1,) + values.shape)) # depends on [control=['if'], data=[]]
if fill_value is None:
mask = None # depends on [control=['if'], data=[]]
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype)) # depends on [control=['if'], data=[]]
else:
values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0] # depends on [control=['if'], data=[]]
return values |
def run(self):
""" Plan:
* We read into a fresh instance of IO obj until marker encountered.
* When marker is detected, we attach that IO obj to "results" array
and signal the calling code (through threading.Event flag) that
results are available
* repeat until .stop() was called on the thread.
"""
marker = ['' for l in self._stream_delimiter] # '' is there on purpose
tf = self._obj[0](*self._obj[1], **self._obj[2])
while not self._stop:
l = os.read(self._r, 1)
marker.pop(0)
marker.append(l)
if marker != self._stream_delimiter:
tf.write(unicode(l))
else:
# chopping off the marker first
tf.seek(self._stream_roll_back_len, 2)
tf.truncate()
tf.seek(0)
self._data_unoccupied.wait(5) # seriously, how much time is needed to get your items off the stack?
self._data.append(tf)
self._data_available.set()
tf = self._obj[0](*self._obj[1], **self._obj[2])
os.close(self._r)
tf.close()
del tf | def function[run, parameter[self]]:
constant[ Plan:
* We read into a fresh instance of IO obj until marker encountered.
* When marker is detected, we attach that IO obj to "results" array
and signal the calling code (through threading.Event flag) that
results are available
* repeat until .stop() was called on the thread.
]
variable[marker] assign[=] <ast.ListComp object at 0x7da1b0b9cc40>
variable[tf] assign[=] call[call[name[self]._obj][constant[0]], parameter[<ast.Starred object at 0x7da1b0b9c970>]]
while <ast.UnaryOp object at 0x7da1b0b9dcc0> begin[:]
variable[l] assign[=] call[name[os].read, parameter[name[self]._r, constant[1]]]
call[name[marker].pop, parameter[constant[0]]]
call[name[marker].append, parameter[name[l]]]
if compare[name[marker] not_equal[!=] name[self]._stream_delimiter] begin[:]
call[name[tf].write, parameter[call[name[unicode], parameter[name[l]]]]]
call[name[os].close, parameter[name[self]._r]]
call[name[tf].close, parameter[]]
<ast.Delete object at 0x7da1b0ca50f0> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[marker] =[ literal[string] keyword[for] identifier[l] keyword[in] identifier[self] . identifier[_stream_delimiter] ]
identifier[tf] = identifier[self] . identifier[_obj] [ literal[int] ](* identifier[self] . identifier[_obj] [ literal[int] ],** identifier[self] . identifier[_obj] [ literal[int] ])
keyword[while] keyword[not] identifier[self] . identifier[_stop] :
identifier[l] = identifier[os] . identifier[read] ( identifier[self] . identifier[_r] , literal[int] )
identifier[marker] . identifier[pop] ( literal[int] )
identifier[marker] . identifier[append] ( identifier[l] )
keyword[if] identifier[marker] != identifier[self] . identifier[_stream_delimiter] :
identifier[tf] . identifier[write] ( identifier[unicode] ( identifier[l] ))
keyword[else] :
identifier[tf] . identifier[seek] ( identifier[self] . identifier[_stream_roll_back_len] , literal[int] )
identifier[tf] . identifier[truncate] ()
identifier[tf] . identifier[seek] ( literal[int] )
identifier[self] . identifier[_data_unoccupied] . identifier[wait] ( literal[int] )
identifier[self] . identifier[_data] . identifier[append] ( identifier[tf] )
identifier[self] . identifier[_data_available] . identifier[set] ()
identifier[tf] = identifier[self] . identifier[_obj] [ literal[int] ](* identifier[self] . identifier[_obj] [ literal[int] ],** identifier[self] . identifier[_obj] [ literal[int] ])
identifier[os] . identifier[close] ( identifier[self] . identifier[_r] )
identifier[tf] . identifier[close] ()
keyword[del] identifier[tf] | def run(self):
""" Plan:
* We read into a fresh instance of IO obj until marker encountered.
* When marker is detected, we attach that IO obj to "results" array
and signal the calling code (through threading.Event flag) that
results are available
* repeat until .stop() was called on the thread.
"""
marker = ['' for l in self._stream_delimiter] # '' is there on purpose
tf = self._obj[0](*self._obj[1], **self._obj[2])
while not self._stop:
l = os.read(self._r, 1)
marker.pop(0)
marker.append(l)
if marker != self._stream_delimiter:
tf.write(unicode(l)) # depends on [control=['if'], data=[]]
else:
# chopping off the marker first
tf.seek(self._stream_roll_back_len, 2)
tf.truncate()
tf.seek(0)
self._data_unoccupied.wait(5) # seriously, how much time is needed to get your items off the stack?
self._data.append(tf)
self._data_available.set()
tf = self._obj[0](*self._obj[1], **self._obj[2]) # depends on [control=['while'], data=[]]
os.close(self._r)
tf.close()
del tf |
def _serialize_ep(ep, varprops, version=_default_version):
"""Serialize an Elementary Predication into the SimpleMRS encoding."""
# ('nodeid', 'pred', 'label', 'args', 'lnk', 'surface', 'base')
args = ep[3]
arglist = ' '.join([_serialize_argument(rarg, args[rarg], varprops)
for rarg in sorted(args, key=rargname_sortkey)])
if version < 1.1 or len(ep) < 6 or ep[5] is None:
surface = ''
else:
surface = ' "%s"' % ep[5]
lnk = None if len(ep) < 5 else ep[4]
pred = ep[1]
predstr = pred.string
return '[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]'.format(
pred=predstr,
lnk=_serialize_lnk(lnk),
surface=surface,
label=str(ep[2]),
s=' ' if arglist else '',
args=arglist
) | def function[_serialize_ep, parameter[ep, varprops, version]]:
constant[Serialize an Elementary Predication into the SimpleMRS encoding.]
variable[args] assign[=] call[name[ep]][constant[3]]
variable[arglist] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b0431a50>]]
if <ast.BoolOp object at 0x7da1b0432f80> begin[:]
variable[surface] assign[=] constant[]
variable[lnk] assign[=] <ast.IfExp object at 0x7da1b0430d30>
variable[pred] assign[=] call[name[ep]][constant[1]]
variable[predstr] assign[=] name[pred].string
return[call[constant[[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]].format, parameter[]]] | keyword[def] identifier[_serialize_ep] ( identifier[ep] , identifier[varprops] , identifier[version] = identifier[_default_version] ):
literal[string]
identifier[args] = identifier[ep] [ literal[int] ]
identifier[arglist] = literal[string] . identifier[join] ([ identifier[_serialize_argument] ( identifier[rarg] , identifier[args] [ identifier[rarg] ], identifier[varprops] )
keyword[for] identifier[rarg] keyword[in] identifier[sorted] ( identifier[args] , identifier[key] = identifier[rargname_sortkey] )])
keyword[if] identifier[version] < literal[int] keyword[or] identifier[len] ( identifier[ep] )< literal[int] keyword[or] identifier[ep] [ literal[int] ] keyword[is] keyword[None] :
identifier[surface] = literal[string]
keyword[else] :
identifier[surface] = literal[string] % identifier[ep] [ literal[int] ]
identifier[lnk] = keyword[None] keyword[if] identifier[len] ( identifier[ep] )< literal[int] keyword[else] identifier[ep] [ literal[int] ]
identifier[pred] = identifier[ep] [ literal[int] ]
identifier[predstr] = identifier[pred] . identifier[string]
keyword[return] literal[string] . identifier[format] (
identifier[pred] = identifier[predstr] ,
identifier[lnk] = identifier[_serialize_lnk] ( identifier[lnk] ),
identifier[surface] = identifier[surface] ,
identifier[label] = identifier[str] ( identifier[ep] [ literal[int] ]),
identifier[s] = literal[string] keyword[if] identifier[arglist] keyword[else] literal[string] ,
identifier[args] = identifier[arglist]
) | def _serialize_ep(ep, varprops, version=_default_version):
"""Serialize an Elementary Predication into the SimpleMRS encoding."""
# ('nodeid', 'pred', 'label', 'args', 'lnk', 'surface', 'base')
args = ep[3]
arglist = ' '.join([_serialize_argument(rarg, args[rarg], varprops) for rarg in sorted(args, key=rargname_sortkey)])
if version < 1.1 or len(ep) < 6 or ep[5] is None:
surface = '' # depends on [control=['if'], data=[]]
else:
surface = ' "%s"' % ep[5]
lnk = None if len(ep) < 5 else ep[4]
pred = ep[1]
predstr = pred.string
return '[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]'.format(pred=predstr, lnk=_serialize_lnk(lnk), surface=surface, label=str(ep[2]), s=' ' if arglist else '', args=arglist) |
def encrypt(key, message):
'''encrypt leverages KMS encrypt and base64-encode encrypted blob
More info on KMS encrypt API:
https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html
'''
try:
ret = kms.encrypt(KeyId=key, Plaintext=message)
encrypted_data = base64.encodestring(ret.get('CiphertextBlob'))
except Exception as e:
# returns http 500 back to user and log error details in Cloudwatch Logs
raise Exception("Unable to encrypt data: ", e)
return encrypted_data.decode() | def function[encrypt, parameter[key, message]]:
constant[encrypt leverages KMS encrypt and base64-encode encrypted blob
More info on KMS encrypt API:
https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html
]
<ast.Try object at 0x7da20e956f80>
return[call[name[encrypted_data].decode, parameter[]]] | keyword[def] identifier[encrypt] ( identifier[key] , identifier[message] ):
literal[string]
keyword[try] :
identifier[ret] = identifier[kms] . identifier[encrypt] ( identifier[KeyId] = identifier[key] , identifier[Plaintext] = identifier[message] )
identifier[encrypted_data] = identifier[base64] . identifier[encodestring] ( identifier[ret] . identifier[get] ( literal[string] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( literal[string] , identifier[e] )
keyword[return] identifier[encrypted_data] . identifier[decode] () | def encrypt(key, message):
"""encrypt leverages KMS encrypt and base64-encode encrypted blob
More info on KMS encrypt API:
https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html
"""
try:
ret = kms.encrypt(KeyId=key, Plaintext=message)
encrypted_data = base64.encodestring(ret.get('CiphertextBlob')) # depends on [control=['try'], data=[]]
except Exception as e:
# returns http 500 back to user and log error details in Cloudwatch Logs
raise Exception('Unable to encrypt data: ', e) # depends on [control=['except'], data=['e']]
return encrypted_data.decode() |
async def _step(self):
"""
Main loop of the behaviour.
checks whether behaviour is done or killed,
ortherwise it calls run() coroutine.
"""
while not self._done() and not self.is_killed():
try:
await self._run()
await asyncio.sleep(0) # relinquish cpu
except Exception as e:
logger.error("Exception running behaviour {}: {}".format(self, e))
self.kill(exit_code=e)
try:
await self.on_end()
except Exception as e:
logger.error("Exception running on_end in behaviour {}: {}".format(self, e))
self.kill(exit_code=e) | <ast.AsyncFunctionDef object at 0x7da1b0790b20> | keyword[async] keyword[def] identifier[_step] ( identifier[self] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[_done] () keyword[and] keyword[not] identifier[self] . identifier[is_killed] ():
keyword[try] :
keyword[await] identifier[self] . identifier[_run] ()
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] , identifier[e] ))
identifier[self] . identifier[kill] ( identifier[exit_code] = identifier[e] )
keyword[try] :
keyword[await] identifier[self] . identifier[on_end] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] , identifier[e] ))
identifier[self] . identifier[kill] ( identifier[exit_code] = identifier[e] ) | async def _step(self):
"""
Main loop of the behaviour.
checks whether behaviour is done or killed,
ortherwise it calls run() coroutine.
"""
while not self._done() and (not self.is_killed()):
try:
await self._run()
await asyncio.sleep(0) # relinquish cpu # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Exception running behaviour {}: {}'.format(self, e))
self.kill(exit_code=e) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
try:
await self.on_end() # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Exception running on_end in behaviour {}: {}'.format(self, e))
self.kill(exit_code=e) # depends on [control=['except'], data=['e']] |
def createPhysicalInterface(self, name, description=None):
"""
Create a physical interface.
Parameters:
- name (string)
- description (string, optional)
Returns: physical interface id, response.
Throws APIException on failure.
"""
req = ApiClient.allPhysicalInterfacesUrl % (self.host, "/draft")
body = {"name" : name}
if description:
body["description"] = description
resp = requests.post(req, auth=self.credentials, headers={"Content-Type":"application/json"},
data=json.dumps(body), verify=self.verify)
if resp.status_code == 201:
self.logger.debug("physical interface created")
else:
raise ibmiotf.APIException(resp.status_code, "HTTP error creating physical interface", resp)
return resp.json()["id"], resp.json() | def function[createPhysicalInterface, parameter[self, name, description]]:
constant[
Create a physical interface.
Parameters:
- name (string)
- description (string, optional)
Returns: physical interface id, response.
Throws APIException on failure.
]
variable[req] assign[=] binary_operation[name[ApiClient].allPhysicalInterfacesUrl <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f721ff0>, <ast.Constant object at 0x7da18f7238b0>]]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f722d70>], [<ast.Name object at 0x7da18f722950>]]
if name[description] begin[:]
call[name[body]][constant[description]] assign[=] name[description]
variable[resp] assign[=] call[name[requests].post, parameter[name[req]]]
if compare[name[resp].status_code equal[==] constant[201]] begin[:]
call[name[self].logger.debug, parameter[constant[physical interface created]]]
return[tuple[[<ast.Subscript object at 0x7da18ede4fa0>, <ast.Call object at 0x7da18ede4760>]]] | keyword[def] identifier[createPhysicalInterface] ( identifier[self] , identifier[name] , identifier[description] = keyword[None] ):
literal[string]
identifier[req] = identifier[ApiClient] . identifier[allPhysicalInterfacesUrl] %( identifier[self] . identifier[host] , literal[string] )
identifier[body] ={ literal[string] : identifier[name] }
keyword[if] identifier[description] :
identifier[body] [ literal[string] ]= identifier[description]
identifier[resp] = identifier[requests] . identifier[post] ( identifier[req] , identifier[auth] = identifier[self] . identifier[credentials] , identifier[headers] ={ literal[string] : literal[string] },
identifier[data] = identifier[json] . identifier[dumps] ( identifier[body] ), identifier[verify] = identifier[self] . identifier[verify] )
keyword[if] identifier[resp] . identifier[status_code] == literal[int] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ibmiotf] . identifier[APIException] ( identifier[resp] . identifier[status_code] , literal[string] , identifier[resp] )
keyword[return] identifier[resp] . identifier[json] ()[ literal[string] ], identifier[resp] . identifier[json] () | def createPhysicalInterface(self, name, description=None):
"""
Create a physical interface.
Parameters:
- name (string)
- description (string, optional)
Returns: physical interface id, response.
Throws APIException on failure.
"""
req = ApiClient.allPhysicalInterfacesUrl % (self.host, '/draft')
body = {'name': name}
if description:
body['description'] = description # depends on [control=['if'], data=[]]
resp = requests.post(req, auth=self.credentials, headers={'Content-Type': 'application/json'}, data=json.dumps(body), verify=self.verify)
if resp.status_code == 201:
self.logger.debug('physical interface created') # depends on [control=['if'], data=[]]
else:
raise ibmiotf.APIException(resp.status_code, 'HTTP error creating physical interface', resp)
return (resp.json()['id'], resp.json()) |
def new_job(self, task, inputdata, launcher_name="Unknown", debug=False):
"""
Runs a new job.
It works exactly like the Client class, instead that there is no callback and directly returns result, in the form of a tuple
(result, grade, problems, tests, custom, archive).
"""
job_semaphore = threading.Semaphore(0)
def manage_output(result, grade, problems, tests, custom, state, archive, stdout, stderr):
""" Manages the output of this job """
manage_output.job_return = (result, grade, problems, tests, custom, state, archive, stdout, stderr)
job_semaphore.release()
manage_output.job_return = None
self._client.new_job(task, inputdata, manage_output, launcher_name, debug)
job_semaphore.acquire()
job_return = manage_output.job_return
return job_return | def function[new_job, parameter[self, task, inputdata, launcher_name, debug]]:
constant[
Runs a new job.
It works exactly like the Client class, instead that there is no callback and directly returns result, in the form of a tuple
(result, grade, problems, tests, custom, archive).
]
variable[job_semaphore] assign[=] call[name[threading].Semaphore, parameter[constant[0]]]
def function[manage_output, parameter[result, grade, problems, tests, custom, state, archive, stdout, stderr]]:
constant[ Manages the output of this job ]
name[manage_output].job_return assign[=] tuple[[<ast.Name object at 0x7da207f98af0>, <ast.Name object at 0x7da207f9bf70>, <ast.Name object at 0x7da207f98040>, <ast.Name object at 0x7da207f98e20>, <ast.Name object at 0x7da207f9ab60>, <ast.Name object at 0x7da207f99030>, <ast.Name object at 0x7da18f7224d0>, <ast.Name object at 0x7da18f723dc0>, <ast.Name object at 0x7da18f720580>]]
call[name[job_semaphore].release, parameter[]]
name[manage_output].job_return assign[=] constant[None]
call[name[self]._client.new_job, parameter[name[task], name[inputdata], name[manage_output], name[launcher_name], name[debug]]]
call[name[job_semaphore].acquire, parameter[]]
variable[job_return] assign[=] name[manage_output].job_return
return[name[job_return]] | keyword[def] identifier[new_job] ( identifier[self] , identifier[task] , identifier[inputdata] , identifier[launcher_name] = literal[string] , identifier[debug] = keyword[False] ):
literal[string]
identifier[job_semaphore] = identifier[threading] . identifier[Semaphore] ( literal[int] )
keyword[def] identifier[manage_output] ( identifier[result] , identifier[grade] , identifier[problems] , identifier[tests] , identifier[custom] , identifier[state] , identifier[archive] , identifier[stdout] , identifier[stderr] ):
literal[string]
identifier[manage_output] . identifier[job_return] =( identifier[result] , identifier[grade] , identifier[problems] , identifier[tests] , identifier[custom] , identifier[state] , identifier[archive] , identifier[stdout] , identifier[stderr] )
identifier[job_semaphore] . identifier[release] ()
identifier[manage_output] . identifier[job_return] = keyword[None]
identifier[self] . identifier[_client] . identifier[new_job] ( identifier[task] , identifier[inputdata] , identifier[manage_output] , identifier[launcher_name] , identifier[debug] )
identifier[job_semaphore] . identifier[acquire] ()
identifier[job_return] = identifier[manage_output] . identifier[job_return]
keyword[return] identifier[job_return] | def new_job(self, task, inputdata, launcher_name='Unknown', debug=False):
"""
Runs a new job.
It works exactly like the Client class, instead that there is no callback and directly returns result, in the form of a tuple
(result, grade, problems, tests, custom, archive).
"""
job_semaphore = threading.Semaphore(0)
def manage_output(result, grade, problems, tests, custom, state, archive, stdout, stderr):
""" Manages the output of this job """
manage_output.job_return = (result, grade, problems, tests, custom, state, archive, stdout, stderr)
job_semaphore.release()
manage_output.job_return = None
self._client.new_job(task, inputdata, manage_output, launcher_name, debug)
job_semaphore.acquire()
job_return = manage_output.job_return
return job_return |
def generate_export_pipeline_code(pipeline_tree, operators):
"""Generate code specific to the construction of the sklearn Pipeline for export_pipeline.
Parameters
----------
pipeline_tree: list
List of operators in the current optimized pipeline
Returns
-------
Source code for the sklearn pipeline
"""
steps = _process_operator(pipeline_tree, operators)
# number of steps in a pipeline
num_step = len(steps)
if num_step > 1:
pipeline_text = "make_pipeline(\n{STEPS}\n)".format(STEPS=_indent(",\n".join(steps), 4))
# only one operator (root = True)
else:
pipeline_text = "{STEPS}".format(STEPS=_indent(",\n".join(steps), 0))
return pipeline_text | def function[generate_export_pipeline_code, parameter[pipeline_tree, operators]]:
constant[Generate code specific to the construction of the sklearn Pipeline for export_pipeline.
Parameters
----------
pipeline_tree: list
List of operators in the current optimized pipeline
Returns
-------
Source code for the sklearn pipeline
]
variable[steps] assign[=] call[name[_process_operator], parameter[name[pipeline_tree], name[operators]]]
variable[num_step] assign[=] call[name[len], parameter[name[steps]]]
if compare[name[num_step] greater[>] constant[1]] begin[:]
variable[pipeline_text] assign[=] call[constant[make_pipeline(
{STEPS}
)].format, parameter[]]
return[name[pipeline_text]] | keyword[def] identifier[generate_export_pipeline_code] ( identifier[pipeline_tree] , identifier[operators] ):
literal[string]
identifier[steps] = identifier[_process_operator] ( identifier[pipeline_tree] , identifier[operators] )
identifier[num_step] = identifier[len] ( identifier[steps] )
keyword[if] identifier[num_step] > literal[int] :
identifier[pipeline_text] = literal[string] . identifier[format] ( identifier[STEPS] = identifier[_indent] ( literal[string] . identifier[join] ( identifier[steps] ), literal[int] ))
keyword[else] :
identifier[pipeline_text] = literal[string] . identifier[format] ( identifier[STEPS] = identifier[_indent] ( literal[string] . identifier[join] ( identifier[steps] ), literal[int] ))
keyword[return] identifier[pipeline_text] | def generate_export_pipeline_code(pipeline_tree, operators):
"""Generate code specific to the construction of the sklearn Pipeline for export_pipeline.
Parameters
----------
pipeline_tree: list
List of operators in the current optimized pipeline
Returns
-------
Source code for the sklearn pipeline
"""
steps = _process_operator(pipeline_tree, operators)
# number of steps in a pipeline
num_step = len(steps)
if num_step > 1:
pipeline_text = 'make_pipeline(\n{STEPS}\n)'.format(STEPS=_indent(',\n'.join(steps), 4)) # depends on [control=['if'], data=[]]
else:
# only one operator (root = True)
pipeline_text = '{STEPS}'.format(STEPS=_indent(',\n'.join(steps), 0))
return pipeline_text |
def get_full_order_book(self, symbol):
"""Get a list of all bids and asks aggregated by price for a symbol.
This call is generally used by professional traders because it uses more server resources and traffic,
and Kucoin has strict access frequency control.
https://docs.kucoin.com/#get-full-order-book-aggregated
:param symbol: Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
orders = client.get_order_book('KCS-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "3262786978",
"bids": [
["6500.12", "0.45054140"], # [price size]
["6500.11", "0.45054140"]
],
"asks": [
["6500.16", "0.57753524"],
["6500.15", "0.57753524"]
]
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {
'symbol': symbol
}
return self._get('market/orderbook/level2', False, data=data) | def function[get_full_order_book, parameter[self, symbol]]:
constant[Get a list of all bids and asks aggregated by price for a symbol.
This call is generally used by professional traders because it uses more server resources and traffic,
and Kucoin has strict access frequency control.
https://docs.kucoin.com/#get-full-order-book-aggregated
:param symbol: Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
orders = client.get_order_book('KCS-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "3262786978",
"bids": [
["6500.12", "0.45054140"], # [price size]
["6500.11", "0.45054140"]
],
"asks": [
["6500.16", "0.57753524"],
["6500.15", "0.57753524"]
]
}
:raises: KucoinResponseException, KucoinAPIException
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b084caf0>], [<ast.Name object at 0x7da1b084fee0>]]
return[call[name[self]._get, parameter[constant[market/orderbook/level2], constant[False]]]] | keyword[def] identifier[get_full_order_book] ( identifier[self] , identifier[symbol] ):
literal[string]
identifier[data] ={
literal[string] : identifier[symbol]
}
keyword[return] identifier[self] . identifier[_get] ( literal[string] , keyword[False] , identifier[data] = identifier[data] ) | def get_full_order_book(self, symbol):
"""Get a list of all bids and asks aggregated by price for a symbol.
This call is generally used by professional traders because it uses more server resources and traffic,
and Kucoin has strict access frequency control.
https://docs.kucoin.com/#get-full-order-book-aggregated
:param symbol: Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
orders = client.get_order_book('KCS-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "3262786978",
"bids": [
["6500.12", "0.45054140"], # [price size]
["6500.11", "0.45054140"]
],
"asks": [
["6500.16", "0.57753524"],
["6500.15", "0.57753524"]
]
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {'symbol': symbol}
return self._get('market/orderbook/level2', False, data=data) |
def load(self, url, offset, length):
"""
Load a file-like reader over http using range requests
and an optional cookie created via a cookie_maker
"""
headers = {}
if offset != 0 or length != -1:
headers['Range'] = BlockLoader._make_range_header(offset, length)
if self.cookie_maker:
if isinstance(self.cookie_maker, six.string_types):
headers['Cookie'] = self.cookie_maker
else:
headers['Cookie'] = self.cookie_maker.make()
if not self.session:
self.session = requests.Session()
r = self.session.get(url, headers=headers, stream=True)
r.raise_for_status()
return r.raw | def function[load, parameter[self, url, offset, length]]:
constant[
Load a file-like reader over http using range requests
and an optional cookie created via a cookie_maker
]
variable[headers] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b26ae200> begin[:]
call[name[headers]][constant[Range]] assign[=] call[name[BlockLoader]._make_range_header, parameter[name[offset], name[length]]]
if name[self].cookie_maker begin[:]
if call[name[isinstance], parameter[name[self].cookie_maker, name[six].string_types]] begin[:]
call[name[headers]][constant[Cookie]] assign[=] name[self].cookie_maker
if <ast.UnaryOp object at 0x7da1b1e99270> begin[:]
name[self].session assign[=] call[name[requests].Session, parameter[]]
variable[r] assign[=] call[name[self].session.get, parameter[name[url]]]
call[name[r].raise_for_status, parameter[]]
return[name[r].raw] | keyword[def] identifier[load] ( identifier[self] , identifier[url] , identifier[offset] , identifier[length] ):
literal[string]
identifier[headers] ={}
keyword[if] identifier[offset] != literal[int] keyword[or] identifier[length] !=- literal[int] :
identifier[headers] [ literal[string] ]= identifier[BlockLoader] . identifier[_make_range_header] ( identifier[offset] , identifier[length] )
keyword[if] identifier[self] . identifier[cookie_maker] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[cookie_maker] , identifier[six] . identifier[string_types] ):
identifier[headers] [ literal[string] ]= identifier[self] . identifier[cookie_maker]
keyword[else] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[cookie_maker] . identifier[make] ()
keyword[if] keyword[not] identifier[self] . identifier[session] :
identifier[self] . identifier[session] = identifier[requests] . identifier[Session] ()
identifier[r] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[stream] = keyword[True] )
identifier[r] . identifier[raise_for_status] ()
keyword[return] identifier[r] . identifier[raw] | def load(self, url, offset, length):
"""
Load a file-like reader over http using range requests
and an optional cookie created via a cookie_maker
"""
headers = {}
if offset != 0 or length != -1:
headers['Range'] = BlockLoader._make_range_header(offset, length) # depends on [control=['if'], data=[]]
if self.cookie_maker:
if isinstance(self.cookie_maker, six.string_types):
headers['Cookie'] = self.cookie_maker # depends on [control=['if'], data=[]]
else:
headers['Cookie'] = self.cookie_maker.make() # depends on [control=['if'], data=[]]
if not self.session:
self.session = requests.Session() # depends on [control=['if'], data=[]]
r = self.session.get(url, headers=headers, stream=True)
r.raise_for_status()
return r.raw |
def surface_poisson(script, octree_depth=10, solver_divide=8,
                    samples_per_node=1.0, offset=1.0):
    """Reconstruct a surface from points and normals using the Poisson
    Surface Reconstruction approach.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        octree_depth (int): Depth of the octree used to extract the final
            surface. Suggested range 5..10; larger values give a more
            precise reconstruction at the cost of longer processing times.
        solver_divide (int): Depth at which a block Gauss-Seidel solver is
            used to solve the Laplacian equation. Helps reduce memory
            overhead for a small increase in reconstruction time; for
            reconstructions of depth 9 or higher a value of 7 or 8 can
            lower memory usage. Default 8.
        samples_per_node (float): Minimum number of sample points that
            should fall within an octree node as the octree construction
            adapts to sampling density. Small values (1.0 - 5.0) suit
            noise-free samples; larger values (15.0 - 20.0) give a
            smoother, noise-reduced reconstruction. Default 1.0.
        offset (float): Correction factor for the chosen isosurface
            threshold. Values below 1 offset inward, above 1 outward;
            good values lie in 0.5 .. 2. Default 1.0 (no offsetting).

    Layer stack:
        Creates 1 new layer 'Poisson mesh'
        Current layer is changed to new layer

    MeshLab versions:
        1.3.4BETA
    """
    # (name, pre-formatted value attribute, description, Rich* type) for
    # each <Param> element, in the order MeshLab expects them.
    param_specs = (
        ('OctDepth', 'value="{:d}" '.format(octree_depth),
         'Octree Depth', 'RichInt'),
        ('SolverDivide', 'value="{:d}" '.format(solver_divide),
         'Solver Divide', 'RichInt'),
        ('SamplesPerNode', 'value="{}" '.format(samples_per_node),
         'Samples per Node', 'RichFloat'),
        ('Offset', 'value="{}" '.format(offset),
         'Surface offsetting', 'RichFloat'),
    )
    # Assemble the filter XML fragment. The leading whitespace inside the
    # literals is part of the emitted script format, so it is kept exactly.
    xml_parts = ['  <filter name="Surface Reconstruction: Poisson">\n']
    for name, value_attr, description, param_type in param_specs:
        xml_parts.append('    <Param name="{}" '.format(name))
        xml_parts.append(value_attr)
        xml_parts.append('description="{}" '.format(description))
        xml_parts.append('type="{}" '.format(param_type))
        xml_parts.append('/>\n')
    xml_parts.append('  </filter>\n')
    util.write_filter(script, ''.join(xml_parts))
    # Only FilterScript objects track a layer stack; plain filenames do not.
    if isinstance(script, FilterScript):
        script.add_layer('Poisson mesh', change_layer=True)
    return None
constant[ Use the points and normals to build a surface using the Poisson
Surface reconstruction approach.
Args:
script: the FilterScript object or script filename to write
the filter to.
octree_depth (int): Set the depth of the Octree used for extracting the
final surface. Suggested range 5..10. Higher numbers mean higher
precision in the reconstruction but also higher processing times.
Be patient.
solver_divide (int): This integer argument specifies the depth at which
a block Gauss-Seidel solver is used to solve the Laplacian
equation. Using this parameter helps reduce the memory overhead at
the cost of a small increase in reconstruction time. In practice,
the authors have found that for reconstructions of depth 9 or
higher a subdivide depth of 7 or 8 can reduce the memory usage. The
default value is 8.
samples_per_node (float): This floating point value specifies the
minimum number of sample points that should fall within an octree
node as the octree
construction is adapted to sampling density.
For noise-free samples, small values in the range [1.0 - 5.0] can
be used. For more noisy samples, larger values in the range
[15.0 - 20.0] may be needed to provide a smoother, noise-reduced,
reconstruction. The default value is 1.0.
offset (float): This floating point value specifies a correction value
for the isosurface threshold that is chosen. Values less than 1
mean internal offsetting, greater than 1 mean external offsetting.
Good values are in the range 0.5 .. 2. The default value is 1.0
(no offsetting).
Layer stack:
Creates 1 new layer 'Poisson mesh'
Current layer is changed to new layer
MeshLab versions:
1.3.4BETA
]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da20c9922f0>, <ast.Constant object at 0x7da20c992740>, <ast.Call object at 0x7da20c993700>, <ast.Constant object at 0x7da20c9933d0>, <ast.Constant object at 0x7da20c993eb0>, <ast.Constant object at 0x7da20c9930a0>, <ast.Constant object at 0x7da20c993f10>, <ast.Call object at 0x7da20c990040>, <ast.Constant object at 0x7da20c9930d0>, <ast.Constant object at 0x7da20c991420>, <ast.Constant object at 0x7da20c9936a0>, <ast.Constant object at 0x7da20c990fa0>, <ast.Call object at 0x7da20c992f80>, <ast.Constant object at 0x7da20c991180>, <ast.Constant object at 0x7da20c9913c0>, <ast.Constant object at 0x7da20c992c80>, <ast.Constant object at 0x7da20c991360>, <ast.Call object at 0x7da20c991240>, <ast.Constant object at 0x7da20c993250>, <ast.Constant object at 0x7da20c993490>, <ast.Constant object at 0x7da20c991540>, <ast.Constant object at 0x7da20c990c70>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
if call[name[isinstance], parameter[name[script], name[FilterScript]]] begin[:]
call[name[script].add_layer, parameter[constant[Poisson mesh]]]
return[constant[None]] | keyword[def] identifier[surface_poisson] ( identifier[script] , identifier[octree_depth] = literal[int] , identifier[solver_divide] = literal[int] ,
identifier[samples_per_node] = literal[int] , identifier[offset] = literal[int] ):
literal[string]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[octree_depth] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[solver_divide] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[samples_per_node] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[offset] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[if] identifier[isinstance] ( identifier[script] , identifier[FilterScript] ):
identifier[script] . identifier[add_layer] ( literal[string] , identifier[change_layer] = keyword[True] )
keyword[return] keyword[None] | def surface_poisson(script, octree_depth=10, solver_divide=8, samples_per_node=1.0, offset=1.0):
""" Use the points and normals to build a surface using the Poisson
Surface reconstruction approach.
Args:
script: the FilterScript object or script filename to write
the filter to.
octree_depth (int): Set the depth of the Octree used for extracting the
final surface. Suggested range 5..10. Higher numbers mean higher
precision in the reconstruction but also higher processing times.
Be patient.
solver_divide (int): This integer argument specifies the depth at which
a block Gauss-Seidel solver is used to solve the Laplacian
equation. Using this parameter helps reduce the memory overhead at
the cost of a small increase in reconstruction time. In practice,
the authors have found that for reconstructions of depth 9 or
higher a subdivide depth of 7 or 8 can reduce the memory usage. The
default value is 8.
samples_per_node (float): This floating point value specifies the
minimum number of sample points that should fall within an octree
node as the octree
construction is adapted to sampling density.
For noise-free samples, small values in the range [1.0 - 5.0] can
be used. For more noisy samples, larger values in the range
[15.0 - 20.0] may be needed to provide a smoother, noise-reduced,
reconstruction. The default value is 1.0.
offset (float): This floating point value specifies a correction value
for the isosurface threshold that is chosen. Values less than 1
mean internal offsetting, greater than 1 mean external offsetting.
Good values are in the range 0.5 .. 2. The default value is 1.0
(no offsetting).
Layer stack:
Creates 1 new layer 'Poisson mesh'
Current layer is changed to new layer
MeshLab versions:
1.3.4BETA
"""
filter_xml = ''.join([' <filter name="Surface Reconstruction: Poisson">\n', ' <Param name="OctDepth" ', 'value="{:d}" '.format(octree_depth), 'description="Octree Depth" ', 'type="RichInt" ', '/>\n', ' <Param name="SolverDivide" ', 'value="{:d}" '.format(solver_divide), 'description="Solver Divide" ', 'type="RichInt" ', '/>\n', ' <Param name="SamplesPerNode" ', 'value="{}" '.format(samples_per_node), 'description="Samples per Node" ', 'type="RichFloat" ', '/>\n', ' <Param name="Offset" ', 'value="{}" '.format(offset), 'description="Surface offsetting" ', 'type="RichFloat" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Poisson mesh', change_layer=True) # depends on [control=['if'], data=[]]
return None |
def parse_reports(self):
    """Find Qualimap BamQC reports, parse their data and build the report.

    Populates the per-sample result dicts on ``self`` (genome results,
    coverage histogram, insert size histogram, GC-content distribution),
    builds the report sections and general-stats headers, and returns the
    number of samples parsed (the maximum across report types), or 0 if
    no Qualimap BamQC output was found.
    """
    # General stats - genome_results.txt
    self.qualimap_bamqc_genome_results = dict()
    for f in self.find_log_files('qualimap/bamqc/genome_results'):
        parse_genome_results(self, f)
    self.qualimap_bamqc_genome_results = self.ignore_samples(self.qualimap_bamqc_genome_results)

    # Coverage - coverage_histogram.txt
    self.qualimap_bamqc_coverage_hist = dict()
    for f in self.find_log_files('qualimap/bamqc/coverage', filehandles=True):
        parse_coverage(self, f)
    self.qualimap_bamqc_coverage_hist = self.ignore_samples(self.qualimap_bamqc_coverage_hist)

    # Insert size - insert_size_histogram.txt
    self.qualimap_bamqc_insert_size_hist = dict()
    for f in self.find_log_files('qualimap/bamqc/insert_size', filehandles=True):
        parse_insert_size(self, f)
    self.qualimap_bamqc_insert_size_hist = self.ignore_samples(self.qualimap_bamqc_insert_size_hist)

    # GC distribution - mapped_reads_gc-content_distribution.txt
    self.qualimap_bamqc_gc_content_dist = dict()
    self.qualimap_bamqc_gc_by_species = dict()  # {'HUMAN': data_dict, 'MOUSE': data_dict}
    for f in self.find_log_files('qualimap/bamqc/gc_dist', filehandles=True):
        parse_gc_dist(self, f)
    self.qualimap_bamqc_gc_by_species = self.ignore_samples(self.qualimap_bamqc_gc_by_species)

    num_parsed = max(
        len(self.qualimap_bamqc_genome_results),
        len(self.qualimap_bamqc_coverage_hist),
        len(self.qualimap_bamqc_insert_size_hist),
        len(self.qualimap_bamqc_gc_content_dist)
    )

    # Go no further if nothing found
    if num_parsed == 0:
        return 0

    # Coverage thresholds for the general stats table. User-configurable via
    # config.qualimap_config['general_stats_coverage']; the asserts below are
    # deliberately caught as AssertionError to fall back to the defaults.
    # NOTE(review): asserts are stripped under -O, and a missing
    # 'general_stats_coverage' key would raise an uncaught KeyError if
    # qualimap_config is a plain dict -- confirm against the config loader.
    try:
        covs = config.qualimap_config['general_stats_coverage']
        assert type(covs) == list
        assert len(covs) > 0
        covs = [str(i) for i in covs]
        log.debug("Custom Qualimap thresholds: {}".format(", ".join(covs)))
    except (AttributeError, TypeError, AssertionError):
        covs = [str(i) for i in [1, 5, 10, 30, 50]]
        log.debug("Using default Qualimap thresholds: {}".format(", ".join(covs)))
    self.covs = covs

    # Make the plots for the report
    report_sections(self)

    # Set up the general stats table
    general_stats_headers(self)

    # Return the number of reports we found
    return num_parsed
constant[ Find Qualimap BamQC reports and parse their data ]
name[self].qualimap_bamqc_genome_results assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[qualimap/bamqc/genome_results]]]] begin[:]
call[name[parse_genome_results], parameter[name[self], name[f]]]
name[self].qualimap_bamqc_genome_results assign[=] call[name[self].ignore_samples, parameter[name[self].qualimap_bamqc_genome_results]]
name[self].qualimap_bamqc_coverage_hist assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[qualimap/bamqc/coverage]]]] begin[:]
call[name[parse_coverage], parameter[name[self], name[f]]]
name[self].qualimap_bamqc_coverage_hist assign[=] call[name[self].ignore_samples, parameter[name[self].qualimap_bamqc_coverage_hist]]
name[self].qualimap_bamqc_insert_size_hist assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[qualimap/bamqc/insert_size]]]] begin[:]
call[name[parse_insert_size], parameter[name[self], name[f]]]
name[self].qualimap_bamqc_insert_size_hist assign[=] call[name[self].ignore_samples, parameter[name[self].qualimap_bamqc_insert_size_hist]]
name[self].qualimap_bamqc_gc_content_dist assign[=] call[name[dict], parameter[]]
name[self].qualimap_bamqc_gc_by_species assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[qualimap/bamqc/gc_dist]]]] begin[:]
call[name[parse_gc_dist], parameter[name[self], name[f]]]
name[self].qualimap_bamqc_gc_by_species assign[=] call[name[self].ignore_samples, parameter[name[self].qualimap_bamqc_gc_by_species]]
variable[num_parsed] assign[=] call[name[max], parameter[call[name[len], parameter[name[self].qualimap_bamqc_genome_results]], call[name[len], parameter[name[self].qualimap_bamqc_coverage_hist]], call[name[len], parameter[name[self].qualimap_bamqc_insert_size_hist]], call[name[len], parameter[name[self].qualimap_bamqc_gc_content_dist]]]]
if compare[name[num_parsed] equal[==] constant[0]] begin[:]
return[constant[0]]
<ast.Try object at 0x7da20e9b0670>
name[self].covs assign[=] name[covs]
call[name[report_sections], parameter[name[self]]]
call[name[general_stats_headers], parameter[name[self]]]
return[name[num_parsed]] | keyword[def] identifier[parse_reports] ( identifier[self] ):
literal[string]
identifier[self] . identifier[qualimap_bamqc_genome_results] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] ):
identifier[parse_genome_results] ( identifier[self] , identifier[f] )
identifier[self] . identifier[qualimap_bamqc_genome_results] = identifier[self] . identifier[ignore_samples] ( identifier[self] . identifier[qualimap_bamqc_genome_results] )
identifier[self] . identifier[qualimap_bamqc_coverage_hist] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] , identifier[filehandles] = keyword[True] ):
identifier[parse_coverage] ( identifier[self] , identifier[f] )
identifier[self] . identifier[qualimap_bamqc_coverage_hist] = identifier[self] . identifier[ignore_samples] ( identifier[self] . identifier[qualimap_bamqc_coverage_hist] )
identifier[self] . identifier[qualimap_bamqc_insert_size_hist] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] , identifier[filehandles] = keyword[True] ):
identifier[parse_insert_size] ( identifier[self] , identifier[f] )
identifier[self] . identifier[qualimap_bamqc_insert_size_hist] = identifier[self] . identifier[ignore_samples] ( identifier[self] . identifier[qualimap_bamqc_insert_size_hist] )
identifier[self] . identifier[qualimap_bamqc_gc_content_dist] = identifier[dict] ()
identifier[self] . identifier[qualimap_bamqc_gc_by_species] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] , identifier[filehandles] = keyword[True] ):
identifier[parse_gc_dist] ( identifier[self] , identifier[f] )
identifier[self] . identifier[qualimap_bamqc_gc_by_species] = identifier[self] . identifier[ignore_samples] ( identifier[self] . identifier[qualimap_bamqc_gc_by_species] )
identifier[num_parsed] = identifier[max] (
identifier[len] ( identifier[self] . identifier[qualimap_bamqc_genome_results] ),
identifier[len] ( identifier[self] . identifier[qualimap_bamqc_coverage_hist] ),
identifier[len] ( identifier[self] . identifier[qualimap_bamqc_insert_size_hist] ),
identifier[len] ( identifier[self] . identifier[qualimap_bamqc_gc_content_dist] )
)
keyword[if] identifier[num_parsed] == literal[int] :
keyword[return] literal[int]
keyword[try] :
identifier[covs] = identifier[config] . identifier[qualimap_config] [ literal[string] ]
keyword[assert] identifier[type] ( identifier[covs] )== identifier[list]
keyword[assert] identifier[len] ( identifier[covs] )> literal[int]
identifier[covs] =[ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[covs] ]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[covs] ])))
keyword[except] ( identifier[AttributeError] , identifier[TypeError] , identifier[AssertionError] ):
identifier[covs] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[covs] =[ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[covs] ]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[covs] ])))
identifier[self] . identifier[covs] = identifier[covs]
identifier[report_sections] ( identifier[self] )
identifier[general_stats_headers] ( identifier[self] )
keyword[return] identifier[num_parsed] | def parse_reports(self):
""" Find Qualimap BamQC reports and parse their data """
# General stats - genome_results.txt
self.qualimap_bamqc_genome_results = dict()
for f in self.find_log_files('qualimap/bamqc/genome_results'):
parse_genome_results(self, f) # depends on [control=['for'], data=['f']]
self.qualimap_bamqc_genome_results = self.ignore_samples(self.qualimap_bamqc_genome_results)
# Coverage - coverage_histogram.txt
self.qualimap_bamqc_coverage_hist = dict()
for f in self.find_log_files('qualimap/bamqc/coverage', filehandles=True):
parse_coverage(self, f) # depends on [control=['for'], data=['f']]
self.qualimap_bamqc_coverage_hist = self.ignore_samples(self.qualimap_bamqc_coverage_hist)
# Insert size - insert_size_histogram.txt
self.qualimap_bamqc_insert_size_hist = dict()
for f in self.find_log_files('qualimap/bamqc/insert_size', filehandles=True):
parse_insert_size(self, f) # depends on [control=['for'], data=['f']]
self.qualimap_bamqc_insert_size_hist = self.ignore_samples(self.qualimap_bamqc_insert_size_hist)
# GC distribution - mapped_reads_gc-content_distribution.txt
self.qualimap_bamqc_gc_content_dist = dict()
self.qualimap_bamqc_gc_by_species = dict() # {'HUMAN': data_dict, 'MOUSE': data_dict}
for f in self.find_log_files('qualimap/bamqc/gc_dist', filehandles=True):
parse_gc_dist(self, f) # depends on [control=['for'], data=['f']]
self.qualimap_bamqc_gc_by_species = self.ignore_samples(self.qualimap_bamqc_gc_by_species)
num_parsed = max(len(self.qualimap_bamqc_genome_results), len(self.qualimap_bamqc_coverage_hist), len(self.qualimap_bamqc_insert_size_hist), len(self.qualimap_bamqc_gc_content_dist))
# Go no further if nothing found
if num_parsed == 0:
return 0 # depends on [control=['if'], data=[]]
try:
covs = config.qualimap_config['general_stats_coverage']
assert type(covs) == list
assert len(covs) > 0
covs = [str(i) for i in covs]
log.debug('Custom Qualimap thresholds: {}'.format(', '.join([i for i in covs]))) # depends on [control=['try'], data=[]]
except (AttributeError, TypeError, AssertionError):
covs = [1, 5, 10, 30, 50]
covs = [str(i) for i in covs]
log.debug('Using default Qualimap thresholds: {}'.format(', '.join([i for i in covs]))) # depends on [control=['except'], data=[]]
self.covs = covs
# Make the plots for the report
report_sections(self)
# Set up the general stats table
general_stats_headers(self)
# Return the number of reports we found
return num_parsed |
def getData(self, type: str) -> str:
    """Return the data stored for the given format.

    The format string is first passed through ``normalize_type``; if this
    DataTransfer object holds no data for the resulting format, an empty
    string is returned.

    :arg str type: Data format of the data, like 'text/plain'.
    """
    normalized = normalize_type(type)
    return self.__data.get(normalized, '')
constant[Get data of type format.
If this DataTransfer object does not have `type` data, return empty
string.
:arg str type: Data format of the data, like 'text/plain'.
]
return[call[name[self].__data.get, parameter[call[name[normalize_type], parameter[name[type]]], constant[]]]] | keyword[def] identifier[getData] ( identifier[self] , identifier[type] : identifier[str] )-> identifier[str] :
literal[string]
keyword[return] identifier[self] . identifier[__data] . identifier[get] ( identifier[normalize_type] ( identifier[type] ), literal[string] ) | def getData(self, type: str) -> str:
"""Get data of type format.
If this DataTransfer object does not have `type` data, return empty
string.
:arg str type: Data format of the data, like 'text/plain'.
"""
return self.__data.get(normalize_type(type), '') |
def files(patterns,
          require_tags=("require",),
          include_tags=("include",),
          exclude_tags=("exclude",),
          root=".",
          always_exclude=("**/.git*", "**/.lfs*", "**/.c9*", "**/.~c9*")):
    """
    Takes a list of lib50._config.TaggedValue and returns which files should
    be included and excluded from `root`.

    Any pattern tagged with a tag
        from include_tags will be included
        from require_tags can only be a file, that will then be included.
            MissingFilesError is raised if any required file is missing
        from exclude_tags will be excluded
    Any pattern in always_exclude will always be excluded.

    Returns an (included, excluded) pair of sets of root-relative path
    strings. File names that are not valid utf8 are moved to the excluded
    set (rendered with replacement characters).
    """
    require_tags = list(require_tags)
    include_tags = list(include_tags)
    exclude_tags = list(exclude_tags)

    # Ensure every tag starts with !
    for tags in [require_tags, include_tags, exclude_tags]:
        for i, tag in enumerate(tags):
            tags[i] = tag if tag.startswith("!") else "!" + tag

    with cd(root):
        # Include everything by default
        included = _glob("*")
        excluded = set()

        if patterns:
            missing_files = []

            # Per line in files
            for pattern in patterns:
                # Include all files that are tagged with !require
                if pattern.tag in require_tags:
                    file = str(Path(pattern.value))
                    if not Path(file).exists():
                        missing_files.append(file)
                    else:
                        # Move the required file from excluded to included;
                        # if it was not excluded, leave the sets as-is.
                        try:
                            excluded.remove(file)
                        except KeyError:
                            pass
                        else:
                            included.add(file)
                # Include all files that are tagged with !include
                elif pattern.tag in include_tags:
                    new_included = _glob(pattern.value)
                    excluded -= new_included
                    included.update(new_included)
                # Exclude all files that are tagged with !exclude
                elif pattern.tag in exclude_tags:
                    new_excluded = _glob(pattern.value)
                    included -= new_excluded
                    excluded.update(new_excluded)

            if missing_files:
                raise MissingFilesError(missing_files)

        # BUGFIX: these globs must run while the cwd is still `root`.
        # Previously they ran after the cd() context exited, so for
        # root != "." the patterns were matched against the caller's cwd
        # and never lined up with the root-relative names in `included`.
        # Exclude all files that match a pattern from always_exclude
        for line in always_exclude:
            included -= _glob(line)

        # Exclude any files that are not valid utf8
        invalid = set()
        for file in included:
            try:
                file.encode("utf8")
            except UnicodeEncodeError:
                excluded.add(file.encode("utf8", "replace").decode())
                invalid.add(file)
        included -= invalid

    return included, excluded
constant[
Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`.
Any pattern tagged with a tag
from include_tags will be included
from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing
from exclude_tags will be excluded
Any pattern in always_exclude will always be excluded.
]
variable[require_tags] assign[=] call[name[list], parameter[name[require_tags]]]
variable[include_tags] assign[=] call[name[list], parameter[name[include_tags]]]
variable[exclude_tags] assign[=] call[name[list], parameter[name[exclude_tags]]]
for taget[name[tags]] in starred[list[[<ast.Name object at 0x7da20c6aa410>, <ast.Name object at 0x7da20c6a9cf0>, <ast.Name object at 0x7da20c6a91e0>]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6a88b0>, <ast.Name object at 0x7da20c6a94b0>]]] in starred[call[name[enumerate], parameter[name[tags]]]] begin[:]
call[name[tags]][name[i]] assign[=] <ast.IfExp object at 0x7da20c6a8d00>
with call[name[cd], parameter[name[root]]] begin[:]
variable[included] assign[=] call[name[_glob], parameter[constant[*]]]
variable[excluded] assign[=] call[name[set], parameter[]]
if name[patterns] begin[:]
variable[missing_files] assign[=] list[[]]
for taget[name[pattern]] in starred[name[patterns]] begin[:]
if compare[name[pattern].tag in name[require_tags]] begin[:]
variable[file] assign[=] call[name[str], parameter[call[name[Path], parameter[name[pattern].value]]]]
if <ast.UnaryOp object at 0x7da20c6a8460> begin[:]
call[name[missing_files].append, parameter[name[file]]]
if name[missing_files] begin[:]
<ast.Raise object at 0x7da207f98e20>
for taget[name[line]] in starred[name[always_exclude]] begin[:]
<ast.AugAssign object at 0x7da207f9b7c0>
variable[invalid] assign[=] call[name[set], parameter[]]
for taget[name[file]] in starred[name[included]] begin[:]
<ast.Try object at 0x7da207f9b0a0>
<ast.AugAssign object at 0x7da207f9b040>
return[tuple[[<ast.Name object at 0x7da207f99390>, <ast.Name object at 0x7da207f9ace0>]]] | keyword[def] identifier[files] ( identifier[patterns] ,
identifier[require_tags] =( literal[string] ,),
identifier[include_tags] =( literal[string] ,),
identifier[exclude_tags] =( literal[string] ,),
identifier[root] = literal[string] ,
identifier[always_exclude] =( literal[string] , literal[string] , literal[string] , literal[string] )):
literal[string]
identifier[require_tags] = identifier[list] ( identifier[require_tags] )
identifier[include_tags] = identifier[list] ( identifier[include_tags] )
identifier[exclude_tags] = identifier[list] ( identifier[exclude_tags] )
keyword[for] identifier[tags] keyword[in] [ identifier[require_tags] , identifier[include_tags] , identifier[exclude_tags] ]:
keyword[for] identifier[i] , identifier[tag] keyword[in] identifier[enumerate] ( identifier[tags] ):
identifier[tags] [ identifier[i] ]= identifier[tag] keyword[if] identifier[tag] . identifier[startswith] ( literal[string] ) keyword[else] literal[string] + identifier[tag]
keyword[with] identifier[cd] ( identifier[root] ):
identifier[included] = identifier[_glob] ( literal[string] )
identifier[excluded] = identifier[set] ()
keyword[if] identifier[patterns] :
identifier[missing_files] =[]
keyword[for] identifier[pattern] keyword[in] identifier[patterns] :
keyword[if] identifier[pattern] . identifier[tag] keyword[in] identifier[require_tags] :
identifier[file] = identifier[str] ( identifier[Path] ( identifier[pattern] . identifier[value] ))
keyword[if] keyword[not] identifier[Path] ( identifier[file] ). identifier[exists] ():
identifier[missing_files] . identifier[append] ( identifier[file] )
keyword[else] :
keyword[try] :
identifier[excluded] . identifier[remove] ( identifier[file] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[else] :
identifier[included] . identifier[add] ( identifier[file] )
keyword[elif] identifier[pattern] . identifier[tag] keyword[in] identifier[include_tags] :
identifier[new_included] = identifier[_glob] ( identifier[pattern] . identifier[value] )
identifier[excluded] -= identifier[new_included]
identifier[included] . identifier[update] ( identifier[new_included] )
keyword[elif] identifier[pattern] . identifier[tag] keyword[in] identifier[exclude_tags] :
identifier[new_excluded] = identifier[_glob] ( identifier[pattern] . identifier[value] )
identifier[included] -= identifier[new_excluded]
identifier[excluded] . identifier[update] ( identifier[new_excluded] )
keyword[if] identifier[missing_files] :
keyword[raise] identifier[MissingFilesError] ( identifier[missing_files] )
keyword[for] identifier[line] keyword[in] identifier[always_exclude] :
identifier[included] -= identifier[_glob] ( identifier[line] )
identifier[invalid] = identifier[set] ()
keyword[for] identifier[file] keyword[in] identifier[included] :
keyword[try] :
identifier[file] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[excluded] . identifier[add] ( identifier[file] . identifier[encode] ( literal[string] , literal[string] ). identifier[decode] ())
identifier[invalid] . identifier[add] ( identifier[file] )
identifier[included] -= identifier[invalid]
keyword[return] identifier[included] , identifier[excluded] | def files(patterns, require_tags=('require',), include_tags=('include',), exclude_tags=('exclude',), root='.', always_exclude=('**/.git*', '**/.lfs*', '**/.c9*', '**/.~c9*')):
"""
Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`.
Any pattern tagged with a tag
from include_tags will be included
from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing
from exclude_tags will be excluded
Any pattern in always_exclude will always be excluded.
"""
require_tags = list(require_tags)
include_tags = list(include_tags)
exclude_tags = list(exclude_tags)
# Ensure every tag starts with !
for tags in [require_tags, include_tags, exclude_tags]:
for (i, tag) in enumerate(tags):
tags[i] = tag if tag.startswith('!') else '!' + tag # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['tags']]
with cd(root):
# Include everything by default
included = _glob('*')
excluded = set()
if patterns:
missing_files = []
# Per line in files
for pattern in patterns:
# Include all files that are tagged with !require
if pattern.tag in require_tags:
file = str(Path(pattern.value))
if not Path(file).exists():
missing_files.append(file) # depends on [control=['if'], data=[]]
else:
try:
excluded.remove(file) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
else:
included.add(file) # depends on [control=['if'], data=[]]
# Include all files that are tagged with !include
elif pattern.tag in include_tags:
new_included = _glob(pattern.value)
excluded -= new_included
included.update(new_included) # depends on [control=['if'], data=[]]
# Exclude all files that are tagged with !exclude
elif pattern.tag in exclude_tags:
new_excluded = _glob(pattern.value)
included -= new_excluded
excluded.update(new_excluded) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']]
if missing_files:
raise MissingFilesError(missing_files) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
# Exclude all files that match a pattern from always_exclude
for line in always_exclude:
included -= _glob(line) # depends on [control=['for'], data=['line']]
# Exclude any files that are not valid utf8
invalid = set()
for file in included:
try:
file.encode('utf8') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
excluded.add(file.encode('utf8', 'replace').decode())
invalid.add(file) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['file']]
included -= invalid
return (included, excluded) |
def _fit(self, dataset):
    """Train a TensorFlow model on a cluster launched from this Spark job.

    Merges the estimator's constructor args with any fitted params, launches a
    TFCluster running ``self.train_fn``, feeds it the input data (either as
    exported TFRecords or as a streamed RDD, depending on ``input_mode``), and
    optionally runs a saved_model export function afterwards.

    Args:
      :dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
    Returns:
      A TFModel constructed from the same ``self.args``, i.e. pointing at the
      checkpoint/saved_model location the training function wrote to disk.
    """
    sc = SparkContext.getOrCreate()
    logging.info("===== 1. train args: {0}".format(self.args))
    logging.info("===== 2. train params: {0}".format(self._paramMap))
    # Params set via setters override the original args; see merge_args_params.
    local_args = self.merge_args_params()
    logging.info("===== 3. train args + params: {0}".format(local_args))
    if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:
        # TENSORFLOW input mode: the TF processes read TFRecords from disk
        # themselves, so the DataFrame must exist on disk as TFRecords first.
        if dfutil.isLoadedDF(dataset):
            # if just a DataFrame loaded from tfrecords, just point to original source path
            logging.info("Loaded DataFrame of TFRecord.")
            local_args.tfrecord_dir = dfutil.loadedDF[dataset]
        else:
            # otherwise, save as tfrecords and point to save path
            assert local_args.tfrecord_dir, "Please specify --tfrecord_dir to export DataFrame to TFRecord."
            if self.getInputMapping():
                # if input mapping provided, filter only required columns before exporting
                dataset = dataset.select(list(self.getInputMapping()))
            logging.info("Exporting DataFrame {} as TFRecord to: {}".format(dataset.dtypes, local_args.tfrecord_dir))
            dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)
            logging.info("Done saving")
    # NOTE(review): when args.argv is set, the raw argv list is forwarded to the
    # cluster instead of the merged args — presumably the train_fn re-parses it;
    # confirm against the train_fn contract.
    tf_args = self.args.argv if self.args.argv else local_args
    cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps,
                            local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes)
    if local_args.input_mode == TFCluster.InputMode.SPARK:
        # feed data, using a deterministic order for input columns (lexicographic by key)
        input_cols = sorted(self.getInputMapping())
        cluster.train(dataset.select(input_cols).rdd, local_args.epochs)
    # Blocks until the TF processes finish (or the grace period expires).
    cluster.shutdown(grace_secs=30)
    # Run export function, if provided
    if self.export_fn:
        assert local_args.export_dir, "Export function requires --export_dir to be set"
        logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir))
        def _export(iterator, fn, args):
            # Runs on the executor: set up a single-node TF env, then export.
            single_node_env(args)
            fn(args)
        # Run on a single executor
        sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))
    return self._copyValues(TFModel(self.args))
constant[Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.
Args:
:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
Returns:
A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
]
variable[sc] assign[=] call[name[SparkContext].getOrCreate, parameter[]]
call[name[logging].info, parameter[call[constant[===== 1. train args: {0}].format, parameter[name[self].args]]]]
call[name[logging].info, parameter[call[constant[===== 2. train params: {0}].format, parameter[name[self]._paramMap]]]]
variable[local_args] assign[=] call[name[self].merge_args_params, parameter[]]
call[name[logging].info, parameter[call[constant[===== 3. train args + params: {0}].format, parameter[name[local_args]]]]]
if compare[name[local_args].input_mode equal[==] name[TFCluster].InputMode.TENSORFLOW] begin[:]
if call[name[dfutil].isLoadedDF, parameter[name[dataset]]] begin[:]
call[name[logging].info, parameter[constant[Loaded DataFrame of TFRecord.]]]
name[local_args].tfrecord_dir assign[=] call[name[dfutil].loadedDF][name[dataset]]
variable[tf_args] assign[=] <ast.IfExp object at 0x7da18c4cd150>
variable[cluster] assign[=] call[name[TFCluster].run, parameter[name[sc], name[self].train_fn, name[tf_args], name[local_args].cluster_size, name[local_args].num_ps, name[local_args].tensorboard, name[local_args].input_mode]]
if compare[name[local_args].input_mode equal[==] name[TFCluster].InputMode.SPARK] begin[:]
variable[input_cols] assign[=] call[name[sorted], parameter[call[name[self].getInputMapping, parameter[]]]]
call[name[cluster].train, parameter[call[name[dataset].select, parameter[name[input_cols]]].rdd, name[local_args].epochs]]
call[name[cluster].shutdown, parameter[]]
if name[self].export_fn begin[:]
assert[name[local_args].export_dir]
call[name[logging].info, parameter[call[constant[Exporting saved_model (via export_fn) to: {}].format, parameter[name[local_args].export_dir]]]]
def function[_export, parameter[iterator, fn, args]]:
call[name[single_node_env], parameter[name[args]]]
call[name[fn], parameter[name[args]]]
call[call[name[sc].parallelize, parameter[list[[<ast.Constant object at 0x7da2041d82e0>]], constant[1]]].foreachPartition, parameter[<ast.Lambda object at 0x7da2041dbd30>]]
return[call[name[self]._copyValues, parameter[call[name[TFModel], parameter[name[self].args]]]]] | keyword[def] identifier[_fit] ( identifier[self] , identifier[dataset] ):
literal[string]
identifier[sc] = identifier[SparkContext] . identifier[getOrCreate] ()
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[args] ))
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[_paramMap] ))
identifier[local_args] = identifier[self] . identifier[merge_args_params] ()
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[local_args] ))
keyword[if] identifier[local_args] . identifier[input_mode] == identifier[TFCluster] . identifier[InputMode] . identifier[TENSORFLOW] :
keyword[if] identifier[dfutil] . identifier[isLoadedDF] ( identifier[dataset] ):
identifier[logging] . identifier[info] ( literal[string] )
identifier[local_args] . identifier[tfrecord_dir] = identifier[dfutil] . identifier[loadedDF] [ identifier[dataset] ]
keyword[else] :
keyword[assert] identifier[local_args] . identifier[tfrecord_dir] , literal[string]
keyword[if] identifier[self] . identifier[getInputMapping] ():
identifier[dataset] = identifier[dataset] . identifier[select] ( identifier[list] ( identifier[self] . identifier[getInputMapping] ()))
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[dataset] . identifier[dtypes] , identifier[local_args] . identifier[tfrecord_dir] ))
identifier[dfutil] . identifier[saveAsTFRecords] ( identifier[dataset] , identifier[local_args] . identifier[tfrecord_dir] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[tf_args] = identifier[self] . identifier[args] . identifier[argv] keyword[if] identifier[self] . identifier[args] . identifier[argv] keyword[else] identifier[local_args]
identifier[cluster] = identifier[TFCluster] . identifier[run] ( identifier[sc] , identifier[self] . identifier[train_fn] , identifier[tf_args] , identifier[local_args] . identifier[cluster_size] , identifier[local_args] . identifier[num_ps] ,
identifier[local_args] . identifier[tensorboard] , identifier[local_args] . identifier[input_mode] , identifier[driver_ps_nodes] = identifier[local_args] . identifier[driver_ps_nodes] )
keyword[if] identifier[local_args] . identifier[input_mode] == identifier[TFCluster] . identifier[InputMode] . identifier[SPARK] :
identifier[input_cols] = identifier[sorted] ( identifier[self] . identifier[getInputMapping] ())
identifier[cluster] . identifier[train] ( identifier[dataset] . identifier[select] ( identifier[input_cols] ). identifier[rdd] , identifier[local_args] . identifier[epochs] )
identifier[cluster] . identifier[shutdown] ( identifier[grace_secs] = literal[int] )
keyword[if] identifier[self] . identifier[export_fn] :
keyword[assert] identifier[local_args] . identifier[export_dir] , literal[string]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[local_args] . identifier[export_dir] ))
keyword[def] identifier[_export] ( identifier[iterator] , identifier[fn] , identifier[args] ):
identifier[single_node_env] ( identifier[args] )
identifier[fn] ( identifier[args] )
identifier[sc] . identifier[parallelize] ([ literal[int] ], literal[int] ). identifier[foreachPartition] ( keyword[lambda] identifier[it] : identifier[_export] ( identifier[it] , identifier[self] . identifier[export_fn] , identifier[tf_args] ))
keyword[return] identifier[self] . identifier[_copyValues] ( identifier[TFModel] ( identifier[self] . identifier[args] )) | def _fit(self, dataset):
"""Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.
Args:
:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
Returns:
A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
"""
sc = SparkContext.getOrCreate()
logging.info('===== 1. train args: {0}'.format(self.args))
logging.info('===== 2. train params: {0}'.format(self._paramMap))
local_args = self.merge_args_params()
logging.info('===== 3. train args + params: {0}'.format(local_args))
if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:
if dfutil.isLoadedDF(dataset):
# if just a DataFrame loaded from tfrecords, just point to original source path
logging.info('Loaded DataFrame of TFRecord.')
local_args.tfrecord_dir = dfutil.loadedDF[dataset] # depends on [control=['if'], data=[]]
else:
# otherwise, save as tfrecords and point to save path
assert local_args.tfrecord_dir, 'Please specify --tfrecord_dir to export DataFrame to TFRecord.'
if self.getInputMapping():
# if input mapping provided, filter only required columns before exporting
dataset = dataset.select(list(self.getInputMapping())) # depends on [control=['if'], data=[]]
logging.info('Exporting DataFrame {} as TFRecord to: {}'.format(dataset.dtypes, local_args.tfrecord_dir))
dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)
logging.info('Done saving') # depends on [control=['if'], data=[]]
tf_args = self.args.argv if self.args.argv else local_args
cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps, local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes)
if local_args.input_mode == TFCluster.InputMode.SPARK:
# feed data, using a deterministic order for input columns (lexicographic by key)
input_cols = sorted(self.getInputMapping())
cluster.train(dataset.select(input_cols).rdd, local_args.epochs) # depends on [control=['if'], data=[]]
cluster.shutdown(grace_secs=30)
# Run export function, if provided
if self.export_fn:
assert local_args.export_dir, 'Export function requires --export_dir to be set'
logging.info('Exporting saved_model (via export_fn) to: {}'.format(local_args.export_dir))
def _export(iterator, fn, args):
single_node_env(args)
fn(args)
# Run on a single exeucutor
sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args)) # depends on [control=['if'], data=[]]
return self._copyValues(TFModel(self.args)) |
def remove_child(self, child_pid, reorder=False):
    """Detach ``child_pid`` from this ordered PID concept.

    The actual unlinking is delegated to the parent class. The surviving
    child relations of this relation type are then fetched in index order,
    and, when ``reorder`` is true, re-numbered contiguously from zero so the
    removal leaves no gap in the ordering.
    """
    super(PIDNodeOrdered, self).remove_child(child_pid)
    # Query the remaining children of this relation type, sorted by their
    # stored position.
    remaining = (
        self._resolved_pid.child_relations
        .filter(PIDRelation.relation_type == self.relation_type.id)
        .order_by(PIDRelation.index)
        .all()
    )
    if reorder:
        # Re-assign contiguous indices: 0, 1, 2, ...
        position = 0
        for relation in remaining:
            relation.index = position
            position += 1
constant[Remove a child from a PID concept.]
call[call[name[super], parameter[name[PIDNodeOrdered], name[self]]].remove_child, parameter[name[child_pid]]]
variable[child_relations] assign[=] call[call[call[name[self]._resolved_pid.child_relations.filter, parameter[compare[name[PIDRelation].relation_type equal[==] name[self].relation_type.id]]].order_by, parameter[name[PIDRelation].index]].all, parameter[]]
if name[reorder] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f58f490>, <ast.Name object at 0x7da18f58f640>]]] in starred[call[name[enumerate], parameter[name[child_relations]]]] begin[:]
name[c].index assign[=] name[idx] | keyword[def] identifier[remove_child] ( identifier[self] , identifier[child_pid] , identifier[reorder] = keyword[False] ):
literal[string]
identifier[super] ( identifier[PIDNodeOrdered] , identifier[self] ). identifier[remove_child] ( identifier[child_pid] )
identifier[child_relations] = identifier[self] . identifier[_resolved_pid] . identifier[child_relations] . identifier[filter] (
identifier[PIDRelation] . identifier[relation_type] == identifier[self] . identifier[relation_type] . identifier[id] ). identifier[order_by] (
identifier[PIDRelation] . identifier[index] ). identifier[all] ()
keyword[if] identifier[reorder] :
keyword[for] identifier[idx] , identifier[c] keyword[in] identifier[enumerate] ( identifier[child_relations] ):
identifier[c] . identifier[index] = identifier[idx] | def remove_child(self, child_pid, reorder=False):
"""Remove a child from a PID concept."""
super(PIDNodeOrdered, self).remove_child(child_pid)
child_relations = self._resolved_pid.child_relations.filter(PIDRelation.relation_type == self.relation_type.id).order_by(PIDRelation.index).all()
if reorder:
for (idx, c) in enumerate(child_relations):
c.index = idx # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def natural_keys(text):
    """Sort key that orders strings in human ("natural") order.

    Splits ``text`` into alternating non-digit and digit runs (the capturing
    group in ``re.split`` keeps the digit runs) and converts each digit run
    to an ``int``, so e.g. ``"item10"`` sorts after ``"item2"``.
    Use as ``alist.sort(key=natural_keys)``.
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    key = []
    for chunk in re.split(r'(\d+)', text):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
constant[
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
]
return[<ast.ListComp object at 0x7da1b13840a0>] | keyword[def] identifier[natural_keys] ( identifier[text] ):
literal[string]
keyword[return] [ identifier[int] ( identifier[c] ) keyword[if] identifier[c] . identifier[isdigit] () keyword[else] identifier[c] keyword[for] identifier[c] keyword[in] identifier[re] . identifier[split] ( literal[string] , identifier[text] )] | def natural_keys(text):
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [int(c) if c.isdigit() else c for c in re.split('(\\d+)', text)] |
def mask_struct(mask, struct, replace=no_value):
    """
    Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
    *replace* is set, masked values are replaced with that value instead of being removed. The
    *mask* can have a complex structure as well. Examples:
    .. code-block:: python
        struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
        # simple example
        mask_struct({"a": [False, True], "b": False}, struct)
        # => {"a": [2]}
        # omitting mask information results in keeping values
        mask_struct({"a": [False, True]}, struct)
        # => {"a": [2], "b": [3, ["foo", "bar"]]}
    Raises:
        TypeError: if *mask* and *struct* have incompatible (non-matching) types.
    """
    # interpret lazy iterables as lists so they can be matched element-wise below
    if is_lazy_iterable(struct):
        struct = list(struct)
    # when mask is a bool, or struct is not a dict or sequence, apply the mask immediately
    if isinstance(mask, bool) or not isinstance(struct, (list, tuple, dict)):
        return struct if mask else replace
    # check list and tuple types
    elif isinstance(struct, (list, tuple)) and isinstance(mask, (list, tuple)):
        new_struct = []
        for i, val in enumerate(struct):
            if i >= len(mask):
                # mask is shorter than struct: values without mask info are kept
                new_struct.append(val)
            else:
                # pick the positional replacement when replace is itself a sequence
                repl = replace
                if isinstance(replace, (list, tuple)) and len(replace) > i:
                    repl = replace[i]
                val = mask_struct(mask[i], val, replace=repl)
                # identity check against the sentinel: an equality check
                # ("val != no_value") breaks for values with broadcasting
                # __ne__ (e.g. numpy arrays, whose comparison result has no
                # single truth value)
                if val is not no_value:
                    new_struct.append(val)
        # preserve the concrete sequence type (list vs tuple) of the input
        return struct.__class__(new_struct) if new_struct else replace
    # check dict types
    elif isinstance(struct, dict) and isinstance(mask, dict):
        new_struct = struct.__class__()
        for key, val in six.iteritems(struct):
            if key not in mask:
                # no mask entry for this key: keep the value untouched
                new_struct[key] = val
            else:
                # pick the per-key replacement when replace is itself a dict
                repl = replace
                if isinstance(replace, dict) and key in replace:
                    repl = replace[key]
                val = mask_struct(mask[key], val, replace=repl)
                # identity check against the sentinel (same rationale as the
                # sequence branch)
                if val is not no_value:
                    new_struct[key] = val
        return new_struct or replace
    # when this point is reached, mask and struct have incompatible types
    raise TypeError("mask and struct must have the same type, got '{}' and '{}'".format(type(mask),
        type(struct)))
constant[
Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
*replace* is set, masked values are replaced with that value instead of being removed. The
*mask* can have a complex structure as well. Examples:
.. code-block:: python
struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
# simple example
mask_struct({"a": [False, True], "b": False}, struct)
# => {"a": [2]}
# omitting mask information results in keeping values
mask_struct({"a": [False, True]}, struct)
# => {"a": [2], "b": [3, ["foo", "bar"]]}
]
if call[name[is_lazy_iterable], parameter[name[struct]]] begin[:]
variable[struct] assign[=] call[name[list], parameter[name[struct]]]
if <ast.BoolOp object at 0x7da1b0556770> begin[:]
return[<ast.IfExp object at 0x7da1b0557490>]
<ast.Raise object at 0x7da1b050ff10> | keyword[def] identifier[mask_struct] ( identifier[mask] , identifier[struct] , identifier[replace] = identifier[no_value] ):
literal[string]
keyword[if] identifier[is_lazy_iterable] ( identifier[struct] ):
identifier[struct] = identifier[list] ( identifier[struct] )
keyword[if] identifier[isinstance] ( identifier[mask] , identifier[bool] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[struct] ,( identifier[list] , identifier[tuple] , identifier[dict] )):
keyword[return] identifier[struct] keyword[if] identifier[mask] keyword[else] identifier[replace]
keyword[elif] identifier[isinstance] ( identifier[struct] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[isinstance] ( identifier[mask] ,( identifier[list] , identifier[tuple] )):
identifier[new_struct] =[]
keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[struct] ):
keyword[if] identifier[i] >= identifier[len] ( identifier[mask] ):
identifier[new_struct] . identifier[append] ( identifier[val] )
keyword[else] :
identifier[repl] = identifier[replace]
keyword[if] identifier[isinstance] ( identifier[replace] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[len] ( identifier[replace] )> identifier[i] :
identifier[repl] = identifier[replace] [ identifier[i] ]
identifier[val] = identifier[mask_struct] ( identifier[mask] [ identifier[i] ], identifier[val] , identifier[replace] = identifier[repl] )
keyword[if] identifier[val] != identifier[no_value] :
identifier[new_struct] . identifier[append] ( identifier[val] )
keyword[return] identifier[struct] . identifier[__class__] ( identifier[new_struct] ) keyword[if] identifier[new_struct] keyword[else] identifier[replace]
keyword[elif] identifier[isinstance] ( identifier[struct] , identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[mask] , identifier[dict] ):
identifier[new_struct] = identifier[struct] . identifier[__class__] ()
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[struct] ):
keyword[if] identifier[key] keyword[not] keyword[in] identifier[mask] :
identifier[new_struct] [ identifier[key] ]= identifier[val]
keyword[else] :
identifier[repl] = identifier[replace]
keyword[if] identifier[isinstance] ( identifier[replace] , identifier[dict] ) keyword[and] identifier[key] keyword[in] identifier[replace] :
identifier[repl] = identifier[replace] [ identifier[key] ]
identifier[val] = identifier[mask_struct] ( identifier[mask] [ identifier[key] ], identifier[val] , identifier[replace] = identifier[repl] )
keyword[if] identifier[val] != identifier[no_value] :
identifier[new_struct] [ identifier[key] ]= identifier[val]
keyword[return] identifier[new_struct] keyword[or] identifier[replace]
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[mask] ),
identifier[type] ( identifier[struct] ))) | def mask_struct(mask, struct, replace=no_value):
"""
Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
*replace* is set, masked values are replaced with that value instead of being removed. The
*mask* can have a complex structure as well. Examples:
.. code-block:: python
struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
# simple example
mask_struct({"a": [False, True], "b": False}, struct)
# => {"a": [2]}
# omitting mask information results in keeping values
mask_struct({"a": [False, True]}, struct)
# => {"a": [2], "b": [3, ["foo", "bar"]]}
"""
# interpret lazy iterables lists
if is_lazy_iterable(struct):
struct = list(struct) # depends on [control=['if'], data=[]]
# when mask is a bool, or struct is not a dict or sequence, apply the mask immediately
if isinstance(mask, bool) or not isinstance(struct, (list, tuple, dict)):
return struct if mask else replace # depends on [control=['if'], data=[]]
# check list and tuple types
elif isinstance(struct, (list, tuple)) and isinstance(mask, (list, tuple)):
new_struct = []
for (i, val) in enumerate(struct):
if i >= len(mask):
new_struct.append(val) # depends on [control=['if'], data=[]]
else:
repl = replace
if isinstance(replace, (list, tuple)) and len(replace) > i:
repl = replace[i] # depends on [control=['if'], data=[]]
val = mask_struct(mask[i], val, replace=repl)
if val != no_value:
new_struct.append(val) # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=[]]
return struct.__class__(new_struct) if new_struct else replace # depends on [control=['if'], data=[]]
# check dict types
elif isinstance(struct, dict) and isinstance(mask, dict):
new_struct = struct.__class__()
for (key, val) in six.iteritems(struct):
if key not in mask:
new_struct[key] = val # depends on [control=['if'], data=['key']]
else:
repl = replace
if isinstance(replace, dict) and key in replace:
repl = replace[key] # depends on [control=['if'], data=[]]
val = mask_struct(mask[key], val, replace=repl)
if val != no_value:
new_struct[key] = val # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=[]]
return new_struct or replace # depends on [control=['if'], data=[]]
# when this point is reached, mask and struct have incompatible types
raise TypeError("mask and struct must have the same type, got '{}' and '{}'".format(type(mask), type(struct))) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.