code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def run(path, code=None, params=None, **meta):
    """Eradicate code checking.
    :return list: List of errors.
    """
    code = converter(code)
    source_lines = code.split('\n')
    errors = []
    for lnum in commented_out_code_line_numbers(code):
        src = source_lines[lnum - 1]
        errors.append({
            'lnum': lnum,
            # offset: count of trailing whitespace characters on the line
            'offset': len(src) - len(src.rstrip()),
            # https://github.com/sobolevn/flake8-eradicate#output-example
            'text': converter('E800 Found commented out code: ') + src,
            # https://github.com/sobolevn/flake8-eradicate#error-codes
            'type': 'E800',
        })
    return errors
constant[Eradicate code checking.
:return list: List of errors.
]
variable[code] assign[=] call[name[converter], parameter[name[code]]]
variable[line_numbers] assign[=] call[name[commented_out_code_line_numbers], parameter[name[code]]]
variable[lines] assign[=] call[name[code].split, parameter[constant[
]]]
variable[result] assign[=] list[[]]
for taget[name[line_number]] in starred[name[line_numbers]] begin[:]
variable[line] assign[=] call[name[lines]][binary_operation[name[line_number] - constant[1]]]
call[name[result].append, parameter[call[name[dict], parameter[]]]]
return[name[result]] | keyword[def] identifier[run] ( identifier[path] , identifier[code] = keyword[None] , identifier[params] = keyword[None] ,** identifier[meta] ):
literal[string]
identifier[code] = identifier[converter] ( identifier[code] )
identifier[line_numbers] = identifier[commented_out_code_line_numbers] ( identifier[code] )
identifier[lines] = identifier[code] . identifier[split] ( literal[string] )
identifier[result] =[]
keyword[for] identifier[line_number] keyword[in] identifier[line_numbers] :
identifier[line] = identifier[lines] [ identifier[line_number] - literal[int] ]
identifier[result] . identifier[append] ( identifier[dict] (
identifier[lnum] = identifier[line_number] ,
identifier[offset] = identifier[len] ( identifier[line] )- identifier[len] ( identifier[line] . identifier[rstrip] ()),
identifier[text] = identifier[converter] ( literal[string] )+ identifier[line] ,
identifier[type] = literal[string] ,
))
keyword[return] identifier[result] | def run(path, code=None, params=None, **meta):
"""Eradicate code checking.
:return list: List of errors.
"""
code = converter(code)
line_numbers = commented_out_code_line_numbers(code)
lines = code.split('\n')
result = []
for line_number in line_numbers:
line = lines[line_number - 1]
# https://github.com/sobolevn/flake8-eradicate#output-example
# https://github.com/sobolevn/flake8-eradicate#error-codes
result.append(dict(lnum=line_number, offset=len(line) - len(line.rstrip()), text=converter('E800 Found commented out code: ') + line, type='E800')) # depends on [control=['for'], data=['line_number']]
return result |
def _get_sorted_iterator(self, iterator):
"""
Get the iterator over the sorted items.
This function decides whether the items can be sorted in memory or on disk.
:return:
"""
lines = list(next(iterator))
if len(lines) < self.max_lines:
return iter(sorted(lines, key=self.key))
import tempfile
tmp_dir = tempfile.mkdtemp()
fnames = self._split(chain([lines], iterator), tmp_dir)
return SortedIteratorMerger([unpickle_iter(open(fname, 'rb')) for fname in fnames], self.key) | def function[_get_sorted_iterator, parameter[self, iterator]]:
constant[
Get the iterator over the sorted items.
This function decides whether the items can be sorted in memory or on disk.
:return:
]
variable[lines] assign[=] call[name[list], parameter[call[name[next], parameter[name[iterator]]]]]
if compare[call[name[len], parameter[name[lines]]] less[<] name[self].max_lines] begin[:]
return[call[name[iter], parameter[call[name[sorted], parameter[name[lines]]]]]]
import module[tempfile]
variable[tmp_dir] assign[=] call[name[tempfile].mkdtemp, parameter[]]
variable[fnames] assign[=] call[name[self]._split, parameter[call[name[chain], parameter[list[[<ast.Name object at 0x7da18f723b50>]], name[iterator]]], name[tmp_dir]]]
return[call[name[SortedIteratorMerger], parameter[<ast.ListComp object at 0x7da18f7209a0>, name[self].key]]] | keyword[def] identifier[_get_sorted_iterator] ( identifier[self] , identifier[iterator] ):
literal[string]
identifier[lines] = identifier[list] ( identifier[next] ( identifier[iterator] ))
keyword[if] identifier[len] ( identifier[lines] )< identifier[self] . identifier[max_lines] :
keyword[return] identifier[iter] ( identifier[sorted] ( identifier[lines] , identifier[key] = identifier[self] . identifier[key] ))
keyword[import] identifier[tempfile]
identifier[tmp_dir] = identifier[tempfile] . identifier[mkdtemp] ()
identifier[fnames] = identifier[self] . identifier[_split] ( identifier[chain] ([ identifier[lines] ], identifier[iterator] ), identifier[tmp_dir] )
keyword[return] identifier[SortedIteratorMerger] ([ identifier[unpickle_iter] ( identifier[open] ( identifier[fname] , literal[string] )) keyword[for] identifier[fname] keyword[in] identifier[fnames] ], identifier[self] . identifier[key] ) | def _get_sorted_iterator(self, iterator):
"""
Get the iterator over the sorted items.
This function decides whether the items can be sorted in memory or on disk.
:return:
"""
lines = list(next(iterator))
if len(lines) < self.max_lines:
return iter(sorted(lines, key=self.key)) # depends on [control=['if'], data=[]]
import tempfile
tmp_dir = tempfile.mkdtemp()
fnames = self._split(chain([lines], iterator), tmp_dir)
return SortedIteratorMerger([unpickle_iter(open(fname, 'rb')) for fname in fnames], self.key) |
def step_indices(group_idx):
    """ Get the edges of areas within group_idx, which are filled
    with the same value
    """
    # One edge per value step, plus the trailing boundary.
    ilen = step_count(group_idx) + 1
    indices = np.empty(ilen, int)
    indices[0] = 0
    indices[-1] = group_idx.size
    # NOTE(review): the inlined C kernel binds the locals 'group_idx' and
    # 'indices' BY NAME (see the variable-name list below) and fills the
    # interior edge positions of `indices` in place -- presumably via
    # weave-style inline compilation; do not rename these locals.
    inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices | def function[step_indices, parameter[group_idx]]:
constant[ Get the edges of areas within group_idx, which are filled
with the same value
]
variable[ilen] assign[=] binary_operation[call[name[step_count], parameter[name[group_idx]]] + constant[1]]
variable[indices] assign[=] call[name[np].empty, parameter[name[ilen], name[int]]]
call[name[indices]][constant[0]] assign[=] constant[0]
call[name[indices]][<ast.UnaryOp object at 0x7da1b0627b50>] assign[=] name[group_idx].size
call[name[inline], parameter[name[c_step_indices], list[[<ast.Constant object at 0x7da1b0627460>, <ast.Constant object at 0x7da1b0624a60>]]]]
return[name[indices]] | keyword[def] identifier[step_indices] ( identifier[group_idx] ):
literal[string]
identifier[ilen] = identifier[step_count] ( identifier[group_idx] )+ literal[int]
identifier[indices] = identifier[np] . identifier[empty] ( identifier[ilen] , identifier[int] )
identifier[indices] [ literal[int] ]= literal[int]
identifier[indices] [- literal[int] ]= identifier[group_idx] . identifier[size]
identifier[inline] ( identifier[c_step_indices] ,[ literal[string] , literal[string] ], identifier[define_macros] = identifier[c_macros] , identifier[extra_compile_args] = identifier[c_args] )
keyword[return] identifier[indices] | def step_indices(group_idx):
""" Get the edges of areas within group_idx, which are filled
with the same value
"""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, int)
indices[0] = 0
indices[-1] = group_idx.size
inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices |
def add_views(cls, config, rule_prefix, routename_prefix=None):
    """
    A convenience method for registering the routes and views in pyramid.
    This automatically adds a list and detail endpoint to your routes.
    :param config: The pyramid ``Configurator`` object for your app.
    :type config: ``pyramid.config.Configurator``
    :param rule_prefix: The start of the URL to handle.
    :type rule_prefix: string
    :param routename_prefix: (Optional) A prefix for the route's name.
        The default is ``None``, which will autocreate a prefix based on
        the class name. Ex: ``PostResource`` -> ``api_post_list``
    :type routename_prefix: string
    :returns: ``pyramid.config.Configurator``
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    list_route = cls.build_routename('list', routename_prefix)
    detail_route = cls.build_routename('detail', routename_prefix)
    # List endpoint handles the bare prefix.
    config.add_route(list_route, rule_prefix)
    config.add_view(
        cls.as_list(),
        route_name=list_route,
        request_method=allowed_methods,
    )
    # Detail endpoint addresses a single item by name.
    config.add_route(detail_route, rule_prefix + '{name}/')
    config.add_view(
        cls.as_detail(),
        route_name=detail_route,
        request_method=allowed_methods,
    )
    return config
constant[
A convenience method for registering the routes and views in pyramid.
This automatically adds a list and detail endpoint to your routes.
:param config: The pyramid ``Configurator`` object for your app.
:type config: ``pyramid.config.Configurator``
:param rule_prefix: The start of the URL to handle.
:type rule_prefix: string
:param routename_prefix: (Optional) A prefix for the route's name.
The default is ``None``, which will autocreate a prefix based on the
class name. Ex: ``PostResource`` -> ``api_post_list``
:type routename_prefix: string
:returns: ``pyramid.config.Configurator``
]
variable[methods] assign[=] tuple[[<ast.Constant object at 0x7da18f00cc40>, <ast.Constant object at 0x7da18f00fdc0>, <ast.Constant object at 0x7da18f00ee90>, <ast.Constant object at 0x7da18f00d9f0>]]
call[name[config].add_route, parameter[call[name[cls].build_routename, parameter[constant[list], name[routename_prefix]]], name[rule_prefix]]]
call[name[config].add_view, parameter[call[name[cls].as_list, parameter[]]]]
call[name[config].add_route, parameter[call[name[cls].build_routename, parameter[constant[detail], name[routename_prefix]]], binary_operation[name[rule_prefix] + constant[{name}/]]]]
call[name[config].add_view, parameter[call[name[cls].as_detail, parameter[]]]]
return[name[config]] | keyword[def] identifier[add_views] ( identifier[cls] , identifier[config] , identifier[rule_prefix] , identifier[routename_prefix] = keyword[None] ):
literal[string]
identifier[methods] =( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[config] . identifier[add_route] (
identifier[cls] . identifier[build_routename] ( literal[string] , identifier[routename_prefix] ),
identifier[rule_prefix]
)
identifier[config] . identifier[add_view] (
identifier[cls] . identifier[as_list] (),
identifier[route_name] = identifier[cls] . identifier[build_routename] ( literal[string] , identifier[routename_prefix] ),
identifier[request_method] = identifier[methods]
)
identifier[config] . identifier[add_route] (
identifier[cls] . identifier[build_routename] ( literal[string] , identifier[routename_prefix] ),
identifier[rule_prefix] + literal[string]
)
identifier[config] . identifier[add_view] (
identifier[cls] . identifier[as_detail] (),
identifier[route_name] = identifier[cls] . identifier[build_routename] ( literal[string] , identifier[routename_prefix] ),
identifier[request_method] = identifier[methods]
)
keyword[return] identifier[config] | def add_views(cls, config, rule_prefix, routename_prefix=None):
"""
A convenience method for registering the routes and views in pyramid.
This automatically adds a list and detail endpoint to your routes.
:param config: The pyramid ``Configurator`` object for your app.
:type config: ``pyramid.config.Configurator``
:param rule_prefix: The start of the URL to handle.
:type rule_prefix: string
:param routename_prefix: (Optional) A prefix for the route's name.
The default is ``None``, which will autocreate a prefix based on the
class name. Ex: ``PostResource`` -> ``api_post_list``
:type routename_prefix: string
:returns: ``pyramid.config.Configurator``
"""
methods = ('GET', 'POST', 'PUT', 'DELETE')
config.add_route(cls.build_routename('list', routename_prefix), rule_prefix)
config.add_view(cls.as_list(), route_name=cls.build_routename('list', routename_prefix), request_method=methods)
config.add_route(cls.build_routename('detail', routename_prefix), rule_prefix + '{name}/')
config.add_view(cls.as_detail(), route_name=cls.build_routename('detail', routename_prefix), request_method=methods)
return config |
def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) | def function[_process_keystroke_commands, parameter[self, inp]]:
constant[Process keystrokes that issue commands (side effects).]
if compare[name[inp] in tuple[[<ast.Constant object at 0x7da18eb54760>, <ast.Constant object at 0x7da18eb56500>]]] begin[:]
if compare[call[name[int], parameter[name[inp]]] not_equal[!=] name[self].screen.wide] begin[:]
name[self].screen.wide assign[=] call[name[int], parameter[name[inp]]]
call[name[self].on_resize, parameter[constant[None], constant[None]]] | keyword[def] identifier[_process_keystroke_commands] ( identifier[self] , identifier[inp] ):
literal[string]
keyword[if] identifier[inp] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[int] ( identifier[inp] )!= identifier[self] . identifier[screen] . identifier[wide] :
identifier[self] . identifier[screen] . identifier[wide] = identifier[int] ( identifier[inp] )
identifier[self] . identifier[on_resize] ( keyword[None] , keyword[None] )
keyword[elif] identifier[inp] keyword[in] ( literal[string] , literal[string] ):
identifier[nlen] = identifier[max] ( literal[int] , identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] - literal[int] )
keyword[if] identifier[nlen] != identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] :
identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] = identifier[nlen]
identifier[self] . identifier[on_resize] ( keyword[None] , keyword[None] )
keyword[elif] identifier[inp] keyword[in] ( literal[string] , literal[string] ):
identifier[nlen] = identifier[min] ( identifier[self] . identifier[term] . identifier[width] - literal[int] , identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] + literal[int] )
keyword[if] identifier[nlen] != identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] :
identifier[self] . identifier[screen] . identifier[style] . identifier[name_len] = identifier[nlen]
identifier[self] . identifier[on_resize] ( keyword[None] , keyword[None] )
keyword[elif] identifier[inp] == literal[string] keyword[and] identifier[self] . identifier[screen] . identifier[wide] != literal[int] :
identifier[self] . identifier[screen] . identifier[wide] = literal[int]
identifier[self] . identifier[on_resize] ( keyword[None] , keyword[None] ) | def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['inp']]
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None) # depends on [control=['if'], data=['nlen']] # depends on [control=['if'], data=[]]
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None) # depends on [control=['if'], data=['nlen']] # depends on [control=['if'], data=[]]
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) # depends on [control=['if'], data=[]] |
def completion(self, device, folder):
    """ Returns the completion percentage (0 to 100) for a given device
    and folder.
    Args:
        device (str): The Syncthing device the folder is syncing to.
        folder (str): The folder that is being synced.
    Returs:
        int
    """
    query = {'folder': folder, 'device': device}
    response = self.get('completion', params=query)
    # Absent key yields None rather than raising.
    return response.get('completion', None)
constant[ Returns the completion percentage (0 to 100) for a given device
and folder.
Args:
device (str): The Syncthing device the folder is syncing to.
folder (str): The folder that is being synced.
Returs:
int
]
return[call[call[name[self].get, parameter[constant[completion]]].get, parameter[constant[completion], constant[None]]]] | keyword[def] identifier[completion] ( identifier[self] , identifier[device] , identifier[folder] ):
literal[string]
keyword[return] identifier[self] . identifier[get] (
literal[string] ,
identifier[params] ={ literal[string] : identifier[folder] , literal[string] : identifier[device] }
). identifier[get] ( literal[string] , keyword[None] ) | def completion(self, device, folder):
""" Returns the completion percentage (0 to 100) for a given device
and folder.
Args:
device (str): The Syncthing device the folder is syncing to.
folder (str): The folder that is being synced.
Returs:
int
"""
return self.get('completion', params={'folder': folder, 'device': device}).get('completion', None) |
def get_readme():
    """Get the contents of the ``README.md`` file as a Unicode string.

    Prefers a reStructuredText rendering via ``pypandoc`` when it is
    installed and pandoc is available; otherwise falls back to the raw
    Markdown text.

    :returns: the README contents as a string.
    """
    try:
        import pypandoc
        description = pypandoc.convert('README.md', 'rst')
    except (IOError, ImportError):
        # pypandoc (or pandoc itself) is unavailable -- use the raw file.
        # Use a context manager so the handle is closed (the original
        # `open(...).read()` leaked it).
        with open('README.md') as readme:
            description = readme.read()
    return description
constant[Get the contents of the ``README.rst`` file as a Unicode string.]
<ast.Try object at 0x7da1b1d21840>
return[name[description]] | keyword[def] identifier[get_readme] ():
literal[string]
keyword[try] :
keyword[import] identifier[pypandoc]
identifier[description] = identifier[pypandoc] . identifier[convert] ( literal[string] , literal[string] )
keyword[except] ( identifier[IOError] , identifier[ImportError] ):
identifier[description] = identifier[open] ( literal[string] ). identifier[read] ()
keyword[return] identifier[description] | def get_readme():
"""Get the contents of the ``README.rst`` file as a Unicode string."""
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst') # depends on [control=['try'], data=[]]
except (IOError, ImportError):
description = open('README.md').read() # depends on [control=['except'], data=[]]
return description |
def maxcut_qaoa(n_step, edges, minimizer=None, sampler=None, verbose=True):
    """Setup QAOA for MaxCut.

    :param n_step: The number of steps (ansatz depth) of QAOA.
    :param edges: The edges list of the graph.
    :param minimizer: Classical optimizer; defaults to SciPy's Powell method.
    :param sampler: Measurement sampler; defaults to the non-sampling sampler.
    :param verbose: Unused; kept for interface compatibility.
    :returns Vqe object
    """
    if not sampler:
        sampler = vqe.non_sampling_sampler
    if not minimizer:
        minimizer = vqe.get_scipy_minimizer(
            method="Powell",
            options={"ftol": 5.0e-2, "xtol": 5.0e-2, "maxiter": 1000, "disp": True}
        )
    # Ising cost Hamiltonian for MaxCut: sum of Z_u Z_v over every edge.
    hamiltonian = pauli.I() * 0
    for u, v in edges:
        hamiltonian += pauli.Z(u) * pauli.Z(v)
    ansatz = vqe.QaoaAnsatz(hamiltonian, n_step)
    return vqe.Vqe(ansatz, minimizer, sampler)
constant[Setup QAOA.
:param n_step: The number of step of QAOA
:param n_sample: The number of sampling time of each measurement in VQE.
If None, use calculated ideal value.
:param edges: The edges list of the graph.
:returns Vqe object
]
variable[sampler] assign[=] <ast.BoolOp object at 0x7da2044c0100>
variable[minimizer] assign[=] <ast.BoolOp object at 0x7da2044c0d00>
variable[hamiltonian] assign[=] binary_operation[call[name[pauli].I, parameter[]] * constant[0]]
for taget[tuple[[<ast.Name object at 0x7da2044c3c70>, <ast.Name object at 0x7da2044c38b0>]]] in starred[name[edges]] begin[:]
<ast.AugAssign object at 0x7da2044c35b0>
return[call[name[vqe].Vqe, parameter[call[name[vqe].QaoaAnsatz, parameter[name[hamiltonian], name[n_step]]], name[minimizer], name[sampler]]]] | keyword[def] identifier[maxcut_qaoa] ( identifier[n_step] , identifier[edges] , identifier[minimizer] = keyword[None] , identifier[sampler] = keyword[None] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[sampler] = identifier[sampler] keyword[or] identifier[vqe] . identifier[non_sampling_sampler]
identifier[minimizer] = identifier[minimizer] keyword[or] identifier[vqe] . identifier[get_scipy_minimizer] (
identifier[method] = literal[string] ,
identifier[options] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[True] }
)
identifier[hamiltonian] = identifier[pauli] . identifier[I] ()* literal[int]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[edges] :
identifier[hamiltonian] += identifier[pauli] . identifier[Z] ( identifier[i] )* identifier[pauli] . identifier[Z] ( identifier[j] )
keyword[return] identifier[vqe] . identifier[Vqe] ( identifier[vqe] . identifier[QaoaAnsatz] ( identifier[hamiltonian] , identifier[n_step] ), identifier[minimizer] , identifier[sampler] ) | def maxcut_qaoa(n_step, edges, minimizer=None, sampler=None, verbose=True):
"""Setup QAOA.
:param n_step: The number of step of QAOA
:param n_sample: The number of sampling time of each measurement in VQE.
If None, use calculated ideal value.
:param edges: The edges list of the graph.
:returns Vqe object
"""
sampler = sampler or vqe.non_sampling_sampler
minimizer = minimizer or vqe.get_scipy_minimizer(method='Powell', options={'ftol': 0.05, 'xtol': 0.05, 'maxiter': 1000, 'disp': True})
hamiltonian = pauli.I() * 0
for (i, j) in edges:
hamiltonian += pauli.Z(i) * pauli.Z(j) # depends on [control=['for'], data=[]]
return vqe.Vqe(vqe.QaoaAnsatz(hamiltonian, n_step), minimizer, sampler) |
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path) | def function[_saveState, parameter[self, path]]:
constant[save current state and add a new state]
call[name[self].addSession, parameter[]]
call[name[self]._save, parameter[call[name[str], parameter[name[self].n_sessions]], name[path]]] | keyword[def] identifier[_saveState] ( identifier[self] , identifier[path] ):
literal[string]
identifier[self] . identifier[addSession] ()
identifier[self] . identifier[_save] ( identifier[str] ( identifier[self] . identifier[n_sessions] ), identifier[path] ) | def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path) |
def build_kal_scan_band_string(kal_bin, band, args):
    """Return string for CLI invocation of kal, for band scan.

    :param kal_bin: path to the ``kal`` binary.
    :param band: band designation to scan; validated before use.
    :param args: parsed CLI args providing optional gain/device/error values.
    :raises ValueError: if ``band`` is not a supported designation.
    :returns: the complete command string.
    """
    option_mapping = {"gain": "-g",
                      "device": "-d",
                      "error": "-e"}
    if not sanity.scan_band_is_valid(band):
        # Bug fix: the original format string was a bare "%" (an incomplete
        # conversion specifier), which raised a ValueError about the format
        # itself instead of producing this message.
        err_txt = "Unsupported band designation: %s" % band
        raise ValueError(err_txt)
    base_string = "%s -v -s %s" % (kal_bin, band)
    base_string += options_string_builder(option_mapping, args)
    return base_string
constant[Return string for CLI invocation of kal, for band scan.]
variable[option_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da204960520>, <ast.Constant object at 0x7da204962920>, <ast.Constant object at 0x7da204961c90>], [<ast.Constant object at 0x7da2049623e0>, <ast.Constant object at 0x7da2049637f0>, <ast.Constant object at 0x7da204962b60>]]
if <ast.UnaryOp object at 0x7da204960190> begin[:]
variable[err_txt] assign[=] binary_operation[constant[Unsupported band designation: %] <ast.Mod object at 0x7da2590d6920> name[band]]
<ast.Raise object at 0x7da204963f70>
variable[base_string] assign[=] binary_operation[constant[%s -v -s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204961b70>, <ast.Name object at 0x7da204960cd0>]]]
<ast.AugAssign object at 0x7da204962d40>
return[name[base_string]] | keyword[def] identifier[build_kal_scan_band_string] ( identifier[kal_bin] , identifier[band] , identifier[args] ):
literal[string]
identifier[option_mapping] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[if] keyword[not] identifier[sanity] . identifier[scan_band_is_valid] ( identifier[band] ):
identifier[err_txt] = literal[string] % identifier[band]
keyword[raise] identifier[ValueError] ( identifier[err_txt] )
identifier[base_string] = literal[string] %( identifier[kal_bin] , identifier[band] )
identifier[base_string] += identifier[options_string_builder] ( identifier[option_mapping] , identifier[args] )
keyword[return] ( identifier[base_string] ) | def build_kal_scan_band_string(kal_bin, band, args):
"""Return string for CLI invocation of kal, for band scan."""
option_mapping = {'gain': '-g', 'device': '-d', 'error': '-e'}
if not sanity.scan_band_is_valid(band):
err_txt = 'Unsupported band designation: %' % band
raise ValueError(err_txt) # depends on [control=['if'], data=[]]
base_string = '%s -v -s %s' % (kal_bin, band)
base_string += options_string_builder(option_mapping, args)
return base_string |
def add_assembly_names(opts):
    """add assembly names as aliases to existing sequences
    Specifically, associate aliases like GRCh37.p9:1 with existing
    refseq accessions
    ```
    [{'aliases': ['chr19'],
    'assembly_unit': 'Primary Assembly',
    'genbank_ac': 'CM000681.2',
    'length': 58617616,
    'name': '19',
    'refseq_ac': 'NC_000019.10',
    'relationship': '=',
    'sequence_role': 'assembled-molecule'}]
    ```
    For the above sample record, this function adds the following aliases:
    * genbank:CM000681.2
    * GRCh38:19
    * GRCh38:chr19
    to the sequence referred to by refseq:NC_000019.10.
    """
    # Open the SeqRepo instance at <root_directory>/<instance_name> for writing.
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    sr = SeqRepo(seqrepo_dir, writeable=True)
    assemblies = bioutils.assemblies.get_assemblies()
    if opts.reload_all:
        assemblies_to_load = sorted(assemblies)
    else:
        # Skip assemblies whose namespace already exists in the alias db.
        namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
        assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
    _logger.info("{} assemblies to load".format(len(assemblies_to_load)))
    # Map every known NCBI accession (current or superseded) to its seq_id.
    ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
    for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
        _logger.debug("loading " + assy_name)
        sequences = assemblies[assy_name]["sequences"]
        # Keep only records whose relationship marks the sequence as
        # equivalent ("=") or near-equivalent ("<>") to the accession.
        eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
        if not eq_sequences:
            _logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
            continue
        # all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
        not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
        if not_in_seqrepo:
            _logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
                an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
            if not opts.partial_load:
                _logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
                continue
            # Partial load: drop the accessions this SeqRepo doesn't know.
            eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
        _logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
        for s in eq_sequences:
            seq_id = ncbi_alias_map[s["refseq_ac"]]
            # Alias the assembly-local name plus any listed aliases (e.g. chr19).
            aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
            if "genbank_ac" in s and s["genbank_ac"]:
                aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
            for alias in aliases:
                sr.aliases.store_alias(seq_id=seq_id, **alias)
                _logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit() | def function[add_assembly_names, parameter[opts]]:
constant[add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
]
variable[seqrepo_dir] assign[=] call[name[os].path.join, parameter[name[opts].root_directory, name[opts].instance_name]]
variable[sr] assign[=] call[name[SeqRepo], parameter[name[seqrepo_dir]]]
variable[assemblies] assign[=] call[name[bioutils].assemblies.get_assemblies, parameter[]]
if name[opts].reload_all begin[:]
variable[assemblies_to_load] assign[=] call[name[sorted], parameter[name[assemblies]]]
call[name[_logger].info, parameter[call[constant[{} assemblies to load].format, parameter[call[name[len], parameter[name[assemblies_to_load]]]]]]]
variable[ncbi_alias_map] assign[=] <ast.DictComp object at 0x7da1b04daf80>
for taget[name[assy_name]] in starred[call[name[tqdm].tqdm, parameter[name[assemblies_to_load]]]] begin[:]
call[name[_logger].debug, parameter[binary_operation[constant[loading ] + name[assy_name]]]]
variable[sequences] assign[=] call[call[name[assemblies]][name[assy_name]]][constant[sequences]]
variable[eq_sequences] assign[=] <ast.ListComp object at 0x7da1b031dae0>
if <ast.UnaryOp object at 0x7da1b031eec0> begin[:]
call[name[_logger].info, parameter[call[constant[No '=' sequences to load for {an}; skipping].format, parameter[]]]]
continue
variable[not_in_seqrepo] assign[=] <ast.ListComp object at 0x7da1b031eef0>
if name[not_in_seqrepo] begin[:]
call[name[_logger].warning, parameter[call[constant[Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})].format, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b04db4c0> begin[:]
call[name[_logger].warning, parameter[call[constant[Skipping {an} (-p to enable partial loading)].format, parameter[]]]]
continue
variable[eq_sequences] assign[=] <ast.ListComp object at 0x7da1b04dbac0>
call[name[_logger].info, parameter[call[constant[Loading {n} new accessions for assembly {an}].format, parameter[]]]]
for taget[name[s]] in starred[name[eq_sequences]] begin[:]
variable[seq_id] assign[=] call[name[ncbi_alias_map]][call[name[s]][constant[refseq_ac]]]
variable[aliases] assign[=] <ast.ListComp object at 0x7da1b04760e0>
if <ast.BoolOp object at 0x7da1b0476320> begin[:]
<ast.AugAssign object at 0x7da1b0475a50>
for taget[name[alias]] in starred[name[aliases]] begin[:]
call[name[sr].aliases.store_alias, parameter[]]
call[name[_logger].debug, parameter[call[constant[Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}].format, parameter[]]]]
call[name[sr].commit, parameter[]] | keyword[def] identifier[add_assembly_names] ( identifier[opts] ):
literal[string]
identifier[seqrepo_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[opts] . identifier[root_directory] , identifier[opts] . identifier[instance_name] )
identifier[sr] = identifier[SeqRepo] ( identifier[seqrepo_dir] , identifier[writeable] = keyword[True] )
identifier[assemblies] = identifier[bioutils] . identifier[assemblies] . identifier[get_assemblies] ()
keyword[if] identifier[opts] . identifier[reload_all] :
identifier[assemblies_to_load] = identifier[sorted] ( identifier[assemblies] )
keyword[else] :
identifier[namespaces] =[ identifier[r] [ literal[string] ] keyword[for] identifier[r] keyword[in] identifier[sr] . identifier[aliases] . identifier[_db] . identifier[execute] ( literal[string] )]
identifier[assemblies_to_load] = identifier[sorted] ( identifier[k] keyword[for] identifier[k] keyword[in] identifier[assemblies] keyword[if] identifier[k] keyword[not] keyword[in] identifier[namespaces] )
identifier[_logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[assemblies_to_load] )))
identifier[ncbi_alias_map] ={ identifier[r] [ literal[string] ]: identifier[r] [ literal[string] ] keyword[for] identifier[r] keyword[in] identifier[sr] . identifier[aliases] . identifier[find_aliases] ( identifier[namespace] = literal[string] , identifier[current_only] = keyword[False] )}
keyword[for] identifier[assy_name] keyword[in] identifier[tqdm] . identifier[tqdm] ( identifier[assemblies_to_load] , identifier[unit] = literal[string] ):
identifier[_logger] . identifier[debug] ( literal[string] + identifier[assy_name] )
identifier[sequences] = identifier[assemblies] [ identifier[assy_name] ][ literal[string] ]
identifier[eq_sequences] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[sequences] keyword[if] identifier[s] [ literal[string] ] keyword[in] ( literal[string] , literal[string] )]
keyword[if] keyword[not] identifier[eq_sequences] :
identifier[_logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[an] = identifier[assy_name] ))
keyword[continue]
identifier[not_in_seqrepo] =[ identifier[s] [ literal[string] ] keyword[for] identifier[s] keyword[in] identifier[eq_sequences] keyword[if] identifier[s] [ literal[string] ] keyword[not] keyword[in] identifier[ncbi_alias_map] ]
keyword[if] identifier[not_in_seqrepo] :
identifier[_logger] . identifier[warning] ( literal[string] . identifier[format] (
identifier[an] = identifier[assy_name] , identifier[n] = identifier[len] ( identifier[not_in_seqrepo] ), identifier[opts] = identifier[opts] , identifier[acs] = literal[string] . identifier[join] ( identifier[not_in_seqrepo] [: literal[int] ]+[ literal[string] ]), identifier[seqrepo_dir] = identifier[seqrepo_dir] ))
keyword[if] keyword[not] identifier[opts] . identifier[partial_load] :
identifier[_logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[an] = identifier[assy_name] ))
keyword[continue]
identifier[eq_sequences] =[ identifier[es] keyword[for] identifier[es] keyword[in] identifier[eq_sequences] keyword[if] identifier[es] [ literal[string] ] keyword[in] identifier[ncbi_alias_map] ]
identifier[_logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[an] = identifier[assy_name] , identifier[n] = identifier[len] ( identifier[eq_sequences] )))
keyword[for] identifier[s] keyword[in] identifier[eq_sequences] :
identifier[seq_id] = identifier[ncbi_alias_map] [ identifier[s] [ literal[string] ]]
identifier[aliases] =[{ literal[string] : identifier[assy_name] , literal[string] : identifier[a] } keyword[for] identifier[a] keyword[in] [ identifier[s] [ literal[string] ]]+ identifier[s] [ literal[string] ]]
keyword[if] literal[string] keyword[in] identifier[s] keyword[and] identifier[s] [ literal[string] ]:
identifier[aliases] +=[{ literal[string] : literal[string] , literal[string] : identifier[s] [ literal[string] ]}]
keyword[for] identifier[alias] keyword[in] identifier[aliases] :
identifier[sr] . identifier[aliases] . identifier[store_alias] ( identifier[seq_id] = identifier[seq_id] ,** identifier[alias] )
identifier[_logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[a] = identifier[alias] , identifier[seq_id] = identifier[seq_id] ))
identifier[sr] . identifier[commit] () | def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies) # depends on [control=['if'], data=[]]
else:
namespaces = [r['namespace'] for r in sr.aliases._db.execute('select distinct namespace from seqalias')]
assemblies_to_load = sorted((k for k in assemblies if k not in namespaces))
_logger.info('{} assemblies to load'.format(len(assemblies_to_load)))
ncbi_alias_map = {r['alias']: r['seq_id'] for r in sr.aliases.find_aliases(namespace='NCBI', current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit='assembly'):
_logger.debug('loading ' + assy_name)
sequences = assemblies[assy_name]['sequences']
eq_sequences = [s for s in sequences if s['relationship'] in ('=', '<>')]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue # depends on [control=['if'], data=[]]
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s['refseq_ac'] for s in eq_sequences if s['refseq_ac'] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning('Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})'.format(an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=', '.join(not_in_seqrepo[:5] + ['...']), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning('Skipping {an} (-p to enable partial loading)'.format(an=assy_name))
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
eq_sequences = [es for es in eq_sequences if es['refseq_ac'] in ncbi_alias_map]
_logger.info('Loading {n} new accessions for assembly {an}'.format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s['refseq_ac']]
aliases = [{'namespace': assy_name, 'alias': a} for a in [s['name']] + s['aliases']]
if 'genbank_ac' in s and s['genbank_ac']:
aliases += [{'namespace': 'genbank', 'alias': s['genbank_ac']}] # depends on [control=['if'], data=[]]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug('Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}'.format(a=alias, seq_id=seq_id)) # depends on [control=['for'], data=['alias']] # depends on [control=['for'], data=['s']]
sr.commit() # depends on [control=['for'], data=['assy_name']] |
def instance_from_str(instance_str):
    """
    Resolve an instance string of the form "app.Model:pk" into a
    ``(model, instance)`` pair.

    When the pk portion is empty, the second element of the pair is ``None``.
    ``ValueError`` is raised for malformed strings, unknown models, and pks
    that match no instance.
    """
    match = instance_str_re.match(instance_str)
    if match is None:
        raise ValueError("Invalid instance string")
    # Group 1 is the "app.Model" label; an unknown label raises here.
    try:
        model = apps.get_model(match.group(1))
    except (LookupError, ValueError):
        raise ValueError("Invalid instance string")
    pk = match.group(2)
    if pk:
        try:
            return model, model._default_manager.get(pk=pk)
        except model.DoesNotExist:
            raise ValueError("Invalid instance string")
return model, None | def function[instance_from_str, parameter[instance_str]]:
constant[
Given an instance string in the form "app.Model:pk", returns a tuple of
``(model, instance)``. If the pk part is empty, ``instance`` will be
``None``. Raises ``ValueError`` on invalid model strings or missing
instances.
]
variable[match] assign[=] call[name[instance_str_re].match, parameter[name[instance_str]]]
if <ast.UnaryOp object at 0x7da1b00e5ae0> begin[:]
<ast.Raise object at 0x7da1b00e5a80>
variable[model_string] assign[=] call[name[match].group, parameter[constant[1]]]
<ast.Try object at 0x7da1b00e58a0>
variable[pk] assign[=] call[name[match].group, parameter[constant[2]]]
if name[pk] begin[:]
<ast.Try object at 0x7da1b00e5450>
return[tuple[[<ast.Name object at 0x7da1b00e4340>, <ast.Constant object at 0x7da1b00e4370>]]] | keyword[def] identifier[instance_from_str] ( identifier[instance_str] ):
literal[string]
identifier[match] = identifier[instance_str_re] . identifier[match] ( identifier[instance_str] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[model_string] = identifier[match] . identifier[group] ( literal[int] )
keyword[try] :
identifier[model] = identifier[apps] . identifier[get_model] ( identifier[model_string] )
keyword[except] ( identifier[LookupError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[pk] = identifier[match] . identifier[group] ( literal[int] )
keyword[if] identifier[pk] :
keyword[try] :
keyword[return] identifier[model] , identifier[model] . identifier[_default_manager] . identifier[get] ( identifier[pk] = identifier[pk] )
keyword[except] identifier[model] . identifier[DoesNotExist] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[model] , keyword[None] | def instance_from_str(instance_str):
"""
Given an instance string in the form "app.Model:pk", returns a tuple of
``(model, instance)``. If the pk part is empty, ``instance`` will be
``None``. Raises ``ValueError`` on invalid model strings or missing
instances.
"""
match = instance_str_re.match(instance_str)
if not match:
raise ValueError('Invalid instance string') # depends on [control=['if'], data=[]]
model_string = match.group(1)
try:
model = apps.get_model(model_string) # depends on [control=['try'], data=[]]
except (LookupError, ValueError):
raise ValueError('Invalid instance string') # depends on [control=['except'], data=[]]
pk = match.group(2)
if pk:
try:
return (model, model._default_manager.get(pk=pk)) # depends on [control=['try'], data=[]]
except model.DoesNotExist:
raise ValueError('Invalid instance string') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return (model, None) |
def get_all_destinations(self, server_id):
    """
    Return all listener destinations in a WBEM server.
    This function contacts the WBEM server and retrieves the listener
    destinations by enumerating the instances of CIM class
    "CIM_ListenerDestinationCIMXML" in the Interop namespace of the WBEM
    server.
    Parameters:
      server_id (:term:`string`):
        The server ID of the WBEM server, returned by
        :meth:`~pywbem.WBEMSubscriptionManager.add_server`.
    Returns:
        :class:`py:list` of :class:`~pywbem.CIMInstance`: The listener
        destination instances.
    Raises:
        Exceptions raised by :class:`~pywbem.WBEMConnection`.
    """
    # Validate server_id and resolve it to the registered server entry;
    # presumably _get_server() rejects unknown IDs -- confirm its contract.
    server = self._get_server(server_id)
return server.conn.EnumerateInstances(DESTINATION_CLASSNAME,
namespace=server.interop_ns) | def function[get_all_destinations, parameter[self, server_id]]:
constant[
Return all listener destinations in a WBEM server.
This function contacts the WBEM server and retrieves the listener
destinations by enumerating the instances of CIM class
"CIM_ListenerDestinationCIMXML" in the Interop namespace of the WBEM
server.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
Returns:
:class:`py:list` of :class:`~pywbem.CIMInstance`: The listener
destination instances.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
]
variable[server] assign[=] call[name[self]._get_server, parameter[name[server_id]]]
return[call[name[server].conn.EnumerateInstances, parameter[name[DESTINATION_CLASSNAME]]]] | keyword[def] identifier[get_all_destinations] ( identifier[self] , identifier[server_id] ):
literal[string]
identifier[server] = identifier[self] . identifier[_get_server] ( identifier[server_id] )
keyword[return] identifier[server] . identifier[conn] . identifier[EnumerateInstances] ( identifier[DESTINATION_CLASSNAME] ,
identifier[namespace] = identifier[server] . identifier[interop_ns] ) | def get_all_destinations(self, server_id):
"""
Return all listener destinations in a WBEM server.
This function contacts the WBEM server and retrieves the listener
destinations by enumerating the instances of CIM class
"CIM_ListenerDestinationCIMXML" in the Interop namespace of the WBEM
server.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
Returns:
:class:`py:list` of :class:`~pywbem.CIMInstance`: The listener
destination instances.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
"""
# Validate server_id
server = self._get_server(server_id)
return server.conn.EnumerateInstances(DESTINATION_CLASSNAME, namespace=server.interop_ns) |
def cancelOperation(self):
'''
Cancels the ongoing operation if any.
'''
if self.isLongTouchingPoint:
self.toggleLongTouchPoint()
elif self.isTouchingPoint:
self.toggleTouchPoint()
elif self.isGeneratingTestCondition:
self.toggleGenerateTestCondition() | def function[cancelOperation, parameter[self]]:
constant[
Cancels the ongoing operation if any.
]
if name[self].isLongTouchingPoint begin[:]
call[name[self].toggleLongTouchPoint, parameter[]] | keyword[def] identifier[cancelOperation] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[isLongTouchingPoint] :
identifier[self] . identifier[toggleLongTouchPoint] ()
keyword[elif] identifier[self] . identifier[isTouchingPoint] :
identifier[self] . identifier[toggleTouchPoint] ()
keyword[elif] identifier[self] . identifier[isGeneratingTestCondition] :
identifier[self] . identifier[toggleGenerateTestCondition] () | def cancelOperation(self):
"""
Cancels the ongoing operation if any.
"""
if self.isLongTouchingPoint:
self.toggleLongTouchPoint() # depends on [control=['if'], data=[]]
elif self.isTouchingPoint:
self.toggleTouchPoint() # depends on [control=['if'], data=[]]
elif self.isGeneratingTestCondition:
self.toggleGenerateTestCondition() # depends on [control=['if'], data=[]] |
def model_counts_map(self, name=None, exclude=None, use_mask=False):
    """Compute the model counts map for a source, a list of sources, or
    (when ``name`` is None) for the sum of all sources in the ROI, by
    co-adding the per-component maps.

    Parameters
    ----------
    name : str or list of str
        Selects the sources included in the model map.  ``None`` selects
        every source in the ROI.
    exclude : str or list of str
        Sources to leave out when building the model map.
    use_mask : bool
        Whether the model counts map should include mask pixels (i.e.,
        ones whose weights are <= 0).

    Returns
    -------
    map : `~gammapy.maps.Map`
    """
    # Collect one model map per analysis component, then co-add them.
    maps = []
    for component in self.components:
        maps.append(component.model_counts_map(name, exclude, use_mask=use_mask))
return skymap.coadd_maps(self.geom, maps) | def function[model_counts_map, parameter[self, name, exclude, use_mask]]:
constant[Return the model counts map for a single source, a list of
sources, or for the sum of all sources in the ROI. The
exclude parameter can be used to exclude one or more
components when generating the model map.
Parameters
----------
name : str or list of str
Parameter controlling the set of sources for which the
model counts map will be calculated. If name=None the
model map will be generated for all sources in the ROI.
exclude : str or list of str
List of sources that will be excluded when calculating the
model map.
use_mask : bool
Parameter that specifies in the model counts map should include
mask pixels (i.e., ones whose weights are <= 0)
Returns
-------
map : `~gammapy.maps.Map`
]
variable[maps] assign[=] <ast.ListComp object at 0x7da18fe910c0>
return[call[name[skymap].coadd_maps, parameter[name[self].geom, name[maps]]]] | keyword[def] identifier[model_counts_map] ( identifier[self] , identifier[name] = keyword[None] , identifier[exclude] = keyword[None] , identifier[use_mask] = keyword[False] ):
literal[string]
identifier[maps] =[ identifier[c] . identifier[model_counts_map] ( identifier[name] , identifier[exclude] , identifier[use_mask] = identifier[use_mask] )
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[components] ]
keyword[return] identifier[skymap] . identifier[coadd_maps] ( identifier[self] . identifier[geom] , identifier[maps] ) | def model_counts_map(self, name=None, exclude=None, use_mask=False):
"""Return the model counts map for a single source, a list of
sources, or for the sum of all sources in the ROI. The
exclude parameter can be used to exclude one or more
components when generating the model map.
Parameters
----------
name : str or list of str
Parameter controlling the set of sources for which the
model counts map will be calculated. If name=None the
model map will be generated for all sources in the ROI.
exclude : str or list of str
List of sources that will be excluded when calculating the
model map.
use_mask : bool
Parameter that specifies in the model counts map should include
mask pixels (i.e., ones whose weights are <= 0)
Returns
-------
map : `~gammapy.maps.Map`
"""
maps = [c.model_counts_map(name, exclude, use_mask=use_mask) for c in self.components]
return skymap.coadd_maps(self.geom, maps) |
def subdivide(self, N=1, method=0):
    """Increase the number of vertices of a surface mesh by subdivision.

    :param int N: number of subdivisions.
    :param int method: Loop(0), Linear(1), Adaptive(2), Butterfly(3)
    .. hint:: |tutorial_subdivide| |tutorial.py|_
    """
    # The subdivision filters operate on triangles, so triangulate first.
    tri_filter = vtk.vtkTriangleFilter()
    tri_filter.SetInputData(self.polydata())
    tri_filter.Update()
    source_mesh = tri_filter.GetOutput()
    filters = {
        0: vtk.vtkLoopSubdivisionFilter,
        1: vtk.vtkLinearSubdivisionFilter,
        2: vtk.vtkAdaptiveSubdivisionFilter,
        3: vtk.vtkButterflySubdivisionFilter,
    }
    if method not in filters:
        colors.printc("~times Error in subdivide: unknown method.", c="r")
        exit()
    sdf = filters[method]()
    # The adaptive filter (2) decides its own subdivision count.
    if method != 2:
        sdf.SetNumberOfSubdivisions(N)
    sdf.SetInputData(source_mesh)
    sdf.Update()
return self.updateMesh(sdf.GetOutput()) | def function[subdivide, parameter[self, N, method]]:
constant[Increase the number of vertices of a surface mesh.
:param int N: number of subdivisions.
:param int method: Loop(0), Linear(1), Adaptive(2), Butterfly(3)
.. hint:: |tutorial_subdivide| |tutorial.py|_
]
variable[triangles] assign[=] call[name[vtk].vtkTriangleFilter, parameter[]]
call[name[triangles].SetInputData, parameter[call[name[self].polydata, parameter[]]]]
call[name[triangles].Update, parameter[]]
variable[originalMesh] assign[=] call[name[triangles].GetOutput, parameter[]]
if compare[name[method] equal[==] constant[0]] begin[:]
variable[sdf] assign[=] call[name[vtk].vtkLoopSubdivisionFilter, parameter[]]
if compare[name[method] not_equal[!=] constant[2]] begin[:]
call[name[sdf].SetNumberOfSubdivisions, parameter[name[N]]]
call[name[sdf].SetInputData, parameter[name[originalMesh]]]
call[name[sdf].Update, parameter[]]
return[call[name[self].updateMesh, parameter[call[name[sdf].GetOutput, parameter[]]]]] | keyword[def] identifier[subdivide] ( identifier[self] , identifier[N] = literal[int] , identifier[method] = literal[int] ):
literal[string]
identifier[triangles] = identifier[vtk] . identifier[vtkTriangleFilter] ()
identifier[triangles] . identifier[SetInputData] ( identifier[self] . identifier[polydata] ())
identifier[triangles] . identifier[Update] ()
identifier[originalMesh] = identifier[triangles] . identifier[GetOutput] ()
keyword[if] identifier[method] == literal[int] :
identifier[sdf] = identifier[vtk] . identifier[vtkLoopSubdivisionFilter] ()
keyword[elif] identifier[method] == literal[int] :
identifier[sdf] = identifier[vtk] . identifier[vtkLinearSubdivisionFilter] ()
keyword[elif] identifier[method] == literal[int] :
identifier[sdf] = identifier[vtk] . identifier[vtkAdaptiveSubdivisionFilter] ()
keyword[elif] identifier[method] == literal[int] :
identifier[sdf] = identifier[vtk] . identifier[vtkButterflySubdivisionFilter] ()
keyword[else] :
identifier[colors] . identifier[printc] ( literal[string] , identifier[c] = literal[string] )
identifier[exit] ()
keyword[if] identifier[method] != literal[int] :
identifier[sdf] . identifier[SetNumberOfSubdivisions] ( identifier[N] )
identifier[sdf] . identifier[SetInputData] ( identifier[originalMesh] )
identifier[sdf] . identifier[Update] ()
keyword[return] identifier[self] . identifier[updateMesh] ( identifier[sdf] . identifier[GetOutput] ()) | def subdivide(self, N=1, method=0):
"""Increase the number of vertices of a surface mesh.
:param int N: number of subdivisions.
:param int method: Loop(0), Linear(1), Adaptive(2), Butterfly(3)
.. hint:: |tutorial_subdivide| |tutorial.py|_
"""
triangles = vtk.vtkTriangleFilter()
triangles.SetInputData(self.polydata())
triangles.Update()
originalMesh = triangles.GetOutput()
if method == 0:
sdf = vtk.vtkLoopSubdivisionFilter() # depends on [control=['if'], data=[]]
elif method == 1:
sdf = vtk.vtkLinearSubdivisionFilter() # depends on [control=['if'], data=[]]
elif method == 2:
sdf = vtk.vtkAdaptiveSubdivisionFilter() # depends on [control=['if'], data=[]]
elif method == 3:
sdf = vtk.vtkButterflySubdivisionFilter() # depends on [control=['if'], data=[]]
else:
colors.printc('~times Error in subdivide: unknown method.', c='r')
exit()
if method != 2:
sdf.SetNumberOfSubdivisions(N) # depends on [control=['if'], data=[]]
sdf.SetInputData(originalMesh)
sdf.Update()
return self.updateMesh(sdf.GetOutput()) |
def places_radar(client, location, radius, keyword=None, min_price=None,
                 max_price=None, name=None, open_now=False, type=None):
    """
    Performs radar search for places.

    :param location: The latitude/longitude value for which you wish to
        obtain the closest, human-readable address.
    :type location: string, dict, list, or tuple
    :param radius: Distance in meters within which to bias results.
    :type radius: int
    :param keyword: A term to be matched against all content that Google
        has indexed for this place.
    :type keyword: string
    :param min_price: Restricts results to only those places with no less
        than this price level, in the range from 0 (most affordable) to 4
        (most expensive).
    :type min_price: int
    :param max_price: Restricts results to only those places with no
        greater than this price level, in the range from 0 (most
        affordable) to 4 (most expensive).
    :type max_price: int
    :param name: One or more terms to be matched against the names of
        places.
    :type name: string or list of strings
    :param open_now: Return only those places that are open for business
        at the time the query is sent.
    :type open_now: bool
    :param type: Restricts the results to places matching the specified
        type; the full list of supported types is available at
        https://developers.google.com/places/supported_types
    :type type: string
    :rtype: result dict with the following keys:
        status: status code
        results: list of places
        html_attributions: set of attributions which must be displayed
    """
    # A radar search is meaningless without at least one filter criterion.
    if not any((keyword, name, type)):
        raise ValueError("either a keyword, name, or type arg is required")
    from warnings import warn
warn("places_radar is deprecated, see http://goo.gl/BGiumE",
DeprecationWarning)
return _places(client, "radar", location=location, radius=radius,
keyword=keyword, min_price=min_price, max_price=max_price,
name=name, open_now=open_now, type=type) | def function[places_radar, parameter[client, location, radius, keyword, min_price, max_price, name, open_now, type]]:
constant[
Performs radar search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
]
if <ast.UnaryOp object at 0x7da18ede6b90> begin[:]
<ast.Raise object at 0x7da18ede7d90>
from relative_module[warnings] import module[warn]
call[name[warn], parameter[constant[places_radar is deprecated, see http://goo.gl/BGiumE], name[DeprecationWarning]]]
return[call[name[_places], parameter[name[client], constant[radar]]]] | keyword[def] identifier[places_radar] ( identifier[client] , identifier[location] , identifier[radius] , identifier[keyword] = keyword[None] , identifier[min_price] = keyword[None] ,
identifier[max_price] = keyword[None] , identifier[name] = keyword[None] , identifier[open_now] = keyword[False] , identifier[type] = keyword[None] ):
literal[string]
keyword[if] keyword[not] ( identifier[keyword] keyword[or] identifier[name] keyword[or] identifier[type] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[from] identifier[warnings] keyword[import] identifier[warn]
identifier[warn] ( literal[string] ,
identifier[DeprecationWarning] )
keyword[return] identifier[_places] ( identifier[client] , literal[string] , identifier[location] = identifier[location] , identifier[radius] = identifier[radius] ,
identifier[keyword] = identifier[keyword] , identifier[min_price] = identifier[min_price] , identifier[max_price] = identifier[max_price] ,
identifier[name] = identifier[name] , identifier[open_now] = identifier[open_now] , identifier[type] = identifier[type] ) | def places_radar(client, location, radius, keyword=None, min_price=None, max_price=None, name=None, open_now=False, type=None):
"""
Performs radar search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
"""
if not (keyword or name or type):
raise ValueError('either a keyword, name, or type arg is required') # depends on [control=['if'], data=[]]
from warnings import warn
warn('places_radar is deprecated, see http://goo.gl/BGiumE', DeprecationWarning)
return _places(client, 'radar', location=location, radius=radius, keyword=keyword, min_price=min_price, max_price=max_price, name=name, open_now=open_now, type=type) |
def render_ranks (graph, ranks, dot_file="graph.dot"):
    """
    Render the TextRank graph for visual formats.

    graph: the TextRank graph, forwarded unchanged to ``write_dot``
    ranks: the computed rank values, forwarded unchanged to ``write_dot``
    dot_file: output path for the Graphviz dot file; a falsy value
        (e.g. None or "") disables rendering entirely
    """
    # only write the dot file when a destination path was supplied
    if dot_file:
write_dot(graph, ranks, path=dot_file) | def function[render_ranks, parameter[graph, ranks, dot_file]]:
constant[
render the TextRank graph for visual formats
]
if name[dot_file] begin[:]
call[name[write_dot], parameter[name[graph], name[ranks]]] | keyword[def] identifier[render_ranks] ( identifier[graph] , identifier[ranks] , identifier[dot_file] = literal[string] ):
literal[string]
keyword[if] identifier[dot_file] :
identifier[write_dot] ( identifier[graph] , identifier[ranks] , identifier[path] = identifier[dot_file] ) | def render_ranks(graph, ranks, dot_file='graph.dot'):
"""
render the TextRank graph for visual formats
"""
if dot_file:
write_dot(graph, ranks, path=dot_file) # depends on [control=['if'], data=[]] |
def purge():
    """
    Deletes all cached files that have not been used since the last call
    to reset_used.
    """
    known_hashes = read_all()
    used_hashes = read_used()
    for kind, used in used_hashes.items():
        # Anything known for this kind but never marked as used is stale.
        stale = set(known_hashes[kind]) - set(used)
        delete_from_directory_by_hashes(kind, stale)
    reset_used()
write_out() | def function[purge, parameter[]]:
constant[
Deletes all the cached files since the last call to reset_used that have not been used.
]
variable[all_hashes] assign[=] call[name[read_all], parameter[]]
variable[used_hashes] assign[=] call[name[read_used], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0a81870>, <ast.Name object at 0x7da1b0a83fd0>]]] in starred[call[name[used_hashes].items, parameter[]]] begin[:]
variable[hashes] assign[=] call[name[set], parameter[name[hashes]]]
variable[to_remove] assign[=] call[call[name[set], parameter[call[name[all_hashes]][name[kind]]]].difference, parameter[name[hashes]]]
call[name[delete_from_directory_by_hashes], parameter[name[kind], name[to_remove]]]
call[name[reset_used], parameter[]]
call[name[write_out], parameter[]] | keyword[def] identifier[purge] ():
literal[string]
identifier[all_hashes] = identifier[read_all] ()
identifier[used_hashes] = identifier[read_used] ()
keyword[for] identifier[kind] , identifier[hashes] keyword[in] identifier[used_hashes] . identifier[items] ():
identifier[hashes] = identifier[set] ( identifier[hashes] )
identifier[to_remove] = identifier[set] ( identifier[all_hashes] [ identifier[kind] ]). identifier[difference] ( identifier[hashes] )
identifier[delete_from_directory_by_hashes] ( identifier[kind] , identifier[to_remove] )
identifier[reset_used] ()
identifier[write_out] () | def purge():
"""
Deletes all the cached files since the last call to reset_used that have not been used.
"""
all_hashes = read_all()
used_hashes = read_used()
for (kind, hashes) in used_hashes.items():
hashes = set(hashes)
to_remove = set(all_hashes[kind]).difference(hashes)
delete_from_directory_by_hashes(kind, to_remove) # depends on [control=['for'], data=[]]
reset_used()
write_out() |
def unmute_stdio(self) -> None:
"""
Intended to re-store the temporarily disabled logging of `mute_stdio()` by removing the `BlockAll` filter.
"""
for logger in self.loggers.values():
if logger.hasHandlers():
logger.handlers[0].removeFilter(self.block_all_filter) | def function[unmute_stdio, parameter[self]]:
constant[
Intended to re-store the temporarily disabled logging of `mute_stdio()` by removing the `BlockAll` filter.
]
for taget[name[logger]] in starred[call[name[self].loggers.values, parameter[]]] begin[:]
if call[name[logger].hasHandlers, parameter[]] begin[:]
call[call[name[logger].handlers][constant[0]].removeFilter, parameter[name[self].block_all_filter]] | keyword[def] identifier[unmute_stdio] ( identifier[self] )-> keyword[None] :
literal[string]
keyword[for] identifier[logger] keyword[in] identifier[self] . identifier[loggers] . identifier[values] ():
keyword[if] identifier[logger] . identifier[hasHandlers] ():
identifier[logger] . identifier[handlers] [ literal[int] ]. identifier[removeFilter] ( identifier[self] . identifier[block_all_filter] ) | def unmute_stdio(self) -> None:
"""
Intended to re-store the temporarily disabled logging of `mute_stdio()` by removing the `BlockAll` filter.
"""
for logger in self.loggers.values():
if logger.hasHandlers():
logger.handlers[0].removeFilter(self.block_all_filter) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['logger']] |
def send_rpc_message(self, method, request):
'''Sends a Hadoop RPC request to the NameNode.
The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
should already be serialized in the right way (delimited or not) before
they are passed in this method.
The Hadoop RPC protocol looks like this for sending requests:
When sending requests
+---------------------------------------------------------------------+
| Length of the next three parts (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Delimited serialized RpcRequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized RequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized Request (varint len + request) |
+---------------------------------------------------------------------+
'''
log.debug("############## SENDING ##############")
#0. RpcRequestHeaderProto
rpc_request_header = self.create_rpc_request_header()
#1. RequestHeaderProto
request_header = self.create_request_header(method)
#2. Param
param = request.SerializeToString()
if log.getEffectiveLevel() == logging.DEBUG:
log_protobuf_message("Request", request)
rpc_message_length = len(rpc_request_header) + encoder._VarintSize(len(rpc_request_header)) + \
len(request_header) + encoder._VarintSize(len(request_header)) + \
len(param) + encoder._VarintSize(len(param))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("RPC message length: %s (%s)" % (rpc_message_length, format_bytes(struct.pack('!I', rpc_message_length))))
self.write(struct.pack('!I', rpc_message_length))
self.write_delimited(rpc_request_header)
self.write_delimited(request_header)
self.write_delimited(param) | def function[send_rpc_message, parameter[self, method, request]]:
constant[Sends a Hadoop RPC request to the NameNode.
The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
should already be serialized in the right way (delimited or not) before
they are passed in this method.
The Hadoop RPC protocol looks like this for sending requests:
When sending requests
+---------------------------------------------------------------------+
| Length of the next three parts (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Delimited serialized RpcRequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized RequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized Request (varint len + request) |
+---------------------------------------------------------------------+
]
call[name[log].debug, parameter[constant[############## SENDING ##############]]]
variable[rpc_request_header] assign[=] call[name[self].create_rpc_request_header, parameter[]]
variable[request_header] assign[=] call[name[self].create_request_header, parameter[name[method]]]
variable[param] assign[=] call[name[request].SerializeToString, parameter[]]
if compare[call[name[log].getEffectiveLevel, parameter[]] equal[==] name[logging].DEBUG] begin[:]
call[name[log_protobuf_message], parameter[constant[Request], name[request]]]
variable[rpc_message_length] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[len], parameter[name[rpc_request_header]]] + call[name[encoder]._VarintSize, parameter[call[name[len], parameter[name[rpc_request_header]]]]]] + call[name[len], parameter[name[request_header]]]] + call[name[encoder]._VarintSize, parameter[call[name[len], parameter[name[request_header]]]]]] + call[name[len], parameter[name[param]]]] + call[name[encoder]._VarintSize, parameter[call[name[len], parameter[name[param]]]]]]
if compare[call[name[log].getEffectiveLevel, parameter[]] equal[==] name[logging].DEBUG] begin[:]
call[name[log].debug, parameter[binary_operation[constant[RPC message length: %s (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0846fb0>, <ast.Call object at 0x7da1b0846f50>]]]]]
call[name[self].write, parameter[call[name[struct].pack, parameter[constant[!I], name[rpc_message_length]]]]]
call[name[self].write_delimited, parameter[name[rpc_request_header]]]
call[name[self].write_delimited, parameter[name[request_header]]]
call[name[self].write_delimited, parameter[name[param]]] | keyword[def] identifier[send_rpc_message] ( identifier[self] , identifier[method] , identifier[request] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[rpc_request_header] = identifier[self] . identifier[create_rpc_request_header] ()
identifier[request_header] = identifier[self] . identifier[create_request_header] ( identifier[method] )
identifier[param] = identifier[request] . identifier[SerializeToString] ()
keyword[if] identifier[log] . identifier[getEffectiveLevel] ()== identifier[logging] . identifier[DEBUG] :
identifier[log_protobuf_message] ( literal[string] , identifier[request] )
identifier[rpc_message_length] = identifier[len] ( identifier[rpc_request_header] )+ identifier[encoder] . identifier[_VarintSize] ( identifier[len] ( identifier[rpc_request_header] ))+ identifier[len] ( identifier[request_header] )+ identifier[encoder] . identifier[_VarintSize] ( identifier[len] ( identifier[request_header] ))+ identifier[len] ( identifier[param] )+ identifier[encoder] . identifier[_VarintSize] ( identifier[len] ( identifier[param] ))
keyword[if] identifier[log] . identifier[getEffectiveLevel] ()== identifier[logging] . identifier[DEBUG] :
identifier[log] . identifier[debug] ( literal[string] %( identifier[rpc_message_length] , identifier[format_bytes] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[rpc_message_length] ))))
identifier[self] . identifier[write] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[rpc_message_length] ))
identifier[self] . identifier[write_delimited] ( identifier[rpc_request_header] )
identifier[self] . identifier[write_delimited] ( identifier[request_header] )
identifier[self] . identifier[write_delimited] ( identifier[param] ) | def send_rpc_message(self, method, request):
"""Sends a Hadoop RPC request to the NameNode.
The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
should already be serialized in the right way (delimited or not) before
they are passed in this method.
The Hadoop RPC protocol looks like this for sending requests:
When sending requests
+---------------------------------------------------------------------+
| Length of the next three parts (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Delimited serialized RpcRequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized RequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized Request (varint len + request) |
+---------------------------------------------------------------------+
"""
log.debug('############## SENDING ##############')
#0. RpcRequestHeaderProto
rpc_request_header = self.create_rpc_request_header()
#1. RequestHeaderProto
request_header = self.create_request_header(method)
#2. Param
param = request.SerializeToString()
if log.getEffectiveLevel() == logging.DEBUG:
log_protobuf_message('Request', request) # depends on [control=['if'], data=[]]
rpc_message_length = len(rpc_request_header) + encoder._VarintSize(len(rpc_request_header)) + len(request_header) + encoder._VarintSize(len(request_header)) + len(param) + encoder._VarintSize(len(param))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug('RPC message length: %s (%s)' % (rpc_message_length, format_bytes(struct.pack('!I', rpc_message_length)))) # depends on [control=['if'], data=[]]
self.write(struct.pack('!I', rpc_message_length))
self.write_delimited(rpc_request_header)
self.write_delimited(request_header)
self.write_delimited(param) |
def handle_single_message(self, msg):
"""
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
"""
job_id = msg.message['job_id']
actual_msg = msg.message
if msg.type == MessageType.JOB_UPDATED:
progress = actual_msg['progress']
total_progress = actual_msg['total_progress']
self.storage_backend.update_job_progress(job_id, progress,
total_progress)
elif msg.type == MessageType.JOB_COMPLETED:
self.storage_backend.complete_job(job_id)
elif msg.type == MessageType.JOB_FAILED:
exc = actual_msg['exception']
trace = actual_msg['traceback']
self.storage_backend.mark_job_as_failed(job_id, exc, trace)
elif msg.type == MessageType.JOB_CANCELED:
self.storage_backend.mark_job_as_canceled(job_id)
else:
self.logger.error("Unknown message type: {}".format(msg.type)) | def function[handle_single_message, parameter[self, msg]]:
constant[
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
]
variable[job_id] assign[=] call[name[msg].message][constant[job_id]]
variable[actual_msg] assign[=] name[msg].message
if compare[name[msg].type equal[==] name[MessageType].JOB_UPDATED] begin[:]
variable[progress] assign[=] call[name[actual_msg]][constant[progress]]
variable[total_progress] assign[=] call[name[actual_msg]][constant[total_progress]]
call[name[self].storage_backend.update_job_progress, parameter[name[job_id], name[progress], name[total_progress]]] | keyword[def] identifier[handle_single_message] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[job_id] = identifier[msg] . identifier[message] [ literal[string] ]
identifier[actual_msg] = identifier[msg] . identifier[message]
keyword[if] identifier[msg] . identifier[type] == identifier[MessageType] . identifier[JOB_UPDATED] :
identifier[progress] = identifier[actual_msg] [ literal[string] ]
identifier[total_progress] = identifier[actual_msg] [ literal[string] ]
identifier[self] . identifier[storage_backend] . identifier[update_job_progress] ( identifier[job_id] , identifier[progress] ,
identifier[total_progress] )
keyword[elif] identifier[msg] . identifier[type] == identifier[MessageType] . identifier[JOB_COMPLETED] :
identifier[self] . identifier[storage_backend] . identifier[complete_job] ( identifier[job_id] )
keyword[elif] identifier[msg] . identifier[type] == identifier[MessageType] . identifier[JOB_FAILED] :
identifier[exc] = identifier[actual_msg] [ literal[string] ]
identifier[trace] = identifier[actual_msg] [ literal[string] ]
identifier[self] . identifier[storage_backend] . identifier[mark_job_as_failed] ( identifier[job_id] , identifier[exc] , identifier[trace] )
keyword[elif] identifier[msg] . identifier[type] == identifier[MessageType] . identifier[JOB_CANCELED] :
identifier[self] . identifier[storage_backend] . identifier[mark_job_as_canceled] ( identifier[job_id] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[msg] . identifier[type] )) | def handle_single_message(self, msg):
"""
Handle one message and modify the job storage appropriately.
:param msg: the message to handle
:return: None
"""
job_id = msg.message['job_id']
actual_msg = msg.message
if msg.type == MessageType.JOB_UPDATED:
progress = actual_msg['progress']
total_progress = actual_msg['total_progress']
self.storage_backend.update_job_progress(job_id, progress, total_progress) # depends on [control=['if'], data=[]]
elif msg.type == MessageType.JOB_COMPLETED:
self.storage_backend.complete_job(job_id) # depends on [control=['if'], data=[]]
elif msg.type == MessageType.JOB_FAILED:
exc = actual_msg['exception']
trace = actual_msg['traceback']
self.storage_backend.mark_job_as_failed(job_id, exc, trace) # depends on [control=['if'], data=[]]
elif msg.type == MessageType.JOB_CANCELED:
self.storage_backend.mark_job_as_canceled(job_id) # depends on [control=['if'], data=[]]
else:
self.logger.error('Unknown message type: {}'.format(msg.type)) |
def samples_to_batches(samples: Iterable, batch_size: int):
"""Chunk a series of network inputs and outputs into larger batches"""
it = iter(samples)
while True:
with suppress(StopIteration):
batch_in, batch_out = [], []
for i in range(batch_size):
sample_in, sample_out = next(it)
batch_in.append(sample_in)
batch_out.append(sample_out)
if not batch_in:
raise StopIteration
yield np.array(batch_in), np.array(batch_out) | def function[samples_to_batches, parameter[samples, batch_size]]:
constant[Chunk a series of network inputs and outputs into larger batches]
variable[it] assign[=] call[name[iter], parameter[name[samples]]]
while constant[True] begin[:]
with call[name[suppress], parameter[name[StopIteration]]] begin[:]
<ast.Tuple object at 0x7da1b184c2b0> assign[=] tuple[[<ast.List object at 0x7da1b184dff0>, <ast.List object at 0x7da1b184cb20>]]
for taget[name[i]] in starred[call[name[range], parameter[name[batch_size]]]] begin[:]
<ast.Tuple object at 0x7da1b184cf40> assign[=] call[name[next], parameter[name[it]]]
call[name[batch_in].append, parameter[name[sample_in]]]
call[name[batch_out].append, parameter[name[sample_out]]]
if <ast.UnaryOp object at 0x7da1b18dc8b0> begin[:]
<ast.Raise object at 0x7da1b18dc610>
<ast.Yield object at 0x7da1b18ddab0> | keyword[def] identifier[samples_to_batches] ( identifier[samples] : identifier[Iterable] , identifier[batch_size] : identifier[int] ):
literal[string]
identifier[it] = identifier[iter] ( identifier[samples] )
keyword[while] keyword[True] :
keyword[with] identifier[suppress] ( identifier[StopIteration] ):
identifier[batch_in] , identifier[batch_out] =[],[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[batch_size] ):
identifier[sample_in] , identifier[sample_out] = identifier[next] ( identifier[it] )
identifier[batch_in] . identifier[append] ( identifier[sample_in] )
identifier[batch_out] . identifier[append] ( identifier[sample_out] )
keyword[if] keyword[not] identifier[batch_in] :
keyword[raise] identifier[StopIteration]
keyword[yield] identifier[np] . identifier[array] ( identifier[batch_in] ), identifier[np] . identifier[array] ( identifier[batch_out] ) | def samples_to_batches(samples: Iterable, batch_size: int):
"""Chunk a series of network inputs and outputs into larger batches"""
it = iter(samples)
while True:
with suppress(StopIteration):
(batch_in, batch_out) = ([], [])
for i in range(batch_size):
(sample_in, sample_out) = next(it)
batch_in.append(sample_in)
batch_out.append(sample_out) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
if not batch_in:
raise StopIteration # depends on [control=['if'], data=[]]
yield (np.array(batch_in), np.array(batch_out)) # depends on [control=['while'], data=[]] |
def gaps(args):
"""
%prog gaps A_vs_B.blast
Find distribution of gap sizes betwen adjacent HSPs.
"""
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
blast = BlastSlow(blastfile)
logging.debug("A total of {} records imported".format(len(blast)))
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug("Query gaps: {} Subject gaps: {}"\
.format(len(query_gaps), len(subject_gaps)))
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig("query_gaps.pdf") | def function[gaps, parameter[args]]:
constant[
%prog gaps A_vs_B.blast
Find distribution of gap sizes betwen adjacent HSPs.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[gaps].__doc__]]
<ast.Tuple object at 0x7da1b09e9c00> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b09e86d0>]]
<ast.Tuple object at 0x7da1b09eb0a0> assign[=] name[args]
variable[blast] assign[=] call[name[BlastSlow], parameter[name[blastfile]]]
call[name[logging].debug, parameter[call[constant[A total of {} records imported].format, parameter[call[name[len], parameter[name[blast]]]]]]]
variable[query_gaps] assign[=] call[name[list], parameter[call[name[collect_gaps], parameter[name[blast]]]]]
variable[subject_gaps] assign[=] call[name[list], parameter[call[name[collect_gaps], parameter[name[blast]]]]]
call[name[logging].debug, parameter[call[constant[Query gaps: {} Subject gaps: {}].format, parameter[call[name[len], parameter[name[query_gaps]]], call[name[len], parameter[name[subject_gaps]]]]]]]
from relative_module[jcvi.graphics.base] import module[savefig]
import module[seaborn] as alias[sns]
call[name[sns].distplot, parameter[name[query_gaps]]]
call[name[savefig], parameter[constant[query_gaps.pdf]]] | keyword[def] identifier[gaps] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[gaps] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[blastfile] ,= identifier[args]
identifier[blast] = identifier[BlastSlow] ( identifier[blastfile] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[blast] )))
identifier[query_gaps] = identifier[list] ( identifier[collect_gaps] ( identifier[blast] ))
identifier[subject_gaps] = identifier[list] ( identifier[collect_gaps] ( identifier[blast] , identifier[use_subject] = keyword[True] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[query_gaps] ), identifier[len] ( identifier[subject_gaps] )))
keyword[from] identifier[jcvi] . identifier[graphics] . identifier[base] keyword[import] identifier[savefig]
keyword[import] identifier[seaborn] keyword[as] identifier[sns]
identifier[sns] . identifier[distplot] ( identifier[query_gaps] )
identifier[savefig] ( literal[string] ) | def gaps(args):
"""
%prog gaps A_vs_B.blast
Find distribution of gap sizes betwen adjacent HSPs.
"""
p = OptionParser(gaps.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(blastfile,) = args
blast = BlastSlow(blastfile)
logging.debug('A total of {} records imported'.format(len(blast)))
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug('Query gaps: {} Subject gaps: {}'.format(len(query_gaps), len(subject_gaps)))
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig('query_gaps.pdf') |
def ensure_directory_exists(dirname, context=None):
"""
Ensures that a directory exits.
If it does not exist, it is automatically created.
"""
real_dirname = dirname
if context:
real_dirname = realpath_with_context(dirname, context)
if not os.path.exists(real_dirname):
os.makedirs(real_dirname)
assert os.path.exists(real_dirname), "ENSURE dir exists: %s" % dirname
assert os.path.isdir(real_dirname), "ENSURE isa dir: %s" % dirname | def function[ensure_directory_exists, parameter[dirname, context]]:
constant[
Ensures that a directory exits.
If it does not exist, it is automatically created.
]
variable[real_dirname] assign[=] name[dirname]
if name[context] begin[:]
variable[real_dirname] assign[=] call[name[realpath_with_context], parameter[name[dirname], name[context]]]
if <ast.UnaryOp object at 0x7da1b28fae90> begin[:]
call[name[os].makedirs, parameter[name[real_dirname]]]
assert[call[name[os].path.exists, parameter[name[real_dirname]]]]
assert[call[name[os].path.isdir, parameter[name[real_dirname]]]] | keyword[def] identifier[ensure_directory_exists] ( identifier[dirname] , identifier[context] = keyword[None] ):
literal[string]
identifier[real_dirname] = identifier[dirname]
keyword[if] identifier[context] :
identifier[real_dirname] = identifier[realpath_with_context] ( identifier[dirname] , identifier[context] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[real_dirname] ):
identifier[os] . identifier[makedirs] ( identifier[real_dirname] )
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[real_dirname] ), literal[string] % identifier[dirname]
keyword[assert] identifier[os] . identifier[path] . identifier[isdir] ( identifier[real_dirname] ), literal[string] % identifier[dirname] | def ensure_directory_exists(dirname, context=None):
"""
Ensures that a directory exits.
If it does not exist, it is automatically created.
"""
real_dirname = dirname
if context:
real_dirname = realpath_with_context(dirname, context) # depends on [control=['if'], data=[]]
if not os.path.exists(real_dirname):
os.makedirs(real_dirname) # depends on [control=['if'], data=[]]
assert os.path.exists(real_dirname), 'ENSURE dir exists: %s' % dirname
assert os.path.isdir(real_dirname), 'ENSURE isa dir: %s' % dirname |
def execute_command(args, shell=False, cwd=None, env=None, stdin=None, stdout=None, stderr=None, cmd_encoding='utf-8'):
"""
Execute external command
:param args: command line arguments : [unicode]
:param shell: True when using shell : boolean
:param cwd: working directory : string
:param env: environment variables : dict
:param stdin: standard input
:param stdout: standard output
:param stderr: standard error
:param cmd_encoding: command line encoding: string
:return: return code
"""
return subprocess.call(
args=__convert_args(args, shell, cmd_encoding), shell=shell, cwd=cwd, env=__convert_env(env, cmd_encoding),
stdin=stdin, stdout=stdout, stderr=stderr) | def function[execute_command, parameter[args, shell, cwd, env, stdin, stdout, stderr, cmd_encoding]]:
constant[
Execute external command
:param args: command line arguments : [unicode]
:param shell: True when using shell : boolean
:param cwd: working directory : string
:param env: environment variables : dict
:param stdin: standard input
:param stdout: standard output
:param stderr: standard error
:param cmd_encoding: command line encoding: string
:return: return code
]
return[call[name[subprocess].call, parameter[]]] | keyword[def] identifier[execute_command] ( identifier[args] , identifier[shell] = keyword[False] , identifier[cwd] = keyword[None] , identifier[env] = keyword[None] , identifier[stdin] = keyword[None] , identifier[stdout] = keyword[None] , identifier[stderr] = keyword[None] , identifier[cmd_encoding] = literal[string] ):
literal[string]
keyword[return] identifier[subprocess] . identifier[call] (
identifier[args] = identifier[__convert_args] ( identifier[args] , identifier[shell] , identifier[cmd_encoding] ), identifier[shell] = identifier[shell] , identifier[cwd] = identifier[cwd] , identifier[env] = identifier[__convert_env] ( identifier[env] , identifier[cmd_encoding] ),
identifier[stdin] = identifier[stdin] , identifier[stdout] = identifier[stdout] , identifier[stderr] = identifier[stderr] ) | def execute_command(args, shell=False, cwd=None, env=None, stdin=None, stdout=None, stderr=None, cmd_encoding='utf-8'):
"""
Execute external command
:param args: command line arguments : [unicode]
:param shell: True when using shell : boolean
:param cwd: working directory : string
:param env: environment variables : dict
:param stdin: standard input
:param stdout: standard output
:param stderr: standard error
:param cmd_encoding: command line encoding: string
:return: return code
"""
return subprocess.call(args=__convert_args(args, shell, cmd_encoding), shell=shell, cwd=cwd, env=__convert_env(env, cmd_encoding), stdin=stdin, stdout=stdout, stderr=stderr) |
def get_font(self, weight='medium', slant='upright', width='normal'):
"""Return the font matching or closest to the given style
If a font with the given weight, slant and width is available, return
it. Otherwise, return the font that is closest in style.
Args:
weight (FontWeight): weight of the font
slant (FontSlant): slant of the font
width (FontWidth): width of the font
Returns:
Font: the requested font
"""
def find_closest_style(style, styles, alternatives):
try:
return style, styles[style]
except KeyError:
for option in alternatives[style]:
try:
return option, styles[option]
except KeyError:
continue
def find_closest_weight(weight, weights):
index = FontWeight.values.index(weight)
min_distance = len(FontWeight.values)
closest = None
for i, option in enumerate(FontWeight.values):
if option in weights and abs(index - i) < min_distance:
min_distance = abs(index - i)
closest = option
return closest, weights[closest]
available_width, slants = find_closest_style(width, self,
FontWidth.alternatives)
available_slant, weights = find_closest_style(slant, slants,
FontSlant.alternatives)
available_weight, font = find_closest_weight(weight, weights)
if (available_width != width or available_slant != slant or
available_weight != weight):
warn('{} does not include a {} {} {} font. Falling back to {} {} '
'{}'.format(self.name, width, weight, slant, available_width,
available_weight, available_slant))
return font | def function[get_font, parameter[self, weight, slant, width]]:
constant[Return the font matching or closest to the given style
If a font with the given weight, slant and width is available, return
it. Otherwise, return the font that is closest in style.
Args:
weight (FontWeight): weight of the font
slant (FontSlant): slant of the font
width (FontWidth): width of the font
Returns:
Font: the requested font
]
def function[find_closest_style, parameter[style, styles, alternatives]]:
<ast.Try object at 0x7da1b26af6d0>
def function[find_closest_weight, parameter[weight, weights]]:
variable[index] assign[=] call[name[FontWeight].values.index, parameter[name[weight]]]
variable[min_distance] assign[=] call[name[len], parameter[name[FontWeight].values]]
variable[closest] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18f58edd0>, <ast.Name object at 0x7da18f58c2b0>]]] in starred[call[name[enumerate], parameter[name[FontWeight].values]]] begin[:]
if <ast.BoolOp object at 0x7da18f58f2e0> begin[:]
variable[min_distance] assign[=] call[name[abs], parameter[binary_operation[name[index] - name[i]]]]
variable[closest] assign[=] name[option]
return[tuple[[<ast.Name object at 0x7da18f58caf0>, <ast.Subscript object at 0x7da18f58d510>]]]
<ast.Tuple object at 0x7da18f58f460> assign[=] call[name[find_closest_style], parameter[name[width], name[self], name[FontWidth].alternatives]]
<ast.Tuple object at 0x7da18f58eec0> assign[=] call[name[find_closest_style], parameter[name[slant], name[slants], name[FontSlant].alternatives]]
<ast.Tuple object at 0x7da18f58f670> assign[=] call[name[find_closest_weight], parameter[name[weight], name[weights]]]
if <ast.BoolOp object at 0x7da18f58ca60> begin[:]
call[name[warn], parameter[call[constant[{} does not include a {} {} {} font. Falling back to {} {} {}].format, parameter[name[self].name, name[width], name[weight], name[slant], name[available_width], name[available_weight], name[available_slant]]]]]
return[name[font]] | keyword[def] identifier[get_font] ( identifier[self] , identifier[weight] = literal[string] , identifier[slant] = literal[string] , identifier[width] = literal[string] ):
literal[string]
keyword[def] identifier[find_closest_style] ( identifier[style] , identifier[styles] , identifier[alternatives] ):
keyword[try] :
keyword[return] identifier[style] , identifier[styles] [ identifier[style] ]
keyword[except] identifier[KeyError] :
keyword[for] identifier[option] keyword[in] identifier[alternatives] [ identifier[style] ]:
keyword[try] :
keyword[return] identifier[option] , identifier[styles] [ identifier[option] ]
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[def] identifier[find_closest_weight] ( identifier[weight] , identifier[weights] ):
identifier[index] = identifier[FontWeight] . identifier[values] . identifier[index] ( identifier[weight] )
identifier[min_distance] = identifier[len] ( identifier[FontWeight] . identifier[values] )
identifier[closest] = keyword[None]
keyword[for] identifier[i] , identifier[option] keyword[in] identifier[enumerate] ( identifier[FontWeight] . identifier[values] ):
keyword[if] identifier[option] keyword[in] identifier[weights] keyword[and] identifier[abs] ( identifier[index] - identifier[i] )< identifier[min_distance] :
identifier[min_distance] = identifier[abs] ( identifier[index] - identifier[i] )
identifier[closest] = identifier[option]
keyword[return] identifier[closest] , identifier[weights] [ identifier[closest] ]
identifier[available_width] , identifier[slants] = identifier[find_closest_style] ( identifier[width] , identifier[self] ,
identifier[FontWidth] . identifier[alternatives] )
identifier[available_slant] , identifier[weights] = identifier[find_closest_style] ( identifier[slant] , identifier[slants] ,
identifier[FontSlant] . identifier[alternatives] )
identifier[available_weight] , identifier[font] = identifier[find_closest_weight] ( identifier[weight] , identifier[weights] )
keyword[if] ( identifier[available_width] != identifier[width] keyword[or] identifier[available_slant] != identifier[slant] keyword[or]
identifier[available_weight] != identifier[weight] ):
identifier[warn] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[width] , identifier[weight] , identifier[slant] , identifier[available_width] ,
identifier[available_weight] , identifier[available_slant] ))
keyword[return] identifier[font] | def get_font(self, weight='medium', slant='upright', width='normal'):
"""Return the font matching or closest to the given style
If a font with the given weight, slant and width is available, return
it. Otherwise, return the font that is closest in style.
Args:
weight (FontWeight): weight of the font
slant (FontSlant): slant of the font
width (FontWidth): width of the font
Returns:
Font: the requested font
"""
def find_closest_style(style, styles, alternatives):
try:
return (style, styles[style]) # depends on [control=['try'], data=[]]
except KeyError:
for option in alternatives[style]:
try:
return (option, styles[option]) # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['option']] # depends on [control=['except'], data=[]]
def find_closest_weight(weight, weights):
index = FontWeight.values.index(weight)
min_distance = len(FontWeight.values)
closest = None
for (i, option) in enumerate(FontWeight.values):
if option in weights and abs(index - i) < min_distance:
min_distance = abs(index - i)
closest = option # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (closest, weights[closest])
(available_width, slants) = find_closest_style(width, self, FontWidth.alternatives)
(available_slant, weights) = find_closest_style(slant, slants, FontSlant.alternatives)
(available_weight, font) = find_closest_weight(weight, weights)
if available_width != width or available_slant != slant or available_weight != weight:
warn('{} does not include a {} {} {} font. Falling back to {} {} {}'.format(self.name, width, weight, slant, available_width, available_weight, available_slant)) # depends on [control=['if'], data=[]]
return font |
def scolor(self):
"""
Set a unique color from a serie
"""
global palette
color = palette[self.color_index]
if len(palette) - 1 == self.color_index:
self.color_index = 0
else:
self.color_index += 1
self.color(color) | def function[scolor, parameter[self]]:
constant[
Set a unique color from a serie
]
<ast.Global object at 0x7da18dc07040>
variable[color] assign[=] call[name[palette]][name[self].color_index]
if compare[binary_operation[call[name[len], parameter[name[palette]]] - constant[1]] equal[==] name[self].color_index] begin[:]
name[self].color_index assign[=] constant[0]
call[name[self].color, parameter[name[color]]] | keyword[def] identifier[scolor] ( identifier[self] ):
literal[string]
keyword[global] identifier[palette]
identifier[color] = identifier[palette] [ identifier[self] . identifier[color_index] ]
keyword[if] identifier[len] ( identifier[palette] )- literal[int] == identifier[self] . identifier[color_index] :
identifier[self] . identifier[color_index] = literal[int]
keyword[else] :
identifier[self] . identifier[color_index] += literal[int]
identifier[self] . identifier[color] ( identifier[color] ) | def scolor(self):
"""
Set a unique color from a serie
"""
global palette
color = palette[self.color_index]
if len(palette) - 1 == self.color_index:
self.color_index = 0 # depends on [control=['if'], data=[]]
else:
self.color_index += 1
self.color(color) |
def add_widgets(self, *widgets_or_spacings):
"""Add widgets/spacing to dialog vertical layout"""
layout = self.layout()
for widget_or_spacing in widgets_or_spacings:
if isinstance(widget_or_spacing, int):
layout.addSpacing(widget_or_spacing)
else:
layout.addWidget(widget_or_spacing) | def function[add_widgets, parameter[self]]:
constant[Add widgets/spacing to dialog vertical layout]
variable[layout] assign[=] call[name[self].layout, parameter[]]
for taget[name[widget_or_spacing]] in starred[name[widgets_or_spacings]] begin[:]
if call[name[isinstance], parameter[name[widget_or_spacing], name[int]]] begin[:]
call[name[layout].addSpacing, parameter[name[widget_or_spacing]]] | keyword[def] identifier[add_widgets] ( identifier[self] ,* identifier[widgets_or_spacings] ):
literal[string]
identifier[layout] = identifier[self] . identifier[layout] ()
keyword[for] identifier[widget_or_spacing] keyword[in] identifier[widgets_or_spacings] :
keyword[if] identifier[isinstance] ( identifier[widget_or_spacing] , identifier[int] ):
identifier[layout] . identifier[addSpacing] ( identifier[widget_or_spacing] )
keyword[else] :
identifier[layout] . identifier[addWidget] ( identifier[widget_or_spacing] ) | def add_widgets(self, *widgets_or_spacings):
"""Add widgets/spacing to dialog vertical layout"""
layout = self.layout()
for widget_or_spacing in widgets_or_spacings:
if isinstance(widget_or_spacing, int):
layout.addSpacing(widget_or_spacing) # depends on [control=['if'], data=[]]
else:
layout.addWidget(widget_or_spacing) # depends on [control=['for'], data=['widget_or_spacing']] |
def expand_config(dct,
separator='.',
skip_to=0,
key_func=lambda key: key.lower(),
key_parts_filter=lambda key_parts: True,
value_func=lambda value: value):
"""
Expand a dictionary recursively by splitting keys along the separator.
:param dct: a non-recursive dictionary
:param separator: a separator charactor for splitting dictionary keys
:param skip_to: index to start splitting keys on; can be used to skip over a key prefix
:param key_func: a key mapping function
:param key_parts_filter: a filter function for excluding keys
:param value_func: a value mapping func
"""
config = {}
for key, value in dct.items():
key_separator = separator(key) if callable(separator) else separator
key_parts = key.split(key_separator)
if not key_parts_filter(key_parts):
continue
key_config = config
# skip prefix
for key_part in key_parts[skip_to:-1]:
key_config = key_config.setdefault(key_func(key_part), dict())
key_config[key_func(key_parts[-1])] = value_func(value)
return config | def function[expand_config, parameter[dct, separator, skip_to, key_func, key_parts_filter, value_func]]:
constant[
Expand a dictionary recursively by splitting keys along the separator.
:param dct: a non-recursive dictionary
:param separator: a separator charactor for splitting dictionary keys
:param skip_to: index to start splitting keys on; can be used to skip over a key prefix
:param key_func: a key mapping function
:param key_parts_filter: a filter function for excluding keys
:param value_func: a value mapping func
]
variable[config] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0c92890>, <ast.Name object at 0x7da1b0c92980>]]] in starred[call[name[dct].items, parameter[]]] begin[:]
variable[key_separator] assign[=] <ast.IfExp object at 0x7da1b0c91f30>
variable[key_parts] assign[=] call[name[key].split, parameter[name[key_separator]]]
if <ast.UnaryOp object at 0x7da1b0c92440> begin[:]
continue
variable[key_config] assign[=] name[config]
for taget[name[key_part]] in starred[call[name[key_parts]][<ast.Slice object at 0x7da1b0c66710>]] begin[:]
variable[key_config] assign[=] call[name[key_config].setdefault, parameter[call[name[key_func], parameter[name[key_part]]], call[name[dict], parameter[]]]]
call[name[key_config]][call[name[key_func], parameter[call[name[key_parts]][<ast.UnaryOp object at 0x7da1b0c67700>]]]] assign[=] call[name[value_func], parameter[name[value]]]
return[name[config]] | keyword[def] identifier[expand_config] ( identifier[dct] ,
identifier[separator] = literal[string] ,
identifier[skip_to] = literal[int] ,
identifier[key_func] = keyword[lambda] identifier[key] : identifier[key] . identifier[lower] (),
identifier[key_parts_filter] = keyword[lambda] identifier[key_parts] : keyword[True] ,
identifier[value_func] = keyword[lambda] identifier[value] : identifier[value] ):
literal[string]
identifier[config] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[dct] . identifier[items] ():
identifier[key_separator] = identifier[separator] ( identifier[key] ) keyword[if] identifier[callable] ( identifier[separator] ) keyword[else] identifier[separator]
identifier[key_parts] = identifier[key] . identifier[split] ( identifier[key_separator] )
keyword[if] keyword[not] identifier[key_parts_filter] ( identifier[key_parts] ):
keyword[continue]
identifier[key_config] = identifier[config]
keyword[for] identifier[key_part] keyword[in] identifier[key_parts] [ identifier[skip_to] :- literal[int] ]:
identifier[key_config] = identifier[key_config] . identifier[setdefault] ( identifier[key_func] ( identifier[key_part] ), identifier[dict] ())
identifier[key_config] [ identifier[key_func] ( identifier[key_parts] [- literal[int] ])]= identifier[value_func] ( identifier[value] )
keyword[return] identifier[config] | def expand_config(dct, separator='.', skip_to=0, key_func=lambda key: key.lower(), key_parts_filter=lambda key_parts: True, value_func=lambda value: value):
"""
Expand a dictionary recursively by splitting keys along the separator.
:param dct: a non-recursive dictionary
:param separator: a separator charactor for splitting dictionary keys
:param skip_to: index to start splitting keys on; can be used to skip over a key prefix
:param key_func: a key mapping function
:param key_parts_filter: a filter function for excluding keys
:param value_func: a value mapping func
"""
config = {}
for (key, value) in dct.items():
key_separator = separator(key) if callable(separator) else separator
key_parts = key.split(key_separator)
if not key_parts_filter(key_parts):
continue # depends on [control=['if'], data=[]]
key_config = config
# skip prefix
for key_part in key_parts[skip_to:-1]:
key_config = key_config.setdefault(key_func(key_part), dict()) # depends on [control=['for'], data=['key_part']]
key_config[key_func(key_parts[-1])] = value_func(value) # depends on [control=['for'], data=[]]
return config |
def calcVmin(self,**kwargs):
"""
NAME:
calcVmin
PURPOSE:
calculate the v 'pericenter'
INPUT:
OUTPUT:
vmin
HISTORY:
2012-11-28 - Written - Bovy (IAS)
"""
if hasattr(self,'_vmin'): #pragma: no cover
return self._vmin
E, L= self._E, self._Lz
if nu.fabs(self._pvx) < 10.**-7.: #We are at vmin or vmax
eps= 10.**-8.
peps= _JzStaeckelIntegrandSquared(self._vx+eps,
E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot)
meps= _JzStaeckelIntegrandSquared(self._vx-eps,
E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot)
if peps < 0. and meps > 0.: #we are at vmax, which cannot happen
rstart= _vminFindStart(self._vx,
E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot)
if rstart == 0.: vmin= 0.
else:
try:
vmin= optimize.brentq(_JzStaeckelIntegrandSquared,
rstart,self._vx-eps,
(E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot),
maxiter=200)
except RuntimeError: #pragma: no cover
raise UnboundError("Orbit seems to be unbound")
elif peps > 0. and meps < 0.: #we are at vmin
vmin= self._vx
else: #planar orbit
vmin= self._vx
else:
rstart= _vminFindStart(self._vx,
E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot)
if rstart == 0.: vmin= 0.
else:
try:
vmin= optimize.brentq(_JzStaeckelIntegrandSquared,
rstart,rstart/0.9,
(E,L,self._I3V,self._delta,
self._ux,self._coshux**2.,
self._sinhux**2.,
self._potupi2,self._pot),
maxiter=200)
except RuntimeError: #pragma: no cover
raise UnboundError("Orbit seems to be unbound")
self._vmin= vmin
return self._vmin | def function[calcVmin, parameter[self]]:
constant[
NAME:
calcVmin
PURPOSE:
calculate the v 'pericenter'
INPUT:
OUTPUT:
vmin
HISTORY:
2012-11-28 - Written - Bovy (IAS)
]
if call[name[hasattr], parameter[name[self], constant[_vmin]]] begin[:]
return[name[self]._vmin]
<ast.Tuple object at 0x7da1b0e88730> assign[=] tuple[[<ast.Attribute object at 0x7da1b0e89510>, <ast.Attribute object at 0x7da1b0e893f0>]]
if compare[call[name[nu].fabs, parameter[name[self]._pvx]] less[<] binary_operation[constant[10.0] ** <ast.UnaryOp object at 0x7da18fe91750>]] begin[:]
variable[eps] assign[=] binary_operation[constant[10.0] ** <ast.UnaryOp object at 0x7da1b0cf5900>]
variable[peps] assign[=] call[name[_JzStaeckelIntegrandSquared], parameter[binary_operation[name[self]._vx + name[eps]], name[E], name[L], name[self]._I3V, name[self]._delta, name[self]._ux, binary_operation[name[self]._coshux ** constant[2.0]], binary_operation[name[self]._sinhux ** constant[2.0]], name[self]._potupi2, name[self]._pot]]
variable[meps] assign[=] call[name[_JzStaeckelIntegrandSquared], parameter[binary_operation[name[self]._vx - name[eps]], name[E], name[L], name[self]._I3V, name[self]._delta, name[self]._ux, binary_operation[name[self]._coshux ** constant[2.0]], binary_operation[name[self]._sinhux ** constant[2.0]], name[self]._potupi2, name[self]._pot]]
if <ast.BoolOp object at 0x7da1b0cf4040> begin[:]
variable[rstart] assign[=] call[name[_vminFindStart], parameter[name[self]._vx, name[E], name[L], name[self]._I3V, name[self]._delta, name[self]._ux, binary_operation[name[self]._coshux ** constant[2.0]], binary_operation[name[self]._sinhux ** constant[2.0]], name[self]._potupi2, name[self]._pot]]
if compare[name[rstart] equal[==] constant[0.0]] begin[:]
variable[vmin] assign[=] constant[0.0]
name[self]._vmin assign[=] name[vmin]
return[name[self]._vmin] | keyword[def] identifier[calcVmin] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[self] . identifier[_vmin]
identifier[E] , identifier[L] = identifier[self] . identifier[_E] , identifier[self] . identifier[_Lz]
keyword[if] identifier[nu] . identifier[fabs] ( identifier[self] . identifier[_pvx] )< literal[int] **- literal[int] :
identifier[eps] = literal[int] **- literal[int]
identifier[peps] = identifier[_JzStaeckelIntegrandSquared] ( identifier[self] . identifier[_vx] + identifier[eps] ,
identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] )
identifier[meps] = identifier[_JzStaeckelIntegrandSquared] ( identifier[self] . identifier[_vx] - identifier[eps] ,
identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] )
keyword[if] identifier[peps] < literal[int] keyword[and] identifier[meps] > literal[int] :
identifier[rstart] = identifier[_vminFindStart] ( identifier[self] . identifier[_vx] ,
identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] )
keyword[if] identifier[rstart] == literal[int] : identifier[vmin] = literal[int]
keyword[else] :
keyword[try] :
identifier[vmin] = identifier[optimize] . identifier[brentq] ( identifier[_JzStaeckelIntegrandSquared] ,
identifier[rstart] , identifier[self] . identifier[_vx] - identifier[eps] ,
( identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] ),
identifier[maxiter] = literal[int] )
keyword[except] identifier[RuntimeError] :
keyword[raise] identifier[UnboundError] ( literal[string] )
keyword[elif] identifier[peps] > literal[int] keyword[and] identifier[meps] < literal[int] :
identifier[vmin] = identifier[self] . identifier[_vx]
keyword[else] :
identifier[vmin] = identifier[self] . identifier[_vx]
keyword[else] :
identifier[rstart] = identifier[_vminFindStart] ( identifier[self] . identifier[_vx] ,
identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] )
keyword[if] identifier[rstart] == literal[int] : identifier[vmin] = literal[int]
keyword[else] :
keyword[try] :
identifier[vmin] = identifier[optimize] . identifier[brentq] ( identifier[_JzStaeckelIntegrandSquared] ,
identifier[rstart] , identifier[rstart] / literal[int] ,
( identifier[E] , identifier[L] , identifier[self] . identifier[_I3V] , identifier[self] . identifier[_delta] ,
identifier[self] . identifier[_ux] , identifier[self] . identifier[_coshux] ** literal[int] ,
identifier[self] . identifier[_sinhux] ** literal[int] ,
identifier[self] . identifier[_potupi2] , identifier[self] . identifier[_pot] ),
identifier[maxiter] = literal[int] )
keyword[except] identifier[RuntimeError] :
keyword[raise] identifier[UnboundError] ( literal[string] )
identifier[self] . identifier[_vmin] = identifier[vmin]
keyword[return] identifier[self] . identifier[_vmin] | def calcVmin(self, **kwargs):
"""
NAME:
calcVmin
PURPOSE:
calculate the v 'pericenter'
INPUT:
OUTPUT:
vmin
HISTORY:
2012-11-28 - Written - Bovy (IAS)
"""
if hasattr(self, '_vmin'): #pragma: no cover
return self._vmin # depends on [control=['if'], data=[]]
(E, L) = (self._E, self._Lz)
if nu.fabs(self._pvx) < 10.0 ** (-7.0): #We are at vmin or vmax
eps = 10.0 ** (-8.0)
peps = _JzStaeckelIntegrandSquared(self._vx + eps, E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot)
meps = _JzStaeckelIntegrandSquared(self._vx - eps, E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot)
if peps < 0.0 and meps > 0.0: #we are at vmax, which cannot happen
rstart = _vminFindStart(self._vx, E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot)
if rstart == 0.0:
vmin = 0.0 # depends on [control=['if'], data=[]]
else:
try:
vmin = optimize.brentq(_JzStaeckelIntegrandSquared, rstart, self._vx - eps, (E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot), maxiter=200) # depends on [control=['try'], data=[]]
except RuntimeError: #pragma: no cover
raise UnboundError('Orbit seems to be unbound') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif peps > 0.0 and meps < 0.0: #we are at vmin
vmin = self._vx # depends on [control=['if'], data=[]]
else: #planar orbit
vmin = self._vx # depends on [control=['if'], data=[]]
else:
rstart = _vminFindStart(self._vx, E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot)
if rstart == 0.0:
vmin = 0.0 # depends on [control=['if'], data=[]]
else:
try:
vmin = optimize.brentq(_JzStaeckelIntegrandSquared, rstart, rstart / 0.9, (E, L, self._I3V, self._delta, self._ux, self._coshux ** 2.0, self._sinhux ** 2.0, self._potupi2, self._pot), maxiter=200) # depends on [control=['try'], data=[]]
except RuntimeError: #pragma: no cover
raise UnboundError('Orbit seems to be unbound') # depends on [control=['except'], data=[]]
self._vmin = vmin
return self._vmin |
def get_seqprop_to_structprop_alignment_stats(self, seqprop, structprop, chain_id):
"""Get the sequence alignment information for a sequence to a structure's chain."""
alignment = self._get_seqprop_to_structprop_alignment(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
return ssbio.protein.sequence.utils.alignment.pairwise_alignment_stats(reference_seq_aln=alignment[0],
other_seq_aln=alignment[1]) | def function[get_seqprop_to_structprop_alignment_stats, parameter[self, seqprop, structprop, chain_id]]:
constant[Get the sequence alignment information for a sequence to a structure's chain.]
variable[alignment] assign[=] call[name[self]._get_seqprop_to_structprop_alignment, parameter[]]
return[call[name[ssbio].protein.sequence.utils.alignment.pairwise_alignment_stats, parameter[]]] | keyword[def] identifier[get_seqprop_to_structprop_alignment_stats] ( identifier[self] , identifier[seqprop] , identifier[structprop] , identifier[chain_id] ):
literal[string]
identifier[alignment] = identifier[self] . identifier[_get_seqprop_to_structprop_alignment] ( identifier[seqprop] = identifier[seqprop] , identifier[structprop] = identifier[structprop] , identifier[chain_id] = identifier[chain_id] )
keyword[return] identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[utils] . identifier[alignment] . identifier[pairwise_alignment_stats] ( identifier[reference_seq_aln] = identifier[alignment] [ literal[int] ],
identifier[other_seq_aln] = identifier[alignment] [ literal[int] ]) | def get_seqprop_to_structprop_alignment_stats(self, seqprop, structprop, chain_id):
"""Get the sequence alignment information for a sequence to a structure's chain."""
alignment = self._get_seqprop_to_structprop_alignment(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
return ssbio.protein.sequence.utils.alignment.pairwise_alignment_stats(reference_seq_aln=alignment[0], other_seq_aln=alignment[1]) |
def newDocTextLen(self, content, len):
"""Creation of a new text node with an extra content length
parameter. The text node pertain to a given document. """
ret = libxml2mod.xmlNewDocTextLen(self._o, content, len)
if ret is None:raise treeError('xmlNewDocTextLen() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | def function[newDocTextLen, parameter[self, content, len]]:
constant[Creation of a new text node with an extra content length
parameter. The text node pertain to a given document. ]
variable[ret] assign[=] call[name[libxml2mod].xmlNewDocTextLen, parameter[name[self]._o, name[content], name[len]]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1f61270>
variable[__tmp] assign[=] call[name[xmlNode], parameter[]]
return[name[__tmp]] | keyword[def] identifier[newDocTextLen] ( identifier[self] , identifier[content] , identifier[len] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlNewDocTextLen] ( identifier[self] . identifier[_o] , identifier[content] , identifier[len] )
keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] )
identifier[__tmp] = identifier[xmlNode] ( identifier[_obj] = identifier[ret] )
keyword[return] identifier[__tmp] | def newDocTextLen(self, content, len):
"""Creation of a new text node with an extra content length
parameter. The text node pertain to a given document. """
ret = libxml2mod.xmlNewDocTextLen(self._o, content, len)
if ret is None:
raise treeError('xmlNewDocTextLen() failed') # depends on [control=['if'], data=[]]
__tmp = xmlNode(_obj=ret)
return __tmp |
def stop(ctx, yes):
"""Stop build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build stop
```
\b
```bash
$ polyaxon build -b 2 stop
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
if not yes and not click.confirm("Are sure you want to stop "
"job `{}`".format(_build)):
click.echo('Existing without stopping build job.')
sys.exit(0)
try:
PolyaxonClient().build_job.stop(user, project_name, _build)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Build job is being stopped.") | def function[stop, parameter[ctx, yes]]:
constant[Stop build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
```bash
$ polyaxon build stop
```
```bash
$ polyaxon build -b 2 stop
```
]
<ast.Tuple object at 0x7da1aff8c070> assign[=] call[name[get_build_or_local], parameter[call[name[ctx].obj.get, parameter[constant[project]]], call[name[ctx].obj.get, parameter[constant[build]]]]]
if <ast.BoolOp object at 0x7da1aff8cf70> begin[:]
call[name[click].echo, parameter[constant[Existing without stopping build job.]]]
call[name[sys].exit, parameter[constant[0]]]
<ast.Try object at 0x7da1aff8c460>
call[name[Printer].print_success, parameter[constant[Build job is being stopped.]]] | keyword[def] identifier[stop] ( identifier[ctx] , identifier[yes] ):
literal[string]
identifier[user] , identifier[project_name] , identifier[_build] = identifier[get_build_or_local] ( identifier[ctx] . identifier[obj] . identifier[get] ( literal[string] ), identifier[ctx] . identifier[obj] . identifier[get] ( literal[string] ))
keyword[if] keyword[not] identifier[yes] keyword[and] keyword[not] identifier[click] . identifier[confirm] ( literal[string]
literal[string] . identifier[format] ( identifier[_build] )):
identifier[click] . identifier[echo] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[try] :
identifier[PolyaxonClient] (). identifier[build_job] . identifier[stop] ( identifier[user] , identifier[project_name] , identifier[_build] )
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[_build] ))
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[Printer] . identifier[print_success] ( literal[string] ) | def stop(ctx, yes):
"""Stop build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\x08
```bash
$ polyaxon build stop
```
\x08
```bash
$ polyaxon build -b 2 stop
```
"""
(user, project_name, _build) = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
if not yes and (not click.confirm('Are sure you want to stop job `{}`'.format(_build))):
click.echo('Existing without stopping build job.')
sys.exit(0) # depends on [control=['if'], data=[]]
try:
PolyaxonClient().build_job.stop(user, project_name, _build) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
Printer.print_success('Build job is being stopped.') |
def del_option_by_name(self, name):
"""
Delete an option from the message by name
:type name: String
:param name: option name
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.name == name:
self._options.remove(o) | def function[del_option_by_name, parameter[self, name]]:
constant[
Delete an option from the message by name
:type name: String
:param name: option name
]
for taget[name[o]] in starred[call[name[list], parameter[name[self]._options]]] begin[:]
assert[call[name[isinstance], parameter[name[o], name[Option]]]]
if compare[name[o].name equal[==] name[name]] begin[:]
call[name[self]._options.remove, parameter[name[o]]] | keyword[def] identifier[del_option_by_name] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[o] keyword[in] identifier[list] ( identifier[self] . identifier[_options] ):
keyword[assert] identifier[isinstance] ( identifier[o] , identifier[Option] )
keyword[if] identifier[o] . identifier[name] == identifier[name] :
identifier[self] . identifier[_options] . identifier[remove] ( identifier[o] ) | def del_option_by_name(self, name):
"""
Delete an option from the message by name
:type name: String
:param name: option name
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.name == name:
self._options.remove(o) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] |
def merge(path_from, path_to):
""" Merges the data from one whisper file into another. Each file must have
the same archive configuration
"""
fh_from = open(path_from, 'rb')
fh_to = open(path_to, 'rb+')
return file_merge(fh_from, fh_to) | def function[merge, parameter[path_from, path_to]]:
constant[ Merges the data from one whisper file into another. Each file must have
the same archive configuration
]
variable[fh_from] assign[=] call[name[open], parameter[name[path_from], constant[rb]]]
variable[fh_to] assign[=] call[name[open], parameter[name[path_to], constant[rb+]]]
return[call[name[file_merge], parameter[name[fh_from], name[fh_to]]]] | keyword[def] identifier[merge] ( identifier[path_from] , identifier[path_to] ):
literal[string]
identifier[fh_from] = identifier[open] ( identifier[path_from] , literal[string] )
identifier[fh_to] = identifier[open] ( identifier[path_to] , literal[string] )
keyword[return] identifier[file_merge] ( identifier[fh_from] , identifier[fh_to] ) | def merge(path_from, path_to):
""" Merges the data from one whisper file into another. Each file must have
the same archive configuration
"""
fh_from = open(path_from, 'rb')
fh_to = open(path_to, 'rb+')
return file_merge(fh_from, fh_to) |
def _log_http_event(response):
"""
It will create a log event as werkzeug but at the end of request holding the request-id
Intended usage is a handler of Flask.after_request
:return: The same response object
"""
logger.info(
'{ip} - - "{method} {path} {status_code}"'.format(
ip=request.remote_addr,
method=request.method,
path=request.path,
status_code=response.status_code)
)
return response | def function[_log_http_event, parameter[response]]:
constant[
It will create a log event as werkzeug but at the end of request holding the request-id
Intended usage is a handler of Flask.after_request
:return: The same response object
]
call[name[logger].info, parameter[call[constant[{ip} - - "{method} {path} {status_code}"].format, parameter[]]]]
return[name[response]] | keyword[def] identifier[_log_http_event] ( identifier[response] ):
literal[string]
identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[ip] = identifier[request] . identifier[remote_addr] ,
identifier[method] = identifier[request] . identifier[method] ,
identifier[path] = identifier[request] . identifier[path] ,
identifier[status_code] = identifier[response] . identifier[status_code] )
)
keyword[return] identifier[response] | def _log_http_event(response):
"""
It will create a log event as werkzeug but at the end of request holding the request-id
Intended usage is a handler of Flask.after_request
:return: The same response object
"""
logger.info('{ip} - - "{method} {path} {status_code}"'.format(ip=request.remote_addr, method=request.method, path=request.path, status_code=response.status_code))
return response |
def create_controller(self):
"""Create a controller to handle the request
:returns: Controller, this Controller instance should be able to handle
the request
"""
body = None
req = self.request
res = self.response
rou = self.router
con = None
controller_info = {}
try:
controller_info = rou.find(req, res)
except IOError as e:
logger.warning(str(e), exc_info=True)
raise CallError(
408,
"The client went away before the request body was retrieved."
)
except (ImportError, AttributeError, TypeError) as e:
exc_info = sys.exc_info()
logger.warning(str(e), exc_info=exc_info)
raise CallError(
404,
"{} not found because of {} \"{}\" on {}:{}".format(
req.path,
exc_info[0].__name__,
str(e),
os.path.basename(exc_info[2].tb_frame.f_code.co_filename),
exc_info[2].tb_lineno
)
)
else:
con = controller_info['class_instance']
return con | def function[create_controller, parameter[self]]:
constant[Create a controller to handle the request
:returns: Controller, this Controller instance should be able to handle
the request
]
variable[body] assign[=] constant[None]
variable[req] assign[=] name[self].request
variable[res] assign[=] name[self].response
variable[rou] assign[=] name[self].router
variable[con] assign[=] constant[None]
variable[controller_info] assign[=] dictionary[[], []]
<ast.Try object at 0x7da18f58c100>
return[name[con]] | keyword[def] identifier[create_controller] ( identifier[self] ):
literal[string]
identifier[body] = keyword[None]
identifier[req] = identifier[self] . identifier[request]
identifier[res] = identifier[self] . identifier[response]
identifier[rou] = identifier[self] . identifier[router]
identifier[con] = keyword[None]
identifier[controller_info] ={}
keyword[try] :
identifier[controller_info] = identifier[rou] . identifier[find] ( identifier[req] , identifier[res] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( identifier[str] ( identifier[e] ), identifier[exc_info] = keyword[True] )
keyword[raise] identifier[CallError] (
literal[int] ,
literal[string]
)
keyword[except] ( identifier[ImportError] , identifier[AttributeError] , identifier[TypeError] ) keyword[as] identifier[e] :
identifier[exc_info] = identifier[sys] . identifier[exc_info] ()
identifier[logger] . identifier[warning] ( identifier[str] ( identifier[e] ), identifier[exc_info] = identifier[exc_info] )
keyword[raise] identifier[CallError] (
literal[int] ,
literal[string] . identifier[format] (
identifier[req] . identifier[path] ,
identifier[exc_info] [ literal[int] ]. identifier[__name__] ,
identifier[str] ( identifier[e] ),
identifier[os] . identifier[path] . identifier[basename] ( identifier[exc_info] [ literal[int] ]. identifier[tb_frame] . identifier[f_code] . identifier[co_filename] ),
identifier[exc_info] [ literal[int] ]. identifier[tb_lineno]
)
)
keyword[else] :
identifier[con] = identifier[controller_info] [ literal[string] ]
keyword[return] identifier[con] | def create_controller(self):
"""Create a controller to handle the request
:returns: Controller, this Controller instance should be able to handle
the request
"""
body = None
req = self.request
res = self.response
rou = self.router
con = None
controller_info = {}
try:
controller_info = rou.find(req, res) # depends on [control=['try'], data=[]]
except IOError as e:
logger.warning(str(e), exc_info=True)
raise CallError(408, 'The client went away before the request body was retrieved.') # depends on [control=['except'], data=['e']]
except (ImportError, AttributeError, TypeError) as e:
exc_info = sys.exc_info()
logger.warning(str(e), exc_info=exc_info)
raise CallError(404, '{} not found because of {} "{}" on {}:{}'.format(req.path, exc_info[0].__name__, str(e), os.path.basename(exc_info[2].tb_frame.f_code.co_filename), exc_info[2].tb_lineno)) # depends on [control=['except'], data=['e']]
else:
con = controller_info['class_instance']
return con |
def readSAM(SAMfile, header=False):
    """
    Reads and parses a SAM file.

    :param SAMfile: /path/to/file.sam
    :param header: logical; if True, also collect and return the "@" header lines
    :returns: a pandas dataframe with the SAM columns
        'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL';
        if header=True, a tuple of (dataframe, list of header lines)
    """
    if header:
        # Collect the "@"-prefixed header lines. Open read-only — the original
        # "r+" mode needlessly required write permission on the file — and use
        # a context manager so the handle is always closed. startswith() is
        # safe on empty lines where line[0] would raise IndexError.
        with open(SAMfile, "r") as f:
            head = [line for line in f if line.startswith("@")]
    # A multi-character separator that never occurs in the data forces pandas
    # to read each alignment line into a single column; "@" lines are skipped
    # via the comment parameter.
    sam = pd.read_table(SAMfile, sep="this_gives_one_column", comment="@", header=None)
    # Split every line on tabs into real columns.
    sam = pd.DataFrame(sam[0].str.split("\t").tolist())
    acols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # .copy() avoids mutating a view of `sam` (SettingWithCopyWarning) when
    # column 10 is added below.
    sam_ = sam[acols].copy()
    bcols = [c for c in sam.columns.tolist() if c not in acols]
    # Field 11 onward (QUAL plus any optional tags) is re-joined with tabs
    # into one column, exactly as in the original file.
    sam_[10] = sam[bcols[0]]
    for c in bcols[1:]:
        sam_[10] = sam_[10].astype(str) + "\t" + sam[c].astype(str)
    sam_.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR',
                    'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
    if header:
        return sam_, head
    return sam_
return sam_ | def function[readSAM, parameter[SAMfile, header]]:
constant[
Reads and parses a sam file.
:param SAMfile: /path/to/file.sam
:param header: logical, if True, reads the header information
:returns: a pandas dataframe with the respective SAM columns: 'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL' and a list of the headers if header=True
]
if compare[name[header] equal[==] constant[True]] begin[:]
variable[f] assign[=] call[name[open], parameter[name[SAMfile], constant[r+]]]
variable[head] assign[=] list[[]]
for taget[name[line]] in starred[call[name[f].readlines, parameter[]]] begin[:]
if compare[call[name[line]][constant[0]] equal[==] constant[@]] begin[:]
call[name[head].append, parameter[name[line]]]
call[name[f].close, parameter[]]
variable[sam] assign[=] call[name[pd].read_table, parameter[name[SAMfile]]]
variable[sam] assign[=] call[name[pd].DataFrame, parameter[call[call[call[name[sam]][constant[0]].str.split, parameter[constant[ ]]].tolist, parameter[]]]]
variable[acols] assign[=] list[[<ast.Constant object at 0x7da18f58c610>, <ast.Constant object at 0x7da18f58d0f0>, <ast.Constant object at 0x7da18f58dd80>, <ast.Constant object at 0x7da18f58c430>, <ast.Constant object at 0x7da18f58c100>, <ast.Constant object at 0x7da18f58c4f0>, <ast.Constant object at 0x7da18f58d480>, <ast.Constant object at 0x7da18f58cbe0>, <ast.Constant object at 0x7da18f58ca90>, <ast.Constant object at 0x7da18f58c8b0>]]
variable[sam_] assign[=] call[name[sam]][name[acols]]
variable[samcols] assign[=] call[name[sam].columns.tolist, parameter[]]
variable[bcols] assign[=] <ast.ListComp object at 0x7da18f58f430>
call[name[sam_]][constant[10]] assign[=] call[name[sam]][call[name[bcols]][constant[0]]]
if compare[call[name[len], parameter[name[bcols]]] greater[>] constant[1]] begin[:]
for taget[name[c]] in starred[call[name[bcols]][<ast.Slice object at 0x7da18f58c5e0>]] begin[:]
call[name[sam_]][constant[10]] assign[=] call[call[name[sam_]][constant[10]].astype, parameter[name[str]]]
call[name[sam]][name[c]] assign[=] call[call[name[sam]][name[c]].astype, parameter[name[str]]]
call[name[sam_]][constant[10]] assign[=] binary_operation[binary_operation[call[name[sam_]][constant[10]] + constant[ ]] + call[name[sam]][name[c]]]
name[sam_].columns assign[=] list[[<ast.Constant object at 0x7da2054a65c0>, <ast.Constant object at 0x7da2054a48b0>, <ast.Constant object at 0x7da2054a66b0>, <ast.Constant object at 0x7da2054a7070>, <ast.Constant object at 0x7da2054a7670>, <ast.Constant object at 0x7da2054a4a30>, <ast.Constant object at 0x7da2054a4b50>, <ast.Constant object at 0x7da18bcca860>, <ast.Constant object at 0x7da18bcc90f0>, <ast.Constant object at 0x7da18bccb730>, <ast.Constant object at 0x7da18bcc9540>]]
if compare[name[header] equal[==] constant[True]] begin[:]
return[tuple[[<ast.Name object at 0x7da18bccba60>, <ast.Name object at 0x7da18bccbb50>]]] | keyword[def] identifier[readSAM] ( identifier[SAMfile] , identifier[header] = keyword[False] ):
literal[string]
keyword[if] identifier[header] == keyword[True] :
identifier[f] = identifier[open] ( identifier[SAMfile] , literal[string] )
identifier[head] =[]
keyword[for] identifier[line] keyword[in] identifier[f] . identifier[readlines] ():
keyword[if] identifier[line] [ literal[int] ]== literal[string] :
identifier[head] . identifier[append] ( identifier[line] )
keyword[else] :
keyword[continue]
identifier[f] . identifier[close] ()
identifier[sam] = identifier[pd] . identifier[read_table] ( identifier[SAMfile] , identifier[sep] = literal[string] , identifier[comment] = literal[string] , identifier[header] = keyword[None] )
identifier[sam] = identifier[pd] . identifier[DataFrame] ( identifier[sam] [ literal[int] ]. identifier[str] . identifier[split] ( literal[string] ). identifier[tolist] ())
identifier[acols] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[sam_] = identifier[sam] [ identifier[acols] ]
identifier[samcols] = identifier[sam] . identifier[columns] . identifier[tolist] ()
identifier[bcols] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[samcols] keyword[if] identifier[s] keyword[not] keyword[in] identifier[acols] ]
identifier[sam_] [ literal[int] ]= identifier[sam] [ identifier[bcols] [ literal[int] ]]
keyword[if] identifier[len] ( identifier[bcols] )> literal[int] :
keyword[for] identifier[c] keyword[in] identifier[bcols] [ literal[int] :]:
identifier[sam_] [ literal[int] ]= identifier[sam_] [ literal[int] ]. identifier[astype] ( identifier[str] )
identifier[sam] [ identifier[c] ]= identifier[sam] [ identifier[c] ]. identifier[astype] ( identifier[str] )
identifier[sam_] [ literal[int] ]= identifier[sam_] [ literal[int] ]+ literal[string] + identifier[sam] [ identifier[c] ]
identifier[sam_] . identifier[columns] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[header] == keyword[True] :
keyword[return] identifier[sam_] , identifier[head]
keyword[else] :
keyword[return] identifier[sam_] | def readSAM(SAMfile, header=False):
"""
Reads and parses a sam file.
:param SAMfile: /path/to/file.sam
:param header: logical, if True, reads the header information
:returns: a pandas dataframe with the respective SAM columns: 'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL' and a list of the headers if header=True
"""
if header == True:
f = open(SAMfile, 'r+')
head = []
for line in f.readlines():
if line[0] == '@':
head.append(line) # depends on [control=['if'], data=[]]
else:
continue # depends on [control=['for'], data=['line']]
f.close() # depends on [control=['if'], data=[]]
sam = pd.read_table(SAMfile, sep='this_gives_one_column', comment='@', header=None)
sam = pd.DataFrame(sam[0].str.split('\t').tolist())
acols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
sam_ = sam[acols]
samcols = sam.columns.tolist()
bcols = [s for s in samcols if s not in acols]
sam_[10] = sam[bcols[0]]
if len(bcols) > 1:
for c in bcols[1:]:
sam_[10] = sam_[10].astype(str)
sam[c] = sam[c].astype(str)
sam_[10] = sam_[10] + '\t' + sam[c] # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=[]]
sam_.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
if header == True:
return (sam_, head) # depends on [control=['if'], data=[]]
else:
return sam_ |
def _match(self, **kwargs):
"""Method which indicates if the object matches specified criteria.
Match accepts criteria as kwargs and looks them up on attributes.
Actual matching is performed with fnmatch, so shell-like wildcards
work within match strings. Examples:
obj._match(AXTitle='Terminal*')
obj._match(AXRole='TextField', AXRoleDescription='search text field')
"""
for k in kwargs.keys():
try:
val = getattr(self, k)
except _a11y.Error:
return False
# Not all values may be strings (e.g. size, position)
if sys.version_info[:2] <= (2, 6):
if isinstance(val, basestring):
if not fnmatch.fnmatch(unicode(val), kwargs[k]):
return False
else:
if val != kwargs[k]:
return False
elif sys.version_info[0] == 3:
if isinstance(val, str):
if not fnmatch.fnmatch(val, str(kwargs[k])):
return False
else:
if val != kwargs[k]:
return False
else:
if isinstance(val, str) or isinstance(val, unicode):
if not fnmatch.fnmatch(val, kwargs[k]):
return False
else:
if val != kwargs[k]:
return False
return True | def function[_match, parameter[self]]:
constant[Method which indicates if the object matches specified criteria.
Match accepts criteria as kwargs and looks them up on attributes.
Actual matching is performed with fnmatch, so shell-like wildcards
work within match strings. Examples:
obj._match(AXTitle='Terminal*')
obj._match(AXRole='TextField', AXRoleDescription='search text field')
]
for taget[name[k]] in starred[call[name[kwargs].keys, parameter[]]] begin[:]
<ast.Try object at 0x7da18f09ee60>
if compare[call[name[sys].version_info][<ast.Slice object at 0x7da18f09e8f0>] less_or_equal[<=] tuple[[<ast.Constant object at 0x7da18f09f670>, <ast.Constant object at 0x7da18f09cb80>]]] begin[:]
if call[name[isinstance], parameter[name[val], name[basestring]]] begin[:]
if <ast.UnaryOp object at 0x7da18f09ebf0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_match] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[k] keyword[in] identifier[kwargs] . identifier[keys] ():
keyword[try] :
identifier[val] = identifier[getattr] ( identifier[self] , identifier[k] )
keyword[except] identifier[_a11y] . identifier[Error] :
keyword[return] keyword[False]
keyword[if] identifier[sys] . identifier[version_info] [: literal[int] ]<=( literal[int] , literal[int] ):
keyword[if] identifier[isinstance] ( identifier[val] , identifier[basestring] ):
keyword[if] keyword[not] identifier[fnmatch] . identifier[fnmatch] ( identifier[unicode] ( identifier[val] ), identifier[kwargs] [ identifier[k] ]):
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[val] != identifier[kwargs] [ identifier[k] ]:
keyword[return] keyword[False]
keyword[elif] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] :
keyword[if] identifier[isinstance] ( identifier[val] , identifier[str] ):
keyword[if] keyword[not] identifier[fnmatch] . identifier[fnmatch] ( identifier[val] , identifier[str] ( identifier[kwargs] [ identifier[k] ])):
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[val] != identifier[kwargs] [ identifier[k] ]:
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[val] , identifier[str] ) keyword[or] identifier[isinstance] ( identifier[val] , identifier[unicode] ):
keyword[if] keyword[not] identifier[fnmatch] . identifier[fnmatch] ( identifier[val] , identifier[kwargs] [ identifier[k] ]):
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[val] != identifier[kwargs] [ identifier[k] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def _match(self, **kwargs):
"""Method which indicates if the object matches specified criteria.
Match accepts criteria as kwargs and looks them up on attributes.
Actual matching is performed with fnmatch, so shell-like wildcards
work within match strings. Examples:
obj._match(AXTitle='Terminal*')
obj._match(AXRole='TextField', AXRoleDescription='search text field')
"""
for k in kwargs.keys():
try:
val = getattr(self, k) # depends on [control=['try'], data=[]]
except _a11y.Error:
return False # depends on [control=['except'], data=[]]
# Not all values may be strings (e.g. size, position)
if sys.version_info[:2] <= (2, 6):
if isinstance(val, basestring):
if not fnmatch.fnmatch(unicode(val), kwargs[k]):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif val != kwargs[k]:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif sys.version_info[0] == 3:
if isinstance(val, str):
if not fnmatch.fnmatch(val, str(kwargs[k])):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif val != kwargs[k]:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(val, str) or isinstance(val, unicode):
if not fnmatch.fnmatch(val, kwargs[k]):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif val != kwargs[k]:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return True |
def _resolve_file(file_name):
"""
Checks if the file exists.
If the file exists, the method returns its absolute path.
Else, it returns None
:param file_name: The name of the file to check
:return: An absolute path, or None
"""
if not file_name:
return None
path = os.path.realpath(file_name)
if os.path.isfile(path):
return path
return None | def function[_resolve_file, parameter[file_name]]:
constant[
Checks if the file exists.
If the file exists, the method returns its absolute path.
Else, it returns None
:param file_name: The name of the file to check
:return: An absolute path, or None
]
if <ast.UnaryOp object at 0x7da1b034a0e0> begin[:]
return[constant[None]]
variable[path] assign[=] call[name[os].path.realpath, parameter[name[file_name]]]
if call[name[os].path.isfile, parameter[name[path]]] begin[:]
return[name[path]]
return[constant[None]] | keyword[def] identifier[_resolve_file] ( identifier[file_name] ):
literal[string]
keyword[if] keyword[not] identifier[file_name] :
keyword[return] keyword[None]
identifier[path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[file_name] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
keyword[return] identifier[path]
keyword[return] keyword[None] | def _resolve_file(file_name):
"""
Checks if the file exists.
If the file exists, the method returns its absolute path.
Else, it returns None
:param file_name: The name of the file to check
:return: An absolute path, or None
"""
if not file_name:
return None # depends on [control=['if'], data=[]]
path = os.path.realpath(file_name)
if os.path.isfile(path):
return path # depends on [control=['if'], data=[]]
return None |
def _update_cache(self):
    """
    Rebuild the merged registry if any of the underlying registries changed.
    """
    # Cheap change detection: a tuple of every registry's version counter,
    # with the extra registry's counter appended last.
    versions = [reg._version for reg in self.registries]
    versions.append(self._extra_registry._version)
    expected_version = tuple(versions)
    if self._last_version == expected_version:
        return
    # Something changed: rebuild the merged registry from scratch,
    # preserving registry order, with the extra registry's bindings last.
    merged = Registry()
    for reg in self.registries:
        merged.key_bindings.extend(reg.key_bindings)
    merged.key_bindings.extend(self._extra_registry.key_bindings)
    self._registry2 = merged
    self._last_version = expected_version
self._last_version = expected_version | def function[_update_cache, parameter[self]]:
constant[
If one of the original registries was changed. Update our merged
version.
]
variable[expected_version] assign[=] binary_operation[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b063df00>]] + tuple[[<ast.Attribute object at 0x7da1b063e020>]]]
if compare[name[self]._last_version not_equal[!=] name[expected_version]] begin[:]
variable[registry2] assign[=] call[name[Registry], parameter[]]
for taget[name[reg]] in starred[name[self].registries] begin[:]
call[name[registry2].key_bindings.extend, parameter[name[reg].key_bindings]]
call[name[registry2].key_bindings.extend, parameter[name[self]._extra_registry.key_bindings]]
name[self]._registry2 assign[=] name[registry2]
name[self]._last_version assign[=] name[expected_version] | keyword[def] identifier[_update_cache] ( identifier[self] ):
literal[string]
identifier[expected_version] =(
identifier[tuple] ( identifier[r] . identifier[_version] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[registries] )+
( identifier[self] . identifier[_extra_registry] . identifier[_version] ,))
keyword[if] identifier[self] . identifier[_last_version] != identifier[expected_version] :
identifier[registry2] = identifier[Registry] ()
keyword[for] identifier[reg] keyword[in] identifier[self] . identifier[registries] :
identifier[registry2] . identifier[key_bindings] . identifier[extend] ( identifier[reg] . identifier[key_bindings] )
identifier[registry2] . identifier[key_bindings] . identifier[extend] ( identifier[self] . identifier[_extra_registry] . identifier[key_bindings] )
identifier[self] . identifier[_registry2] = identifier[registry2]
identifier[self] . identifier[_last_version] = identifier[expected_version] | def _update_cache(self):
"""
If one of the original registries was changed. Update our merged
version.
"""
expected_version = tuple((r._version for r in self.registries)) + (self._extra_registry._version,)
if self._last_version != expected_version:
registry2 = Registry()
for reg in self.registries:
registry2.key_bindings.extend(reg.key_bindings) # depends on [control=['for'], data=['reg']]
# Copy all bindings from `self._extra_registry`.
registry2.key_bindings.extend(self._extra_registry.key_bindings)
self._registry2 = registry2
self._last_version = expected_version # depends on [control=['if'], data=['expected_version']] |
def setEditable(self, state):
    """
    Sets whether or not this combo box is editable.

    :param state | <bool>
    """
    self._editable = state
    # Propagate the flag to every child combo box, in display order.
    for child_combo in (self._hourCombo,
                        self._minuteCombo,
                        self._secondCombo,
                        self._timeOfDayCombo):
        child_combo.setEditable(state)
self._timeOfDayCombo.setEditable(state) | def function[setEditable, parameter[self, state]]:
constant[
Sets whether or not this combo box is editable.
:param state | <bool>
]
name[self]._editable assign[=] name[state]
call[name[self]._hourCombo.setEditable, parameter[name[state]]]
call[name[self]._minuteCombo.setEditable, parameter[name[state]]]
call[name[self]._secondCombo.setEditable, parameter[name[state]]]
call[name[self]._timeOfDayCombo.setEditable, parameter[name[state]]] | keyword[def] identifier[setEditable] ( identifier[self] , identifier[state] ):
literal[string]
identifier[self] . identifier[_editable] = identifier[state]
identifier[self] . identifier[_hourCombo] . identifier[setEditable] ( identifier[state] )
identifier[self] . identifier[_minuteCombo] . identifier[setEditable] ( identifier[state] )
identifier[self] . identifier[_secondCombo] . identifier[setEditable] ( identifier[state] )
identifier[self] . identifier[_timeOfDayCombo] . identifier[setEditable] ( identifier[state] ) | def setEditable(self, state):
"""
Sets whether or not this combo box is editable.
:param state | <bool>
"""
self._editable = state
self._hourCombo.setEditable(state)
self._minuteCombo.setEditable(state)
self._secondCombo.setEditable(state)
self._timeOfDayCombo.setEditable(state) |
def _build_autoload_details(self, autoload_data, relative_path=""):
    """ Build autoload details

    Folds the attributes and child resources of ``autoload_data`` into
    ``self._autoload_details``; each child resource is then processed
    recursively under its full relative address.

    :param autoload_data: autoload resource whose ``attributes`` and
        ``resources`` members are appended to the details
    :param relative_path: str: full relative path of current autoload resource
    """
    # Turn every attribute of this resource into an AutoLoadAttribute
    # anchored at the current relative address.
    # NOTE(review): .iteritems() is Python 2 only — a plain dict here would
    # raise AttributeError under Python 3; confirm the intended runtime.
    self._autoload_details.attributes.extend([AutoLoadAttribute(relative_address=relative_path,
                                                                attribute_name=attribute_name,
                                                                attribute_value=attribute_value)
                                              for attribute_name, attribute_value in
                                              autoload_data.attributes.iteritems()])
    # Register each validated child resource; posixpath.join keeps the
    # relative-address separator a forward slash on every platform.
    for resource_relative_path, resource in self._validate_build_resource_structure(autoload_data.resources).iteritems():
        full_relative_path = posixpath.join(relative_path, resource_relative_path)
        self._autoload_details.resources.append(AutoLoadResource(model=resource.cloudshell_model_name,
                                                                 name=resource.name,
                                                                 relative_address=full_relative_path,
                                                                 unique_identifier=resource.unique_identifier))
self._build_autoload_details(autoload_data=resource, relative_path=full_relative_path) | def function[_build_autoload_details, parameter[self, autoload_data, relative_path]]:
constant[ Build autoload details
:param autoload_data: dict:
:param relative_path: str: full relative path of current autoload resource
]
call[name[self]._autoload_details.attributes.extend, parameter[<ast.ListComp object at 0x7da20e9b2560>]]
for taget[tuple[[<ast.Name object at 0x7da1b1a3c0a0>, <ast.Name object at 0x7da1b1a3c370>]]] in starred[call[call[name[self]._validate_build_resource_structure, parameter[name[autoload_data].resources]].iteritems, parameter[]]] begin[:]
variable[full_relative_path] assign[=] call[name[posixpath].join, parameter[name[relative_path], name[resource_relative_path]]]
call[name[self]._autoload_details.resources.append, parameter[call[name[AutoLoadResource], parameter[]]]]
call[name[self]._build_autoload_details, parameter[]] | keyword[def] identifier[_build_autoload_details] ( identifier[self] , identifier[autoload_data] , identifier[relative_path] = literal[string] ):
literal[string]
identifier[self] . identifier[_autoload_details] . identifier[attributes] . identifier[extend] ([ identifier[AutoLoadAttribute] ( identifier[relative_address] = identifier[relative_path] ,
identifier[attribute_name] = identifier[attribute_name] ,
identifier[attribute_value] = identifier[attribute_value] )
keyword[for] identifier[attribute_name] , identifier[attribute_value] keyword[in]
identifier[autoload_data] . identifier[attributes] . identifier[iteritems] ()])
keyword[for] identifier[resource_relative_path] , identifier[resource] keyword[in] identifier[self] . identifier[_validate_build_resource_structure] ( identifier[autoload_data] . identifier[resources] ). identifier[iteritems] ():
identifier[full_relative_path] = identifier[posixpath] . identifier[join] ( identifier[relative_path] , identifier[resource_relative_path] )
identifier[self] . identifier[_autoload_details] . identifier[resources] . identifier[append] ( identifier[AutoLoadResource] ( identifier[model] = identifier[resource] . identifier[cloudshell_model_name] ,
identifier[name] = identifier[resource] . identifier[name] ,
identifier[relative_address] = identifier[full_relative_path] ,
identifier[unique_identifier] = identifier[resource] . identifier[unique_identifier] ))
identifier[self] . identifier[_build_autoload_details] ( identifier[autoload_data] = identifier[resource] , identifier[relative_path] = identifier[full_relative_path] ) | def _build_autoload_details(self, autoload_data, relative_path=''):
""" Build autoload details
:param autoload_data: dict:
:param relative_path: str: full relative path of current autoload resource
"""
self._autoload_details.attributes.extend([AutoLoadAttribute(relative_address=relative_path, attribute_name=attribute_name, attribute_value=attribute_value) for (attribute_name, attribute_value) in autoload_data.attributes.iteritems()])
for (resource_relative_path, resource) in self._validate_build_resource_structure(autoload_data.resources).iteritems():
full_relative_path = posixpath.join(relative_path, resource_relative_path)
self._autoload_details.resources.append(AutoLoadResource(model=resource.cloudshell_model_name, name=resource.name, relative_address=full_relative_path, unique_identifier=resource.unique_identifier))
self._build_autoload_details(autoload_data=resource, relative_path=full_relative_path) # depends on [control=['for'], data=[]] |
def search_user_group_for_facets(self, **kwargs):  # noqa: E501
    """Lists the values of one or more facets over the customer's user groups  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_user_group_for_facets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param FacetsSearchRequestContainer body:
    :return: ResponseContainerFacetsResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a request thread) or sync (the response data), the
    # *_with_http_info variant produces exactly the value to hand back.
    return self.search_user_group_for_facets_with_http_info(**kwargs)  # noqa: E501
return data | def function[search_user_group_for_facets, parameter[self]]:
constant[Lists the values of one or more facets over the customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].search_user_group_for_facets_with_http_info, parameter[]]] | keyword[def] identifier[search_user_group_for_facets] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[search_user_group_for_facets_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[search_user_group_for_facets_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def search_user_group_for_facets(self, **kwargs): # noqa: E501
"Lists the values of one or more facets over the customer's user groups # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.search_user_group_for_facets(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param FacetsSearchRequestContainer body:\n :return: ResponseContainerFacetsResponseContainer\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501
return data |
def load_cash_balances_with_children(self, root_account_fullname: str):
    """Load cash balances for every child account, grouped by currency."""
    assert isinstance(root_account_fullname, str)
    accounts_svc = AccountsAggregate(self.book)
    root_account = accounts_svc.get_by_fullname(root_account_fullname)
    if not root_account:
        raise ValueError("Account not found", root_account_fullname)
    # Flatten the whole subtree below the root account.
    child_accounts = self.__get_all_child_accounts_as_array(root_account)
    # model maps currency symbol -> {"name", "total", "rows"}.
    model = {}
    for account in child_accounts:
        # Only non-placeholder accounts denominated in a currency hold cash.
        if account.commodity.namespace != "CURRENCY" or account.placeholder:
            continue
        symbol = account.commodity.mnemonic
        currency_record = model.get(symbol)
        if currency_record is None:
            # First account seen in this currency: start a new branch.
            currency_record = {"name": symbol, "total": 0, "rows": []}
            model[symbol] = currency_record
        balance = account.get_balance()
        currency_record["rows"].append({
            "name": account.name,
            "fullname": account.fullname,
            "currency": symbol,
            "balance": balance,
        })
        # Accumulate the per-currency total with Decimal arithmetic.
        currency_record["total"] = Decimal(currency_record["total"]) + balance
    return model
return model | def function[load_cash_balances_with_children, parameter[self, root_account_fullname]]:
constant[ loads data for cash balances ]
assert[call[name[isinstance], parameter[name[root_account_fullname], name[str]]]]
variable[svc] assign[=] call[name[AccountsAggregate], parameter[name[self].book]]
variable[root_account] assign[=] call[name[svc].get_by_fullname, parameter[name[root_account_fullname]]]
if <ast.UnaryOp object at 0x7da1b12881f0> begin[:]
<ast.Raise object at 0x7da1b128a530>
variable[accounts] assign[=] call[name[self].__get_all_child_accounts_as_array, parameter[name[root_account]]]
variable[model] assign[=] dictionary[[], []]
for taget[name[account]] in starred[name[accounts]] begin[:]
if <ast.BoolOp object at 0x7da1b128ae30> begin[:]
continue
variable[currency_symbol] assign[=] name[account].commodity.mnemonic
if <ast.UnaryOp object at 0x7da1b1289ed0> begin[:]
variable[currency_record] assign[=] dictionary[[<ast.Constant object at 0x7da1b128ad10>, <ast.Constant object at 0x7da1b12885b0>, <ast.Constant object at 0x7da1b128b4c0>], [<ast.Name object at 0x7da1b128b5b0>, <ast.Constant object at 0x7da1b128a2c0>, <ast.List object at 0x7da1b1288130>]]
call[name[model]][name[currency_symbol]] assign[=] name[currency_record]
variable[balance] assign[=] call[name[account].get_balance, parameter[]]
variable[row] assign[=] dictionary[[<ast.Constant object at 0x7da1b1289240>, <ast.Constant object at 0x7da1b128b8b0>, <ast.Constant object at 0x7da1b1289420>, <ast.Constant object at 0x7da1b128af20>], [<ast.Attribute object at 0x7da1b128ae60>, <ast.Attribute object at 0x7da1b128ad70>, <ast.Name object at 0x7da1b128bb80>, <ast.Name object at 0x7da1b128a140>]]
call[call[name[currency_record]][constant[rows]].append, parameter[name[row]]]
variable[total] assign[=] call[name[Decimal], parameter[call[name[currency_record]][constant[total]]]]
<ast.AugAssign object at 0x7da1b1288b80>
call[name[currency_record]][constant[total]] assign[=] name[total]
return[name[model]] | keyword[def] identifier[load_cash_balances_with_children] ( identifier[self] , identifier[root_account_fullname] : identifier[str] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[root_account_fullname] , identifier[str] )
identifier[svc] = identifier[AccountsAggregate] ( identifier[self] . identifier[book] )
identifier[root_account] = identifier[svc] . identifier[get_by_fullname] ( identifier[root_account_fullname] )
keyword[if] keyword[not] identifier[root_account] :
keyword[raise] identifier[ValueError] ( literal[string] , identifier[root_account_fullname] )
identifier[accounts] = identifier[self] . identifier[__get_all_child_accounts_as_array] ( identifier[root_account] )
identifier[model] ={}
keyword[for] identifier[account] keyword[in] identifier[accounts] :
keyword[if] identifier[account] . identifier[commodity] . identifier[namespace] != literal[string] keyword[or] identifier[account] . identifier[placeholder] :
keyword[continue]
identifier[currency_symbol] = identifier[account] . identifier[commodity] . identifier[mnemonic]
keyword[if] keyword[not] identifier[currency_symbol] keyword[in] identifier[model] :
identifier[currency_record] ={
literal[string] : identifier[currency_symbol] ,
literal[string] : literal[int] ,
literal[string] :[]
}
identifier[model] [ identifier[currency_symbol] ]= identifier[currency_record]
keyword[else] :
identifier[currency_record] = identifier[model] [ identifier[currency_symbol] ]
identifier[balance] = identifier[account] . identifier[get_balance] ()
identifier[row] ={
literal[string] : identifier[account] . identifier[name] ,
literal[string] : identifier[account] . identifier[fullname] ,
literal[string] : identifier[currency_symbol] ,
literal[string] : identifier[balance]
}
identifier[currency_record] [ literal[string] ]. identifier[append] ( identifier[row] )
identifier[total] = identifier[Decimal] ( identifier[currency_record] [ literal[string] ])
identifier[total] += identifier[balance]
identifier[currency_record] [ literal[string] ]= identifier[total]
keyword[return] identifier[model] | def load_cash_balances_with_children(self, root_account_fullname: str):
""" loads data for cash balances """
assert isinstance(root_account_fullname, str)
svc = AccountsAggregate(self.book)
root_account = svc.get_by_fullname(root_account_fullname)
if not root_account:
raise ValueError('Account not found', root_account_fullname) # depends on [control=['if'], data=[]]
accounts = self.__get_all_child_accounts_as_array(root_account)
# read cash balances
model = {}
for account in accounts:
if account.commodity.namespace != 'CURRENCY' or account.placeholder:
continue # depends on [control=['if'], data=[]]
# separate per currency
currency_symbol = account.commodity.mnemonic
if not currency_symbol in model:
# Add the currency branch.
currency_record = {'name': currency_symbol, 'total': 0, 'rows': []}
# Append to the root.
model[currency_symbol] = currency_record # depends on [control=['if'], data=[]]
else:
currency_record = model[currency_symbol]
#acct_svc = AccountAggregate(self.book, account)
balance = account.get_balance()
row = {'name': account.name, 'fullname': account.fullname, 'currency': currency_symbol, 'balance': balance}
currency_record['rows'].append(row)
# add to total
total = Decimal(currency_record['total'])
total += balance
currency_record['total'] = total # depends on [control=['for'], data=['account']]
return model |
def _exception_converter(callable: Callable) -> Callable:
"""
Decorator that converts exceptions from underlying libraries to native exceptions.
:param callable: the callable to convert exceptions of
:return: wrapped callable
"""
def wrapped(*args, **kwargs) -> Any:
try:
return callable(*args, **kwargs)
except ConsulLockBaseError as e:
raise e
except ACLPermissionDenied as e:
raise PermissionDeniedConsulError() from e
except ConsulException as e:
raise ConsulConnectionError() from e
return wrapped | def function[_exception_converter, parameter[callable]]:
constant[
Decorator that converts exceptions from underlying libraries to native exceptions.
:param callable: the callable to convert exceptions of
:return: wrapped callable
]
def function[wrapped, parameter[]]:
<ast.Try object at 0x7da20c6c6cb0>
return[name[wrapped]] | keyword[def] identifier[_exception_converter] ( identifier[callable] : identifier[Callable] )-> identifier[Callable] :
literal[string]
keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] )-> identifier[Any] :
keyword[try] :
keyword[return] identifier[callable] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[ConsulLockBaseError] keyword[as] identifier[e] :
keyword[raise] identifier[e]
keyword[except] identifier[ACLPermissionDenied] keyword[as] identifier[e] :
keyword[raise] identifier[PermissionDeniedConsulError] () keyword[from] identifier[e]
keyword[except] identifier[ConsulException] keyword[as] identifier[e] :
keyword[raise] identifier[ConsulConnectionError] () keyword[from] identifier[e]
keyword[return] identifier[wrapped] | def _exception_converter(callable: Callable) -> Callable:
"""
Decorator that converts exceptions from underlying libraries to native exceptions.
:param callable: the callable to convert exceptions of
:return: wrapped callable
"""
def wrapped(*args, **kwargs) -> Any:
try:
return callable(*args, **kwargs) # depends on [control=['try'], data=[]]
except ConsulLockBaseError as e:
raise e # depends on [control=['except'], data=['e']]
except ACLPermissionDenied as e:
raise PermissionDeniedConsulError() from e # depends on [control=['except'], data=['e']]
except ConsulException as e:
raise ConsulConnectionError() from e # depends on [control=['except'], data=['e']]
return wrapped |
def solve_with_sdpa(sdp, solverparameters=None):
"""Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
"""
solverexecutable = detect_sdpa(solverparameters)
if solverexecutable is None:
raise OSError("SDPA is not in the path or the executable provided is" +
" not correct")
primal, dual = 0, 0
tempfile_ = tempfile.NamedTemporaryFile()
tmp_filename = tempfile_.name
tempfile_.close()
tmp_dats_filename = tmp_filename + ".dat-s"
tmp_out_filename = tmp_filename + ".out"
write_to_sdpa(sdp, tmp_dats_filename)
command_line = [solverexecutable, "-ds", tmp_dats_filename,
"-o", tmp_out_filename]
if solverparameters is not None:
for key, value in list(solverparameters.items()):
if key == "executable":
continue
elif key == "paramsfile":
command_line.extend(["-p", value])
else:
raise ValueError("Unknown parameter for SDPA: " + key)
if sdp.verbose < 1:
with open(os.devnull, "w") as fnull:
call(command_line, stdout=fnull, stderr=fnull)
else:
call(command_line)
primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True,
True)
if sdp.verbose < 2:
os.remove(tmp_dats_filename)
os.remove(tmp_out_filename)
return primal+sdp.constant_term, \
dual+sdp.constant_term, x_mat, y_mat, status | def function[solve_with_sdpa, parameter[sdp, solverparameters]]:
constant[Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
]
variable[solverexecutable] assign[=] call[name[detect_sdpa], parameter[name[solverparameters]]]
if compare[name[solverexecutable] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0f0c2b0>
<ast.Tuple object at 0x7da1b0f0c610> assign[=] tuple[[<ast.Constant object at 0x7da1b0f0c400>, <ast.Constant object at 0x7da1b0f0dc60>]]
variable[tempfile_] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
variable[tmp_filename] assign[=] name[tempfile_].name
call[name[tempfile_].close, parameter[]]
variable[tmp_dats_filename] assign[=] binary_operation[name[tmp_filename] + constant[.dat-s]]
variable[tmp_out_filename] assign[=] binary_operation[name[tmp_filename] + constant[.out]]
call[name[write_to_sdpa], parameter[name[sdp], name[tmp_dats_filename]]]
variable[command_line] assign[=] list[[<ast.Name object at 0x7da1b0f0d120>, <ast.Constant object at 0x7da1b0f0e410>, <ast.Name object at 0x7da1b0f0da20>, <ast.Constant object at 0x7da1b0f0dba0>, <ast.Name object at 0x7da1b0f0dc00>]]
if compare[name[solverparameters] is_not constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0f0c7c0>, <ast.Name object at 0x7da1b0f0cb80>]]] in starred[call[name[list], parameter[call[name[solverparameters].items, parameter[]]]]] begin[:]
if compare[name[key] equal[==] constant[executable]] begin[:]
continue
if compare[name[sdp].verbose less[<] constant[1]] begin[:]
with call[name[open], parameter[name[os].devnull, constant[w]]] begin[:]
call[name[call], parameter[name[command_line]]]
<ast.Tuple object at 0x7da1b0f0cac0> assign[=] call[name[read_sdpa_out], parameter[name[tmp_out_filename], constant[True], constant[True]]]
if compare[name[sdp].verbose less[<] constant[2]] begin[:]
call[name[os].remove, parameter[name[tmp_dats_filename]]]
call[name[os].remove, parameter[name[tmp_out_filename]]]
return[tuple[[<ast.BinOp object at 0x7da1b0f0c100>, <ast.BinOp object at 0x7da1b0f0c6d0>, <ast.Name object at 0x7da1b0f0e3b0>, <ast.Name object at 0x7da1b0f0d390>, <ast.Name object at 0x7da1b0f0c130>]]] | keyword[def] identifier[solve_with_sdpa] ( identifier[sdp] , identifier[solverparameters] = keyword[None] ):
literal[string]
identifier[solverexecutable] = identifier[detect_sdpa] ( identifier[solverparameters] )
keyword[if] identifier[solverexecutable] keyword[is] keyword[None] :
keyword[raise] identifier[OSError] ( literal[string] +
literal[string] )
identifier[primal] , identifier[dual] = literal[int] , literal[int]
identifier[tempfile_] = identifier[tempfile] . identifier[NamedTemporaryFile] ()
identifier[tmp_filename] = identifier[tempfile_] . identifier[name]
identifier[tempfile_] . identifier[close] ()
identifier[tmp_dats_filename] = identifier[tmp_filename] + literal[string]
identifier[tmp_out_filename] = identifier[tmp_filename] + literal[string]
identifier[write_to_sdpa] ( identifier[sdp] , identifier[tmp_dats_filename] )
identifier[command_line] =[ identifier[solverexecutable] , literal[string] , identifier[tmp_dats_filename] ,
literal[string] , identifier[tmp_out_filename] ]
keyword[if] identifier[solverparameters] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[list] ( identifier[solverparameters] . identifier[items] ()):
keyword[if] identifier[key] == literal[string] :
keyword[continue]
keyword[elif] identifier[key] == literal[string] :
identifier[command_line] . identifier[extend] ([ literal[string] , identifier[value] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[key] )
keyword[if] identifier[sdp] . identifier[verbose] < literal[int] :
keyword[with] identifier[open] ( identifier[os] . identifier[devnull] , literal[string] ) keyword[as] identifier[fnull] :
identifier[call] ( identifier[command_line] , identifier[stdout] = identifier[fnull] , identifier[stderr] = identifier[fnull] )
keyword[else] :
identifier[call] ( identifier[command_line] )
identifier[primal] , identifier[dual] , identifier[x_mat] , identifier[y_mat] , identifier[status] = identifier[read_sdpa_out] ( identifier[tmp_out_filename] , keyword[True] ,
keyword[True] )
keyword[if] identifier[sdp] . identifier[verbose] < literal[int] :
identifier[os] . identifier[remove] ( identifier[tmp_dats_filename] )
identifier[os] . identifier[remove] ( identifier[tmp_out_filename] )
keyword[return] identifier[primal] + identifier[sdp] . identifier[constant_term] , identifier[dual] + identifier[sdp] . identifier[constant_term] , identifier[x_mat] , identifier[y_mat] , identifier[status] | def solve_with_sdpa(sdp, solverparameters=None):
"""Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
"""
solverexecutable = detect_sdpa(solverparameters)
if solverexecutable is None:
raise OSError('SDPA is not in the path or the executable provided is' + ' not correct') # depends on [control=['if'], data=[]]
(primal, dual) = (0, 0)
tempfile_ = tempfile.NamedTemporaryFile()
tmp_filename = tempfile_.name
tempfile_.close()
tmp_dats_filename = tmp_filename + '.dat-s'
tmp_out_filename = tmp_filename + '.out'
write_to_sdpa(sdp, tmp_dats_filename)
command_line = [solverexecutable, '-ds', tmp_dats_filename, '-o', tmp_out_filename]
if solverparameters is not None:
for (key, value) in list(solverparameters.items()):
if key == 'executable':
continue # depends on [control=['if'], data=[]]
elif key == 'paramsfile':
command_line.extend(['-p', value]) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown parameter for SDPA: ' + key) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['solverparameters']]
if sdp.verbose < 1:
with open(os.devnull, 'w') as fnull:
call(command_line, stdout=fnull, stderr=fnull) # depends on [control=['with'], data=['fnull']] # depends on [control=['if'], data=[]]
else:
call(command_line)
(primal, dual, x_mat, y_mat, status) = read_sdpa_out(tmp_out_filename, True, True)
if sdp.verbose < 2:
os.remove(tmp_dats_filename)
os.remove(tmp_out_filename) # depends on [control=['if'], data=[]]
return (primal + sdp.constant_term, dual + sdp.constant_term, x_mat, y_mat, status) |
def knob_end(self):
""" Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom.
Returns CA coordinates for GLY.
"""
side_chain_atoms = self.knob_residue.side_chain
if not side_chain_atoms:
return self.knob_residue['CA']
distances = [distance(self.knob_residue['CB'], x) for x in side_chain_atoms]
max_d = max(distances)
knob_end_atoms = [atom for atom, d in zip(side_chain_atoms, distances) if d == max_d]
if len(knob_end_atoms) == 1:
return knob_end_atoms[0]._vector
else:
return numpy.mean([x._vector for x in knob_end_atoms], axis=0) | def function[knob_end, parameter[self]]:
constant[ Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom.
Returns CA coordinates for GLY.
]
variable[side_chain_atoms] assign[=] name[self].knob_residue.side_chain
if <ast.UnaryOp object at 0x7da1b2627f10> begin[:]
return[call[name[self].knob_residue][constant[CA]]]
variable[distances] assign[=] <ast.ListComp object at 0x7da1b2629c90>
variable[max_d] assign[=] call[name[max], parameter[name[distances]]]
variable[knob_end_atoms] assign[=] <ast.ListComp object at 0x7da1b2629db0>
if compare[call[name[len], parameter[name[knob_end_atoms]]] equal[==] constant[1]] begin[:]
return[call[name[knob_end_atoms]][constant[0]]._vector] | keyword[def] identifier[knob_end] ( identifier[self] ):
literal[string]
identifier[side_chain_atoms] = identifier[self] . identifier[knob_residue] . identifier[side_chain]
keyword[if] keyword[not] identifier[side_chain_atoms] :
keyword[return] identifier[self] . identifier[knob_residue] [ literal[string] ]
identifier[distances] =[ identifier[distance] ( identifier[self] . identifier[knob_residue] [ literal[string] ], identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[side_chain_atoms] ]
identifier[max_d] = identifier[max] ( identifier[distances] )
identifier[knob_end_atoms] =[ identifier[atom] keyword[for] identifier[atom] , identifier[d] keyword[in] identifier[zip] ( identifier[side_chain_atoms] , identifier[distances] ) keyword[if] identifier[d] == identifier[max_d] ]
keyword[if] identifier[len] ( identifier[knob_end_atoms] )== literal[int] :
keyword[return] identifier[knob_end_atoms] [ literal[int] ]. identifier[_vector]
keyword[else] :
keyword[return] identifier[numpy] . identifier[mean] ([ identifier[x] . identifier[_vector] keyword[for] identifier[x] keyword[in] identifier[knob_end_atoms] ], identifier[axis] = literal[int] ) | def knob_end(self):
""" Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom.
Returns CA coordinates for GLY.
"""
side_chain_atoms = self.knob_residue.side_chain
if not side_chain_atoms:
return self.knob_residue['CA'] # depends on [control=['if'], data=[]]
distances = [distance(self.knob_residue['CB'], x) for x in side_chain_atoms]
max_d = max(distances)
knob_end_atoms = [atom for (atom, d) in zip(side_chain_atoms, distances) if d == max_d]
if len(knob_end_atoms) == 1:
return knob_end_atoms[0]._vector # depends on [control=['if'], data=[]]
else:
return numpy.mean([x._vector for x in knob_end_atoms], axis=0) |
def query_record_report_this(comID):
"""
Increment the number of reports for a comment
:param comID: comment id
:return: tuple (success, new_total_nb_reports_for_this_comment) where
success is integer 1 if success, integer 0 if not, -2 if comment does not exist
"""
# retrieve nb_abuse_reports
query1 = """SELECT nb_abuse_reports FROM "cmtRECORDCOMMENT" WHERE id=%s"""
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return (-2, 0)
#increment and update
nb_abuse_reports = int(res1[0][0]) + 1
query2 = """UPDATE "cmtRECORDCOMMENT" SET nb_abuse_reports=%s WHERE id=%s"""
params2 = (nb_abuse_reports, comID)
res2 = run_sql(query2, params2)
return (int(res2), nb_abuse_reports) | def function[query_record_report_this, parameter[comID]]:
constant[
Increment the number of reports for a comment
:param comID: comment id
:return: tuple (success, new_total_nb_reports_for_this_comment) where
success is integer 1 if success, integer 0 if not, -2 if comment does not exist
]
variable[query1] assign[=] constant[SELECT nb_abuse_reports FROM "cmtRECORDCOMMENT" WHERE id=%s]
variable[params1] assign[=] tuple[[<ast.Name object at 0x7da18f8104f0>]]
variable[res1] assign[=] call[name[run_sql], parameter[name[query1], name[params1]]]
if compare[call[name[len], parameter[name[res1]]] equal[==] constant[0]] begin[:]
return[tuple[[<ast.UnaryOp object at 0x7da207f00d00>, <ast.Constant object at 0x7da207f02f50>]]]
variable[nb_abuse_reports] assign[=] binary_operation[call[name[int], parameter[call[call[name[res1]][constant[0]]][constant[0]]]] + constant[1]]
variable[query2] assign[=] constant[UPDATE "cmtRECORDCOMMENT" SET nb_abuse_reports=%s WHERE id=%s]
variable[params2] assign[=] tuple[[<ast.Name object at 0x7da207f01360>, <ast.Name object at 0x7da207f027a0>]]
variable[res2] assign[=] call[name[run_sql], parameter[name[query2], name[params2]]]
return[tuple[[<ast.Call object at 0x7da207f01ae0>, <ast.Name object at 0x7da207f01d50>]]] | keyword[def] identifier[query_record_report_this] ( identifier[comID] ):
literal[string]
identifier[query1] = literal[string]
identifier[params1] =( identifier[comID] ,)
identifier[res1] = identifier[run_sql] ( identifier[query1] , identifier[params1] )
keyword[if] identifier[len] ( identifier[res1] )== literal[int] :
keyword[return] (- literal[int] , literal[int] )
identifier[nb_abuse_reports] = identifier[int] ( identifier[res1] [ literal[int] ][ literal[int] ])+ literal[int]
identifier[query2] = literal[string]
identifier[params2] =( identifier[nb_abuse_reports] , identifier[comID] )
identifier[res2] = identifier[run_sql] ( identifier[query2] , identifier[params2] )
keyword[return] ( identifier[int] ( identifier[res2] ), identifier[nb_abuse_reports] ) | def query_record_report_this(comID):
"""
Increment the number of reports for a comment
:param comID: comment id
:return: tuple (success, new_total_nb_reports_for_this_comment) where
success is integer 1 if success, integer 0 if not, -2 if comment does not exist
"""
# retrieve nb_abuse_reports
query1 = 'SELECT nb_abuse_reports FROM "cmtRECORDCOMMENT" WHERE id=%s'
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return (-2, 0) # depends on [control=['if'], data=[]]
#increment and update
nb_abuse_reports = int(res1[0][0]) + 1
query2 = 'UPDATE "cmtRECORDCOMMENT" SET nb_abuse_reports=%s WHERE id=%s'
params2 = (nb_abuse_reports, comID)
res2 = run_sql(query2, params2)
return (int(res2), nb_abuse_reports) |
def authors(self):
"""Authors.
:return:
Returns of list of authors
"""
result = []
authors = self._safe_get_element('ItemAttributes.Author')
if authors is not None:
for author in authors:
result.append(author.text)
return result | def function[authors, parameter[self]]:
constant[Authors.
:return:
Returns of list of authors
]
variable[result] assign[=] list[[]]
variable[authors] assign[=] call[name[self]._safe_get_element, parameter[constant[ItemAttributes.Author]]]
if compare[name[authors] is_not constant[None]] begin[:]
for taget[name[author]] in starred[name[authors]] begin[:]
call[name[result].append, parameter[name[author].text]]
return[name[result]] | keyword[def] identifier[authors] ( identifier[self] ):
literal[string]
identifier[result] =[]
identifier[authors] = identifier[self] . identifier[_safe_get_element] ( literal[string] )
keyword[if] identifier[authors] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[author] keyword[in] identifier[authors] :
identifier[result] . identifier[append] ( identifier[author] . identifier[text] )
keyword[return] identifier[result] | def authors(self):
"""Authors.
:return:
Returns of list of authors
"""
result = []
authors = self._safe_get_element('ItemAttributes.Author')
if authors is not None:
for author in authors:
result.append(author.text) # depends on [control=['for'], data=['author']] # depends on [control=['if'], data=['authors']]
return result |
def timestamp_to_str(t, datetime_format=DATETIME_FORMAT, *, inverse=False):
"""
Given a POSIX timestamp (integer) ``t``,
format it as a datetime string in the given format.
If ``inverse``, then do the inverse, that is, assume ``t`` is
a datetime string in the given format and return its corresponding
timestamp.
If ``format is None``, then return ``t`` as a string
(if not ``inverse``) or as an integer (if ``inverse``) directly.
"""
if not inverse:
if datetime_format is None:
result = str(t)
else:
result = dt.datetime.fromtimestamp(t).strftime(datetime_format)
else:
if format is None:
result = int(t)
else:
result = dt.datetime.strptime(t, datetime_format).timestamp()
return result | def function[timestamp_to_str, parameter[t, datetime_format]]:
constant[
Given a POSIX timestamp (integer) ``t``,
format it as a datetime string in the given format.
If ``inverse``, then do the inverse, that is, assume ``t`` is
a datetime string in the given format and return its corresponding
timestamp.
If ``format is None``, then return ``t`` as a string
(if not ``inverse``) or as an integer (if ``inverse``) directly.
]
if <ast.UnaryOp object at 0x7da1b15f06d0> begin[:]
if compare[name[datetime_format] is constant[None]] begin[:]
variable[result] assign[=] call[name[str], parameter[name[t]]]
return[name[result]] | keyword[def] identifier[timestamp_to_str] ( identifier[t] , identifier[datetime_format] = identifier[DATETIME_FORMAT] ,*, identifier[inverse] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[inverse] :
keyword[if] identifier[datetime_format] keyword[is] keyword[None] :
identifier[result] = identifier[str] ( identifier[t] )
keyword[else] :
identifier[result] = identifier[dt] . identifier[datetime] . identifier[fromtimestamp] ( identifier[t] ). identifier[strftime] ( identifier[datetime_format] )
keyword[else] :
keyword[if] identifier[format] keyword[is] keyword[None] :
identifier[result] = identifier[int] ( identifier[t] )
keyword[else] :
identifier[result] = identifier[dt] . identifier[datetime] . identifier[strptime] ( identifier[t] , identifier[datetime_format] ). identifier[timestamp] ()
keyword[return] identifier[result] | def timestamp_to_str(t, datetime_format=DATETIME_FORMAT, *, inverse=False):
"""
Given a POSIX timestamp (integer) ``t``,
format it as a datetime string in the given format.
If ``inverse``, then do the inverse, that is, assume ``t`` is
a datetime string in the given format and return its corresponding
timestamp.
If ``format is None``, then return ``t`` as a string
(if not ``inverse``) or as an integer (if ``inverse``) directly.
"""
if not inverse:
if datetime_format is None:
result = str(t) # depends on [control=['if'], data=[]]
else:
result = dt.datetime.fromtimestamp(t).strftime(datetime_format) # depends on [control=['if'], data=[]]
elif format is None:
result = int(t) # depends on [control=['if'], data=[]]
else:
result = dt.datetime.strptime(t, datetime_format).timestamp()
return result |
def start(self, initial_delay=0):
"""Wait for push updates from device.
Will throw NoAsyncListenerError if no listner has been set.
"""
if self.listener is None:
raise exceptions.NoAsyncListenerError
elif self._future is not None:
return None
# Always start with 0 to trigger an immediate response for the
# first request
self._atv.playstatus_revision = 0
# This for some reason fails on travis but not in other places.
# Why is that (same python version)?
# pylint: disable=deprecated-method
self._future = asyncio.ensure_future(
self._poller(initial_delay), loop=self._loop)
return self._future | def function[start, parameter[self, initial_delay]]:
constant[Wait for push updates from device.
Will throw NoAsyncListenerError if no listner has been set.
]
if compare[name[self].listener is constant[None]] begin[:]
<ast.Raise object at 0x7da18fe912d0>
name[self]._atv.playstatus_revision assign[=] constant[0]
name[self]._future assign[=] call[name[asyncio].ensure_future, parameter[call[name[self]._poller, parameter[name[initial_delay]]]]]
return[name[self]._future] | keyword[def] identifier[start] ( identifier[self] , identifier[initial_delay] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[listener] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[NoAsyncListenerError]
keyword[elif] identifier[self] . identifier[_future] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[None]
identifier[self] . identifier[_atv] . identifier[playstatus_revision] = literal[int]
identifier[self] . identifier[_future] = identifier[asyncio] . identifier[ensure_future] (
identifier[self] . identifier[_poller] ( identifier[initial_delay] ), identifier[loop] = identifier[self] . identifier[_loop] )
keyword[return] identifier[self] . identifier[_future] | def start(self, initial_delay=0):
"""Wait for push updates from device.
Will throw NoAsyncListenerError if no listner has been set.
"""
if self.listener is None:
raise exceptions.NoAsyncListenerError # depends on [control=['if'], data=[]]
elif self._future is not None:
return None # depends on [control=['if'], data=[]]
# Always start with 0 to trigger an immediate response for the
# first request
self._atv.playstatus_revision = 0
# This for some reason fails on travis but not in other places.
# Why is that (same python version)?
# pylint: disable=deprecated-method
self._future = asyncio.ensure_future(self._poller(initial_delay), loop=self._loop)
return self._future |
def load(self, elem):
"""
Converts the inputted list tag to Python.
:param elem | <xml.etree.ElementTree>
:return <list>
"""
self.testTag(elem, 'list')
out = []
for xitem in elem:
out.append(XmlDataIO.fromXml(xitem))
return out | def function[load, parameter[self, elem]]:
constant[
Converts the inputted list tag to Python.
:param elem | <xml.etree.ElementTree>
:return <list>
]
call[name[self].testTag, parameter[name[elem], constant[list]]]
variable[out] assign[=] list[[]]
for taget[name[xitem]] in starred[name[elem]] begin[:]
call[name[out].append, parameter[call[name[XmlDataIO].fromXml, parameter[name[xitem]]]]]
return[name[out]] | keyword[def] identifier[load] ( identifier[self] , identifier[elem] ):
literal[string]
identifier[self] . identifier[testTag] ( identifier[elem] , literal[string] )
identifier[out] =[]
keyword[for] identifier[xitem] keyword[in] identifier[elem] :
identifier[out] . identifier[append] ( identifier[XmlDataIO] . identifier[fromXml] ( identifier[xitem] ))
keyword[return] identifier[out] | def load(self, elem):
"""
Converts the inputted list tag to Python.
:param elem | <xml.etree.ElementTree>
:return <list>
"""
self.testTag(elem, 'list')
out = []
for xitem in elem:
out.append(XmlDataIO.fromXml(xitem)) # depends on [control=['for'], data=['xitem']]
return out |
def _generate_subscribe_headers(self):
"""
generate the subscribe stub headers based on the supplied config
:return: i
"""
headers =[]
headers.append(('predix-zone-id', self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
headers.append(('subscribername', self._config.subscriber_name))
headers.append(('authorization', token[(token.index(' ') + 1):]))
if self._config.topics is []:
headers.append(('topic', self.eventhub_client.zone_id + '_topic'))
else:
for topic in self._config.topics:
headers.append(('topic', topic))
headers.append(('offset-newest', str(self._config.recency == self._config.Recency.NEWEST).lower()))
headers.append(('acks', str(self._config.acks_enabled).lower()))
if self._config.acks_enabled:
headers.append(('max-retries', str(self._config.ack_max_retries)))
headers.append(('retry-interval', str(self._config.ack_retry_interval_seconds) + 's'))
headers.append(('duration-before-retry', str(self._config.ack_duration_before_retry_seconds) + 's'))
if self._config.batching_enabled:
headers.append(('batch-size', str(self._config.batch_size)))
headers.append(('batch-interval', str(self._config.batch_interval_millis) + 'ms'))
return headers | def function[_generate_subscribe_headers, parameter[self]]:
constant[
generate the subscribe stub headers based on the supplied config
:return: i
]
variable[headers] assign[=] list[[]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da204622c50>, <ast.Attribute object at 0x7da2046219f0>]]]]
variable[token] assign[=] call[name[self].eventhub_client.service._get_bearer_token, parameter[]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da204621150>, <ast.Attribute object at 0x7da204622a40>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da204620ca0>, <ast.Subscript object at 0x7da204622e00>]]]]
if compare[name[self]._config.topics is list[[]]] begin[:]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58fa90>, <ast.BinOp object at 0x7da18f58e260>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58d4e0>, <ast.Call object at 0x7da18f58f2e0>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58f220>, <ast.Call object at 0x7da18f58ce20>]]]]
if name[self]._config.acks_enabled begin[:]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58c850>, <ast.Call object at 0x7da18f58d7b0>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58d960>, <ast.BinOp object at 0x7da18f58ee60>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58e170>, <ast.BinOp object at 0x7da18f58d480>]]]]
if name[self]._config.batching_enabled begin[:]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58fe50>, <ast.Call object at 0x7da18f58db70>]]]]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da18f58cac0>, <ast.BinOp object at 0x7da18f58e0e0>]]]]
return[name[headers]] | keyword[def] identifier[_generate_subscribe_headers] ( identifier[self] ):
literal[string]
identifier[headers] =[]
identifier[headers] . identifier[append] (( literal[string] , identifier[self] . identifier[eventhub_client] . identifier[zone_id] ))
identifier[token] = identifier[self] . identifier[eventhub_client] . identifier[service] . identifier[_get_bearer_token] ()
identifier[headers] . identifier[append] (( literal[string] , identifier[self] . identifier[_config] . identifier[subscriber_name] ))
identifier[headers] . identifier[append] (( literal[string] , identifier[token] [( identifier[token] . identifier[index] ( literal[string] )+ literal[int] ):]))
keyword[if] identifier[self] . identifier[_config] . identifier[topics] keyword[is] []:
identifier[headers] . identifier[append] (( literal[string] , identifier[self] . identifier[eventhub_client] . identifier[zone_id] + literal[string] ))
keyword[else] :
keyword[for] identifier[topic] keyword[in] identifier[self] . identifier[_config] . identifier[topics] :
identifier[headers] . identifier[append] (( literal[string] , identifier[topic] ))
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[recency] == identifier[self] . identifier[_config] . identifier[Recency] . identifier[NEWEST] ). identifier[lower] ()))
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[acks_enabled] ). identifier[lower] ()))
keyword[if] identifier[self] . identifier[_config] . identifier[acks_enabled] :
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[ack_max_retries] )))
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[ack_retry_interval_seconds] )+ literal[string] ))
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[ack_duration_before_retry_seconds] )+ literal[string] ))
keyword[if] identifier[self] . identifier[_config] . identifier[batching_enabled] :
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[batch_size] )))
identifier[headers] . identifier[append] (( literal[string] , identifier[str] ( identifier[self] . identifier[_config] . identifier[batch_interval_millis] )+ literal[string] ))
keyword[return] identifier[headers] | def _generate_subscribe_headers(self):
"""
generate the subscribe stub headers based on the supplied config
:return: i
"""
headers = []
headers.append(('predix-zone-id', self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
headers.append(('subscribername', self._config.subscriber_name))
headers.append(('authorization', token[token.index(' ') + 1:]))
if self._config.topics is []:
headers.append(('topic', self.eventhub_client.zone_id + '_topic')) # depends on [control=['if'], data=[]]
else:
for topic in self._config.topics:
headers.append(('topic', topic)) # depends on [control=['for'], data=['topic']]
headers.append(('offset-newest', str(self._config.recency == self._config.Recency.NEWEST).lower()))
headers.append(('acks', str(self._config.acks_enabled).lower()))
if self._config.acks_enabled:
headers.append(('max-retries', str(self._config.ack_max_retries)))
headers.append(('retry-interval', str(self._config.ack_retry_interval_seconds) + 's'))
headers.append(('duration-before-retry', str(self._config.ack_duration_before_retry_seconds) + 's')) # depends on [control=['if'], data=[]]
if self._config.batching_enabled:
headers.append(('batch-size', str(self._config.batch_size)))
headers.append(('batch-interval', str(self._config.batch_interval_millis) + 'ms')) # depends on [control=['if'], data=[]]
return headers |
def handle_scheduled(self, target):
"""
target is a Handler or simple callable
"""
if not isinstance(target, Handler):
return target()
return self._handle_scheduled(target) | def function[handle_scheduled, parameter[self, target]]:
constant[
target is a Handler or simple callable
]
if <ast.UnaryOp object at 0x7da1b04f5060> begin[:]
return[call[name[target], parameter[]]]
return[call[name[self]._handle_scheduled, parameter[name[target]]]] | keyword[def] identifier[handle_scheduled] ( identifier[self] , identifier[target] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[target] , identifier[Handler] ):
keyword[return] identifier[target] ()
keyword[return] identifier[self] . identifier[_handle_scheduled] ( identifier[target] ) | def handle_scheduled(self, target):
"""
target is a Handler or simple callable
"""
if not isinstance(target, Handler):
return target() # depends on [control=['if'], data=[]]
return self._handle_scheduled(target) |
def _preprocess_add_items(self, items):
""" Split the items into two lists of path strings and BaseEntries. """
paths = []
entries = []
for item in items:
if isinstance(item, string_types):
paths.append(self._to_relative_path(item))
elif isinstance(item, (Blob, Submodule)):
entries.append(BaseIndexEntry.from_blob(item))
elif isinstance(item, BaseIndexEntry):
entries.append(item)
else:
raise TypeError("Invalid Type: %r" % item)
# END for each item
return (paths, entries) | def function[_preprocess_add_items, parameter[self, items]]:
constant[ Split the items into two lists of path strings and BaseEntries. ]
variable[paths] assign[=] list[[]]
variable[entries] assign[=] list[[]]
for taget[name[item]] in starred[name[items]] begin[:]
if call[name[isinstance], parameter[name[item], name[string_types]]] begin[:]
call[name[paths].append, parameter[call[name[self]._to_relative_path, parameter[name[item]]]]]
return[tuple[[<ast.Name object at 0x7da1b1d5d570>, <ast.Name object at 0x7da1b1d5d9c0>]]] | keyword[def] identifier[_preprocess_add_items] ( identifier[self] , identifier[items] ):
literal[string]
identifier[paths] =[]
identifier[entries] =[]
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[string_types] ):
identifier[paths] . identifier[append] ( identifier[self] . identifier[_to_relative_path] ( identifier[item] ))
keyword[elif] identifier[isinstance] ( identifier[item] ,( identifier[Blob] , identifier[Submodule] )):
identifier[entries] . identifier[append] ( identifier[BaseIndexEntry] . identifier[from_blob] ( identifier[item] ))
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[BaseIndexEntry] ):
identifier[entries] . identifier[append] ( identifier[item] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[item] )
keyword[return] ( identifier[paths] , identifier[entries] ) | def _preprocess_add_items(self, items):
""" Split the items into two lists of path strings and BaseEntries. """
paths = []
entries = []
for item in items:
if isinstance(item, string_types):
paths.append(self._to_relative_path(item)) # depends on [control=['if'], data=[]]
elif isinstance(item, (Blob, Submodule)):
entries.append(BaseIndexEntry.from_blob(item)) # depends on [control=['if'], data=[]]
elif isinstance(item, BaseIndexEntry):
entries.append(item) # depends on [control=['if'], data=[]]
else:
raise TypeError('Invalid Type: %r' % item) # depends on [control=['for'], data=['item']]
# END for each item
return (paths, entries) |
def _download_from_s3(bucket, key, version=None):
"""
Download a file from given S3 location, if available.
Parameters
----------
bucket : str
S3 Bucket name
key : str
S3 Bucket Key aka file path
version : str
Optional Version ID of the file
Returns
-------
str
Contents of the file that was downloaded
Raises
------
botocore.exceptions.ClientError if we were unable to download the file from S3
"""
s3 = boto3.client('s3')
extra_args = {}
if version:
extra_args["VersionId"] = version
with tempfile.TemporaryFile() as fp:
try:
s3.download_fileobj(
bucket, key, fp,
ExtraArgs=extra_args)
# go to start of file
fp.seek(0)
# Read and return all the contents
return fp.read()
except botocore.exceptions.ClientError:
LOG.error("Unable to download Swagger document from S3 Bucket=%s Key=%s Version=%s",
bucket, key, version)
raise | def function[_download_from_s3, parameter[bucket, key, version]]:
constant[
Download a file from given S3 location, if available.
Parameters
----------
bucket : str
S3 Bucket name
key : str
S3 Bucket Key aka file path
version : str
Optional Version ID of the file
Returns
-------
str
Contents of the file that was downloaded
Raises
------
botocore.exceptions.ClientError if we were unable to download the file from S3
]
variable[s3] assign[=] call[name[boto3].client, parameter[constant[s3]]]
variable[extra_args] assign[=] dictionary[[], []]
if name[version] begin[:]
call[name[extra_args]][constant[VersionId]] assign[=] name[version]
with call[name[tempfile].TemporaryFile, parameter[]] begin[:]
<ast.Try object at 0x7da1b20bb340> | keyword[def] identifier[_download_from_s3] ( identifier[bucket] , identifier[key] , identifier[version] = keyword[None] ):
literal[string]
identifier[s3] = identifier[boto3] . identifier[client] ( literal[string] )
identifier[extra_args] ={}
keyword[if] identifier[version] :
identifier[extra_args] [ literal[string] ]= identifier[version]
keyword[with] identifier[tempfile] . identifier[TemporaryFile] () keyword[as] identifier[fp] :
keyword[try] :
identifier[s3] . identifier[download_fileobj] (
identifier[bucket] , identifier[key] , identifier[fp] ,
identifier[ExtraArgs] = identifier[extra_args] )
identifier[fp] . identifier[seek] ( literal[int] )
keyword[return] identifier[fp] . identifier[read] ()
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] :
identifier[LOG] . identifier[error] ( literal[string] ,
identifier[bucket] , identifier[key] , identifier[version] )
keyword[raise] | def _download_from_s3(bucket, key, version=None):
"""
Download a file from given S3 location, if available.
Parameters
----------
bucket : str
S3 Bucket name
key : str
S3 Bucket Key aka file path
version : str
Optional Version ID of the file
Returns
-------
str
Contents of the file that was downloaded
Raises
------
botocore.exceptions.ClientError if we were unable to download the file from S3
"""
s3 = boto3.client('s3')
extra_args = {}
if version:
extra_args['VersionId'] = version # depends on [control=['if'], data=[]]
with tempfile.TemporaryFile() as fp:
try:
s3.download_fileobj(bucket, key, fp, ExtraArgs=extra_args)
# go to start of file
fp.seek(0)
# Read and return all the contents
return fp.read() # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError:
LOG.error('Unable to download Swagger document from S3 Bucket=%s Key=%s Version=%s', bucket, key, version)
raise # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['fp']] |
def set_apps_list(self):
"""
gets installed apps and puts them into the available_apps list
"""
log.debug("getting apps and setting them in the internal app list...")
cmd, url = DEVICE_URLS["get_apps_list"]
result = self._exec(cmd, url)
self.available_apps = [
AppModel(result[app])
for app in result
] | def function[set_apps_list, parameter[self]]:
constant[
gets installed apps and puts them into the available_apps list
]
call[name[log].debug, parameter[constant[getting apps and setting them in the internal app list...]]]
<ast.Tuple object at 0x7da18bcca3e0> assign[=] call[name[DEVICE_URLS]][constant[get_apps_list]]
variable[result] assign[=] call[name[self]._exec, parameter[name[cmd], name[url]]]
name[self].available_apps assign[=] <ast.ListComp object at 0x7da18bcc9840> | keyword[def] identifier[set_apps_list] ( identifier[self] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[cmd] , identifier[url] = identifier[DEVICE_URLS] [ literal[string] ]
identifier[result] = identifier[self] . identifier[_exec] ( identifier[cmd] , identifier[url] )
identifier[self] . identifier[available_apps] =[
identifier[AppModel] ( identifier[result] [ identifier[app] ])
keyword[for] identifier[app] keyword[in] identifier[result]
] | def set_apps_list(self):
"""
gets installed apps and puts them into the available_apps list
"""
log.debug('getting apps and setting them in the internal app list...')
(cmd, url) = DEVICE_URLS['get_apps_list']
result = self._exec(cmd, url)
self.available_apps = [AppModel(result[app]) for app in result] |
def register_deprecated(deprecated_name: str):
"""Register a function as deprecated.
:param deprecated_name: The old name of the function
:return: A decorator
Usage:
This function must be applied last, since it introspects on the definitions from before
>>> @register_deprecated('my_function')
>>> @transformation
>>> def my_old_function()
>>> ... pass
"""
if deprecated_name in mapped:
raise DeprecationMappingError('function name already mapped. can not register as deprecated name.')
def register_deprecated_f(func):
name = func.__name__
log.debug('%s is deprecated. please migrate to %s', deprecated_name, name)
if name not in mapped:
raise MissingPipelineFunctionError('function not mapped with transformation, uni_transformation, etc.')
universe = name in universe_map
in_place = name in in_place_map
# Add back-reference from deprecated function name to actual function name
deprecated[deprecated_name] = name
return _register_function(deprecated_name, func, universe, in_place)
return register_deprecated_f | def function[register_deprecated, parameter[deprecated_name]]:
constant[Register a function as deprecated.
:param deprecated_name: The old name of the function
:return: A decorator
Usage:
This function must be applied last, since it introspects on the definitions from before
>>> @register_deprecated('my_function')
>>> @transformation
>>> def my_old_function()
>>> ... pass
]
if compare[name[deprecated_name] in name[mapped]] begin[:]
<ast.Raise object at 0x7da20c76c880>
def function[register_deprecated_f, parameter[func]]:
variable[name] assign[=] name[func].__name__
call[name[log].debug, parameter[constant[%s is deprecated. please migrate to %s], name[deprecated_name], name[name]]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[mapped]] begin[:]
<ast.Raise object at 0x7da20c76ff40>
variable[universe] assign[=] compare[name[name] in name[universe_map]]
variable[in_place] assign[=] compare[name[name] in name[in_place_map]]
call[name[deprecated]][name[deprecated_name]] assign[=] name[name]
return[call[name[_register_function], parameter[name[deprecated_name], name[func], name[universe], name[in_place]]]]
return[name[register_deprecated_f]] | keyword[def] identifier[register_deprecated] ( identifier[deprecated_name] : identifier[str] ):
literal[string]
keyword[if] identifier[deprecated_name] keyword[in] identifier[mapped] :
keyword[raise] identifier[DeprecationMappingError] ( literal[string] )
keyword[def] identifier[register_deprecated_f] ( identifier[func] ):
identifier[name] = identifier[func] . identifier[__name__]
identifier[log] . identifier[debug] ( literal[string] , identifier[deprecated_name] , identifier[name] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[mapped] :
keyword[raise] identifier[MissingPipelineFunctionError] ( literal[string] )
identifier[universe] = identifier[name] keyword[in] identifier[universe_map]
identifier[in_place] = identifier[name] keyword[in] identifier[in_place_map]
identifier[deprecated] [ identifier[deprecated_name] ]= identifier[name]
keyword[return] identifier[_register_function] ( identifier[deprecated_name] , identifier[func] , identifier[universe] , identifier[in_place] )
keyword[return] identifier[register_deprecated_f] | def register_deprecated(deprecated_name: str):
"""Register a function as deprecated.
:param deprecated_name: The old name of the function
:return: A decorator
Usage:
This function must be applied last, since it introspects on the definitions from before
>>> @register_deprecated('my_function')
>>> @transformation
>>> def my_old_function()
>>> ... pass
"""
if deprecated_name in mapped:
raise DeprecationMappingError('function name already mapped. can not register as deprecated name.') # depends on [control=['if'], data=[]]
def register_deprecated_f(func):
name = func.__name__
log.debug('%s is deprecated. please migrate to %s', deprecated_name, name)
if name not in mapped:
raise MissingPipelineFunctionError('function not mapped with transformation, uni_transformation, etc.') # depends on [control=['if'], data=[]]
universe = name in universe_map
in_place = name in in_place_map
# Add back-reference from deprecated function name to actual function name
deprecated[deprecated_name] = name
return _register_function(deprecated_name, func, universe, in_place)
return register_deprecated_f |
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list]) | def function[parallel, parameter[regex_list, sort]]:
constant[
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
]
if name[sort] begin[:]
variable[regex_list] assign[=] call[name[sorted], parameter[name[regex_list]]]
return[call[constant[|].join, parameter[<ast.ListComp object at 0x7da20c76f7c0>]]] | keyword[def] identifier[parallel] ( identifier[regex_list] , identifier[sort] = keyword[False] ):
literal[string]
keyword[if] identifier[sort] :
identifier[regex_list] = identifier[sorted] ( identifier[regex_list] , identifier[key] = identifier[len] , identifier[reverse] = keyword[True] )
keyword[return] literal[string] . identifier[join] ([ identifier[unpack] ( identifier[regex] ) keyword[for] identifier[regex] keyword[in] identifier[regex_list] ]) | def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True) # depends on [control=['if'], data=[]]
return '|'.join([unpack(regex) for regex in regex_list]) |
def remove_facts(argv: List[str]) -> bool:
"""
Convert a set of FHIR resources into their corresponding i2b2 counterparts.
:param argv: Command line arguments. See: create_parser for details
:return:
"""
parser = create_parser()
local_opts = parser.parse_args(argv) # Pull everything from the actual command line
if not (local_opts.uploadid or local_opts.sourcesystem or local_opts.testlist or local_opts.removetestlist):
parser.error("Option must be one of: -ss, -u, --testlist, --removetestlist")
if (local_opts.testlist or local_opts.removetestlist) and (local_opts.uploadid or local_opts.sourcesystem):
parser.error("Cannot combine -ss or -u option with testlist options. Use -p to specify ss prefix")
opts, _ = parser.parse_known_args(parser.decode_file_args(argv)) # Include the options file
if opts is None:
return False
opts.uploadid = local_opts.uploadid
opts.sourcesystem = local_opts.sourcesystem
process_parsed_args(opts, parser.error) # Update CRC and Meta table connection information
if opts.uploadid:
for uploadid in opts.uploadid:
print("---> Removing entries for id {}".format(uploadid))
clear_i2b2_tables(I2B2Tables(opts), uploadid)
if opts.sourcesystem:
print("---> Removing entries for sourcesystem_cd {}".format(opts.sourcesystem))
clear_i2b2_sourcesystems(I2B2Tables(opts), opts.sourcesystem)
if opts.testlist:
opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix
print(f"---> Listing orphan test elements for sourcesystem_cd starting with {opts.testprefix}")
list_test_artifacts(opts)
if opts.removetestlist:
opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix
print(f"---> Removing orphan test elements for sourcesystem_cd starting with {opts.testprefix}")
remove_test_artifacts(opts)
return True | def function[remove_facts, parameter[argv]]:
constant[
Convert a set of FHIR resources into their corresponding i2b2 counterparts.
:param argv: Command line arguments. See: create_parser for details
:return:
]
variable[parser] assign[=] call[name[create_parser], parameter[]]
variable[local_opts] assign[=] call[name[parser].parse_args, parameter[name[argv]]]
if <ast.UnaryOp object at 0x7da204564ee0> begin[:]
call[name[parser].error, parameter[constant[Option must be one of: -ss, -u, --testlist, --removetestlist]]]
if <ast.BoolOp object at 0x7da204567e50> begin[:]
call[name[parser].error, parameter[constant[Cannot combine -ss or -u option with testlist options. Use -p to specify ss prefix]]]
<ast.Tuple object at 0x7da207f990c0> assign[=] call[name[parser].parse_known_args, parameter[call[name[parser].decode_file_args, parameter[name[argv]]]]]
if compare[name[opts] is constant[None]] begin[:]
return[constant[False]]
name[opts].uploadid assign[=] name[local_opts].uploadid
name[opts].sourcesystem assign[=] name[local_opts].sourcesystem
call[name[process_parsed_args], parameter[name[opts], name[parser].error]]
if name[opts].uploadid begin[:]
for taget[name[uploadid]] in starred[name[opts].uploadid] begin[:]
call[name[print], parameter[call[constant[---> Removing entries for id {}].format, parameter[name[uploadid]]]]]
call[name[clear_i2b2_tables], parameter[call[name[I2B2Tables], parameter[name[opts]]], name[uploadid]]]
if name[opts].sourcesystem begin[:]
call[name[print], parameter[call[constant[---> Removing entries for sourcesystem_cd {}].format, parameter[name[opts].sourcesystem]]]]
call[name[clear_i2b2_sourcesystems], parameter[call[name[I2B2Tables], parameter[name[opts]]], name[opts].sourcesystem]]
if name[opts].testlist begin[:]
name[opts].testprefix assign[=] <ast.IfExp object at 0x7da207f9bc10>
call[name[print], parameter[<ast.JoinedStr object at 0x7da204344490>]]
call[name[list_test_artifacts], parameter[name[opts]]]
if name[opts].removetestlist begin[:]
name[opts].testprefix assign[=] <ast.IfExp object at 0x7da2043441c0>
call[name[print], parameter[<ast.JoinedStr object at 0x7da204347c40>]]
call[name[remove_test_artifacts], parameter[name[opts]]]
return[constant[True]] | keyword[def] identifier[remove_facts] ( identifier[argv] : identifier[List] [ identifier[str] ])-> identifier[bool] :
literal[string]
identifier[parser] = identifier[create_parser] ()
identifier[local_opts] = identifier[parser] . identifier[parse_args] ( identifier[argv] )
keyword[if] keyword[not] ( identifier[local_opts] . identifier[uploadid] keyword[or] identifier[local_opts] . identifier[sourcesystem] keyword[or] identifier[local_opts] . identifier[testlist] keyword[or] identifier[local_opts] . identifier[removetestlist] ):
identifier[parser] . identifier[error] ( literal[string] )
keyword[if] ( identifier[local_opts] . identifier[testlist] keyword[or] identifier[local_opts] . identifier[removetestlist] ) keyword[and] ( identifier[local_opts] . identifier[uploadid] keyword[or] identifier[local_opts] . identifier[sourcesystem] ):
identifier[parser] . identifier[error] ( literal[string] )
identifier[opts] , identifier[_] = identifier[parser] . identifier[parse_known_args] ( identifier[parser] . identifier[decode_file_args] ( identifier[argv] ))
keyword[if] identifier[opts] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[opts] . identifier[uploadid] = identifier[local_opts] . identifier[uploadid]
identifier[opts] . identifier[sourcesystem] = identifier[local_opts] . identifier[sourcesystem]
identifier[process_parsed_args] ( identifier[opts] , identifier[parser] . identifier[error] )
keyword[if] identifier[opts] . identifier[uploadid] :
keyword[for] identifier[uploadid] keyword[in] identifier[opts] . identifier[uploadid] :
identifier[print] ( literal[string] . identifier[format] ( identifier[uploadid] ))
identifier[clear_i2b2_tables] ( identifier[I2B2Tables] ( identifier[opts] ), identifier[uploadid] )
keyword[if] identifier[opts] . identifier[sourcesystem] :
identifier[print] ( literal[string] . identifier[format] ( identifier[opts] . identifier[sourcesystem] ))
identifier[clear_i2b2_sourcesystems] ( identifier[I2B2Tables] ( identifier[opts] ), identifier[opts] . identifier[sourcesystem] )
keyword[if] identifier[opts] . identifier[testlist] :
identifier[opts] . identifier[testprefix] = identifier[opts] . identifier[testprefix] keyword[if] ( identifier[opts] keyword[and] identifier[opts] . identifier[testprefix] ) keyword[else] identifier[default_test_prefix]
identifier[print] ( literal[string] )
identifier[list_test_artifacts] ( identifier[opts] )
keyword[if] identifier[opts] . identifier[removetestlist] :
identifier[opts] . identifier[testprefix] = identifier[opts] . identifier[testprefix] keyword[if] ( identifier[opts] keyword[and] identifier[opts] . identifier[testprefix] ) keyword[else] identifier[default_test_prefix]
identifier[print] ( literal[string] )
identifier[remove_test_artifacts] ( identifier[opts] )
keyword[return] keyword[True] | def remove_facts(argv: List[str]) -> bool:
"""
Convert a set of FHIR resources into their corresponding i2b2 counterparts.
:param argv: Command line arguments. See: create_parser for details
:return:
"""
parser = create_parser()
local_opts = parser.parse_args(argv) # Pull everything from the actual command line
if not (local_opts.uploadid or local_opts.sourcesystem or local_opts.testlist or local_opts.removetestlist):
parser.error('Option must be one of: -ss, -u, --testlist, --removetestlist') # depends on [control=['if'], data=[]]
if (local_opts.testlist or local_opts.removetestlist) and (local_opts.uploadid or local_opts.sourcesystem):
parser.error('Cannot combine -ss or -u option with testlist options. Use -p to specify ss prefix') # depends on [control=['if'], data=[]]
(opts, _) = parser.parse_known_args(parser.decode_file_args(argv)) # Include the options file
if opts is None:
return False # depends on [control=['if'], data=[]]
opts.uploadid = local_opts.uploadid
opts.sourcesystem = local_opts.sourcesystem
process_parsed_args(opts, parser.error) # Update CRC and Meta table connection information
if opts.uploadid:
for uploadid in opts.uploadid:
print('---> Removing entries for id {}'.format(uploadid))
clear_i2b2_tables(I2B2Tables(opts), uploadid) # depends on [control=['for'], data=['uploadid']] # depends on [control=['if'], data=[]]
if opts.sourcesystem:
print('---> Removing entries for sourcesystem_cd {}'.format(opts.sourcesystem))
clear_i2b2_sourcesystems(I2B2Tables(opts), opts.sourcesystem) # depends on [control=['if'], data=[]]
if opts.testlist:
opts.testprefix = opts.testprefix if opts and opts.testprefix else default_test_prefix
print(f'---> Listing orphan test elements for sourcesystem_cd starting with {opts.testprefix}')
list_test_artifacts(opts) # depends on [control=['if'], data=[]]
if opts.removetestlist:
opts.testprefix = opts.testprefix if opts and opts.testprefix else default_test_prefix
print(f'---> Removing orphan test elements for sourcesystem_cd starting with {opts.testprefix}')
remove_test_artifacts(opts) # depends on [control=['if'], data=[]]
return True |
def _open_np(pathfileext, Ves=None,
ReplacePath=None, out='full', Verb=False, Print=True):
if 'TFG' in pathfileext:
import tofu.geom as tfg
elif 'TFD' in pathfileext:
import tofu.data as tfd
#elif 'TFEq' in pathfileext:
# import tofu.Eq as tfEq
#elif 'TFM' in pathfileext:
# import tofu.mesh as TFM
#elif 'TFMC' in pathfileext:
# import tofu.matcomp as TFMC
#elif 'TFT' in pathfileext:
# import tofu.treat as tft
#elif 'TFI' in pathfileext:
# import tofu.inv as TFI
try:
Out = np.load(pathfileext,mmap_mode=None)
except UnicodeError:
Out = np.load(pathfileext,mmap_mode=None, encoding='latin1')
Id = ID(fromdict=Out['Id'].tolist())
if out=='Id':
return Id
if Id.Cls == 'Ves':
Lim = None if Out['Lim'].tolist() is None else Out['Lim']
obj = tfg.Ves(Id, Out['Poly'], Lim=Lim, Type=Id.Type,
Clock=bool(Out['Clock']),
arrayorder=str(Out['arrayorder']),
Sino_RefPt=Out['Sino_RefPt'], Sino_NP=int(Out['Sino_NP']))
elif Id.Cls == 'Struct':
Lim = None if Out['Lim'].tolist() is None else Out['Lim']
obj = tfg.Struct(Id, Out['Poly'], Type=Id.Type, Lim=Lim,
Clock=bool(Out['Clock']),
arrayorder=str(Out['arrayorder']),
mobile=Out['mobile'].tolist())
elif Id.Cls in ['Rays','LOS','LOSCam1D','LOSCam2D']:
Ves, LStruct = _tryloadVesStruct(Id, Print=Print)
dobj = {'Id':Id._todict(), 'dchans':Out['dchans'].tolist(),
'geom':Out['geom'].tolist(),
'sino':Out['sino'].tolist()}
if 'extra' in Out.keys():
dobj['extra'] = Out['extra'].tolist()
if Ves is None:
dobj['Ves'] = None
else:
dobj['Ves'] = Ves._todict()
if LStruct is None:
dobj['LStruct'] = None
else:
dobj['LStruct'] = [ss._todict() for ss in LStruct]
if Id.Cls=='Rays':
obj = tfg.Rays(fromdict=dobj)
elif Id.Cls=='LOSCam1D':
obj = tfg.LOSCam1D(fromdict=dobj)
elif Id.Cls=='LOSCam2D':
obj = tfg.LOSCam2D(fromdict=dobj)
elif Id.Cls in ['Data1D','Data2D']:
dobj = {'Id':Id._todict(), 'Ref':Out['Ref'].tolist(),
'dunits':Out['dunits'].tolist(), 'fft':Out['fft'].tolist(),
'data0':Out['data0'].tolist(), 'CamCls':Out['CamCls'].tolist()}
indt = None if Out['indt'].tolist() is None else Out['indt']
indch = None if Out['indch'].tolist() is None else Out['indch']
if Out['geom'].tolist() is None:
geom = None
else:
if 'Cam' in Out['geom'][0]:
LCam = [Open(ss)._todict() for ss in Out['geom']]
geom = {'LCam':LCam}
else:
Ves = Open(Out['geom'][0])._todict()
if len(Out['geom'])>1:
LStruct = [Open(ss)._todict() for ss in Out['geom'][1:]]
else:
LStruct = None
geom = {'LCam':None, 'Ves':Ves, 'LStruct':LStruct}
dobj['indt'] = indt
dobj['indch'] = indch
dobj['geom'] = geom
if 'dMag' in Out.keys():
dMag = Out['dMag'].tolist()
else:
dMag = None
dobj['dMag'] = dMag
if Id.Cls=='Data1D':
obj = tfd.Data1D(fromdict=dobj)
elif Id.Cls=='Data2D':
obj = tfd.Data2D(fromdict=dobj)
"""
elif Id.Cls == 'GLOS':
Ves = _tryloadVes(Id)
LLOS, IdLOS = [], Id.LObj['LOS']
for ii in range(0,len(IdLOS['Name'])):
Idl = _Id_recreateFromdict(Out['LIdLOS'][ii])
ll = TFG.LOS(Idl, Du=(Out['LDs'][:,ii],Out['Lus'][:,ii]), Ves=Ves, Sino_RefPt=Out['Sino_RefPt'], arrayorder=str(Out['arrayorder']))
LLOS.append(ll)
obj = TFG.GLOS(Id, LLOS, Ves=Ves, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, Sino_RefPt=Out['Sino_RefPt'], SavePath=Id.SavePath, arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']),
dtime=Id.dtime)
elif Id.Cls == 'Lens':
Ves = _tryloadVes(Id, Ves=Ves)
obj = TFG.Lens(Id, Out['O'], Out['nIn'], Out['Rad'][0], Out['F1'][0], F2=Out['F2'][0], Type=Id.Type, R1=Out['R1'][0], R2=Out['R2'][0], dd=Out['dd'][0], Ves=Ves,
Exp=Id.Exp, Clock=bool(Out['Clock']), Diag=Id.Diag, shot=Id.shot, arrayorder=str(Out['arrayorder']), SavePath=Id.SavePath, dtime=Id.dtime)
elif Id.Cls == 'Apert':
Ves = _tryloadVes(Id, Ves=Ves)
obj = TFG.Apert(Id, Out['Poly'], Clock=bool(Out['Clock']), arrayorder=str(Out['arrayorder']), Ves=Ves, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime)
elif Id.Cls == 'Detect':
Ves = _tryloadVes(Id, Ves=Ves)
if 'VesCalc'in Out.keys() and Out['VesCalc'][0]['SavePath'] is not None:
VesCalc = Open(Out['VesCalc'][0]['SavePath']+Out['VesCalc'][0]['SaveName']+'.npz')
else:
VesCalc = None
LOSprops, Sino, Span, Cone, SAng, Opt = Out['LOSprops'][0], Out['Sino'][0], Out['Span'][0], Out['Cone'][0], Out['SAng'][0], Out['Optics'][0]
(SynthDiag,Res) = (Out['SynthDiag'][0],Out['Res'][0]) if out=='full' else _get_light_SynthDiag_Res()
Optics = _tryLoadOpticsElseCreate(Id, Opt=Opt, Ves=Ves, Verb=Verb)
Poly = Out['Poly'] if type(Optics) is list else dict(Rad=float(Out['Rad']),O=Out['BaryS'],nIn=Out['nIn'])
obj = TFG.Detect(Id, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino['_Sino_RefPt'], CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, Verb=Verb,
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))
obj = _resetDetectAttr(obj, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})
obj._LOS_NP = Out['LOSNP']
if obj._SynthDiag_Done and obj._SynthDiag_Points is None:
obj.set_SigPrecomp()
elif Id.Cls == 'GDetect':
LDetsave = list(Out['LDetsave'])
LDet = []
Ves = _tryloadVes(Id, Ves=Ves)
if out=='light':
SynthDiag, Res = _get_light_SynthDiag_Res()
else:
LDetSynthRes = Out['LDetSynthRes']
for ii in range(0,len(LDetsave)):
ddIdsave = _Id_recreateFromdict(LDetsave[ii]['Idsave'])
if 'VesCalc'in LDetsave[ii].keys() and LDetsave[ii]['VesCalc'][0]['SavePath'] is not None:
VesCalc = Open(LDetsave[ii]['VesCalc'][0]['SavePath']+LDetsave[ii]['VesCalc'][0]['SaveName']+'.npz')
else:
VesCalc = None
LOSprops, Sino, Span, Cone, SAng, Opt = LDetsave[ii]['LOSprops'][0], LDetsave[ii]['Sino'][0], LDetsave[ii]['Span'][0], LDetsave[ii]['Cone'][0], LDetsave[ii]['SAng'][0], LDetsave[ii]['Optics'][0]
if out=='full':
SynthDiag, Res = LDetSynthRes[ii]['SynthDiag'][0], LDetSynthRes[ii]['Res'][0]
Optics = _tryLoadOpticsElseCreate(ddIdsave, Opt=Opt, Ves=Ves, Verb=Verb)
Poly = LDetsave[ii]['Poly'] if type(Optics) is list else dict(Rad=float(LDetsave[ii]['Rad']),O=LDetsave[ii]['BaryS'],nIn=LDetsave[ii]['nIn'])
Sino_RefPt = None if Out['Sino_RefPt'].shape==() else Out['Sino_RefPt']
dd = TFG.Detect(ddIdsave, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino_RefPt, CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, Verb=Verb,
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))
dd = _resetDetectAttr(dd, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})
dd._LOS_NP = LDetsave[ii]['LOSNP']
if dd._SynthDiag_Done and dd._SynthDiag_Points is None:
dd.set_SigPrecomp()
LDet.append(dd)
obj = TFG.GDetect(Id, LDet, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime, Sino_RefPt=Out['Sino_RefPt'], LOSRef=str(Out['LOSRef']),
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']), SavePath=Id.SavePath)
Res = Out['Res'][0] if out=='full' else Res
for kk in Res.keys():
setattr(obj,kk,Res[kk])
elif Id.Cls=='Eq2D':
Sep = [np.array(ss) for ss in Out['Sep'].tolist()]
obj = tfEq.Eq2D(Id, Out['PtsCross'], t=Out['t'], MagAx=Out['MagAx'], Sep=Sep, rho_p=Out['rho_p'].tolist(), rho_t=Out['rho_t'].tolist(), surf=Out['surf'].tolist(), vol=Out['vol'].tolist(),
q=Out['q'].tolist(), jp=Out['jp'].tolist(), pf=Out['pf'].tolist(), tf=Out['tf'].tolist(), theta=Out['theta'].tolist(), thetastar=Out['thetastar'].tolist(),
BTX=Out['BTX'].tolist(), BRY=Out['BRY'].tolist(), BZ=Out['BZ'].tolist(), Ref=str(Out['Ref']))
elif Id.Cls=='Mesh1D':
obj = TFM.Mesh1D(Id, Out['Knots'])
elif Id.Cls=='Mesh2D':
obj = TFM.Mesh2D(Id, [Out['Knots'][0],Out['Knots'][1]])
obj = TFM.Mesh2D(Id, Knots=obj, ind=Out['IndBg'])
for ii in range(0,len(Out['SubMinds'])):
obj.add_SubMesh(Name=Out['SubMinds'][ii]['Name'], ind=Out['SubMinds'][ii]['ind'])
elif Id.Cls=='Metric1D':
obj = TFM.Metric1D(Id)
elif Id.Cls=='Metric2D':
obj = TFM.Metric2D(Id)
elif Id.Cls in 'BF2D':
IdMesh = ID(str(Out['IdMesh'][0]), str(Out['IdMesh'][1]), SaveName=str(Out['IdMesh'][2]), SavePath=str(Out['IdMesh'][3]), dtime=Out['dtimeMesh'][0], dtFormat=str(Out['IdMesh'][4]))
M2 = TFM.Mesh2D(IdMesh, Knots=[Out['KnotsR'],Out['KnotsZ']])
M2bis = TFM.Mesh2D(IdMesh,Knots=M2,Ind=Out['Ind'])
obj = TFM.BF2D(Id, M2bis, int(Out['Deg'][0]))
elif Id.Cls=='GMat2D':
import ToFu_MatComp as TFMC
import scipy.sparse as scpsp
Id.set_LObj(open_np_IdObj(['Ves','BF2D','Detect'], [Out['Ves'],Out['BF2'],Out['LDetect']], [Out['VesUSR'],Out['BF2USR'],Out['LDetectUSR']]))
Mat = scpsp.csr_matrix((Out['Matdata'], Out['Matind'], Out['Matindpr']), shape=Out['Matshape'])
MatLOS = scpsp.csr_matrix((Out['MatLOSdata'], Out['MatLOSind'], Out['MatLOSindpr']), shape=Out['MatLOSshape'])
obj = TFMC.GMat2D(Id, None, None, Mat=None, indMat=None, MatLOS=None, Calcind=False, Calc=False, CalcLOS=False)
obj._init_CompParam(Mode=str(Out['CompParamStr'][0]), epsrel=Out['CompParamVal'][0], SubP=Out['CompParamVal'][1], SubMode=str(Out['CompParamStr'][1]), SubTheta=Out['CompParamVal'][2], SubThetaMode=str(Out['CompParamStr'][2]), Fast=bool(Out['CompParamVal'][-1]), SubPind=Out['CompParamVal'][3], ModeLOS=str(Out['CompParamStr'][3]), epsrelLOS=Out['CompParamVal'][4], SubPLOS=Out['CompParamVal'][5], SubModeLOS=str(Out['CompParamStr'][4]))
obj._BF2 = None
obj._BF2_Deg = int(Out['BF2Par'][0])
obj._BF2_NCents = int(Out['BF2Par'][2])
obj._BF2_NFunc = int(Out['BF2Par'][1])
obj._Ves = None
obj._LD = None
obj._LD_nDetect = int(Out['LD_nD'])
obj._set_indMat(indMat=Out['indMat'], Verb=False)
obj._set_MatLOS(MatLOS=MatLOS, Verb=False)
obj._set_Mat(Mat=Mat, Verb=False)
elif Id.Cls=='PreData':
LIdDet = Id.get_LObjasLId('Detect') if 'Detect' in Id.LObj.keys() else None
Init, Update = Out['Init'][0], Out['Update'][0]
obj = tft.PreData(Init['data'], Id=Id, t=Init['t'], Chans=Init['Chans'], DtRef=Init['DtRef'], LIdDet=LIdDet)
obj.set_Dt(Update['Dt'], Calc=False)
obj.set_Resamp(t=Update['Resamp_t'], f=Update['Resamp_f'], Method=Update['Resamp_Method'], interpkind=Update['Resamp_interpkind'], Calc=False)
obj.Out_add(indOut=Update['indOut'], Calc=False)
obj.Corr_add(indCorr=Update['indCorr'], Calc=False)
obj.interp(lt=Update['interp_lt'], lNames=Update['interp_lNames'], Calc=False)
obj.substract_Dt(tsub=Update['Subtract_tsub'], Calc=False)
obj.set_fft(Calc=True, **Update['FFTPar'])
if not Update['PhysNoiseParam'] is None:
Method = 'svd' if 'Modes' in Update['PhysNoiseParam'].keys() else 'fft'
obj.set_PhysNoise(**Update['PhysNoiseParam'].update({'Method':Method}))
#Id.set_LObj(open_np_IdObj(['Detect'],[Out['LDetect']], [Out['LDetectUSR']]))
#obj = TFT.PreData(Id=Id, shot=int(Out['shot']), DLPar=Out['DLPar'].item(), Exp=str(Out['StrPar'][0]), Dt=list(Out['Dt']), DtMargin=float(Out['DtMarg']), MovMeanfreq=float(Out['MovMeanfreq']), Resamp=bool(Out['Resamp']),
# interpkind=str(Out['StrPar'][1]), indOut=Out['indOut'], indCorr=Out['indCorr'], lt=Out['interp_lt'], lNames=Out['interp_lN'].tolist(), Test=True)
#if not Out['PhysNoise'].item() is None:
# obj.set_PhysNoise(Deg=int(Out['NoiseMod'].item()['Deg']), Nbin=int(Out['NoiseMod'].item()['Nbin']), LimRatio=float(Out['NoiseMod'].item()['LimRatio']), **Out['PhysNoise'].item()['Param'])
elif Id.Cls=='Sol2D':
Id.set_LObj(open_np_IdObj(['PreData','GMat2D','BF2D'],[Out['PreData'], Out['GMat2D'], Out['BF2D']], [Out['PreDataUSR'],Out['GMatUSR'],Out['BF2DUSR']]))
GMSaveName = Id.LObj['GMat2D']['SaveName'][0]
try:
GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')
except Exception:
GMSaveName = GMSaveName[:GMSaveName.index('All_')+4]+'sh'+GMSaveName[GMSaveName.index('All_')+4:]
GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')
obj = TFI.Sol2D(Id, PreData=None, GMat=GMat, InvParam=Out['InvParam'].item(), SVesePreData=False, SVeseGMat=True, SVeseBF=True)
obj._PreData = None
obj._GMat = obj.GMat.get_SubGMat2D(Val=list(Out['LNames']), Crit='Name',InOut='In')
obj._shot = int(Out['shot'])
try:
obj._LNames = Out['LNames'].tolist()
except Exception:
obj._LNames = obj.PreData.In_list()
obj._run = bool(Out['Run'])
if bool(Out['Run']):
obj._LOS = bool(Out['LOS'])
obj._t, obj._data = Out['t'], Out['data']
obj._Coefs, obj._sigma = Out['Coefs'], Out['sigma']
obj._Mu, obj._Chi2N, obj._R, obj._Nit = Out['Mu'], Out['Chi2N'], Out['R'], Out['Nit']
obj._Spec = list(Out['Spec'])
obj._timing = Out['t2']
obj._PostTreat = list(Out['PostTreat'])
"""
return obj | def function[_open_np, parameter[pathfileext, Ves, ReplacePath, out, Verb, Print]]:
if compare[constant[TFG] in name[pathfileext]] begin[:]
import module[tofu.geom] as alias[tfg]
<ast.Try object at 0x7da1b0ba6b30>
variable[Id] assign[=] call[name[ID], parameter[]]
if compare[name[out] equal[==] constant[Id]] begin[:]
return[name[Id]]
if compare[name[Id].Cls equal[==] constant[Ves]] begin[:]
variable[Lim] assign[=] <ast.IfExp object at 0x7da1b0ba64a0>
variable[obj] assign[=] call[name[tfg].Ves, parameter[name[Id], call[name[Out]][constant[Poly]]]]
constant[
elif Id.Cls == 'GLOS':
Ves = _tryloadVes(Id)
LLOS, IdLOS = [], Id.LObj['LOS']
for ii in range(0,len(IdLOS['Name'])):
Idl = _Id_recreateFromdict(Out['LIdLOS'][ii])
ll = TFG.LOS(Idl, Du=(Out['LDs'][:,ii],Out['Lus'][:,ii]), Ves=Ves, Sino_RefPt=Out['Sino_RefPt'], arrayorder=str(Out['arrayorder']))
LLOS.append(ll)
obj = TFG.GLOS(Id, LLOS, Ves=Ves, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, Sino_RefPt=Out['Sino_RefPt'], SavePath=Id.SavePath, arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']),
dtime=Id.dtime)
elif Id.Cls == 'Lens':
Ves = _tryloadVes(Id, Ves=Ves)
obj = TFG.Lens(Id, Out['O'], Out['nIn'], Out['Rad'][0], Out['F1'][0], F2=Out['F2'][0], Type=Id.Type, R1=Out['R1'][0], R2=Out['R2'][0], dd=Out['dd'][0], Ves=Ves,
Exp=Id.Exp, Clock=bool(Out['Clock']), Diag=Id.Diag, shot=Id.shot, arrayorder=str(Out['arrayorder']), SavePath=Id.SavePath, dtime=Id.dtime)
elif Id.Cls == 'Apert':
Ves = _tryloadVes(Id, Ves=Ves)
obj = TFG.Apert(Id, Out['Poly'], Clock=bool(Out['Clock']), arrayorder=str(Out['arrayorder']), Ves=Ves, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime)
elif Id.Cls == 'Detect':
Ves = _tryloadVes(Id, Ves=Ves)
if 'VesCalc'in Out.keys() and Out['VesCalc'][0]['SavePath'] is not None:
VesCalc = Open(Out['VesCalc'][0]['SavePath']+Out['VesCalc'][0]['SaveName']+'.npz')
else:
VesCalc = None
LOSprops, Sino, Span, Cone, SAng, Opt = Out['LOSprops'][0], Out['Sino'][0], Out['Span'][0], Out['Cone'][0], Out['SAng'][0], Out['Optics'][0]
(SynthDiag,Res) = (Out['SynthDiag'][0],Out['Res'][0]) if out=='full' else _get_light_SynthDiag_Res()
Optics = _tryLoadOpticsElseCreate(Id, Opt=Opt, Ves=Ves, Verb=Verb)
Poly = Out['Poly'] if type(Optics) is list else dict(Rad=float(Out['Rad']),O=Out['BaryS'],nIn=Out['nIn'])
obj = TFG.Detect(Id, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino['_Sino_RefPt'], CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, Verb=Verb,
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))
obj = _resetDetectAttr(obj, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})
obj._LOS_NP = Out['LOSNP']
if obj._SynthDiag_Done and obj._SynthDiag_Points is None:
obj.set_SigPrecomp()
elif Id.Cls == 'GDetect':
LDetsave = list(Out['LDetsave'])
LDet = []
Ves = _tryloadVes(Id, Ves=Ves)
if out=='light':
SynthDiag, Res = _get_light_SynthDiag_Res()
else:
LDetSynthRes = Out['LDetSynthRes']
for ii in range(0,len(LDetsave)):
ddIdsave = _Id_recreateFromdict(LDetsave[ii]['Idsave'])
if 'VesCalc'in LDetsave[ii].keys() and LDetsave[ii]['VesCalc'][0]['SavePath'] is not None:
VesCalc = Open(LDetsave[ii]['VesCalc'][0]['SavePath']+LDetsave[ii]['VesCalc'][0]['SaveName']+'.npz')
else:
VesCalc = None
LOSprops, Sino, Span, Cone, SAng, Opt = LDetsave[ii]['LOSprops'][0], LDetsave[ii]['Sino'][0], LDetsave[ii]['Span'][0], LDetsave[ii]['Cone'][0], LDetsave[ii]['SAng'][0], LDetsave[ii]['Optics'][0]
if out=='full':
SynthDiag, Res = LDetSynthRes[ii]['SynthDiag'][0], LDetSynthRes[ii]['Res'][0]
Optics = _tryLoadOpticsElseCreate(ddIdsave, Opt=Opt, Ves=Ves, Verb=Verb)
Poly = LDetsave[ii]['Poly'] if type(Optics) is list else dict(Rad=float(LDetsave[ii]['Rad']),O=LDetsave[ii]['BaryS'],nIn=LDetsave[ii]['nIn'])
Sino_RefPt = None if Out['Sino_RefPt'].shape==() else Out['Sino_RefPt']
dd = TFG.Detect(ddIdsave, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino_RefPt, CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, Verb=Verb,
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))
dd = _resetDetectAttr(dd, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})
dd._LOS_NP = LDetsave[ii]['LOSNP']
if dd._SynthDiag_Done and dd._SynthDiag_Points is None:
dd.set_SigPrecomp()
LDet.append(dd)
obj = TFG.GDetect(Id, LDet, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime, Sino_RefPt=Out['Sino_RefPt'], LOSRef=str(Out['LOSRef']),
arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']), SavePath=Id.SavePath)
Res = Out['Res'][0] if out=='full' else Res
for kk in Res.keys():
setattr(obj,kk,Res[kk])
elif Id.Cls=='Eq2D':
Sep = [np.array(ss) for ss in Out['Sep'].tolist()]
obj = tfEq.Eq2D(Id, Out['PtsCross'], t=Out['t'], MagAx=Out['MagAx'], Sep=Sep, rho_p=Out['rho_p'].tolist(), rho_t=Out['rho_t'].tolist(), surf=Out['surf'].tolist(), vol=Out['vol'].tolist(),
q=Out['q'].tolist(), jp=Out['jp'].tolist(), pf=Out['pf'].tolist(), tf=Out['tf'].tolist(), theta=Out['theta'].tolist(), thetastar=Out['thetastar'].tolist(),
BTX=Out['BTX'].tolist(), BRY=Out['BRY'].tolist(), BZ=Out['BZ'].tolist(), Ref=str(Out['Ref']))
elif Id.Cls=='Mesh1D':
obj = TFM.Mesh1D(Id, Out['Knots'])
elif Id.Cls=='Mesh2D':
obj = TFM.Mesh2D(Id, [Out['Knots'][0],Out['Knots'][1]])
obj = TFM.Mesh2D(Id, Knots=obj, ind=Out['IndBg'])
for ii in range(0,len(Out['SubMinds'])):
obj.add_SubMesh(Name=Out['SubMinds'][ii]['Name'], ind=Out['SubMinds'][ii]['ind'])
elif Id.Cls=='Metric1D':
obj = TFM.Metric1D(Id)
elif Id.Cls=='Metric2D':
obj = TFM.Metric2D(Id)
elif Id.Cls in 'BF2D':
IdMesh = ID(str(Out['IdMesh'][0]), str(Out['IdMesh'][1]), SaveName=str(Out['IdMesh'][2]), SavePath=str(Out['IdMesh'][3]), dtime=Out['dtimeMesh'][0], dtFormat=str(Out['IdMesh'][4]))
M2 = TFM.Mesh2D(IdMesh, Knots=[Out['KnotsR'],Out['KnotsZ']])
M2bis = TFM.Mesh2D(IdMesh,Knots=M2,Ind=Out['Ind'])
obj = TFM.BF2D(Id, M2bis, int(Out['Deg'][0]))
elif Id.Cls=='GMat2D':
import ToFu_MatComp as TFMC
import scipy.sparse as scpsp
Id.set_LObj(open_np_IdObj(['Ves','BF2D','Detect'], [Out['Ves'],Out['BF2'],Out['LDetect']], [Out['VesUSR'],Out['BF2USR'],Out['LDetectUSR']]))
Mat = scpsp.csr_matrix((Out['Matdata'], Out['Matind'], Out['Matindpr']), shape=Out['Matshape'])
MatLOS = scpsp.csr_matrix((Out['MatLOSdata'], Out['MatLOSind'], Out['MatLOSindpr']), shape=Out['MatLOSshape'])
obj = TFMC.GMat2D(Id, None, None, Mat=None, indMat=None, MatLOS=None, Calcind=False, Calc=False, CalcLOS=False)
obj._init_CompParam(Mode=str(Out['CompParamStr'][0]), epsrel=Out['CompParamVal'][0], SubP=Out['CompParamVal'][1], SubMode=str(Out['CompParamStr'][1]), SubTheta=Out['CompParamVal'][2], SubThetaMode=str(Out['CompParamStr'][2]), Fast=bool(Out['CompParamVal'][-1]), SubPind=Out['CompParamVal'][3], ModeLOS=str(Out['CompParamStr'][3]), epsrelLOS=Out['CompParamVal'][4], SubPLOS=Out['CompParamVal'][5], SubModeLOS=str(Out['CompParamStr'][4]))
obj._BF2 = None
obj._BF2_Deg = int(Out['BF2Par'][0])
obj._BF2_NCents = int(Out['BF2Par'][2])
obj._BF2_NFunc = int(Out['BF2Par'][1])
obj._Ves = None
obj._LD = None
obj._LD_nDetect = int(Out['LD_nD'])
obj._set_indMat(indMat=Out['indMat'], Verb=False)
obj._set_MatLOS(MatLOS=MatLOS, Verb=False)
obj._set_Mat(Mat=Mat, Verb=False)
elif Id.Cls=='PreData':
LIdDet = Id.get_LObjasLId('Detect') if 'Detect' in Id.LObj.keys() else None
Init, Update = Out['Init'][0], Out['Update'][0]
obj = tft.PreData(Init['data'], Id=Id, t=Init['t'], Chans=Init['Chans'], DtRef=Init['DtRef'], LIdDet=LIdDet)
obj.set_Dt(Update['Dt'], Calc=False)
obj.set_Resamp(t=Update['Resamp_t'], f=Update['Resamp_f'], Method=Update['Resamp_Method'], interpkind=Update['Resamp_interpkind'], Calc=False)
obj.Out_add(indOut=Update['indOut'], Calc=False)
obj.Corr_add(indCorr=Update['indCorr'], Calc=False)
obj.interp(lt=Update['interp_lt'], lNames=Update['interp_lNames'], Calc=False)
obj.substract_Dt(tsub=Update['Subtract_tsub'], Calc=False)
obj.set_fft(Calc=True, **Update['FFTPar'])
if not Update['PhysNoiseParam'] is None:
Method = 'svd' if 'Modes' in Update['PhysNoiseParam'].keys() else 'fft'
obj.set_PhysNoise(**Update['PhysNoiseParam'].update({'Method':Method}))
#Id.set_LObj(open_np_IdObj(['Detect'],[Out['LDetect']], [Out['LDetectUSR']]))
#obj = TFT.PreData(Id=Id, shot=int(Out['shot']), DLPar=Out['DLPar'].item(), Exp=str(Out['StrPar'][0]), Dt=list(Out['Dt']), DtMargin=float(Out['DtMarg']), MovMeanfreq=float(Out['MovMeanfreq']), Resamp=bool(Out['Resamp']),
# interpkind=str(Out['StrPar'][1]), indOut=Out['indOut'], indCorr=Out['indCorr'], lt=Out['interp_lt'], lNames=Out['interp_lN'].tolist(), Test=True)
#if not Out['PhysNoise'].item() is None:
# obj.set_PhysNoise(Deg=int(Out['NoiseMod'].item()['Deg']), Nbin=int(Out['NoiseMod'].item()['Nbin']), LimRatio=float(Out['NoiseMod'].item()['LimRatio']), **Out['PhysNoise'].item()['Param'])
elif Id.Cls=='Sol2D':
Id.set_LObj(open_np_IdObj(['PreData','GMat2D','BF2D'],[Out['PreData'], Out['GMat2D'], Out['BF2D']], [Out['PreDataUSR'],Out['GMatUSR'],Out['BF2DUSR']]))
GMSaveName = Id.LObj['GMat2D']['SaveName'][0]
try:
GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')
except Exception:
GMSaveName = GMSaveName[:GMSaveName.index('All_')+4]+'sh'+GMSaveName[GMSaveName.index('All_')+4:]
GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')
obj = TFI.Sol2D(Id, PreData=None, GMat=GMat, InvParam=Out['InvParam'].item(), SVesePreData=False, SVeseGMat=True, SVeseBF=True)
obj._PreData = None
obj._GMat = obj.GMat.get_SubGMat2D(Val=list(Out['LNames']), Crit='Name',InOut='In')
obj._shot = int(Out['shot'])
try:
obj._LNames = Out['LNames'].tolist()
except Exception:
obj._LNames = obj.PreData.In_list()
obj._run = bool(Out['Run'])
if bool(Out['Run']):
obj._LOS = bool(Out['LOS'])
obj._t, obj._data = Out['t'], Out['data']
obj._Coefs, obj._sigma = Out['Coefs'], Out['sigma']
obj._Mu, obj._Chi2N, obj._R, obj._Nit = Out['Mu'], Out['Chi2N'], Out['R'], Out['Nit']
obj._Spec = list(Out['Spec'])
obj._timing = Out['t2']
obj._PostTreat = list(Out['PostTreat'])
]
return[name[obj]] | keyword[def] identifier[_open_np] ( identifier[pathfileext] , identifier[Ves] = keyword[None] ,
identifier[ReplacePath] = keyword[None] , identifier[out] = literal[string] , identifier[Verb] = keyword[False] , identifier[Print] = keyword[True] ):
keyword[if] literal[string] keyword[in] identifier[pathfileext] :
keyword[import] identifier[tofu] . identifier[geom] keyword[as] identifier[tfg]
keyword[elif] literal[string] keyword[in] identifier[pathfileext] :
keyword[import] identifier[tofu] . identifier[data] keyword[as] identifier[tfd]
keyword[try] :
identifier[Out] = identifier[np] . identifier[load] ( identifier[pathfileext] , identifier[mmap_mode] = keyword[None] )
keyword[except] identifier[UnicodeError] :
identifier[Out] = identifier[np] . identifier[load] ( identifier[pathfileext] , identifier[mmap_mode] = keyword[None] , identifier[encoding] = literal[string] )
identifier[Id] = identifier[ID] ( identifier[fromdict] = identifier[Out] [ literal[string] ]. identifier[tolist] ())
keyword[if] identifier[out] == literal[string] :
keyword[return] identifier[Id]
keyword[if] identifier[Id] . identifier[Cls] == literal[string] :
identifier[Lim] = keyword[None] keyword[if] identifier[Out] [ literal[string] ]. identifier[tolist] () keyword[is] keyword[None] keyword[else] identifier[Out] [ literal[string] ]
identifier[obj] = identifier[tfg] . identifier[Ves] ( identifier[Id] , identifier[Out] [ literal[string] ], identifier[Lim] = identifier[Lim] , identifier[Type] = identifier[Id] . identifier[Type] ,
identifier[Clock] = identifier[bool] ( identifier[Out] [ literal[string] ]),
identifier[arrayorder] = identifier[str] ( identifier[Out] [ literal[string] ]),
identifier[Sino_RefPt] = identifier[Out] [ literal[string] ], identifier[Sino_NP] = identifier[int] ( identifier[Out] [ literal[string] ]))
keyword[elif] identifier[Id] . identifier[Cls] == literal[string] :
identifier[Lim] = keyword[None] keyword[if] identifier[Out] [ literal[string] ]. identifier[tolist] () keyword[is] keyword[None] keyword[else] identifier[Out] [ literal[string] ]
identifier[obj] = identifier[tfg] . identifier[Struct] ( identifier[Id] , identifier[Out] [ literal[string] ], identifier[Type] = identifier[Id] . identifier[Type] , identifier[Lim] = identifier[Lim] ,
identifier[Clock] = identifier[bool] ( identifier[Out] [ literal[string] ]),
identifier[arrayorder] = identifier[str] ( identifier[Out] [ literal[string] ]),
identifier[mobile] = identifier[Out] [ literal[string] ]. identifier[tolist] ())
keyword[elif] identifier[Id] . identifier[Cls] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[Ves] , identifier[LStruct] = identifier[_tryloadVesStruct] ( identifier[Id] , identifier[Print] = identifier[Print] )
identifier[dobj] ={ literal[string] : identifier[Id] . identifier[_todict] (), literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (),
literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (),
literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] ()}
keyword[if] literal[string] keyword[in] identifier[Out] . identifier[keys] ():
identifier[dobj] [ literal[string] ]= identifier[Out] [ literal[string] ]. identifier[tolist] ()
keyword[if] identifier[Ves] keyword[is] keyword[None] :
identifier[dobj] [ literal[string] ]= keyword[None]
keyword[else] :
identifier[dobj] [ literal[string] ]= identifier[Ves] . identifier[_todict] ()
keyword[if] identifier[LStruct] keyword[is] keyword[None] :
identifier[dobj] [ literal[string] ]= keyword[None]
keyword[else] :
identifier[dobj] [ literal[string] ]=[ identifier[ss] . identifier[_todict] () keyword[for] identifier[ss] keyword[in] identifier[LStruct] ]
keyword[if] identifier[Id] . identifier[Cls] == literal[string] :
identifier[obj] = identifier[tfg] . identifier[Rays] ( identifier[fromdict] = identifier[dobj] )
keyword[elif] identifier[Id] . identifier[Cls] == literal[string] :
identifier[obj] = identifier[tfg] . identifier[LOSCam1D] ( identifier[fromdict] = identifier[dobj] )
keyword[elif] identifier[Id] . identifier[Cls] == literal[string] :
identifier[obj] = identifier[tfg] . identifier[LOSCam2D] ( identifier[fromdict] = identifier[dobj] )
keyword[elif] identifier[Id] . identifier[Cls] keyword[in] [ literal[string] , literal[string] ]:
identifier[dobj] ={ literal[string] : identifier[Id] . identifier[_todict] (), literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (),
literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (), literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (),
literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] (), literal[string] : identifier[Out] [ literal[string] ]. identifier[tolist] ()}
identifier[indt] = keyword[None] keyword[if] identifier[Out] [ literal[string] ]. identifier[tolist] () keyword[is] keyword[None] keyword[else] identifier[Out] [ literal[string] ]
identifier[indch] = keyword[None] keyword[if] identifier[Out] [ literal[string] ]. identifier[tolist] () keyword[is] keyword[None] keyword[else] identifier[Out] [ literal[string] ]
keyword[if] identifier[Out] [ literal[string] ]. identifier[tolist] () keyword[is] keyword[None] :
identifier[geom] = keyword[None]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[Out] [ literal[string] ][ literal[int] ]:
identifier[LCam] =[ identifier[Open] ( identifier[ss] ). identifier[_todict] () keyword[for] identifier[ss] keyword[in] identifier[Out] [ literal[string] ]]
identifier[geom] ={ literal[string] : identifier[LCam] }
keyword[else] :
identifier[Ves] = identifier[Open] ( identifier[Out] [ literal[string] ][ literal[int] ]). identifier[_todict] ()
keyword[if] identifier[len] ( identifier[Out] [ literal[string] ])> literal[int] :
identifier[LStruct] =[ identifier[Open] ( identifier[ss] ). identifier[_todict] () keyword[for] identifier[ss] keyword[in] identifier[Out] [ literal[string] ][ literal[int] :]]
keyword[else] :
identifier[LStruct] = keyword[None]
identifier[geom] ={ literal[string] : keyword[None] , literal[string] : identifier[Ves] , literal[string] : identifier[LStruct] }
identifier[dobj] [ literal[string] ]= identifier[indt]
identifier[dobj] [ literal[string] ]= identifier[indch]
identifier[dobj] [ literal[string] ]= identifier[geom]
keyword[if] literal[string] keyword[in] identifier[Out] . identifier[keys] ():
identifier[dMag] = identifier[Out] [ literal[string] ]. identifier[tolist] ()
keyword[else] :
identifier[dMag] = keyword[None]
identifier[dobj] [ literal[string] ]= identifier[dMag]
keyword[if] identifier[Id] . identifier[Cls] == literal[string] :
identifier[obj] = identifier[tfd] . identifier[Data1D] ( identifier[fromdict] = identifier[dobj] )
keyword[elif] identifier[Id] . identifier[Cls] == literal[string] :
identifier[obj] = identifier[tfd] . identifier[Data2D] ( identifier[fromdict] = identifier[dobj] )
literal[string]
keyword[return] identifier[obj] | def _open_np(pathfileext, Ves=None, ReplacePath=None, out='full', Verb=False, Print=True):
if 'TFG' in pathfileext:
import tofu.geom as tfg # depends on [control=['if'], data=[]]
elif 'TFD' in pathfileext:
import tofu.data as tfd # depends on [control=['if'], data=[]]
#elif 'TFEq' in pathfileext:
# import tofu.Eq as tfEq
#elif 'TFM' in pathfileext:
# import tofu.mesh as TFM
#elif 'TFMC' in pathfileext:
# import tofu.matcomp as TFMC
#elif 'TFT' in pathfileext:
# import tofu.treat as tft
#elif 'TFI' in pathfileext:
# import tofu.inv as TFI
try:
Out = np.load(pathfileext, mmap_mode=None) # depends on [control=['try'], data=[]]
except UnicodeError:
Out = np.load(pathfileext, mmap_mode=None, encoding='latin1') # depends on [control=['except'], data=[]]
Id = ID(fromdict=Out['Id'].tolist())
if out == 'Id':
return Id # depends on [control=['if'], data=[]]
if Id.Cls == 'Ves':
Lim = None if Out['Lim'].tolist() is None else Out['Lim']
obj = tfg.Ves(Id, Out['Poly'], Lim=Lim, Type=Id.Type, Clock=bool(Out['Clock']), arrayorder=str(Out['arrayorder']), Sino_RefPt=Out['Sino_RefPt'], Sino_NP=int(Out['Sino_NP'])) # depends on [control=['if'], data=[]]
elif Id.Cls == 'Struct':
Lim = None if Out['Lim'].tolist() is None else Out['Lim']
obj = tfg.Struct(Id, Out['Poly'], Type=Id.Type, Lim=Lim, Clock=bool(Out['Clock']), arrayorder=str(Out['arrayorder']), mobile=Out['mobile'].tolist()) # depends on [control=['if'], data=[]]
elif Id.Cls in ['Rays', 'LOS', 'LOSCam1D', 'LOSCam2D']:
(Ves, LStruct) = _tryloadVesStruct(Id, Print=Print)
dobj = {'Id': Id._todict(), 'dchans': Out['dchans'].tolist(), 'geom': Out['geom'].tolist(), 'sino': Out['sino'].tolist()}
if 'extra' in Out.keys():
dobj['extra'] = Out['extra'].tolist() # depends on [control=['if'], data=[]]
if Ves is None:
dobj['Ves'] = None # depends on [control=['if'], data=[]]
else:
dobj['Ves'] = Ves._todict()
if LStruct is None:
dobj['LStruct'] = None # depends on [control=['if'], data=[]]
else:
dobj['LStruct'] = [ss._todict() for ss in LStruct]
if Id.Cls == 'Rays':
obj = tfg.Rays(fromdict=dobj) # depends on [control=['if'], data=[]]
elif Id.Cls == 'LOSCam1D':
obj = tfg.LOSCam1D(fromdict=dobj) # depends on [control=['if'], data=[]]
elif Id.Cls == 'LOSCam2D':
obj = tfg.LOSCam2D(fromdict=dobj) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif Id.Cls in ['Data1D', 'Data2D']:
dobj = {'Id': Id._todict(), 'Ref': Out['Ref'].tolist(), 'dunits': Out['dunits'].tolist(), 'fft': Out['fft'].tolist(), 'data0': Out['data0'].tolist(), 'CamCls': Out['CamCls'].tolist()}
indt = None if Out['indt'].tolist() is None else Out['indt']
indch = None if Out['indch'].tolist() is None else Out['indch']
if Out['geom'].tolist() is None:
geom = None # depends on [control=['if'], data=[]]
elif 'Cam' in Out['geom'][0]:
LCam = [Open(ss)._todict() for ss in Out['geom']]
geom = {'LCam': LCam} # depends on [control=['if'], data=[]]
else:
Ves = Open(Out['geom'][0])._todict()
if len(Out['geom']) > 1:
LStruct = [Open(ss)._todict() for ss in Out['geom'][1:]] # depends on [control=['if'], data=[]]
else:
LStruct = None
geom = {'LCam': None, 'Ves': Ves, 'LStruct': LStruct}
dobj['indt'] = indt
dobj['indch'] = indch
dobj['geom'] = geom
if 'dMag' in Out.keys():
dMag = Out['dMag'].tolist() # depends on [control=['if'], data=[]]
else:
dMag = None
dobj['dMag'] = dMag
if Id.Cls == 'Data1D':
obj = tfd.Data1D(fromdict=dobj) # depends on [control=['if'], data=[]]
elif Id.Cls == 'Data2D':
obj = tfd.Data2D(fromdict=dobj) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
"\n elif Id.Cls == 'GLOS':\n Ves = _tryloadVes(Id)\n LLOS, IdLOS = [], Id.LObj['LOS']\n for ii in range(0,len(IdLOS['Name'])):\n Idl = _Id_recreateFromdict(Out['LIdLOS'][ii])\n ll = TFG.LOS(Idl, Du=(Out['LDs'][:,ii],Out['Lus'][:,ii]), Ves=Ves, Sino_RefPt=Out['Sino_RefPt'], arrayorder=str(Out['arrayorder']))\n LLOS.append(ll)\n obj = TFG.GLOS(Id, LLOS, Ves=Ves, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, Sino_RefPt=Out['Sino_RefPt'], SavePath=Id.SavePath, arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']),\n dtime=Id.dtime)\n\n elif Id.Cls == 'Lens':\n Ves = _tryloadVes(Id, Ves=Ves)\n obj = TFG.Lens(Id, Out['O'], Out['nIn'], Out['Rad'][0], Out['F1'][0], F2=Out['F2'][0], Type=Id.Type, R1=Out['R1'][0], R2=Out['R2'][0], dd=Out['dd'][0], Ves=Ves,\n Exp=Id.Exp, Clock=bool(Out['Clock']), Diag=Id.Diag, shot=Id.shot, arrayorder=str(Out['arrayorder']), SavePath=Id.SavePath, dtime=Id.dtime)\n\n elif Id.Cls == 'Apert':\n Ves = _tryloadVes(Id, Ves=Ves)\n obj = TFG.Apert(Id, Out['Poly'], Clock=bool(Out['Clock']), arrayorder=str(Out['arrayorder']), Ves=Ves, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime)\n\n elif Id.Cls == 'Detect':\n Ves = _tryloadVes(Id, Ves=Ves)\n if 'VesCalc'in Out.keys() and Out['VesCalc'][0]['SavePath'] is not None:\n VesCalc = Open(Out['VesCalc'][0]['SavePath']+Out['VesCalc'][0]['SaveName']+'.npz')\n else:\n VesCalc = None\n LOSprops, Sino, Span, Cone, SAng, Opt = Out['LOSprops'][0], Out['Sino'][0], Out['Span'][0], Out['Cone'][0], Out['SAng'][0], Out['Optics'][0]\n (SynthDiag,Res) = (Out['SynthDiag'][0],Out['Res'][0]) if out=='full' else _get_light_SynthDiag_Res()\n Optics = _tryLoadOpticsElseCreate(Id, Opt=Opt, Ves=Ves, Verb=Verb)\n\n Poly = Out['Poly'] if type(Optics) is list else dict(Rad=float(Out['Rad']),O=Out['BaryS'],nIn=Out['nIn'])\n obj = TFG.Detect(Id, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino['_Sino_RefPt'], CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, 
Verb=Verb,\n arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))\n obj = _resetDetectAttr(obj, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})\n obj._LOS_NP = Out['LOSNP']\n if obj._SynthDiag_Done and obj._SynthDiag_Points is None:\n obj.set_SigPrecomp()\n\n elif Id.Cls == 'GDetect':\n LDetsave = list(Out['LDetsave'])\n LDet = []\n Ves = _tryloadVes(Id, Ves=Ves)\n if out=='light':\n SynthDiag, Res = _get_light_SynthDiag_Res()\n else:\n LDetSynthRes = Out['LDetSynthRes']\n for ii in range(0,len(LDetsave)):\n ddIdsave = _Id_recreateFromdict(LDetsave[ii]['Idsave'])\n if 'VesCalc'in LDetsave[ii].keys() and LDetsave[ii]['VesCalc'][0]['SavePath'] is not None:\n VesCalc = Open(LDetsave[ii]['VesCalc'][0]['SavePath']+LDetsave[ii]['VesCalc'][0]['SaveName']+'.npz')\n else:\n VesCalc = None\n LOSprops, Sino, Span, Cone, SAng, Opt = LDetsave[ii]['LOSprops'][0], LDetsave[ii]['Sino'][0], LDetsave[ii]['Span'][0], LDetsave[ii]['Cone'][0], LDetsave[ii]['SAng'][0], LDetsave[ii]['Optics'][0]\n if out=='full':\n SynthDiag, Res = LDetSynthRes[ii]['SynthDiag'][0], LDetSynthRes[ii]['Res'][0]\n Optics = _tryLoadOpticsElseCreate(ddIdsave, Opt=Opt, Ves=Ves, Verb=Verb)\n Poly = LDetsave[ii]['Poly'] if type(Optics) is list else dict(Rad=float(LDetsave[ii]['Rad']),O=LDetsave[ii]['BaryS'],nIn=LDetsave[ii]['nIn'])\n Sino_RefPt = None if Out['Sino_RefPt'].shape==() else Out['Sino_RefPt']\n dd = TFG.Detect(ddIdsave, Poly, Optics=Optics, Ves=Ves, VesCalc=VesCalc, Sino_RefPt=Sino_RefPt, CalcEtend=False, CalcSpanImp=False, CalcCone=False, CalcPreComp=False, Calc=True, Verb=Verb,\n arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']))\n dd = _resetDetectAttr(dd, {'LOSprops':LOSprops, 'Sino':Sino, 'Span':Span, 'Cone':Cone, 'SAng':SAng, 'SynthDiag':SynthDiag, 'Res':Res, 'Optics':Opt})\n dd._LOS_NP = LDetsave[ii]['LOSNP']\n if dd._SynthDiag_Done and dd._SynthDiag_Points is None:\n dd.set_SigPrecomp()\n 
LDet.append(dd)\n obj = TFG.GDetect(Id, LDet, Type=Id.Type, Exp=Id.Exp, Diag=Id.Diag, shot=Id.shot, dtime=Id.dtime, Sino_RefPt=Out['Sino_RefPt'], LOSRef=str(Out['LOSRef']),\n arrayorder=str(Out['arrayorder']), Clock=bool(Out['Clock']), SavePath=Id.SavePath)\n Res = Out['Res'][0] if out=='full' else Res\n for kk in Res.keys():\n setattr(obj,kk,Res[kk])\n\n elif Id.Cls=='Eq2D':\n Sep = [np.array(ss) for ss in Out['Sep'].tolist()]\n obj = tfEq.Eq2D(Id, Out['PtsCross'], t=Out['t'], MagAx=Out['MagAx'], Sep=Sep, rho_p=Out['rho_p'].tolist(), rho_t=Out['rho_t'].tolist(), surf=Out['surf'].tolist(), vol=Out['vol'].tolist(),\n q=Out['q'].tolist(), jp=Out['jp'].tolist(), pf=Out['pf'].tolist(), tf=Out['tf'].tolist(), theta=Out['theta'].tolist(), thetastar=Out['thetastar'].tolist(),\n BTX=Out['BTX'].tolist(), BRY=Out['BRY'].tolist(), BZ=Out['BZ'].tolist(), Ref=str(Out['Ref']))\n\n elif Id.Cls=='Mesh1D':\n obj = TFM.Mesh1D(Id, Out['Knots'])\n\n elif Id.Cls=='Mesh2D':\n obj = TFM.Mesh2D(Id, [Out['Knots'][0],Out['Knots'][1]])\n obj = TFM.Mesh2D(Id, Knots=obj, ind=Out['IndBg'])\n for ii in range(0,len(Out['SubMinds'])):\n obj.add_SubMesh(Name=Out['SubMinds'][ii]['Name'], ind=Out['SubMinds'][ii]['ind'])\n\n elif Id.Cls=='Metric1D':\n obj = TFM.Metric1D(Id)\n\n elif Id.Cls=='Metric2D':\n obj = TFM.Metric2D(Id)\n\n\n elif Id.Cls in 'BF2D':\n IdMesh = ID(str(Out['IdMesh'][0]), str(Out['IdMesh'][1]), SaveName=str(Out['IdMesh'][2]), SavePath=str(Out['IdMesh'][3]), dtime=Out['dtimeMesh'][0], dtFormat=str(Out['IdMesh'][4]))\n M2 = TFM.Mesh2D(IdMesh, Knots=[Out['KnotsR'],Out['KnotsZ']])\n M2bis = TFM.Mesh2D(IdMesh,Knots=M2,Ind=Out['Ind'])\n obj = TFM.BF2D(Id, M2bis, int(Out['Deg'][0]))\n elif Id.Cls=='GMat2D':\n import ToFu_MatComp as TFMC\n import scipy.sparse as scpsp\n Id.set_LObj(open_np_IdObj(['Ves','BF2D','Detect'], [Out['Ves'],Out['BF2'],Out['LDetect']], [Out['VesUSR'],Out['BF2USR'],Out['LDetectUSR']]))\n Mat = scpsp.csr_matrix((Out['Matdata'], Out['Matind'], Out['Matindpr']), 
shape=Out['Matshape'])\n MatLOS = scpsp.csr_matrix((Out['MatLOSdata'], Out['MatLOSind'], Out['MatLOSindpr']), shape=Out['MatLOSshape'])\n obj = TFMC.GMat2D(Id, None, None, Mat=None, indMat=None, MatLOS=None, Calcind=False, Calc=False, CalcLOS=False)\n obj._init_CompParam(Mode=str(Out['CompParamStr'][0]), epsrel=Out['CompParamVal'][0], SubP=Out['CompParamVal'][1], SubMode=str(Out['CompParamStr'][1]), SubTheta=Out['CompParamVal'][2], SubThetaMode=str(Out['CompParamStr'][2]), Fast=bool(Out['CompParamVal'][-1]), SubPind=Out['CompParamVal'][3], ModeLOS=str(Out['CompParamStr'][3]), epsrelLOS=Out['CompParamVal'][4], SubPLOS=Out['CompParamVal'][5], SubModeLOS=str(Out['CompParamStr'][4]))\n obj._BF2 = None\n obj._BF2_Deg = int(Out['BF2Par'][0])\n obj._BF2_NCents = int(Out['BF2Par'][2])\n obj._BF2_NFunc = int(Out['BF2Par'][1])\n obj._Ves = None\n obj._LD = None\n obj._LD_nDetect = int(Out['LD_nD'])\n obj._set_indMat(indMat=Out['indMat'], Verb=False)\n obj._set_MatLOS(MatLOS=MatLOS, Verb=False)\n obj._set_Mat(Mat=Mat, Verb=False)\n\n\n\n elif Id.Cls=='PreData':\n LIdDet = Id.get_LObjasLId('Detect') if 'Detect' in Id.LObj.keys() else None\n Init, Update = Out['Init'][0], Out['Update'][0]\n obj = tft.PreData(Init['data'], Id=Id, t=Init['t'], Chans=Init['Chans'], DtRef=Init['DtRef'], LIdDet=LIdDet)\n obj.set_Dt(Update['Dt'], Calc=False)\n obj.set_Resamp(t=Update['Resamp_t'], f=Update['Resamp_f'], Method=Update['Resamp_Method'], interpkind=Update['Resamp_interpkind'], Calc=False)\n obj.Out_add(indOut=Update['indOut'], Calc=False)\n obj.Corr_add(indCorr=Update['indCorr'], Calc=False)\n obj.interp(lt=Update['interp_lt'], lNames=Update['interp_lNames'], Calc=False)\n obj.substract_Dt(tsub=Update['Subtract_tsub'], Calc=False)\n obj.set_fft(Calc=True, **Update['FFTPar'])\n if not Update['PhysNoiseParam'] is None:\n Method = 'svd' if 'Modes' in Update['PhysNoiseParam'].keys() else 'fft'\n obj.set_PhysNoise(**Update['PhysNoiseParam'].update({'Method':Method}))\n\n\n 
#Id.set_LObj(open_np_IdObj(['Detect'],[Out['LDetect']], [Out['LDetectUSR']]))\n #obj = TFT.PreData(Id=Id, shot=int(Out['shot']), DLPar=Out['DLPar'].item(), Exp=str(Out['StrPar'][0]), Dt=list(Out['Dt']), DtMargin=float(Out['DtMarg']), MovMeanfreq=float(Out['MovMeanfreq']), Resamp=bool(Out['Resamp']),\n # interpkind=str(Out['StrPar'][1]), indOut=Out['indOut'], indCorr=Out['indCorr'], lt=Out['interp_lt'], lNames=Out['interp_lN'].tolist(), Test=True)\n #if not Out['PhysNoise'].item() is None:\n # obj.set_PhysNoise(Deg=int(Out['NoiseMod'].item()['Deg']), Nbin=int(Out['NoiseMod'].item()['Nbin']), LimRatio=float(Out['NoiseMod'].item()['LimRatio']), **Out['PhysNoise'].item()['Param'])\n\n\n elif Id.Cls=='Sol2D':\n Id.set_LObj(open_np_IdObj(['PreData','GMat2D','BF2D'],[Out['PreData'], Out['GMat2D'], Out['BF2D']], [Out['PreDataUSR'],Out['GMatUSR'],Out['BF2DUSR']]))\n GMSaveName = Id.LObj['GMat2D']['SaveName'][0]\n try:\n GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')\n except Exception:\n GMSaveName = GMSaveName[:GMSaveName.index('All_')+4]+'sh'+GMSaveName[GMSaveName.index('All_')+4:]\n GMat = Open(Id.LObj['GMat2D']['SavePath'][0]+GMSaveName+'.npz')\n obj = TFI.Sol2D(Id, PreData=None, GMat=GMat, InvParam=Out['InvParam'].item(), SVesePreData=False, SVeseGMat=True, SVeseBF=True)\n obj._PreData = None\n obj._GMat = obj.GMat.get_SubGMat2D(Val=list(Out['LNames']), Crit='Name',InOut='In')\n obj._shot = int(Out['shot'])\n try:\n obj._LNames = Out['LNames'].tolist()\n except Exception:\n obj._LNames = obj.PreData.In_list()\n obj._run = bool(Out['Run'])\n if bool(Out['Run']):\n obj._LOS = bool(Out['LOS'])\n obj._t, obj._data = Out['t'], Out['data']\n obj._Coefs, obj._sigma = Out['Coefs'], Out['sigma']\n obj._Mu, obj._Chi2N, obj._R, obj._Nit = Out['Mu'], Out['Chi2N'], Out['R'], Out['Nit']\n obj._Spec = list(Out['Spec'])\n obj._timing = Out['t2']\n obj._PostTreat = list(Out['PostTreat'])\n "
return obj |
def add_query_to_url(url, extra_query):
    '''Adds an extra query to URL, returning the new URL.
    Extra query may be a dict or a list as returned by
    :func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
    '''
    parts = urllib.parse.urlsplit(url)
    # Start from the query pairs already present on the URL.
    pairs = urllib.parse.parse_qsl(parts.query)
    if isinstance(extra_query, dict):
        for key, value in extra_query.items():
            # A scalar contributes one pair; a list/tuple contributes one
            # pair per element (parse_qs-style multi-valued keys).
            values = value if isinstance(value, (tuple, list)) else [value]
            pairs.extend((key, item) for item in values)
    else:
        # Already a sequence of (key, value) pairs.
        pairs.extend(extra_query)
    rebuilt = urllib.parse.SplitResult(
        parts.scheme,
        parts.netloc,
        parts.path,
        urllib.parse.urlencode(pairs),
        parts.fragment,
    )
    return rebuilt.geturl()
constant[Adds an extra query to URL, returning the new URL.
Extra query may be a dict or a list as returned by
:func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
]
variable[split] assign[=] call[name[urllib].parse.urlsplit, parameter[name[url]]]
variable[merged_query] assign[=] call[name[urllib].parse.parse_qsl, parameter[name[split].query]]
if call[name[isinstance], parameter[name[extra_query], name[dict]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b16bde40>, <ast.Name object at 0x7da1b16bd240>]]] in starred[call[name[extra_query].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b16be6b0> begin[:]
call[name[merged_query].append, parameter[tuple[[<ast.Name object at 0x7da1b16bdab0>, <ast.Name object at 0x7da1b16bd690>]]]]
variable[merged_split] assign[=] call[name[urllib].parse.SplitResult, parameter[name[split].scheme, name[split].netloc, name[split].path, call[name[urllib].parse.urlencode, parameter[name[merged_query]]], name[split].fragment]]
return[call[name[merged_split].geturl, parameter[]]] | keyword[def] identifier[add_query_to_url] ( identifier[url] , identifier[extra_query] ):
literal[string]
identifier[split] = identifier[urllib] . identifier[parse] . identifier[urlsplit] ( identifier[url] )
identifier[merged_query] = identifier[urllib] . identifier[parse] . identifier[parse_qsl] ( identifier[split] . identifier[query] )
keyword[if] identifier[isinstance] ( identifier[extra_query] , identifier[dict] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[extra_query] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] ,( identifier[tuple] , identifier[list] )):
identifier[merged_query] . identifier[append] (( identifier[k] , identifier[v] ))
keyword[else] :
keyword[for] identifier[cv] keyword[in] identifier[v] :
identifier[merged_query] . identifier[append] (( identifier[k] , identifier[cv] ))
keyword[else] :
identifier[merged_query] . identifier[extend] ( identifier[extra_query] )
identifier[merged_split] = identifier[urllib] . identifier[parse] . identifier[SplitResult] (
identifier[split] . identifier[scheme] ,
identifier[split] . identifier[netloc] ,
identifier[split] . identifier[path] ,
identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[merged_query] ),
identifier[split] . identifier[fragment] ,
)
keyword[return] identifier[merged_split] . identifier[geturl] () | def add_query_to_url(url, extra_query):
"""Adds an extra query to URL, returning the new URL.
Extra query may be a dict or a list as returned by
:func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
"""
split = urllib.parse.urlsplit(url)
merged_query = urllib.parse.parse_qsl(split.query)
if isinstance(extra_query, dict):
for (k, v) in extra_query.items():
if not isinstance(v, (tuple, list)):
merged_query.append((k, v)) # depends on [control=['if'], data=[]]
else:
for cv in v:
merged_query.append((k, cv)) # depends on [control=['for'], data=['cv']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
merged_query.extend(extra_query)
merged_split = urllib.parse.SplitResult(split.scheme, split.netloc, split.path, urllib.parse.urlencode(merged_query), split.fragment)
return merged_split.geturl() |
def success_response(field=None, data=None, request_type=""):
    """Return a generic success response."""
    # Minimal success envelope; the optional field carries the payload.
    payload = {"status": "success"}
    if field:
        payload[field] = data
    print("{} request successful.".format(request_type))
    body = dumps(payload, default=date_handler)
    return Response(body, status=200, mimetype='application/json')
constant[Return a generic success response.]
variable[data_out] assign[=] dictionary[[], []]
call[name[data_out]][constant[status]] assign[=] constant[success]
if name[field] begin[:]
call[name[data_out]][name[field]] assign[=] name[data]
call[name[print], parameter[call[constant[{} request successful.].format, parameter[name[request_type]]]]]
variable[js] assign[=] call[name[dumps], parameter[name[data_out]]]
return[call[name[Response], parameter[name[js]]]] | keyword[def] identifier[success_response] ( identifier[field] = keyword[None] , identifier[data] = keyword[None] , identifier[request_type] = literal[string] ):
literal[string]
identifier[data_out] ={}
identifier[data_out] [ literal[string] ]= literal[string]
keyword[if] identifier[field] :
identifier[data_out] [ identifier[field] ]= identifier[data]
identifier[print] ( literal[string] . identifier[format] ( identifier[request_type] ))
identifier[js] = identifier[dumps] ( identifier[data_out] , identifier[default] = identifier[date_handler] )
keyword[return] identifier[Response] ( identifier[js] , identifier[status] = literal[int] , identifier[mimetype] = literal[string] ) | def success_response(field=None, data=None, request_type=''):
"""Return a generic success response."""
data_out = {}
data_out['status'] = 'success'
if field:
data_out[field] = data # depends on [control=['if'], data=[]]
print('{} request successful.'.format(request_type))
js = dumps(data_out, default=date_handler)
return Response(js, status=200, mimetype='application/json') |
def add_paragraph(self, text='', style=None):
    """
    Append a new paragraph to this container and return it.
    When *text* is non-empty it becomes a single run in the paragraph.
    When *style* is |None| no paragraph style is applied, which is
    equivalent to applying the 'Normal' style.
    """
    new_para = self._add_paragraph()
    if text:
        new_para.add_run(text)
    if style is not None:
        new_para.style = style
    return new_para
constant[
Return a paragraph newly added to the end of the content in this
container, having *text* in a single run if present, and having
paragraph style *style*. If *style* is |None|, no paragraph style is
applied, which has the same effect as applying the 'Normal' style.
]
variable[paragraph] assign[=] call[name[self]._add_paragraph, parameter[]]
if name[text] begin[:]
call[name[paragraph].add_run, parameter[name[text]]]
if compare[name[style] is_not constant[None]] begin[:]
name[paragraph].style assign[=] name[style]
return[name[paragraph]] | keyword[def] identifier[add_paragraph] ( identifier[self] , identifier[text] = literal[string] , identifier[style] = keyword[None] ):
literal[string]
identifier[paragraph] = identifier[self] . identifier[_add_paragraph] ()
keyword[if] identifier[text] :
identifier[paragraph] . identifier[add_run] ( identifier[text] )
keyword[if] identifier[style] keyword[is] keyword[not] keyword[None] :
identifier[paragraph] . identifier[style] = identifier[style]
keyword[return] identifier[paragraph] | def add_paragraph(self, text='', style=None):
"""
Return a paragraph newly added to the end of the content in this
container, having *text* in a single run if present, and having
paragraph style *style*. If *style* is |None|, no paragraph style is
applied, which has the same effect as applying the 'Normal' style.
"""
paragraph = self._add_paragraph()
if text:
paragraph.add_run(text) # depends on [control=['if'], data=[]]
if style is not None:
paragraph.style = style # depends on [control=['if'], data=['style']]
return paragraph |
def log(self, msg):
    """ Log Normal Messages """
    # Actions observe the raw message; filters may rewrite it before it
    # is processed and delivered.
    self._execActions('log', msg)
    filtered = self._execFilters('log', msg)
    self._processMsg('log', filtered)
    self._sendMsg('log', filtered)
constant[ Log Normal Messages ]
call[name[self]._execActions, parameter[constant[log], name[msg]]]
variable[msg] assign[=] call[name[self]._execFilters, parameter[constant[log], name[msg]]]
call[name[self]._processMsg, parameter[constant[log], name[msg]]]
call[name[self]._sendMsg, parameter[constant[log], name[msg]]] | keyword[def] identifier[log] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[_execActions] ( literal[string] , identifier[msg] )
identifier[msg] = identifier[self] . identifier[_execFilters] ( literal[string] , identifier[msg] )
identifier[self] . identifier[_processMsg] ( literal[string] , identifier[msg] )
identifier[self] . identifier[_sendMsg] ( literal[string] , identifier[msg] ) | def log(self, msg):
""" Log Normal Messages """
self._execActions('log', msg)
msg = self._execFilters('log', msg)
self._processMsg('log', msg)
self._sendMsg('log', msg) |
def _read_frame(self):
"""Read and return the next time frame"""
# Read one frame, we assume that the current file position is at the
# line 'ITEM: TIMESTEP' and that this line marks the beginning of a
# time frame.
line = next(self._f)
if line != 'ITEM: TIMESTEP\n':
raise FileFormatError("Expecting line 'ITEM: TIMESTEP' at the beginning of a time frame.")
try:
line = next(self._f)
step = int(line)
except ValueError:
raise FileFormatError("Could not read the step number. Expected an integer. Got '%s'" % line[:-1])
# Now we assume that the next section contains (again) the number of
# atoms.
line = next(self._f)
if line != 'ITEM: NUMBER OF ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: NUMBER OF ATOMS'.")
try:
line = next(self._f)
num_atoms = int(line)
except ValueError:
raise FileFormatError("Could not read the number of atoms. Expected an integer. Got '%s'" % line[:-1])
if num_atoms != self.num_atoms:
raise FileFormatError("A variable number of atoms is not supported.")
# The next section contains the box boundaries. We will skip it
for i in range(4):
next(self._f)
# The next and last section contains the atom related properties
line = next(self._f)
if line != 'ITEM: ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: ATOMS'.")
fields = [list() for i in range(len(self.units))]
for i in range(self.num_atoms):
line = next(self._f)
words = line.split()[1:]
for j in range(len(fields)):
fields[j].append(float(words[j]))
fields = [step] + [np.array(field)*unit for field, unit in zip(fields, self.units)]
return fields | def function[_read_frame, parameter[self]]:
constant[Read and return the next time frame]
variable[line] assign[=] call[name[next], parameter[name[self]._f]]
if compare[name[line] not_equal[!=] constant[ITEM: TIMESTEP
]] begin[:]
<ast.Raise object at 0x7da20c6e5b40>
<ast.Try object at 0x7da20c6e5c90>
variable[line] assign[=] call[name[next], parameter[name[self]._f]]
if compare[name[line] not_equal[!=] constant[ITEM: NUMBER OF ATOMS
]] begin[:]
<ast.Raise object at 0x7da20c6e72e0>
<ast.Try object at 0x7da20c6e4fd0>
if compare[name[num_atoms] not_equal[!=] name[self].num_atoms] begin[:]
<ast.Raise object at 0x7da20c6e79d0>
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
call[name[next], parameter[name[self]._f]]
variable[line] assign[=] call[name[next], parameter[name[self]._f]]
if compare[name[line] not_equal[!=] constant[ITEM: ATOMS
]] begin[:]
<ast.Raise object at 0x7da20c6e59c0>
variable[fields] assign[=] <ast.ListComp object at 0x7da20c6e6410>
for taget[name[i]] in starred[call[name[range], parameter[name[self].num_atoms]]] begin[:]
variable[line] assign[=] call[name[next], parameter[name[self]._f]]
variable[words] assign[=] call[call[name[line].split, parameter[]]][<ast.Slice object at 0x7da20c6e6c80>]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[fields]]]]]] begin[:]
call[call[name[fields]][name[j]].append, parameter[call[name[float], parameter[call[name[words]][name[j]]]]]]
variable[fields] assign[=] binary_operation[list[[<ast.Name object at 0x7da20c6e5210>]] + <ast.ListComp object at 0x7da20c6e7f40>]
return[name[fields]] | keyword[def] identifier[_read_frame] ( identifier[self] ):
literal[string]
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
keyword[if] identifier[line] != literal[string] :
keyword[raise] identifier[FileFormatError] ( literal[string] )
keyword[try] :
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
identifier[step] = identifier[int] ( identifier[line] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[FileFormatError] ( literal[string] % identifier[line] [:- literal[int] ])
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
keyword[if] identifier[line] != literal[string] :
keyword[raise] identifier[FileFormatError] ( literal[string] )
keyword[try] :
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
identifier[num_atoms] = identifier[int] ( identifier[line] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[FileFormatError] ( literal[string] % identifier[line] [:- literal[int] ])
keyword[if] identifier[num_atoms] != identifier[self] . identifier[num_atoms] :
keyword[raise] identifier[FileFormatError] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[next] ( identifier[self] . identifier[_f] )
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
keyword[if] identifier[line] != literal[string] :
keyword[raise] identifier[FileFormatError] ( literal[string] )
identifier[fields] =[ identifier[list] () keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[units] ))]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[num_atoms] ):
identifier[line] = identifier[next] ( identifier[self] . identifier[_f] )
identifier[words] = identifier[line] . identifier[split] ()[ literal[int] :]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[fields] )):
identifier[fields] [ identifier[j] ]. identifier[append] ( identifier[float] ( identifier[words] [ identifier[j] ]))
identifier[fields] =[ identifier[step] ]+[ identifier[np] . identifier[array] ( identifier[field] )* identifier[unit] keyword[for] identifier[field] , identifier[unit] keyword[in] identifier[zip] ( identifier[fields] , identifier[self] . identifier[units] )]
keyword[return] identifier[fields] | def _read_frame(self):
"""Read and return the next time frame"""
# Read one frame, we assume that the current file position is at the
# line 'ITEM: TIMESTEP' and that this line marks the beginning of a
# time frame.
line = next(self._f)
if line != 'ITEM: TIMESTEP\n':
raise FileFormatError("Expecting line 'ITEM: TIMESTEP' at the beginning of a time frame.") # depends on [control=['if'], data=[]]
try:
line = next(self._f)
step = int(line) # depends on [control=['try'], data=[]]
except ValueError:
raise FileFormatError("Could not read the step number. Expected an integer. Got '%s'" % line[:-1]) # depends on [control=['except'], data=[]]
# Now we assume that the next section contains (again) the number of
# atoms.
line = next(self._f)
if line != 'ITEM: NUMBER OF ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: NUMBER OF ATOMS'.") # depends on [control=['if'], data=[]]
try:
line = next(self._f)
num_atoms = int(line) # depends on [control=['try'], data=[]]
except ValueError:
raise FileFormatError("Could not read the number of atoms. Expected an integer. Got '%s'" % line[:-1]) # depends on [control=['except'], data=[]]
if num_atoms != self.num_atoms:
raise FileFormatError('A variable number of atoms is not supported.') # depends on [control=['if'], data=[]]
# The next section contains the box boundaries. We will skip it
for i in range(4):
next(self._f) # depends on [control=['for'], data=[]]
# The next and last section contains the atom related properties
line = next(self._f)
if line != 'ITEM: ATOMS\n':
raise FileFormatError("Expecting line 'ITEM: ATOMS'.") # depends on [control=['if'], data=[]]
fields = [list() for i in range(len(self.units))]
for i in range(self.num_atoms):
line = next(self._f)
words = line.split()[1:]
for j in range(len(fields)):
fields[j].append(float(words[j])) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=[]]
fields = [step] + [np.array(field) * unit for (field, unit) in zip(fields, self.units)]
return fields |
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a NTFS $UsnJrnl metadata file-like object.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.
    """
    volume = pyfsntfs.volume()
    try:
        volume.open_file_object(file_object)
    except IOError as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to open NTFS volume with error: {0!s}'.format(exception))
        # Without an opened volume the change journal cannot be read;
        # continuing would raise an uncaught IOError from pyfsntfs.
        return
    try:
        usn_change_journal = volume.get_usn_change_journal()
        self._ParseUSNChangeJournal(parser_mediator, usn_change_journal)
    finally:
        # Always release the volume handle, even when parsing fails.
        volume.close()
constant[Parses a NTFS $UsnJrnl metadata file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
]
variable[volume] assign[=] call[name[pyfsntfs].volume, parameter[]]
<ast.Try object at 0x7da1b26ae770>
<ast.Try object at 0x7da1b26acd60> | keyword[def] identifier[ParseFileObject] ( identifier[self] , identifier[parser_mediator] , identifier[file_object] ):
literal[string]
identifier[volume] = identifier[pyfsntfs] . identifier[volume] ()
keyword[try] :
identifier[volume] . identifier[open_file_object] ( identifier[file_object] )
keyword[except] identifier[IOError] keyword[as] identifier[exception] :
identifier[parser_mediator] . identifier[ProduceExtractionWarning] (
literal[string] . identifier[format] ( identifier[exception] ))
keyword[try] :
identifier[usn_change_journal] = identifier[volume] . identifier[get_usn_change_journal] ()
identifier[self] . identifier[_ParseUSNChangeJournal] ( identifier[parser_mediator] , identifier[usn_change_journal] )
keyword[finally] :
identifier[volume] . identifier[close] () | def ParseFileObject(self, parser_mediator, file_object):
"""Parses a NTFS $UsnJrnl metadata file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
volume = pyfsntfs.volume()
try:
volume.open_file_object(file_object) # depends on [control=['try'], data=[]]
except IOError as exception:
parser_mediator.ProduceExtractionWarning('unable to open NTFS volume with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
try:
usn_change_journal = volume.get_usn_change_journal()
self._ParseUSNChangeJournal(parser_mediator, usn_change_journal) # depends on [control=['try'], data=[]]
finally:
volume.close() |
def month(abbr=False, numerical=False):
    """Return a random (abbreviated if `abbr`) month name or month number if
    `numerical`.
    """
    if numerical:
        return random.randint(1, 12)
    # Pick from the abbreviated names when requested, full names otherwise.
    pool = MONTHS_ABBR if abbr else MONTHS
    return random.choice(pool)
constant[Return a random (abbreviated if `abbr`) month name or month number if
`numerical`.
]
if name[numerical] begin[:]
return[call[name[random].randint, parameter[constant[1], constant[12]]]] | keyword[def] identifier[month] ( identifier[abbr] = keyword[False] , identifier[numerical] = keyword[False] ):
literal[string]
keyword[if] identifier[numerical] :
keyword[return] identifier[random] . identifier[randint] ( literal[int] , literal[int] )
keyword[else] :
keyword[if] identifier[abbr] :
keyword[return] identifier[random] . identifier[choice] ( identifier[MONTHS_ABBR] )
keyword[else] :
keyword[return] identifier[random] . identifier[choice] ( identifier[MONTHS] ) | def month(abbr=False, numerical=False):
"""Return a random (abbreviated if `abbr`) month name or month number if
`numerical`.
"""
if numerical:
return random.randint(1, 12) # depends on [control=['if'], data=[]]
elif abbr:
return random.choice(MONTHS_ABBR) # depends on [control=['if'], data=[]]
else:
return random.choice(MONTHS) |
def unify(self):
    """Unifies the vector. The length of the vector will be 1.
    :return: Return the instance itself
    :rtype: Vector
    """
    # Divide every component by the current norm so the result has norm 1.
    scale = float(self.norm())
    for idx in xrange(self.get_height()):
        self.set_value(0, idx, self.get_value(0, idx) / scale)
    return self
constant[Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector
]
variable[length] assign[=] call[name[float], parameter[call[name[self].norm, parameter[]]]]
for taget[name[row]] in starred[call[name[xrange], parameter[call[name[self].get_height, parameter[]]]]] begin[:]
call[name[self].set_value, parameter[constant[0], name[row], binary_operation[call[name[self].get_value, parameter[constant[0], name[row]]] / name[length]]]]
return[name[self]] | keyword[def] identifier[unify] ( identifier[self] ):
literal[string]
identifier[length] = identifier[float] ( identifier[self] . identifier[norm] ())
keyword[for] identifier[row] keyword[in] identifier[xrange] ( identifier[self] . identifier[get_height] ()):
identifier[self] . identifier[set_value] ( literal[int] , identifier[row] , identifier[self] . identifier[get_value] ( literal[int] , identifier[row] )/ identifier[length] )
keyword[return] identifier[self] | def unify(self):
"""Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector
"""
length = float(self.norm())
for row in xrange(self.get_height()):
self.set_value(0, row, self.get_value(0, row) / length) # depends on [control=['for'], data=['row']]
return self |
def smooth_ot_semi_dual(a, b, M, reg, reg_type='l2', method="L-BFGS-B", stopThr=1e-9,
                        numItermax=500, verbose=False, log=False):
    r"""
    Solve the regularized OT problem in the semi-dual and return the OT matrix
    The function solves the smooth relaxed dual formulation (10) in [17]_ :
    .. math::
        \max_{\alpha}\quad a^T\alpha-OT_\Omega^*(\alpha,b)
    where :
    .. math::
        OT_\Omega^*(\alpha,b)=\sum_j b_j
    - :math:`\mathbf{m}_j` is the jth column of the cost matrix
    - :math:`OT_\Omega^*(\alpha,b)` is defined in Eq. (9) in [17]
    - a and b are source and target weights (sum to 1)
    The OT matrix can is reconstructed using [17]_ Proposition 2.
    The optimization algorithm is using gradient decent (L-BFGS by default).
    Parameters
    ----------
    a : np.ndarray (ns,)
        samples weights in the source domain
    b : np.ndarray (nt,) or np.ndarray (nt,nbb)
        samples in the target domain, compute sinkhorn with multiple targets
        and fixed M if b is a matrix (return OT loss + dual variables in log)
    M : np.ndarray (ns,nt)
        loss matrix
    reg : float
        Regularization term >0
    reg_type : str
        Regularization type,  can be the following (default ='l2'):
        - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_)
        - 'l2' : Squared Euclidean regularization
    method : str
        Solver to use for scipy.optimize.minimize
    numItermax : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshol on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    Returns
    -------
    gamma : (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters
    References
    ----------
    .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
    .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
    See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.sinhorn : Entropic regularized OT
    ot.optim.cg : General regularized OT
    """
    # Map the regularization name onto its regularizer object.
    if reg_type.lower() in ['l2', 'squaredl2']:
        regul = SquaredL2(gamma=reg)
    elif reg_type.lower() in ['entropic', 'negentropy', 'kl']:
        regul = NegEntropy(gamma=reg)
    else:
        raise NotImplementedError('Unknown regularization')

    # solve dual; forward `method` so the documented solver choice is
    # actually honored by scipy.optimize.minimize (it was silently ignored
    # before).
    alpha, res = solve_semi_dual(a, b, M, regul, method=method,
                                 max_iter=numItermax, tol=stopThr,
                                 verbose=verbose)

    # reconstruct transport matrix from the semi-dual variable
    G = get_plan_from_semi_dual(alpha, b, M, regul)

    if log:
        log = {'alpha': alpha, 'res': res}
        return G, log
    else:
        return G
constant[
Solve the regularized OT problem in the semi-dual and return the OT matrix
The function solves the smooth relaxed dual formulation (10) in [17]_ :
.. math::
\max_{\alpha}\quad a^T\alpha-OT_\Omega^*(\alpha,b)
where :
.. math::
OT_\Omega^*(\alpha,b)=\sum_j b_j
- :math:`\mathbf{m}_j` is the jth column of the cost matrix
- :math:`OT_\Omega^*(\alpha,b)` is defined in Eq. (9) in [17]
- a and b are source and target weights (sum to 1)
The OT matrix can is reconstructed using [17]_ Proposition 2.
The optimization algorithm is using gradient decent (L-BFGS by default).
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,) or np.ndarray (nt,nbb)
samples in the target domain, compute sinkhorn with multiple targets
and fixed M if b is a matrix (return OT loss + dual variables in log)
M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term >0
reg_type : str
Regularization type, can be the following (default ='l2'):
- 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_)
- 'l2' : Squared Euclidean regularization
method : str
Solver to use for scipy.optimize.minimize
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
.. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
See Also
--------
ot.lp.emd : Unregularized OT
ot.sinhorn : Entropic regularized OT
ot.optim.cg : General regularized OT
]
if compare[call[name[reg_type].lower, parameter[]] in list[[<ast.Constant object at 0x7da1b1638f70>, <ast.Constant object at 0x7da1b1638c10>]]] begin[:]
variable[regul] assign[=] call[name[SquaredL2], parameter[]]
<ast.Tuple object at 0x7da1b16392d0> assign[=] call[name[solve_semi_dual], parameter[name[a], name[b], name[M], name[regul]]]
variable[G] assign[=] call[name[get_plan_from_semi_dual], parameter[name[alpha], name[b], name[M], name[regul]]]
if name[log] begin[:]
variable[log] assign[=] dictionary[[<ast.Constant object at 0x7da1b18dd690>, <ast.Constant object at 0x7da1b18dd0c0>], [<ast.Name object at 0x7da1b18dfcd0>, <ast.Name object at 0x7da1b18deb90>]]
return[tuple[[<ast.Name object at 0x7da1b18df9d0>, <ast.Name object at 0x7da1b18dd420>]]] | keyword[def] identifier[smooth_ot_semi_dual] ( identifier[a] , identifier[b] , identifier[M] , identifier[reg] , identifier[reg_type] = literal[string] , identifier[method] = literal[string] , identifier[stopThr] = literal[int] ,
identifier[numItermax] = literal[int] , identifier[verbose] = keyword[False] , identifier[log] = keyword[False] ):
literal[string]
keyword[if] identifier[reg_type] . identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[regul] = identifier[SquaredL2] ( identifier[gamma] = identifier[reg] )
keyword[elif] identifier[reg_type] . identifier[lower] () keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[regul] = identifier[NegEntropy] ( identifier[gamma] = identifier[reg] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[alpha] , identifier[res] = identifier[solve_semi_dual] ( identifier[a] , identifier[b] , identifier[M] , identifier[regul] , identifier[max_iter] = identifier[numItermax] ,
identifier[tol] = identifier[stopThr] , identifier[verbose] = identifier[verbose] )
identifier[G] = identifier[get_plan_from_semi_dual] ( identifier[alpha] , identifier[b] , identifier[M] , identifier[regul] )
keyword[if] identifier[log] :
identifier[log] ={ literal[string] : identifier[alpha] , literal[string] : identifier[res] }
keyword[return] identifier[G] , identifier[log]
keyword[else] :
keyword[return] identifier[G] | def smooth_ot_semi_dual(a, b, M, reg, reg_type='l2', method='L-BFGS-B', stopThr=1e-09, numItermax=500, verbose=False, log=False):
"""
Solve the regularized OT problem in the semi-dual and return the OT matrix
The function solves the smooth relaxed dual formulation (10) in [17]_ :
.. math::
\\max_{\\alpha}\\quad a^T\\alpha-OT_\\Omega^*(\\alpha,b)
where :
.. math::
OT_\\Omega^*(\\alpha,b)=\\sum_j b_j
- :math:`\\mathbf{m}_j` is the jth column of the cost matrix
- :math:`OT_\\Omega^*(\\alpha,b)` is defined in Eq. (9) in [17]
- a and b are source and target weights (sum to 1)
The OT matrix can is reconstructed using [17]_ Proposition 2.
The optimization algorithm is using gradient decent (L-BFGS by default).
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,) or np.ndarray (nt,nbb)
samples in the target domain, compute sinkhorn with multiple targets
and fixed M if b is a matrix (return OT loss + dual variables in log)
M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term >0
reg_type : str
Regularization type, can be the following (default ='l2'):
- 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_)
- 'l2' : Squared Euclidean regularization
method : str
Solver to use for scipy.optimize.minimize
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
.. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
See Also
--------
ot.lp.emd : Unregularized OT
ot.sinhorn : Entropic regularized OT
ot.optim.cg : General regularized OT
"""
if reg_type.lower() in ['l2', 'squaredl2']:
regul = SquaredL2(gamma=reg) # depends on [control=['if'], data=[]]
elif reg_type.lower() in ['entropic', 'negentropy', 'kl']:
regul = NegEntropy(gamma=reg) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Unknown regularization')
# solve dual
(alpha, res) = solve_semi_dual(a, b, M, regul, max_iter=numItermax, tol=stopThr, verbose=verbose)
# reconstruct transport matrix
G = get_plan_from_semi_dual(alpha, b, M, regul)
if log:
log = {'alpha': alpha, 'res': res}
return (G, log) # depends on [control=['if'], data=[]]
else:
return G |
def get_assets(self):
    """Gets the asset list resulting from a search.
    return: (osid.repository.AssetList) - the asset list
    raise: IllegalState - the list has already been retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    # The result list may be consumed only once per search object.
    already_fetched = self.retrieved
    if already_fetched:
        raise errors.IllegalState('List has already been retrieved.')
    # Mark as consumed before handing the results back.
    self.retrieved = True
    return objects.AssetList(self._results, runtime=self._runtime)
constant[Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
]
if name[self].retrieved begin[:]
<ast.Raise object at 0x7da20c7c8e80>
name[self].retrieved assign[=] constant[True]
return[call[name[objects].AssetList, parameter[name[self]._results]]] | keyword[def] identifier[get_assets] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[retrieved] :
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
identifier[self] . identifier[retrieved] = keyword[True]
keyword[return] identifier[objects] . identifier[AssetList] ( identifier[self] . identifier[_results] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_assets(self):
"""Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.') # depends on [control=['if'], data=[]]
self.retrieved = True
return objects.AssetList(self._results, runtime=self._runtime) |
def _verify_query(self, query_params):
    """Verify response from the Uber Auth server.
    Parameters
        query_params (dict)
            Dictionary of query parameters attached to your redirect URL
            after user approved your app and was redirected.
    Returns
        authorization_code (str)
            Code received when user grants your app access. Use this code
            to request an access token.
    Raises
        UberIllegalState (ApiError)
            Thrown if the redirect URL was missing parameters or if the
            given parameters were not valid.
    """
    if self.state_token is not False:
        # Guard against CSRF: the 'state' echoed back by the redirect
        # must match the token generated for the authorization URL.
        returned_token = query_params.get('state')
        if returned_token is None:
            raise UberIllegalState('Bad Request. Missing state parameter.')
        if self.state_token != returned_token:
            message = 'CSRF Error. Expected {}, got {}'.format(
                self.state_token,
                returned_token,
            )
            raise UberIllegalState(message)
    # Exactly one of 'code' / 'error' must be present in the redirect.
    error = query_params.get('error')
    authorization_code = query_params.get(auth.CODE_RESPONSE_TYPE)
    if error and authorization_code:
        raise UberIllegalState(
            'Code and Error query params code and error '
            'can not both be set.'
        )
    if error is None and authorization_code is None:
        raise UberIllegalState('Neither query parameter code or error is set.')
    if error:
        raise UberIllegalState(error)
    return authorization_code
constant[Verify response from the Uber Auth server.
Parameters
query_params (dict)
Dictionary of query parameters attached to your redirect URL
after user approved your app and was redirected.
Returns
authorization_code (str)
Code received when user grants your app access. Use this code
to request an access token.
Raises
UberIllegalState (ApiError)
Thrown if the redirect URL was missing parameters or if the
given parameters were not valid.
]
variable[error_message] assign[=] constant[None]
if compare[name[self].state_token is_not constant[False]] begin[:]
variable[received_state_token] assign[=] call[name[query_params].get, parameter[constant[state]]]
if compare[name[received_state_token] is constant[None]] begin[:]
variable[error_message] assign[=] constant[Bad Request. Missing state parameter.]
<ast.Raise object at 0x7da1b1006b60>
if compare[name[self].state_token not_equal[!=] name[received_state_token]] begin[:]
variable[error_message] assign[=] constant[CSRF Error. Expected {}, got {}]
variable[error_message] assign[=] call[name[error_message].format, parameter[name[self].state_token, name[received_state_token]]]
<ast.Raise object at 0x7da1b1005cc0>
variable[error] assign[=] call[name[query_params].get, parameter[constant[error]]]
variable[authorization_code] assign[=] call[name[query_params].get, parameter[name[auth].CODE_RESPONSE_TYPE]]
if <ast.BoolOp object at 0x7da1b1005e70> begin[:]
variable[error_message] assign[=] constant[Code and Error query params code and error can not both be set.]
<ast.Raise object at 0x7da1b10059c0>
if <ast.BoolOp object at 0x7da1b1004d60> begin[:]
variable[error_message] assign[=] constant[Neither query parameter code or error is set.]
<ast.Raise object at 0x7da1b1288ca0>
if name[error] begin[:]
<ast.Raise object at 0x7da1b1288ac0>
return[name[authorization_code]] | keyword[def] identifier[_verify_query] ( identifier[self] , identifier[query_params] ):
literal[string]
identifier[error_message] = keyword[None]
keyword[if] identifier[self] . identifier[state_token] keyword[is] keyword[not] keyword[False] :
identifier[received_state_token] = identifier[query_params] . identifier[get] ( literal[string] )
keyword[if] identifier[received_state_token] keyword[is] keyword[None] :
identifier[error_message] = literal[string]
keyword[raise] identifier[UberIllegalState] ( identifier[error_message] )
keyword[if] identifier[self] . identifier[state_token] != identifier[received_state_token] :
identifier[error_message] = literal[string]
identifier[error_message] = identifier[error_message] . identifier[format] (
identifier[self] . identifier[state_token] ,
identifier[received_state_token] ,
)
keyword[raise] identifier[UberIllegalState] ( identifier[error_message] )
identifier[error] = identifier[query_params] . identifier[get] ( literal[string] )
identifier[authorization_code] = identifier[query_params] . identifier[get] ( identifier[auth] . identifier[CODE_RESPONSE_TYPE] )
keyword[if] identifier[error] keyword[and] identifier[authorization_code] :
identifier[error_message] =(
literal[string]
literal[string]
)
keyword[raise] identifier[UberIllegalState] ( identifier[error_message] )
keyword[if] identifier[error] keyword[is] keyword[None] keyword[and] identifier[authorization_code] keyword[is] keyword[None] :
identifier[error_message] = literal[string]
keyword[raise] identifier[UberIllegalState] ( identifier[error_message] )
keyword[if] identifier[error] :
keyword[raise] identifier[UberIllegalState] ( identifier[error] )
keyword[return] identifier[authorization_code] | def _verify_query(self, query_params):
"""Verify response from the Uber Auth server.
Parameters
query_params (dict)
Dictionary of query parameters attached to your redirect URL
after user approved your app and was redirected.
Returns
authorization_code (str)
Code received when user grants your app access. Use this code
to request an access token.
Raises
UberIllegalState (ApiError)
Thrown if the redirect URL was missing parameters or if the
given parameters were not valid.
"""
error_message = None
if self.state_token is not False:
# Check CSRF State Token against state token from GET request
received_state_token = query_params.get('state')
if received_state_token is None:
error_message = 'Bad Request. Missing state parameter.'
raise UberIllegalState(error_message) # depends on [control=['if'], data=[]]
if self.state_token != received_state_token:
error_message = 'CSRF Error. Expected {}, got {}'
error_message = error_message.format(self.state_token, received_state_token)
raise UberIllegalState(error_message) # depends on [control=['if'], data=['received_state_token']] # depends on [control=['if'], data=[]]
# Verify either 'code' or 'error' parameter exists
error = query_params.get('error')
authorization_code = query_params.get(auth.CODE_RESPONSE_TYPE)
if error and authorization_code:
error_message = 'Code and Error query params code and error can not both be set.'
raise UberIllegalState(error_message) # depends on [control=['if'], data=[]]
if error is None and authorization_code is None:
error_message = 'Neither query parameter code or error is set.'
raise UberIllegalState(error_message) # depends on [control=['if'], data=[]]
if error:
raise UberIllegalState(error) # depends on [control=['if'], data=[]]
return authorization_code |
def read_description():
    """Read README.md and CHANGELOG.md and return them as one string.

    Returns the concatenated contents of README.md followed by
    CHANGELOG.md (each preceded by a newline). If either file cannot be
    read -- e.g. when installing from an sdist that does not ship them --
    a built-in fallback description is returned instead.
    """
    try:
        # Read as UTF-8 explicitly so the result does not depend on the
        # platform's default locale encoding (e.g. cp1252 on Windows).
        with open("README.md", encoding="utf-8") as r:
            description = "\n"
            description += r.read()
        with open("CHANGELOG.md", encoding="utf-8") as c:
            description += "\n"
            description += c.read()
        return description
    except Exception:
        # Deliberate broad catch: packaging must never fail just because
        # the description files are missing or unreadable.
        return '''
    PyCM is a multi-class confusion matrix library written in Python that
    supports both input data vectors and direct matrix, and a proper tool for
    post-classification model evaluation that supports most classes and overall
    statistics parameters.
    PyCM is the swiss-army knife of confusion matrices, targeted mainly at
    data scientists that need a broad array of metrics for predictive models
    and an accurate evaluation of large variety of classifiers.'''
constant[Read README.md and CHANGELOG.md.]
<ast.Try object at 0x7da1b2345450> | keyword[def] identifier[read_description] ():
literal[string]
keyword[try] :
keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[r] :
identifier[description] = literal[string]
identifier[description] += identifier[r] . identifier[read] ()
keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[c] :
identifier[description] += literal[string]
identifier[description] += identifier[c] . identifier[read] ()
keyword[return] identifier[description]
keyword[except] identifier[Exception] :
keyword[return] literal[string] | def read_description():
"""Read README.md and CHANGELOG.md."""
try:
with open('README.md') as r:
description = '\n'
description += r.read() # depends on [control=['with'], data=['r']]
with open('CHANGELOG.md') as c:
description += '\n'
description += c.read() # depends on [control=['with'], data=['c']]
return description # depends on [control=['try'], data=[]]
except Exception:
return '\n PyCM is a multi-class confusion matrix library written in Python that\n supports both input data vectors and direct matrix, and a proper tool for\n post-classification model evaluation that supports most classes and overall\n statistics parameters.\n PyCM is the swiss-army knife of confusion matrices, targeted mainly at\n data scientists that need a broad array of metrics for predictive models\n and an accurate evaluation of large variety of classifiers.' # depends on [control=['except'], data=[]] |
def tigrload(args):
    """
    %prog tigrload db ev_type
    Load EVM results into TIGR db. Actually, just write a load.sh script. The
    ev_type should be set, e.g. "EVM1", "EVM2", etc.
    """
    # The docstring doubles as the CLI usage text consumed by OptionParser.
    parser = OptionParser(tigrload.__doc__)
    opts, positional = parser.parse_args(args)
    if len(positional) != 2:
        sys.exit(not parser.print_help())
    db, ev_type = positional
    # Emit a shell script rather than touching the database directly.
    write_file("load.sh", EVMLOAD.format(db, ev_type))
constant[
%prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[tigrload].__doc__]]
<ast.Tuple object at 0x7da207f98d90> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da207f99810>]]
<ast.Tuple object at 0x7da207f9b730> assign[=] name[args]
variable[runfile] assign[=] constant[load.sh]
variable[contents] assign[=] call[name[EVMLOAD].format, parameter[name[db], name[ev_type]]]
call[name[write_file], parameter[name[runfile], name[contents]]] | keyword[def] identifier[tigrload] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[tigrload] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[db] , identifier[ev_type] = identifier[args]
identifier[runfile] = literal[string]
identifier[contents] = identifier[EVMLOAD] . identifier[format] ( identifier[db] , identifier[ev_type] )
identifier[write_file] ( identifier[runfile] , identifier[contents] ) | def tigrload(args):
"""
%prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc.
"""
p = OptionParser(tigrload.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(db, ev_type) = args
runfile = 'load.sh'
contents = EVMLOAD.format(db, ev_type)
write_file(runfile, contents) |
def current_state_str(self):
    """Return string representation of the current state of the sensor."""
    if not self.sample_ok:
        return "Bad sample"
    # Each reading is optional; include only the ones that are present.
    readings = (
        ('temperature', 'Temp: %s ºC, '),
        ('humidity', 'Humid: %s %%, '),
        ('pressure', 'Press: %s mb, '),
        ('light_level', 'Light: %s lux, '),
    )
    parts = []
    for attr, template in readings:
        value = self._get_value_opc_attr(attr)
        if value is not None:
            parts.append(template % value)
    # Drop the trailing ", " separator (empty string stays empty).
    return ''.join(parts)[:-2]
constant[Return string representation of the current state of the sensor.]
if name[self].sample_ok begin[:]
variable[msg] assign[=] constant[]
variable[temperature] assign[=] call[name[self]._get_value_opc_attr, parameter[constant[temperature]]]
if compare[name[temperature] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c7ca470>
variable[humidity] assign[=] call[name[self]._get_value_opc_attr, parameter[constant[humidity]]]
if compare[name[humidity] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c7cb850>
variable[pressure] assign[=] call[name[self]._get_value_opc_attr, parameter[constant[pressure]]]
if compare[name[pressure] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c7cbb50>
variable[light_level] assign[=] call[name[self]._get_value_opc_attr, parameter[constant[light_level]]]
if compare[name[light_level] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c76cc70>
return[call[name[msg]][<ast.Slice object at 0x7da20c76fee0>]] | keyword[def] identifier[current_state_str] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[sample_ok] :
identifier[msg] = literal[string]
identifier[temperature] = identifier[self] . identifier[_get_value_opc_attr] ( literal[string] )
keyword[if] identifier[temperature] keyword[is] keyword[not] keyword[None] :
identifier[msg] += literal[string] % identifier[temperature]
identifier[humidity] = identifier[self] . identifier[_get_value_opc_attr] ( literal[string] )
keyword[if] identifier[humidity] keyword[is] keyword[not] keyword[None] :
identifier[msg] += literal[string] % identifier[humidity]
identifier[pressure] = identifier[self] . identifier[_get_value_opc_attr] ( literal[string] )
keyword[if] identifier[pressure] keyword[is] keyword[not] keyword[None] :
identifier[msg] += literal[string] % identifier[pressure]
identifier[light_level] = identifier[self] . identifier[_get_value_opc_attr] ( literal[string] )
keyword[if] identifier[light_level] keyword[is] keyword[not] keyword[None] :
identifier[msg] += literal[string] % identifier[light_level]
keyword[return] identifier[msg] [:- literal[int] ]
keyword[else] :
keyword[return] literal[string] | def current_state_str(self):
"""Return string representation of the current state of the sensor."""
if self.sample_ok:
msg = ''
temperature = self._get_value_opc_attr('temperature')
if temperature is not None:
msg += 'Temp: %s ºC, ' % temperature # depends on [control=['if'], data=['temperature']]
humidity = self._get_value_opc_attr('humidity')
if humidity is not None:
msg += 'Humid: %s %%, ' % humidity # depends on [control=['if'], data=['humidity']]
pressure = self._get_value_opc_attr('pressure')
if pressure is not None:
msg += 'Press: %s mb, ' % pressure # depends on [control=['if'], data=['pressure']]
light_level = self._get_value_opc_attr('light_level')
if light_level is not None:
msg += 'Light: %s lux, ' % light_level # depends on [control=['if'], data=['light_level']]
return msg[:-2] # depends on [control=['if'], data=[]]
else:
return 'Bad sample' |
def score_n2(matrix, matrix_size):
    """Return the data-masking penalty score for feature 2.

    Counts every 2x2 block of same-colored modules (overlapping blocks
    count individually) and weights the count by N2 = 3, per
    ISO/IEC 18004:2015(E) 7.8.3 Table 11.

    :param matrix: The matrix to evaluate.
    :param matrix_size: The width (or height) of the matrix.
    :return int: The penalty score (feature 2) of the matrix.
    """
    block_count = 0
    for row in range(matrix_size - 1):
        for col in range(matrix_size - 1):
            color = matrix[row][col]
            neighbours = (matrix[row][col + 1],
                          matrix[row + 1][col],
                          matrix[row + 1][col + 1])
            if all(module == color for module in neighbours):
                block_count += 1
    return block_count * 3
constant[ Implements the penalty score feature 2.
ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54)
============================== ==================== ===============
Feature Evaluation condition Points
============================== ==================== ===============
Block of modules in same color Block size = m × n N2 ×(m-1)×(n-1)
============================== ==================== ===============
N2 = 3
:param matrix: The matrix to evaluate
:param matrix_size: The width (or height) of the matrix.
:return int: The penalty score (feature 2) of the matrix.
]
variable[score] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[matrix_size] - constant[1]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[binary_operation[name[matrix_size] - constant[1]]]]] begin[:]
variable[bit] assign[=] call[call[name[matrix]][name[i]]][name[j]]
if <ast.BoolOp object at 0x7da18bcc9300> begin[:]
<ast.AugAssign object at 0x7da204620340>
return[binary_operation[name[score] * constant[3]]] | keyword[def] identifier[score_n2] ( identifier[matrix] , identifier[matrix_size] ):
literal[string]
identifier[score] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[matrix_size] - literal[int] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[matrix_size] - literal[int] ):
identifier[bit] = identifier[matrix] [ identifier[i] ][ identifier[j] ]
keyword[if] identifier[bit] == identifier[matrix] [ identifier[i] ][ identifier[j] + literal[int] ] keyword[and] identifier[bit] == identifier[matrix] [ identifier[i] + literal[int] ][ identifier[j] ] keyword[and] identifier[bit] == identifier[matrix] [ identifier[i] + literal[int] ][ identifier[j] + literal[int] ]:
identifier[score] += literal[int]
keyword[return] identifier[score] * literal[int] | def score_n2(matrix, matrix_size):
""" Implements the penalty score feature 2.
ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54)
============================== ==================== ===============
Feature Evaluation condition Points
============================== ==================== ===============
Block of modules in same color Block size = m × n N2 ×(m-1)×(n-1)
============================== ==================== ===============
N2 = 3
:param matrix: The matrix to evaluate
:param matrix_size: The width (or height) of the matrix.
:return int: The penalty score (feature 2) of the matrix.
"""
score = 0
for i in range(matrix_size - 1):
for j in range(matrix_size - 1):
bit = matrix[i][j]
if bit == matrix[i][j + 1] and bit == matrix[i + 1][j] and (bit == matrix[i + 1][j + 1]):
score += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return score * 3 |
def _add_domains_xml(self, document):
"""
Generates the XML elements for allowed domains.
"""
for domain, attrs in self.domains.items():
domain_element = document.createElement('allow-access-from')
domain_element.setAttribute('domain', domain)
if attrs['to_ports'] is not None:
domain_element.setAttribute(
'to-ports',
','.join(attrs['to_ports'])
)
if not attrs['secure']:
domain_element.setAttribute('secure', 'false')
document.documentElement.appendChild(domain_element) | def function[_add_domains_xml, parameter[self, document]]:
constant[
Generates the XML elements for allowed domains.
]
for taget[tuple[[<ast.Name object at 0x7da1b1769d20>, <ast.Name object at 0x7da1b1768b80>]]] in starred[call[name[self].domains.items, parameter[]]] begin[:]
variable[domain_element] assign[=] call[name[document].createElement, parameter[constant[allow-access-from]]]
call[name[domain_element].setAttribute, parameter[constant[domain], name[domain]]]
if compare[call[name[attrs]][constant[to_ports]] is_not constant[None]] begin[:]
call[name[domain_element].setAttribute, parameter[constant[to-ports], call[constant[,].join, parameter[call[name[attrs]][constant[to_ports]]]]]]
if <ast.UnaryOp object at 0x7da1b1768310> begin[:]
call[name[domain_element].setAttribute, parameter[constant[secure], constant[false]]]
call[name[document].documentElement.appendChild, parameter[name[domain_element]]] | keyword[def] identifier[_add_domains_xml] ( identifier[self] , identifier[document] ):
literal[string]
keyword[for] identifier[domain] , identifier[attrs] keyword[in] identifier[self] . identifier[domains] . identifier[items] ():
identifier[domain_element] = identifier[document] . identifier[createElement] ( literal[string] )
identifier[domain_element] . identifier[setAttribute] ( literal[string] , identifier[domain] )
keyword[if] identifier[attrs] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
identifier[domain_element] . identifier[setAttribute] (
literal[string] ,
literal[string] . identifier[join] ( identifier[attrs] [ literal[string] ])
)
keyword[if] keyword[not] identifier[attrs] [ literal[string] ]:
identifier[domain_element] . identifier[setAttribute] ( literal[string] , literal[string] )
identifier[document] . identifier[documentElement] . identifier[appendChild] ( identifier[domain_element] ) | def _add_domains_xml(self, document):
"""
Generates the XML elements for allowed domains.
"""
for (domain, attrs) in self.domains.items():
domain_element = document.createElement('allow-access-from')
domain_element.setAttribute('domain', domain)
if attrs['to_ports'] is not None:
domain_element.setAttribute('to-ports', ','.join(attrs['to_ports'])) # depends on [control=['if'], data=[]]
if not attrs['secure']:
domain_element.setAttribute('secure', 'false') # depends on [control=['if'], data=[]]
document.documentElement.appendChild(domain_element) # depends on [control=['for'], data=[]] |
def convert_1x_args(bucket, **kwargs):
    """
    Converts arguments for 1.x constructors to their 2.x forms.

    :param bucket: the bucket name from the 1.x positional argument
    :param kwargs: remaining 1.x keyword arguments; ``host`` and ``port``
        are consumed here, everything else is passed through untouched
    :return: the keyword arguments with a 2.x ``connection_string`` added,
        unless one was already supplied under either accepted spelling
        (``connstr`` or ``connection_string``)
    """
    host = kwargs.pop('host', 'localhost')
    port = kwargs.pop('port', None)
    # PEP 8 idiom fix: use 'x not in y' rather than 'not x in y'.
    # Only synthesize a connection string if the caller did not provide one.
    if 'connstr' not in kwargs and 'connection_string' not in kwargs:
        kwargs['connection_string'] = _build_connstr(host, port, bucket)
    return kwargs
constant[
Converts arguments for 1.x constructors to their 2.x forms
]
variable[host] assign[=] call[name[kwargs].pop, parameter[constant[host], constant[localhost]]]
variable[port] assign[=] call[name[kwargs].pop, parameter[constant[port], constant[None]]]
if <ast.BoolOp object at 0x7da2054a7df0> begin[:]
call[name[kwargs]][constant[connection_string]] assign[=] call[name[_build_connstr], parameter[name[host], name[port], name[bucket]]]
return[name[kwargs]] | keyword[def] identifier[convert_1x_args] ( identifier[bucket] ,** identifier[kwargs] ):
literal[string]
identifier[host] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[port] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] keyword[not] literal[string] keyword[in] identifier[kwargs] keyword[and] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[_build_connstr] ( identifier[host] , identifier[port] , identifier[bucket] )
keyword[return] identifier[kwargs] | def convert_1x_args(bucket, **kwargs):
"""
Converts arguments for 1.x constructors to their 2.x forms
"""
host = kwargs.pop('host', 'localhost')
port = kwargs.pop('port', None)
if not 'connstr' in kwargs and 'connection_string' not in kwargs:
kwargs['connection_string'] = _build_connstr(host, port, bucket) # depends on [control=['if'], data=[]]
return kwargs |
def background_sum(self):
    """
    The sum of ``background`` values within the source segment.
    Pixel values that are masked in the input ``data``, including
    any non-finite pixel values (i.e. NaN, infs) that are
    automatically masked, are also masked in the background array.
    """
    # Guard clauses instead of nested if/else.
    if self._background is None:
        return None
    if self._is_completely_masked:
        # NaN carrying the background unit so table columns stay typed.
        return np.nan * self._background_unit
    return np.sum(self._background_values)
constant[
The sum of ``background`` values within the source segment.
Pixel values that are masked in the input ``data``, including
any non-finite pixel values (i.e. NaN, infs) that are
automatically masked, are also masked in the background array.
]
if compare[name[self]._background is_not constant[None]] begin[:]
if name[self]._is_completely_masked begin[:]
return[binary_operation[name[np].nan * name[self]._background_unit]] | keyword[def] identifier[background_sum] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_background] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[_is_completely_masked] :
keyword[return] identifier[np] . identifier[nan] * identifier[self] . identifier[_background_unit]
keyword[else] :
keyword[return] identifier[np] . identifier[sum] ( identifier[self] . identifier[_background_values] )
keyword[else] :
keyword[return] keyword[None] | def background_sum(self):
"""
The sum of ``background`` values within the source segment.
Pixel values that are masked in the input ``data``, including
any non-finite pixel values (i.e. NaN, infs) that are
automatically masked, are also masked in the background array.
"""
if self._background is not None:
if self._is_completely_masked:
return np.nan * self._background_unit # unit for table # depends on [control=['if'], data=[]]
else:
return np.sum(self._background_values) # depends on [control=['if'], data=[]]
else:
return None |
def get_image_name(self,
                   repo_path: Path,
                   requirements_option: RequirementsOptions,
                   dependencies: Optional[List[str]]) -> str:
    """Return the name for images with installed requirements and dependencies.

    NOTE(review): ``repo_path``, ``requirements_option`` and
    ``dependencies`` are not used here; they appear to be kept for a
    shared interface with other backends -- confirm against callers.
    """
    if self.inherit_image is None:
        return self.get_arca_base_name()
    # Derive a stable image name from the inherited "<name>:<tag>" reference.
    image_name, image_tag = str(self.inherit_image).split(":")
    return f"arca_{image_name}_{image_tag}"
constant[ Returns the name for images with installed requirements and dependencies.
]
if compare[name[self].inherit_image is constant[None]] begin[:]
return[call[name[self].get_arca_base_name, parameter[]]] | keyword[def] identifier[get_image_name] ( identifier[self] ,
identifier[repo_path] : identifier[Path] ,
identifier[requirements_option] : identifier[RequirementsOptions] ,
identifier[dependencies] : identifier[Optional] [ identifier[List] [ identifier[str] ]])-> identifier[str] :
literal[string]
keyword[if] identifier[self] . identifier[inherit_image] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[get_arca_base_name] ()
keyword[else] :
identifier[name] , identifier[tag] = identifier[str] ( identifier[self] . identifier[inherit_image] ). identifier[split] ( literal[string] )
keyword[return] literal[string] | def get_image_name(self, repo_path: Path, requirements_option: RequirementsOptions, dependencies: Optional[List[str]]) -> str:
""" Returns the name for images with installed requirements and dependencies.
"""
if self.inherit_image is None:
return self.get_arca_base_name() # depends on [control=['if'], data=[]]
else:
(name, tag) = str(self.inherit_image).split(':')
return f'arca_{name}_{tag}' |
def get_cache_key(self, section, name):
    """Return the cache key corresponding to a given preference.

    :param section: the preference section (may be falsy/None)
    :param name: the preference name
    :return: a cache key string; keys for instance-bound managers also
        embed the instance primary key so per-instance values do not
        collide with each other or with global preferences
    """
    if not self.instance:
        return 'dynamic_preferences_{0}_{1}_{2}'.format(
            self.model.__name__, section, name)
    # Fix: the original passed ``self.instance.pk`` twice; str.format()
    # silently ignored the extra argument, so the output is unchanged.
    return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(
        self.model.__name__, self.instance.pk, section, name)
constant[Return the cache key corresponding to a given preference]
if <ast.UnaryOp object at 0x7da1b112aad0> begin[:]
return[call[constant[dynamic_preferences_{0}_{1}_{2}].format, parameter[name[self].model.__name__, name[section], name[name]]]]
return[call[constant[dynamic_preferences_{0}_{1}_{2}_{3}].format, parameter[name[self].model.__name__, name[self].instance.pk, name[section], name[name], name[self].instance.pk]]] | keyword[def] identifier[get_cache_key] ( identifier[self] , identifier[section] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[instance] :
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[model] . identifier[__name__] , identifier[section] , identifier[name] )
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[model] . identifier[__name__] , identifier[self] . identifier[instance] . identifier[pk] , identifier[section] , identifier[name] , identifier[self] . identifier[instance] . identifier[pk] ) | def get_cache_key(self, section, name):
"""Return the cache key corresponding to a given preference"""
if not self.instance:
return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name) # depends on [control=['if'], data=[]]
return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, self.instance.pk, section, name, self.instance.pk) |
def _sanitize_dates(start, end):
"""
Return (datetime_start, datetime_end) tuple
if start is None - default is 2015/01/01
if end is None - default is today
"""
if isinstance(start, int):
# regard int as year
start = datetime(start, 1, 1)
start = to_datetime(start)
if isinstance(end, int):
end = datetime(end, 1, 1)
end = to_datetime(end)
if start is None:
start = datetime(2015, 1, 1)
if end is None:
end = datetime.today()
if start > end:
raise ValueError('start must be an earlier date than end')
return start, end | def function[_sanitize_dates, parameter[start, end]]:
constant[
Return (datetime_start, datetime_end) tuple
if start is None - default is 2015/01/01
if end is None - default is today
]
if call[name[isinstance], parameter[name[start], name[int]]] begin[:]
variable[start] assign[=] call[name[datetime], parameter[name[start], constant[1], constant[1]]]
variable[start] assign[=] call[name[to_datetime], parameter[name[start]]]
if call[name[isinstance], parameter[name[end], name[int]]] begin[:]
variable[end] assign[=] call[name[datetime], parameter[name[end], constant[1], constant[1]]]
variable[end] assign[=] call[name[to_datetime], parameter[name[end]]]
if compare[name[start] is constant[None]] begin[:]
variable[start] assign[=] call[name[datetime], parameter[constant[2015], constant[1], constant[1]]]
if compare[name[end] is constant[None]] begin[:]
variable[end] assign[=] call[name[datetime].today, parameter[]]
if compare[name[start] greater[>] name[end]] begin[:]
<ast.Raise object at 0x7da1b22adb70>
return[tuple[[<ast.Name object at 0x7da1b22ad0c0>, <ast.Name object at 0x7da1b22add80>]]] | keyword[def] identifier[_sanitize_dates] ( identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[start] , identifier[int] ):
identifier[start] = identifier[datetime] ( identifier[start] , literal[int] , literal[int] )
identifier[start] = identifier[to_datetime] ( identifier[start] )
keyword[if] identifier[isinstance] ( identifier[end] , identifier[int] ):
identifier[end] = identifier[datetime] ( identifier[end] , literal[int] , literal[int] )
identifier[end] = identifier[to_datetime] ( identifier[end] )
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[start] = identifier[datetime] ( literal[int] , literal[int] , literal[int] )
keyword[if] identifier[end] keyword[is] keyword[None] :
identifier[end] = identifier[datetime] . identifier[today] ()
keyword[if] identifier[start] > identifier[end] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[start] , identifier[end] | def _sanitize_dates(start, end):
"""
Return (datetime_start, datetime_end) tuple
if start is None - default is 2015/01/01
if end is None - default is today
"""
if isinstance(start, int): # regard int as year
start = datetime(start, 1, 1) # depends on [control=['if'], data=[]]
start = to_datetime(start)
if isinstance(end, int):
end = datetime(end, 1, 1) # depends on [control=['if'], data=[]]
end = to_datetime(end)
if start is None:
start = datetime(2015, 1, 1) # depends on [control=['if'], data=['start']]
if end is None:
end = datetime.today() # depends on [control=['if'], data=['end']]
if start > end:
raise ValueError('start must be an earlier date than end') # depends on [control=['if'], data=[]]
return (start, end) |
def take_complement(list_, index_list):
    """Return the items of ``list_`` whose positions are *not* in ``index_list``."""
    # Build a boolean mask selecting the indexed positions, invert it,
    # and keep only the items where the inverted mask is True.
    return compress(list_, not_list(index_to_boolmask(index_list, len(list_))))
constant[ Returns items in ``list_`` not indexed by index_list ]
variable[mask] assign[=] call[name[not_list], parameter[call[name[index_to_boolmask], parameter[name[index_list], call[name[len], parameter[name[list_]]]]]]]
return[call[name[compress], parameter[name[list_], name[mask]]]] | keyword[def] identifier[take_complement] ( identifier[list_] , identifier[index_list] ):
literal[string]
identifier[mask] = identifier[not_list] ( identifier[index_to_boolmask] ( identifier[index_list] , identifier[len] ( identifier[list_] )))
keyword[return] identifier[compress] ( identifier[list_] , identifier[mask] ) | def take_complement(list_, index_list):
""" Returns items in ``list_`` not indexed by index_list """
mask = not_list(index_to_boolmask(index_list, len(list_)))
return compress(list_, mask) |
def process_request(self, method, data=None):
    """Process request over HTTP to ubersmith instance.
    method: Ubersmith API method string
    data: dict of method arguments
    """
    # Reject unknown API methods up front.
    self._validate_request_method(method)

    # Ubersmith occasionally replies with an 'updating token' page; retry
    # a few times (2s apart) before giving up.
    # see: https://github.com/jasonkeene/python-ubersmith/issues/1
    max_attempts = 3
    for attempt in range(max_attempts):
        response = self._send_request(method, data)
        if not self._is_token_response(response):
            break
        if attempt == max_attempts - 1:
            raise UpdatingTokenResponse
        time.sleep(2)

    resp = BaseResponse(response)

    # JSON bodies report API-level failures inside the payload.
    if response.headers.get('content-type') == 'application/json':
        if not resp.json.get('status'):
            is_maintenance = (
                resp.json.get('error_code') == 1 and
                resp.json.get('error_message') == u"We are currently "
                "undergoing maintenance, please check back shortly."
            )
            if is_maintenance:
                raise MaintenanceResponse(response=resp.json)
            raise ResponseError(response=resp.json)

    return resp
constant[Process request over HTTP to ubersmith instance.
method: Ubersmith API method string
data: dict of method arguments
]
call[name[self]._validate_request_method, parameter[name[method]]]
variable[attempts] assign[=] constant[3]
for taget[name[i]] in starred[call[name[range], parameter[name[attempts]]]] begin[:]
variable[response] assign[=] call[name[self]._send_request, parameter[name[method], name[data]]]
if call[name[self]._is_token_response, parameter[name[response]]] begin[:]
if compare[name[i] less[<] binary_operation[name[attempts] - constant[1]]] begin[:]
call[name[time].sleep, parameter[constant[2]]]
continue
break
variable[resp] assign[=] call[name[BaseResponse], parameter[name[response]]]
if compare[call[name[response].headers.get, parameter[constant[content-type]]] equal[==] constant[application/json]] begin[:]
if <ast.UnaryOp object at 0x7da1b26afc10> begin[:]
if call[name[all], parameter[list[[<ast.Compare object at 0x7da1b26acca0>, <ast.Compare object at 0x7da1b26ad030>]]]] begin[:]
<ast.Raise object at 0x7da1b26af8b0>
return[name[resp]] | keyword[def] identifier[process_request] ( identifier[self] , identifier[method] , identifier[data] = keyword[None] ):
literal[string]
identifier[self] . identifier[_validate_request_method] ( identifier[method] )
identifier[attempts] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[attempts] ):
identifier[response] = identifier[self] . identifier[_send_request] ( identifier[method] , identifier[data] )
keyword[if] identifier[self] . identifier[_is_token_response] ( identifier[response] ):
keyword[if] identifier[i] < identifier[attempts] - literal[int] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[continue]
keyword[else] :
keyword[raise] identifier[UpdatingTokenResponse]
keyword[break]
identifier[resp] = identifier[BaseResponse] ( identifier[response] )
keyword[if] identifier[response] . identifier[headers] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] keyword[not] identifier[resp] . identifier[json] . identifier[get] ( literal[string] ):
keyword[if] identifier[all] ([
identifier[resp] . identifier[json] . identifier[get] ( literal[string] )== literal[int] ,
identifier[resp] . identifier[json] . identifier[get] ( literal[string] )== literal[string]
literal[string] ,
]):
keyword[raise] identifier[MaintenanceResponse] ( identifier[response] = identifier[resp] . identifier[json] )
keyword[else] :
keyword[raise] identifier[ResponseError] ( identifier[response] = identifier[resp] . identifier[json] )
keyword[return] identifier[resp] | def process_request(self, method, data=None):
"""Process request over HTTP to ubersmith instance.
method: Ubersmith API method string
data: dict of method arguments
"""
# make sure requested method is valid
self._validate_request_method(method)
# attempt the request multiple times
attempts = 3
for i in range(attempts):
response = self._send_request(method, data)
# handle case where ubersmith is 'updating token'
# see: https://github.com/jasonkeene/python-ubersmith/issues/1
if self._is_token_response(response):
if i < attempts - 1:
# wait 2 secs before retrying request
time.sleep(2)
continue # depends on [control=['if'], data=[]]
else:
raise UpdatingTokenResponse # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=['i']]
resp = BaseResponse(response)
# test for error in json response
if response.headers.get('content-type') == 'application/json':
if not resp.json.get('status'):
if all([resp.json.get('error_code') == 1, resp.json.get('error_message') == u'We are currently undergoing maintenance, please check back shortly.']):
raise MaintenanceResponse(response=resp.json) # depends on [control=['if'], data=[]]
else:
raise ResponseError(response=resp.json) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return resp |
def ldap_server_host_basedn(self, **kwargs):
    """Build the netconf payload for an LDAP server host ``basedn``
    and dispatch it through the callback.  (Auto Generated Code)
    """
    config = ET.Element("config")
    ldap_server = ET.SubElement(config, "ldap-server",
                                xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(ldap_server, "host")
    # 'hostname' is the list key; 'basedn' is the leaf being configured.
    ET.SubElement(host, "hostname").text = kwargs.pop('hostname')
    ET.SubElement(host, "basedn").text = kwargs.pop('basedn')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[ldap_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ldap-server]]]
variable[host] assign[=] call[name[ET].SubElement, parameter[name[ldap_server], constant[host]]]
variable[hostname_key] assign[=] call[name[ET].SubElement, parameter[name[host], constant[hostname]]]
name[hostname_key].text assign[=] call[name[kwargs].pop, parameter[constant[hostname]]]
variable[basedn] assign[=] call[name[ET].SubElement, parameter[name[host], constant[basedn]]]
name[basedn].text assign[=] call[name[kwargs].pop, parameter[constant[basedn]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[ldap_server_host_basedn] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[ldap_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[host] = identifier[ET] . identifier[SubElement] ( identifier[ldap_server] , literal[string] )
identifier[hostname_key] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[hostname_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[basedn] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[basedn] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def ldap_server_host_basedn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
ldap_server = ET.SubElement(config, 'ldap-server', xmlns='urn:brocade.com:mgmt:brocade-aaa')
host = ET.SubElement(ldap_server, 'host')
hostname_key = ET.SubElement(host, 'hostname')
hostname_key.text = kwargs.pop('hostname')
basedn = ET.SubElement(host, 'basedn')
basedn.text = kwargs.pop('basedn')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def use_federated_bank_view(self):
    """Pass through to provider ItemLookupSession.use_federated_bank_view"""
    self._bank_view = FEDERATED
    # Propagate the federated view to every active provider session;
    # sessions that don't expose the method are simply skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_federated_bank_view()
        except AttributeError:
            pass
pass | def function[use_federated_bank_view, parameter[self]]:
constant[Pass through to provider ItemLookupSession.use_federated_bank_view]
name[self]._bank_view assign[=] name[FEDERATED]
for taget[name[session]] in starred[call[name[self]._get_provider_sessions, parameter[]]] begin[:]
<ast.Try object at 0x7da18f58c940> | keyword[def] identifier[use_federated_bank_view] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_bank_view] = identifier[FEDERATED]
keyword[for] identifier[session] keyword[in] identifier[self] . identifier[_get_provider_sessions] ():
keyword[try] :
identifier[session] . identifier[use_federated_bank_view] ()
keyword[except] identifier[AttributeError] :
keyword[pass] | def use_federated_bank_view(self):
"""Pass through to provider ItemLookupSession.use_federated_bank_view"""
self._bank_view = FEDERATED
# self._get_provider_session('item_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_bank_view() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['session']] |
def calc_resize_factor(prediction, image_size):
    """
    Calculates how much prediction.shape and image_size differ.

    Returns the (y, x) scale factors between the prediction raster and
    the image; raises RuntimeError if the aspect ratios disagree.
    """
    factor_y = prediction.shape[0] / float(image_size[0])
    factor_x = prediction.shape[1] / float(image_size[1])
    # Allow up to one pixel's worth of rounding slack before complaining.
    if abs(factor_x - factor_y) > 1.0 / image_size[1]:
        raise RuntimeError("""The aspect ratio of the fixations does not
            match with the prediction: %f vs. %f"""
            % (factor_y, factor_x))
    return (factor_y, factor_x)
return (resize_factor_y, resize_factor_x) | def function[calc_resize_factor, parameter[prediction, image_size]]:
constant[
Calculates how much prediction.shape and image_size differ.
]
variable[resize_factor_x] assign[=] binary_operation[call[name[prediction].shape][constant[1]] / call[name[float], parameter[call[name[image_size]][constant[1]]]]]
variable[resize_factor_y] assign[=] binary_operation[call[name[prediction].shape][constant[0]] / call[name[float], parameter[call[name[image_size]][constant[0]]]]]
if compare[call[name[abs], parameter[binary_operation[name[resize_factor_x] - name[resize_factor_y]]]] greater[>] binary_operation[constant[1.0] / call[name[image_size]][constant[1]]]] begin[:]
<ast.Raise object at 0x7da1b0fde290>
return[tuple[[<ast.Name object at 0x7da1b0fdc0d0>, <ast.Name object at 0x7da1b0fdeb90>]]] | keyword[def] identifier[calc_resize_factor] ( identifier[prediction] , identifier[image_size] ):
literal[string]
identifier[resize_factor_x] = identifier[prediction] . identifier[shape] [ literal[int] ]/ identifier[float] ( identifier[image_size] [ literal[int] ])
identifier[resize_factor_y] = identifier[prediction] . identifier[shape] [ literal[int] ]/ identifier[float] ( identifier[image_size] [ literal[int] ])
keyword[if] identifier[abs] ( identifier[resize_factor_x] - identifier[resize_factor_y] )> literal[int] / identifier[image_size] [ literal[int] ]:
keyword[raise] identifier[RuntimeError] ( literal[string]
%( identifier[resize_factor_y] , identifier[resize_factor_x] ))
keyword[return] ( identifier[resize_factor_y] , identifier[resize_factor_x] ) | def calc_resize_factor(prediction, image_size):
"""
Calculates how much prediction.shape and image_size differ.
"""
resize_factor_x = prediction.shape[1] / float(image_size[1])
resize_factor_y = prediction.shape[0] / float(image_size[0])
if abs(resize_factor_x - resize_factor_y) > 1.0 / image_size[1]:
raise RuntimeError('The aspect ratio of the fixations does not\n match with the prediction: %f vs. %f' % (resize_factor_y, resize_factor_x)) # depends on [control=['if'], data=[]]
return (resize_factor_y, resize_factor_x) |
def table(rows, columns=None, output=None, data_args=None, **kwargs):
    """
    Return a formatted string of "list of list" table data.
    See: http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.html
    Examples:
        >>> fmt.print([("foo", 1), ("bar", 2)])
             0  1
        0  foo  1
        1  bar  2
        >>> fmt.print([("foo", 1), ("bar", 2)], columns=("type", "value"))
          type  value
        0  foo      1
        1  bar      2
    Arguments:
        rows (list of list): Data to format, one row per element,
            multiple columns per row.
        columns (list of str, optional): Column names.
        output (str, optional): Path to output file.
        data_args (dict, optional): Any additional kwargs to pass to
            pandas.DataFrame constructor.
        **kwargs: Any additional arguments to pass to
            pandas.DataFrame.to_string().
    Returns:
        str: Formatted data as table.
    Raises:
        Error: If number of columns (if provided) does not equal
            number of columns in rows; or if number of columns is not
            consistent across all rows.
    """
    import pandas

    # Copy rather than mutate: the original signature used a mutable
    # default (data_args={}) and then assigned into it, leaking state
    # across calls and mutating caller-supplied dicts.
    data_args = dict(data_args) if data_args else {}

    # Number of columns, taken from the first row.
    num_columns = len(rows[0])

    # Check that each subsequent row has the same length.  enumerate()
    # starts at 1 so the reported index matches the row's real position
    # (the original started at 0 and was off by one).
    for i, row in enumerate(rows[1:], start=1):
        if len(row) != num_columns:
            raise Error("Number of columns in row {i_row} ({c_row}) "
                        "does not match number of columns in row 0 ({z_row})"
                        .format(i_row=i, c_row=len(row), z_row=num_columns))

    if columns is None:
        # No header by default when there are no column names.
        if "header" not in kwargs:
            kwargs["header"] = False
    elif len(columns) != num_columns:
        # Check that number of columns matches number of columns in rows.
        raise Error("Number of columns in header ({c_header}) does not "
                    "match the number of columns in the data ({c_rows})"
                    .format(c_header=len(columns), c_rows=num_columns))

    # Default arguments.
    if "index" not in kwargs:
        kwargs["index"] = False

    data_args["columns"] = columns
    string = pandas.DataFrame(list(rows), **data_args).to_string(**kwargs)

    if output is None:
        return string
    # Close the file deterministically; the original leaked the handle
    # via print(..., file=open(output, "w")).
    with open(output, "w") as outfile:
        print(string, file=outfile)
    io.info("Wrote", output)
constant[
Return a formatted string of "list of list" table data.
See: http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.html
Examples:
>>> fmt.print([("foo", 1), ("bar", 2)])
0 1
0 foo 1
1 bar 2
>>> fmt.print([("foo", 1), ("bar", 2)], columns=("type", "value"))
type value
0 foo 1
1 bar 2
Arguments:
rows (list of list): Data to format, one row per element,
multiple columns per row.
columns (list of str, optional): Column names.
output (str, optional): Path to output file.
data_args (dict, optional): Any additional kwargs to pass to
pandas.DataFrame constructor.
**kwargs: Any additional arguments to pass to
pandas.DataFrame.to_string().
Returns:
str: Formatted data as table.
Raises:
Error: If number of columns (if provided) does not equal
number of columns in rows; or if number of columns is not
consistent across all rows.
]
import module[pandas]
variable[num_columns] assign[=] call[name[len], parameter[call[name[rows]][constant[0]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0a4a2f0>, <ast.Name object at 0x7da1b0a4af50>]]] in starred[call[name[enumerate], parameter[call[name[rows]][<ast.Slice object at 0x7da1b09674f0>]]]] begin[:]
if compare[call[name[len], parameter[name[row]]] not_equal[!=] name[num_columns]] begin[:]
<ast.Raise object at 0x7da1b0964550>
if compare[name[columns] is constant[None]] begin[:]
if compare[constant[header] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[header]] assign[=] constant[False]
if compare[constant[index] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[index]] assign[=] constant[False]
call[name[data_args]][constant[columns]] assign[=] name[columns]
variable[string] assign[=] call[call[name[pandas].DataFrame, parameter[call[name[list], parameter[name[rows]]]]].to_string, parameter[]]
if compare[name[output] is constant[None]] begin[:]
return[name[string]] | keyword[def] identifier[table] ( identifier[rows] , identifier[columns] = keyword[None] , identifier[output] = keyword[None] , identifier[data_args] ={},** identifier[kwargs] ):
literal[string]
keyword[import] identifier[pandas]
identifier[num_columns] = identifier[len] ( identifier[rows] [ literal[int] ])
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[rows] [ literal[int] :]):
keyword[if] identifier[len] ( identifier[row] )!= identifier[num_columns] :
keyword[raise] identifier[Error] ( literal[string]
literal[string]
. identifier[format] ( identifier[i_row] = identifier[i] , identifier[c_row] = identifier[len] ( identifier[row] ), identifier[z_row] = identifier[num_columns] ))
keyword[if] identifier[columns] keyword[is] keyword[None] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[False]
keyword[elif] identifier[len] ( identifier[columns] )!= identifier[num_columns] :
keyword[raise] identifier[Error] ( literal[string]
literal[string]
. identifier[format] ( identifier[c_header] = identifier[len] ( identifier[columns] ), identifier[c_rows] = identifier[num_columns] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[False]
identifier[data_args] [ literal[string] ]= identifier[columns]
identifier[string] = identifier[pandas] . identifier[DataFrame] ( identifier[list] ( identifier[rows] ),** identifier[data_args] ). identifier[to_string] (** identifier[kwargs] )
keyword[if] identifier[output] keyword[is] keyword[None] :
keyword[return] identifier[string]
keyword[else] :
identifier[print] ( identifier[string] , identifier[file] = identifier[open] ( identifier[output] , literal[string] ))
identifier[io] . identifier[info] ( literal[string] , identifier[output] ) | def table(rows, columns=None, output=None, data_args={}, **kwargs):
"""
Return a formatted string of "list of list" table data.
See: http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.html
Examples:
>>> fmt.print([("foo", 1), ("bar", 2)])
0 1
0 foo 1
1 bar 2
>>> fmt.print([("foo", 1), ("bar", 2)], columns=("type", "value"))
type value
0 foo 1
1 bar 2
Arguments:
rows (list of list): Data to format, one row per element,
multiple columns per row.
columns (list of str, optional): Column names.
output (str, optional): Path to output file.
data_args (dict, optional): Any additional kwargs to pass to
pandas.DataFrame constructor.
**kwargs: Any additional arguments to pass to
pandas.DataFrame.to_string().
Returns:
str: Formatted data as table.
Raises:
Error: If number of columns (if provided) does not equal
number of columns in rows; or if number of columns is not
consistent across all rows.
"""
import pandas
# Number of columns.
num_columns = len(rows[0])
# Check that each row is the same length.
for (i, row) in enumerate(rows[1:]):
if len(row) != num_columns:
raise Error('Number of columns in row {i_row} ({c_row}) does not match number of columns in row 0 ({z_row})'.format(i_row=i, c_row=len(row), z_row=num_columns)) # depends on [control=['if'], data=['num_columns']] # depends on [control=['for'], data=[]]
if columns is None:
# Default parameters.
if 'header' not in kwargs:
kwargs['header'] = False # depends on [control=['if'], data=['kwargs']] # depends on [control=['if'], data=[]]
elif len(columns) != num_columns:
# Check that number of columns matches number of columns in
# rows.
raise Error('Number of columns in header ({c_header}) does not match the number of columns in the data ({c_rows})'.format(c_header=len(columns), c_rows=num_columns)) # depends on [control=['if'], data=['num_columns']]
# Default arguments.
if 'index' not in kwargs:
kwargs['index'] = False # depends on [control=['if'], data=['kwargs']]
data_args['columns'] = columns
string = pandas.DataFrame(list(rows), **data_args).to_string(**kwargs)
if output is None:
return string # depends on [control=['if'], data=[]]
else:
print(string, file=open(output, 'w'))
io.info('Wrote', output) |
def help_center_section_translation_update(self, section_id, locale, data, **kwargs):
    """https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation"""
    # PUT the translation payload for one section/locale pair.
    endpoint = "/api/v2/help_center/sections/{section_id}/translations/{locale}.json".format(
        section_id=section_id, locale=locale)
    return self.call(endpoint, method="PUT", data=data, **kwargs)
constant[https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation]
variable[api_path] assign[=] constant[/api/v2/help_center/sections/{section_id}/translations/{locale}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[help_center_section_translation_update] ( identifier[self] , identifier[section_id] , identifier[locale] , identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[section_id] = identifier[section_id] , identifier[locale] = identifier[locale] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def help_center_section_translation_update(self, section_id, locale, data, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation"""
api_path = '/api/v2/help_center/sections/{section_id}/translations/{locale}.json'
api_path = api_path.format(section_id=section_id, locale=locale)
return self.call(api_path, method='PUT', data=data, **kwargs) |
def dir_freq(directory):
    """Return a list of (word, count) tuples, where count is the number
    of files in ``directory`` whose contents contain the word.

    (The original docstring said "directories", but the code counts
    occurrences per *file* within the given directory.)
    """
    freqdict = {}
    for filename in dir_list(directory):
        # eliminate_repeats ensures each word counts at most once per file.
        for word in eliminate_repeats(read_file(directory + '/' + filename)):
            # dict.get replaces the Python-2-only dict.has_key call.
            freqdict[word] = freqdict.get(word, 0) + 1
    # Materialize as a list of (word, count) tuples.
    return list(freqdict.items())
return tupleize | def function[dir_freq, parameter[directory]]:
constant[Returns a list of tuples of (word,# of directories it occurs)]
variable[content] assign[=] call[name[dir_list], parameter[name[directory]]]
variable[i] assign[=] constant[0]
variable[freqdict] assign[=] dictionary[[], []]
for taget[name[filename]] in starred[name[content]] begin[:]
variable[filewords] assign[=] call[name[eliminate_repeats], parameter[call[name[read_file], parameter[binary_operation[binary_operation[name[directory] + constant[/]] + name[filename]]]]]]
for taget[name[word]] in starred[name[filewords]] begin[:]
if call[name[freqdict].has_key, parameter[name[word]]] begin[:]
<ast.AugAssign object at 0x7da207f031f0>
variable[tupleize] assign[=] list[[]]
for taget[name[key]] in starred[call[name[freqdict].keys, parameter[]]] begin[:]
variable[wordtuple] assign[=] tuple[[<ast.Name object at 0x7da207f00dc0>, <ast.Subscript object at 0x7da207f02050>]]
call[name[tupleize].append, parameter[name[wordtuple]]]
return[name[tupleize]] | keyword[def] identifier[dir_freq] ( identifier[directory] ):
literal[string]
identifier[content] = identifier[dir_list] ( identifier[directory] )
identifier[i] = literal[int]
identifier[freqdict] ={}
keyword[for] identifier[filename] keyword[in] identifier[content] :
identifier[filewords] = identifier[eliminate_repeats] ( identifier[read_file] ( identifier[directory] + literal[string] + identifier[filename] ))
keyword[for] identifier[word] keyword[in] identifier[filewords] :
keyword[if] identifier[freqdict] . identifier[has_key] ( identifier[word] ):
identifier[freqdict] [ identifier[word] ]+= literal[int]
keyword[else] :
identifier[freqdict] [ identifier[word] ]= literal[int]
identifier[tupleize] =[]
keyword[for] identifier[key] keyword[in] identifier[freqdict] . identifier[keys] ():
identifier[wordtuple] =( identifier[key] , identifier[freqdict] [ identifier[key] ])
identifier[tupleize] . identifier[append] ( identifier[wordtuple] )
keyword[return] identifier[tupleize] | def dir_freq(directory):
"""Returns a list of tuples of (word,# of directories it occurs)"""
content = dir_list(directory)
i = 0
freqdict = {}
for filename in content:
filewords = eliminate_repeats(read_file(directory + '/' + filename))
for word in filewords:
if freqdict.has_key(word):
freqdict[word] += 1 # depends on [control=['if'], data=[]]
else:
freqdict[word] = 1 # depends on [control=['for'], data=['word']] # depends on [control=['for'], data=['filename']]
tupleize = []
for key in freqdict.keys():
wordtuple = (key, freqdict[key])
tupleize.append(wordtuple) # depends on [control=['for'], data=['key']]
return tupleize |
def transformToNative(obj):
    """
    Turn obj.value into a timedelta or datetime.

    The VALUE parameter decides how the text is parsed:

    * missing or DURATION -- parse as a DURATION, falling back to
      DATE-TIME for feeds that omit VALUE=DATE-TIME;
    * DATE-TIME -- parse as a DATE-TIME (expected to be in UTC, though
      that is taken on faith here);
    * anything else raises ParseError.
    """
    if obj.isNative:
        return obj
    value = getattr(obj, 'value_param', 'DURATION').upper()
    if hasattr(obj, 'value_param'):
        del obj.value_param
    if obj.value == '':
        # Empty trigger: nothing to parse, just flag it native.
        obj.isNative = True
        return obj
    elif value == 'DURATION':
        try:
            return Duration.transformToNative(obj)
        except ParseError:
            logger.warning("TRIGGER not recognized as DURATION, trying "
                           "DATE-TIME, because iCal sometimes exports "
                           "DATE-TIMEs without setting VALUE=DATE-TIME")
            try:
                obj.isNative = False
                dt = DateTimeBehavior.transformToNative(obj)
                return dt
            except Exception:
                # Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                msg = "TRIGGER with no VALUE not recognized as DURATION " \
                      "or as DATE-TIME"
                raise ParseError(msg)
    elif value == 'DATE-TIME':
        # TRIGGERs with DATE-TIME values must be in UTC, we could validate
        # that fact, for now we take it on faith.
        return DateTimeBehavior.transformToNative(obj)
    else:
        raise ParseError("VALUE must be DURATION or DATE-TIME")
constant[
Turn obj.value into a timedelta or datetime.
]
if name[obj].isNative begin[:]
return[name[obj]]
variable[value] assign[=] call[call[name[getattr], parameter[name[obj], constant[value_param], constant[DURATION]]].upper, parameter[]]
if call[name[hasattr], parameter[name[obj], constant[value_param]]] begin[:]
<ast.Delete object at 0x7da18bccb340>
if compare[name[obj].value equal[==] constant[]] begin[:]
name[obj].isNative assign[=] constant[True]
return[name[obj]] | keyword[def] identifier[transformToNative] ( identifier[obj] ):
literal[string]
keyword[if] identifier[obj] . identifier[isNative] :
keyword[return] identifier[obj]
identifier[value] = identifier[getattr] ( identifier[obj] , literal[string] , literal[string] ). identifier[upper] ()
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
keyword[del] identifier[obj] . identifier[value_param]
keyword[if] identifier[obj] . identifier[value] == literal[string] :
identifier[obj] . identifier[isNative] = keyword[True]
keyword[return] identifier[obj]
keyword[elif] identifier[value] == literal[string] :
keyword[try] :
keyword[return] identifier[Duration] . identifier[transformToNative] ( identifier[obj] )
keyword[except] identifier[ParseError] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string]
literal[string] )
keyword[try] :
identifier[obj] . identifier[isNative] = keyword[False]
identifier[dt] = identifier[DateTimeBehavior] . identifier[transformToNative] ( identifier[obj] )
keyword[return] identifier[dt]
keyword[except] :
identifier[msg] = literal[string] literal[string]
keyword[raise] identifier[ParseError] ( identifier[msg] )
keyword[elif] identifier[value] == literal[string] :
keyword[return] identifier[DateTimeBehavior] . identifier[transformToNative] ( identifier[obj] )
keyword[else] :
keyword[raise] identifier[ParseError] ( literal[string] ) | def transformToNative(obj):
"""
Turn obj.value into a timedelta or datetime.
"""
if obj.isNative:
return obj # depends on [control=['if'], data=[]]
value = getattr(obj, 'value_param', 'DURATION').upper()
if hasattr(obj, 'value_param'):
del obj.value_param # depends on [control=['if'], data=[]]
if obj.value == '':
obj.isNative = True
return obj # depends on [control=['if'], data=[]]
elif value == 'DURATION':
try:
return Duration.transformToNative(obj) # depends on [control=['try'], data=[]]
except ParseError:
logger.warning('TRIGGER not recognized as DURATION, trying DATE-TIME, because iCal sometimes exports DATE-TIMEs without setting VALUE=DATE-TIME')
try:
obj.isNative = False
dt = DateTimeBehavior.transformToNative(obj)
return dt # depends on [control=['try'], data=[]]
except:
msg = 'TRIGGER with no VALUE not recognized as DURATION or as DATE-TIME'
raise ParseError(msg) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif value == 'DATE-TIME':
# TRIGGERs with DATE-TIME values must be in UTC, we could validate
# that fact, for now we take it on faith.
return DateTimeBehavior.transformToNative(obj) # depends on [control=['if'], data=[]]
else:
raise ParseError('VALUE must be DURATION or DATE-TIME') |
def create_release_vcs(path, vcs_name=None):
    """Return a new release VCS that can release from this source path."""
    from rez.plugin_managers import plugin_manager
    vcs_types = get_release_vcs_types()

    if vcs_name:
        # An explicit VCS was requested - use it or fail loudly.
        if vcs_name not in vcs_types:
            raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
        vcs_cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
        return vcs_cls(path)

    # Probe every known VCS plugin and bucket the hits by how many directory
    # levels above `path` their working-copy root sits.
    matches_by_level = {}
    for plugin_name in vcs_types:
        vcs_cls = plugin_manager.get_plugin_class('release_vcs', plugin_name)
        found = vcs_cls.find_vcs_root(path)
        if found:
            root_path, levels_up = found
            matches_by_level.setdefault(levels_up, []).append((vcs_cls, root_path))

    if not matches_by_level:
        raise ReleaseVCSError("No version control system for package "
                              "releasing is associated with the path %s" % path)

    # Multiple hits are fine as long as exactly one sits at the "closest"
    # level. Starting from /blah/foo/pkg_root, having both /blah/.hg and
    # /blah/foo/.git is OK - /blah/foo/.git is closer and wins. But if
    # /blah/foo/.git and /blah/foo/.hg both exist we cannot decide, so we
    # error out and let the user pick with rez-release --vcs.
    closest = matches_by_level[min(matches_by_level)]
    if len(closest) > 1:
        names = ", ".join(entry[0].name() for entry in closest)
        raise ReleaseVCSError("Several version control systems are associated "
                              "with the path %s: %s. Use rez-release --vcs to "
                              "choose." % (path, names))

    vcs_cls, vcs_root = closest[0]
    return vcs_cls(pkg_root=path, vcs_root=vcs_root)
constant[Return a new release VCS that can release from this source path.]
from relative_module[rez.plugin_managers] import module[plugin_manager]
variable[vcs_types] assign[=] call[name[get_release_vcs_types], parameter[]]
if name[vcs_name] begin[:]
if compare[name[vcs_name] <ast.NotIn object at 0x7da2590d7190> name[vcs_types]] begin[:]
<ast.Raise object at 0x7da1b1880520>
variable[cls] assign[=] call[name[plugin_manager].get_plugin_class, parameter[constant[release_vcs], name[vcs_name]]]
return[call[name[cls], parameter[name[path]]]]
variable[classes_by_level] assign[=] dictionary[[], []]
for taget[name[vcs_name]] in starred[name[vcs_types]] begin[:]
variable[cls] assign[=] call[name[plugin_manager].get_plugin_class, parameter[constant[release_vcs], name[vcs_name]]]
variable[result] assign[=] call[name[cls].find_vcs_root, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da1b1882830> begin[:]
continue
<ast.Tuple object at 0x7da1b1883c70> assign[=] name[result]
call[call[name[classes_by_level].setdefault, parameter[name[levels_up], list[[]]]].append, parameter[tuple[[<ast.Name object at 0x7da1b1882980>, <ast.Name object at 0x7da1b1880340>]]]]
if <ast.UnaryOp object at 0x7da1b1882920> begin[:]
<ast.Raise object at 0x7da1b18838e0>
variable[lowest_level] assign[=] call[call[name[sorted], parameter[name[classes_by_level]]]][constant[0]]
variable[clss] assign[=] call[name[classes_by_level]][name[lowest_level]]
if compare[call[name[len], parameter[name[clss]]] greater[>] constant[1]] begin[:]
variable[clss_str] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b17eeef0>]]
<ast.Raise object at 0x7da1b17ec6d0> | keyword[def] identifier[create_release_vcs] ( identifier[path] , identifier[vcs_name] = keyword[None] ):
literal[string]
keyword[from] identifier[rez] . identifier[plugin_managers] keyword[import] identifier[plugin_manager]
identifier[vcs_types] = identifier[get_release_vcs_types] ()
keyword[if] identifier[vcs_name] :
keyword[if] identifier[vcs_name] keyword[not] keyword[in] identifier[vcs_types] :
keyword[raise] identifier[ReleaseVCSError] ( literal[string] % identifier[vcs_name] )
identifier[cls] = identifier[plugin_manager] . identifier[get_plugin_class] ( literal[string] , identifier[vcs_name] )
keyword[return] identifier[cls] ( identifier[path] )
identifier[classes_by_level] ={}
keyword[for] identifier[vcs_name] keyword[in] identifier[vcs_types] :
identifier[cls] = identifier[plugin_manager] . identifier[get_plugin_class] ( literal[string] , identifier[vcs_name] )
identifier[result] = identifier[cls] . identifier[find_vcs_root] ( identifier[path] )
keyword[if] keyword[not] identifier[result] :
keyword[continue]
identifier[vcs_path] , identifier[levels_up] = identifier[result]
identifier[classes_by_level] . identifier[setdefault] ( identifier[levels_up] ,[]). identifier[append] (( identifier[cls] , identifier[vcs_path] ))
keyword[if] keyword[not] identifier[classes_by_level] :
keyword[raise] identifier[ReleaseVCSError] ( literal[string]
literal[string] % identifier[path] )
identifier[lowest_level] = identifier[sorted] ( identifier[classes_by_level] )[ literal[int] ]
identifier[clss] = identifier[classes_by_level] [ identifier[lowest_level] ]
keyword[if] identifier[len] ( identifier[clss] )> literal[int] :
identifier[clss_str] = literal[string] . identifier[join] ( identifier[x] [ literal[int] ]. identifier[name] () keyword[for] identifier[x] keyword[in] identifier[clss] )
keyword[raise] identifier[ReleaseVCSError] ( literal[string]
literal[string]
literal[string] %( identifier[path] , identifier[clss_str] ))
keyword[else] :
identifier[cls] , identifier[vcs_root] = identifier[clss] [ literal[int] ]
keyword[return] identifier[cls] ( identifier[pkg_root] = identifier[path] , identifier[vcs_root] = identifier[vcs_root] ) | def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError('Unknown version control system: %r' % vcs_name) # depends on [control=['if'], data=['vcs_name']]
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path) # depends on [control=['if'], data=[]]
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue # depends on [control=['if'], data=[]]
(vcs_path, levels_up) = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path)) # depends on [control=['for'], data=['vcs_name']]
if not classes_by_level:
raise ReleaseVCSError('No version control system for package releasing is associated with the path %s' % path) # depends on [control=['if'], data=[]]
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ', '.join((x[0].name() for x in clss))
raise ReleaseVCSError('Several version control systems are associated with the path %s: %s. Use rez-release --vcs to choose.' % (path, clss_str)) # depends on [control=['if'], data=[]]
else:
(cls, vcs_root) = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root) |
def resize_for_flows(self):
    """Extend `unamey` and `fnamey` for bus injections and line flows"""
    if self.system.config.dime_enable:
        # DiME streaming consumes the flow quantities, so force them on.
        self.system.tds.config.compute_flows = True
    if not self.system.tds.config.compute_flows:
        return
    # Two entries per bus, eight per line, two per area interchange pair.
    extra = (2 * self.system.Bus.n
             + 8 * self.system.Line.n
             + 2 * self.system.Area.n_combination)
    self.unamey.extend([''] * extra)
    self.fnamey.extend([''] * extra)
constant[Extend `unamey` and `fnamey` for bus injections and line flows]
if name[self].system.config.dime_enable begin[:]
name[self].system.tds.config.compute_flows assign[=] constant[True]
if name[self].system.tds.config.compute_flows begin[:]
variable[nflows] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * name[self].system.Bus.n] + binary_operation[constant[8] * name[self].system.Line.n]] + binary_operation[constant[2] * name[self].system.Area.n_combination]]
call[name[self].unamey.extend, parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe93ac0>]] * name[nflows]]]]
call[name[self].fnamey.extend, parameter[binary_operation[list[[<ast.Constant object at 0x7da18fe929b0>]] * name[nflows]]]] | keyword[def] identifier[resize_for_flows] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[system] . identifier[config] . identifier[dime_enable] :
identifier[self] . identifier[system] . identifier[tds] . identifier[config] . identifier[compute_flows] = keyword[True]
keyword[if] identifier[self] . identifier[system] . identifier[tds] . identifier[config] . identifier[compute_flows] :
identifier[nflows] = literal[int] * identifier[self] . identifier[system] . identifier[Bus] . identifier[n] + literal[int] * identifier[self] . identifier[system] . identifier[Line] . identifier[n] + literal[int] * identifier[self] . identifier[system] . identifier[Area] . identifier[n_combination]
identifier[self] . identifier[unamey] . identifier[extend] ([ literal[string] ]* identifier[nflows] )
identifier[self] . identifier[fnamey] . identifier[extend] ([ literal[string] ]* identifier[nflows] ) | def resize_for_flows(self):
"""Extend `unamey` and `fnamey` for bus injections and line flows"""
if self.system.config.dime_enable:
self.system.tds.config.compute_flows = True # depends on [control=['if'], data=[]]
if self.system.tds.config.compute_flows:
nflows = 2 * self.system.Bus.n + 8 * self.system.Line.n + 2 * self.system.Area.n_combination
self.unamey.extend([''] * nflows)
self.fnamey.extend([''] * nflows) # depends on [control=['if'], data=[]] |
def classify(self, text):
    """
    Chooses the highest scoring category for a sample of text
    :param text: sample text to classify
    :type text: str
    :return: the "winning" category
    :rtype: str
    """
    scores = self.score(text)
    if not scores:
        return None
    # Rank categories by ascending score; the last entry of the stable sort
    # is the winner (ties resolve to the later-inserted category).
    ranked = sorted(scores.items(), key=lambda pair: pair[1])
    winner, _ = ranked[-1]
    return winner
constant[
Chooses the highest scoring category for a sample of text
:param text: sample text to classify
:type text: str
:return: the "winning" category
:rtype: str
]
variable[score] assign[=] call[name[self].score, parameter[name[text]]]
if <ast.UnaryOp object at 0x7da18f09e8c0> begin[:]
return[constant[None]]
return[call[call[call[name[sorted], parameter[call[name[score].items, parameter[]]]]][<ast.UnaryOp object at 0x7da1b0aadba0>]][constant[0]]] | keyword[def] identifier[classify] ( identifier[self] , identifier[text] ):
literal[string]
identifier[score] = identifier[self] . identifier[score] ( identifier[text] )
keyword[if] keyword[not] identifier[score] :
keyword[return] keyword[None]
keyword[return] identifier[sorted] ( identifier[score] . identifier[items] (), identifier[key] = keyword[lambda] identifier[v] : identifier[v] [ literal[int] ])[- literal[int] ][ literal[int] ] | def classify(self, text):
"""
Chooses the highest scoring category for a sample of text
:param text: sample text to classify
:type text: str
:return: the "winning" category
:rtype: str
"""
score = self.score(text)
if not score:
return None # depends on [control=['if'], data=[]]
return sorted(score.items(), key=lambda v: v[1])[-1][0] |
def resource_action(client, action='', log_format='item: %(key)s', **kwargs):
    """Call _action_ using boto3 _client_ with _kwargs_.
    This is meant for _action_ methods that will create or implicitely prove a
    given Resource exists. The _log_failure_ flag is available for methods that
    should always succeed, but will occasionally fail due to unknown AWS
    issues.
    Args:
        client (botocore.client.IAM): boto3 client object.
        action (str): Client method to call.
        log_format (str): Generic log message format, 'Added' or 'Found' will
            be prepended depending on the scenario.
        prefix (str): Prefix word to use in successful INFO message.
        **kwargs: Keyword arguments to pass to _action_ method.
    Returns:
        dict: boto3 response.
    """
    response = None
    try:
        response = getattr(client, action)(**kwargs)
        LOG.info(log_format, kwargs)
    except botocore.exceptions.ClientError as client_error:
        code = client_error.response['Error']['Code']
        if code == 'EntityAlreadyExists':
            # The resource is already there - report it as found, not failed.
            LOG.info(' '.join(('Found', log_format)), kwargs)
        elif code == 'AccessDenied':
            # Permission problems are unrecoverable here; surface them.
            LOG.fatal(client_error)
            raise
        else:
            LOG.fatal(client_error)
    return response
constant[Call _action_ using boto3 _client_ with _kwargs_.
This is meant for _action_ methods that will create or implicitely prove a
given Resource exists. The _log_failure_ flag is available for methods that
should always succeed, but will occasionally fail due to unknown AWS
issues.
Args:
client (botocore.client.IAM): boto3 client object.
action (str): Client method to call.
log_format (str): Generic log message format, 'Added' or 'Found' will
be prepended depending on the scenario.
prefix (str): Prefix word to use in successful INFO message.
**kwargs: Keyword arguments to pass to _action_ method.
Returns:
dict: boto3 response.
]
variable[result] assign[=] constant[None]
<ast.Try object at 0x7da207f01810>
return[name[result]] | keyword[def] identifier[resource_action] ( identifier[client] , identifier[action] = literal[string] , identifier[log_format] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[result] = keyword[None]
keyword[try] :
identifier[result] = identifier[getattr] ( identifier[client] , identifier[action] )(** identifier[kwargs] )
identifier[LOG] . identifier[info] ( identifier[log_format] , identifier[kwargs] )
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] :
identifier[error_code] = identifier[error] . identifier[response] [ literal[string] ][ literal[string] ]
keyword[if] identifier[error_code] == literal[string] :
identifier[LOG] . identifier[fatal] ( identifier[error] )
keyword[raise]
keyword[elif] identifier[error_code] == literal[string] :
identifier[LOG] . identifier[info] ( literal[string] . identifier[join] (( literal[string] , identifier[log_format] )), identifier[kwargs] )
keyword[else] :
identifier[LOG] . identifier[fatal] ( identifier[error] )
keyword[return] identifier[result] | def resource_action(client, action='', log_format='item: %(key)s', **kwargs):
"""Call _action_ using boto3 _client_ with _kwargs_.
This is meant for _action_ methods that will create or implicitely prove a
given Resource exists. The _log_failure_ flag is available for methods that
should always succeed, but will occasionally fail due to unknown AWS
issues.
Args:
client (botocore.client.IAM): boto3 client object.
action (str): Client method to call.
log_format (str): Generic log message format, 'Added' or 'Found' will
be prepended depending on the scenario.
prefix (str): Prefix word to use in successful INFO message.
**kwargs: Keyword arguments to pass to _action_ method.
Returns:
dict: boto3 response.
"""
result = None
try:
result = getattr(client, action)(**kwargs)
LOG.info(log_format, kwargs) # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'AccessDenied':
LOG.fatal(error)
raise # depends on [control=['if'], data=[]]
elif error_code == 'EntityAlreadyExists':
LOG.info(' '.join(('Found', log_format)), kwargs) # depends on [control=['if'], data=[]]
else:
LOG.fatal(error) # depends on [control=['except'], data=['error']]
return result |
def removeFunction(self):
    """Remove the 'execute' tag from the tree; return True on success."""
    tree_root = self.etree
    execute_tag = tree_root.find('execute')
    try:
        tree_root.remove(execute_tag)
    except (Exception,) as err:
        # find() returned None or the node was already detached - report
        # the problem and signal failure instead of raising.
        print(err)
        return False
    return True
constant[Remove function tag
]
variable[root] assign[=] name[self].etree
variable[t_execute] assign[=] call[name[root].find, parameter[constant[execute]]]
<ast.Try object at 0x7da18ede7910>
return[constant[False]] | keyword[def] identifier[removeFunction] ( identifier[self] ):
literal[string]
identifier[root] = identifier[self] . identifier[etree]
identifier[t_execute] = identifier[root] . identifier[find] ( literal[string] )
keyword[try] :
identifier[root] . identifier[remove] ( identifier[t_execute] )
keyword[return] keyword[True]
keyword[except] ( identifier[Exception] ,) keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
keyword[return] keyword[False] | def removeFunction(self):
"""Remove function tag
"""
root = self.etree
t_execute = root.find('execute')
try:
root.remove(t_execute)
return True # depends on [control=['try'], data=[]]
except (Exception,) as e:
print(e) # depends on [control=['except'], data=['e']]
return False |
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.
    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) seperated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.
    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode
    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
       ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # Split on the first '/' only: with the previous maxsplit of 2,
            # an input like '127.0.0.1/255/0' produced three fields and the
            # two-name unpacking raised ValueError instead of returning
            # False. Now any extra slashes end up in `mask`, which simply
            # fails netmask validation.
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
constant[Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) seperated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
]
if call[name[isinstance], parameter[name[s], name[basestring]]] begin[:]
if compare[constant[/] in name[s]] begin[:]
<ast.Tuple object at 0x7da18bc72800> assign[=] call[name[s].split, parameter[constant[/], constant[2]]]
return[<ast.BoolOp object at 0x7da18bc717b0>]
<ast.Raise object at 0x7da20c6c7790> | keyword[def] identifier[validate_subnet] ( identifier[s] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[basestring] ):
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[start] , identifier[mask] = identifier[s] . identifier[split] ( literal[string] , literal[int] )
keyword[return] identifier[validate_ip] ( identifier[start] ) keyword[and] identifier[validate_netmask] ( identifier[mask] )
keyword[else] :
keyword[return] keyword[False]
keyword[raise] identifier[TypeError] ( literal[string] ) | def validate_subnet(s):
"""Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) seperated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
"""
if isinstance(s, basestring):
if '/' in s:
(start, mask) = s.split('/', 2)
return validate_ip(start) and validate_netmask(mask) # depends on [control=['if'], data=['s']]
else:
return False # depends on [control=['if'], data=[]]
raise TypeError('expected string or unicode') |
def put(self, item, **kwargs):
    """Put an item into the queue."""
    if not self.full():
        self._append(item)
    else:
        raise Full()
constant[Put an item into the queue.]
if call[name[self].full, parameter[]] begin[:]
<ast.Raise object at 0x7da18ede4070>
call[name[self]._append, parameter[name[item]]] | keyword[def] identifier[put] ( identifier[self] , identifier[item] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[full] ():
keyword[raise] identifier[Full] ()
identifier[self] . identifier[_append] ( identifier[item] ) | def put(self, item, **kwargs):
"""Put an item into the queue."""
if self.full():
raise Full() # depends on [control=['if'], data=[]]
self._append(item) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.