code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def lex(string):
"this is only used by tests"
safe_lexer = LEXER.clone() # reentrant? I can't tell, I hate implicit globals. do a threading test
safe_lexer.input(string)
a = []
while 1:
t = safe_lexer.token()
if t: a.append(t)
else: break
return a | def function[lex, parameter[string]]:
constant[this is only used by tests]
variable[safe_lexer] assign[=] call[name[LEXER].clone, parameter[]]
call[name[safe_lexer].input, parameter[name[string]]]
variable[a] assign[=] list[[]]
while constant[1] begin[:]
variable[t] assign[=] call[name[safe_lexer].token, parameter[]]
if name[t] begin[:]
call[name[a].append, parameter[name[t]]]
return[name[a]] | keyword[def] identifier[lex] ( identifier[string] ):
literal[string]
identifier[safe_lexer] = identifier[LEXER] . identifier[clone] ()
identifier[safe_lexer] . identifier[input] ( identifier[string] )
identifier[a] =[]
keyword[while] literal[int] :
identifier[t] = identifier[safe_lexer] . identifier[token] ()
keyword[if] identifier[t] : identifier[a] . identifier[append] ( identifier[t] )
keyword[else] : keyword[break]
keyword[return] identifier[a] | def lex(string):
"""this is only used by tests"""
safe_lexer = LEXER.clone() # reentrant? I can't tell, I hate implicit globals. do a threading test
safe_lexer.input(string)
a = []
while 1:
t = safe_lexer.token()
if t:
a.append(t) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
return a |
def open(self, mode='rb'):
"""
Open the file for reading.
:rtype: django.core.files.storage.File
"""
file = self.storage.open(self.relative_name, mode=mode) # type: File
return file | def function[open, parameter[self, mode]]:
constant[
Open the file for reading.
:rtype: django.core.files.storage.File
]
variable[file] assign[=] call[name[self].storage.open, parameter[name[self].relative_name]]
return[name[file]] | keyword[def] identifier[open] ( identifier[self] , identifier[mode] = literal[string] ):
literal[string]
identifier[file] = identifier[self] . identifier[storage] . identifier[open] ( identifier[self] . identifier[relative_name] , identifier[mode] = identifier[mode] )
keyword[return] identifier[file] | def open(self, mode='rb'):
"""
Open the file for reading.
:rtype: django.core.files.storage.File
"""
file = self.storage.open(self.relative_name, mode=mode) # type: File
return file |
def diff(self, a_ref, target=None, b_ref=None):
"""Gerenates diff message string output
Args:
target(str) - file/directory to check diff of
a_ref(str) - first tag
(optional) b_ref(str) - second git tag
Returns:
string: string of output message with diff info
"""
result = {}
diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)
result[DIFF_A_REF] = diff_dct[DIFF_A_REF]
result[DIFF_B_REF] = diff_dct[DIFF_B_REF]
if diff_dct[DIFF_EQUAL]:
result[DIFF_EQUAL] = True
return result
result[DIFF_LIST] = []
diff_outs = _get_diff_outs(self, diff_dct)
if target is None:
result[DIFF_LIST] = [
_diff_royal(self, path, diff_outs[path]) for path in diff_outs
]
elif target in diff_outs:
result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])]
else:
msg = "Have not found file/directory '{}' in the commits"
raise FileNotInCommitError(msg.format(target))
return result | def function[diff, parameter[self, a_ref, target, b_ref]]:
constant[Gerenates diff message string output
Args:
target(str) - file/directory to check diff of
a_ref(str) - first tag
(optional) b_ref(str) - second git tag
Returns:
string: string of output message with diff info
]
variable[result] assign[=] dictionary[[], []]
variable[diff_dct] assign[=] call[name[self].scm.get_diff_trees, parameter[name[a_ref]]]
call[name[result]][name[DIFF_A_REF]] assign[=] call[name[diff_dct]][name[DIFF_A_REF]]
call[name[result]][name[DIFF_B_REF]] assign[=] call[name[diff_dct]][name[DIFF_B_REF]]
if call[name[diff_dct]][name[DIFF_EQUAL]] begin[:]
call[name[result]][name[DIFF_EQUAL]] assign[=] constant[True]
return[name[result]]
call[name[result]][name[DIFF_LIST]] assign[=] list[[]]
variable[diff_outs] assign[=] call[name[_get_diff_outs], parameter[name[self], name[diff_dct]]]
if compare[name[target] is constant[None]] begin[:]
call[name[result]][name[DIFF_LIST]] assign[=] <ast.ListComp object at 0x7da1b1f1a590>
return[name[result]] | keyword[def] identifier[diff] ( identifier[self] , identifier[a_ref] , identifier[target] = keyword[None] , identifier[b_ref] = keyword[None] ):
literal[string]
identifier[result] ={}
identifier[diff_dct] = identifier[self] . identifier[scm] . identifier[get_diff_trees] ( identifier[a_ref] , identifier[b_ref] = identifier[b_ref] )
identifier[result] [ identifier[DIFF_A_REF] ]= identifier[diff_dct] [ identifier[DIFF_A_REF] ]
identifier[result] [ identifier[DIFF_B_REF] ]= identifier[diff_dct] [ identifier[DIFF_B_REF] ]
keyword[if] identifier[diff_dct] [ identifier[DIFF_EQUAL] ]:
identifier[result] [ identifier[DIFF_EQUAL] ]= keyword[True]
keyword[return] identifier[result]
identifier[result] [ identifier[DIFF_LIST] ]=[]
identifier[diff_outs] = identifier[_get_diff_outs] ( identifier[self] , identifier[diff_dct] )
keyword[if] identifier[target] keyword[is] keyword[None] :
identifier[result] [ identifier[DIFF_LIST] ]=[
identifier[_diff_royal] ( identifier[self] , identifier[path] , identifier[diff_outs] [ identifier[path] ]) keyword[for] identifier[path] keyword[in] identifier[diff_outs]
]
keyword[elif] identifier[target] keyword[in] identifier[diff_outs] :
identifier[result] [ identifier[DIFF_LIST] ]=[ identifier[_diff_royal] ( identifier[self] , identifier[target] , identifier[diff_outs] [ identifier[target] ])]
keyword[else] :
identifier[msg] = literal[string]
keyword[raise] identifier[FileNotInCommitError] ( identifier[msg] . identifier[format] ( identifier[target] ))
keyword[return] identifier[result] | def diff(self, a_ref, target=None, b_ref=None):
"""Gerenates diff message string output
Args:
target(str) - file/directory to check diff of
a_ref(str) - first tag
(optional) b_ref(str) - second git tag
Returns:
string: string of output message with diff info
"""
result = {}
diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)
result[DIFF_A_REF] = diff_dct[DIFF_A_REF]
result[DIFF_B_REF] = diff_dct[DIFF_B_REF]
if diff_dct[DIFF_EQUAL]:
result[DIFF_EQUAL] = True
return result # depends on [control=['if'], data=[]]
result[DIFF_LIST] = []
diff_outs = _get_diff_outs(self, diff_dct)
if target is None:
result[DIFF_LIST] = [_diff_royal(self, path, diff_outs[path]) for path in diff_outs] # depends on [control=['if'], data=[]]
elif target in diff_outs:
result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])] # depends on [control=['if'], data=['target', 'diff_outs']]
else:
msg = "Have not found file/directory '{}' in the commits"
raise FileNotInCommitError(msg.format(target))
return result |
def delete_state(self, state):
"""Delete a specified state from the LRS
:param state: State document to be deleted
:type state: :class:`tincan.documents.state_document.StateDocument`
:return: LRS Response object
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
return self._delete_state(
activity=state.activity,
agent=state.agent,
state_id=state.id,
etag=state.etag
) | def function[delete_state, parameter[self, state]]:
constant[Delete a specified state from the LRS
:param state: State document to be deleted
:type state: :class:`tincan.documents.state_document.StateDocument`
:return: LRS Response object
:rtype: :class:`tincan.lrs_response.LRSResponse`
]
return[call[name[self]._delete_state, parameter[]]] | keyword[def] identifier[delete_state] ( identifier[self] , identifier[state] ):
literal[string]
keyword[return] identifier[self] . identifier[_delete_state] (
identifier[activity] = identifier[state] . identifier[activity] ,
identifier[agent] = identifier[state] . identifier[agent] ,
identifier[state_id] = identifier[state] . identifier[id] ,
identifier[etag] = identifier[state] . identifier[etag]
) | def delete_state(self, state):
"""Delete a specified state from the LRS
:param state: State document to be deleted
:type state: :class:`tincan.documents.state_document.StateDocument`
:return: LRS Response object
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
return self._delete_state(activity=state.activity, agent=state.agent, state_id=state.id, etag=state.etag) |
def backing_file(self):
"""
When using linked clone this will return the path to the base image
:returns: None if it's not a linked clone, the path otherwise
"""
with open(self._path, 'rb') as f:
f.seek(self.backing_file_offset)
content = f.read(self.backing_file_size)
path = content.decode()
if len(path) == 0:
return None
return path | def function[backing_file, parameter[self]]:
constant[
When using linked clone this will return the path to the base image
:returns: None if it's not a linked clone, the path otherwise
]
with call[name[open], parameter[name[self]._path, constant[rb]]] begin[:]
call[name[f].seek, parameter[name[self].backing_file_offset]]
variable[content] assign[=] call[name[f].read, parameter[name[self].backing_file_size]]
variable[path] assign[=] call[name[content].decode, parameter[]]
if compare[call[name[len], parameter[name[path]]] equal[==] constant[0]] begin[:]
return[constant[None]]
return[name[path]] | keyword[def] identifier[backing_file] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[seek] ( identifier[self] . identifier[backing_file_offset] )
identifier[content] = identifier[f] . identifier[read] ( identifier[self] . identifier[backing_file_size] )
identifier[path] = identifier[content] . identifier[decode] ()
keyword[if] identifier[len] ( identifier[path] )== literal[int] :
keyword[return] keyword[None]
keyword[return] identifier[path] | def backing_file(self):
"""
When using linked clone this will return the path to the base image
:returns: None if it's not a linked clone, the path otherwise
"""
with open(self._path, 'rb') as f:
f.seek(self.backing_file_offset)
content = f.read(self.backing_file_size) # depends on [control=['with'], data=['f']]
path = content.decode()
if len(path) == 0:
return None # depends on [control=['if'], data=[]]
return path |
def flatten_list(node):
"""
List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list
"""
flat_elems = []
for elem in node:
if elem == 'expr1024':
for subelem in elem:
assert subelem == 'expr32'
for subsubelem in subelem:
flat_elems.append(subsubelem)
elif elem == 'expr32':
for subelem in elem:
assert subelem == 'expr'
flat_elems.append(subelem)
else:
flat_elems.append(elem)
pass
pass
return flat_elems | def function[flatten_list, parameter[node]]:
constant[
List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list
]
variable[flat_elems] assign[=] list[[]]
for taget[name[elem]] in starred[name[node]] begin[:]
if compare[name[elem] equal[==] constant[expr1024]] begin[:]
for taget[name[subelem]] in starred[name[elem]] begin[:]
assert[compare[name[subelem] equal[==] constant[expr32]]]
for taget[name[subsubelem]] in starred[name[subelem]] begin[:]
call[name[flat_elems].append, parameter[name[subsubelem]]]
pass
return[name[flat_elems]] | keyword[def] identifier[flatten_list] ( identifier[node] ):
literal[string]
identifier[flat_elems] =[]
keyword[for] identifier[elem] keyword[in] identifier[node] :
keyword[if] identifier[elem] == literal[string] :
keyword[for] identifier[subelem] keyword[in] identifier[elem] :
keyword[assert] identifier[subelem] == literal[string]
keyword[for] identifier[subsubelem] keyword[in] identifier[subelem] :
identifier[flat_elems] . identifier[append] ( identifier[subsubelem] )
keyword[elif] identifier[elem] == literal[string] :
keyword[for] identifier[subelem] keyword[in] identifier[elem] :
keyword[assert] identifier[subelem] == literal[string]
identifier[flat_elems] . identifier[append] ( identifier[subelem] )
keyword[else] :
identifier[flat_elems] . identifier[append] ( identifier[elem] )
keyword[pass]
keyword[pass]
keyword[return] identifier[flat_elems] | def flatten_list(node):
"""
List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list
"""
flat_elems = []
for elem in node:
if elem == 'expr1024':
for subelem in elem:
assert subelem == 'expr32'
for subsubelem in subelem:
flat_elems.append(subsubelem) # depends on [control=['for'], data=['subsubelem']] # depends on [control=['for'], data=['subelem']] # depends on [control=['if'], data=['elem']]
elif elem == 'expr32':
for subelem in elem:
assert subelem == 'expr'
flat_elems.append(subelem) # depends on [control=['for'], data=['subelem']] # depends on [control=['if'], data=['elem']]
else:
flat_elems.append(elem)
pass
pass # depends on [control=['for'], data=['elem']]
return flat_elems |
def symbolic_master_equation(self, rho=None):
"""Compute the symbolic Liouvillian acting on a state rho
If no rho is given, an OperatorSymbol is created in its place.
This correspnds to the RHS of the master equation
in which an average is taken over the external noise degrees of
freedom.
Args:
rho (Operator): A symbolic density matrix operator
Returns:
Operator: The RHS of the master equation.
"""
L, H = self.L, self.H
if rho is None:
rho = OperatorSymbol('rho', hs=self.space)
return (-I * (H * rho - rho * H) +
sum(Lk * rho * adjoint(Lk) -
(adjoint(Lk) * Lk * rho + rho * adjoint(Lk) * Lk) / 2
for Lk in L.matrix.ravel())) | def function[symbolic_master_equation, parameter[self, rho]]:
constant[Compute the symbolic Liouvillian acting on a state rho
If no rho is given, an OperatorSymbol is created in its place.
This correspnds to the RHS of the master equation
in which an average is taken over the external noise degrees of
freedom.
Args:
rho (Operator): A symbolic density matrix operator
Returns:
Operator: The RHS of the master equation.
]
<ast.Tuple object at 0x7da2041d9f30> assign[=] tuple[[<ast.Attribute object at 0x7da2041db8b0>, <ast.Attribute object at 0x7da2041d9a80>]]
if compare[name[rho] is constant[None]] begin[:]
variable[rho] assign[=] call[name[OperatorSymbol], parameter[constant[rho]]]
return[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da2043461d0> * binary_operation[binary_operation[name[H] * name[rho]] - binary_operation[name[rho] * name[H]]]] + call[name[sum], parameter[<ast.GeneratorExp object at 0x7da2043478b0>]]]] | keyword[def] identifier[symbolic_master_equation] ( identifier[self] , identifier[rho] = keyword[None] ):
literal[string]
identifier[L] , identifier[H] = identifier[self] . identifier[L] , identifier[self] . identifier[H]
keyword[if] identifier[rho] keyword[is] keyword[None] :
identifier[rho] = identifier[OperatorSymbol] ( literal[string] , identifier[hs] = identifier[self] . identifier[space] )
keyword[return] (- identifier[I] *( identifier[H] * identifier[rho] - identifier[rho] * identifier[H] )+
identifier[sum] ( identifier[Lk] * identifier[rho] * identifier[adjoint] ( identifier[Lk] )-
( identifier[adjoint] ( identifier[Lk] )* identifier[Lk] * identifier[rho] + identifier[rho] * identifier[adjoint] ( identifier[Lk] )* identifier[Lk] )/ literal[int]
keyword[for] identifier[Lk] keyword[in] identifier[L] . identifier[matrix] . identifier[ravel] ())) | def symbolic_master_equation(self, rho=None):
"""Compute the symbolic Liouvillian acting on a state rho
If no rho is given, an OperatorSymbol is created in its place.
This correspnds to the RHS of the master equation
in which an average is taken over the external noise degrees of
freedom.
Args:
rho (Operator): A symbolic density matrix operator
Returns:
Operator: The RHS of the master equation.
"""
(L, H) = (self.L, self.H)
if rho is None:
rho = OperatorSymbol('rho', hs=self.space) # depends on [control=['if'], data=['rho']]
return -I * (H * rho - rho * H) + sum((Lk * rho * adjoint(Lk) - (adjoint(Lk) * Lk * rho + rho * adjoint(Lk) * Lk) / 2 for Lk in L.matrix.ravel())) |
def _assume2point():
"""Convert global assumptions to a point."""
point = dict()
for lit in _ASSUMPTIONS:
if isinstance(lit, Complement):
point[~lit] = 0
elif isinstance(lit, Variable):
point[lit] = 1
return point | def function[_assume2point, parameter[]]:
constant[Convert global assumptions to a point.]
variable[point] assign[=] call[name[dict], parameter[]]
for taget[name[lit]] in starred[name[_ASSUMPTIONS]] begin[:]
if call[name[isinstance], parameter[name[lit], name[Complement]]] begin[:]
call[name[point]][<ast.UnaryOp object at 0x7da1b0ed3a90>] assign[=] constant[0]
return[name[point]] | keyword[def] identifier[_assume2point] ():
literal[string]
identifier[point] = identifier[dict] ()
keyword[for] identifier[lit] keyword[in] identifier[_ASSUMPTIONS] :
keyword[if] identifier[isinstance] ( identifier[lit] , identifier[Complement] ):
identifier[point] [~ identifier[lit] ]= literal[int]
keyword[elif] identifier[isinstance] ( identifier[lit] , identifier[Variable] ):
identifier[point] [ identifier[lit] ]= literal[int]
keyword[return] identifier[point] | def _assume2point():
"""Convert global assumptions to a point."""
point = dict()
for lit in _ASSUMPTIONS:
if isinstance(lit, Complement):
point[~lit] = 0 # depends on [control=['if'], data=[]]
elif isinstance(lit, Variable):
point[lit] = 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['lit']]
return point |
def update_file(dk_api, kitchen, recipe_name, message, files_to_update_param):
"""
reutrns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_update_param: string -- file system directory where the recipe file lives
:rtype: string
"""
rc = DKReturnCode()
if kitchen is None or recipe_name is None or message is None or files_to_update_param is None:
s = 'ERROR: DKCloudCommandRunner bad input parameters'
rc.set(rc.DK_FAIL, s)
return rc
# Take a simple string or an array
if isinstance(files_to_update_param, basestring):
files_to_update = [files_to_update_param]
else:
files_to_update = files_to_update_param
msg = ''
for file_to_update in files_to_update:
try:
with open(file_to_update, 'r') as f:
file_contents = f.read()
except IOError as e:
if len(msg) != 0:
msg += '\n'
msg += '%s' % (str(e))
rc.set(rc.DK_FAIL, msg)
return rc
except ValueError as e:
if len(msg) != 0:
msg += '\n'
msg += 'ERROR: %s' % e.message
rc.set(rc.DK_FAIL, msg)
return rc
rc = dk_api.update_file(kitchen, recipe_name, message, file_to_update, file_contents)
if not rc.ok():
if len(msg) != 0:
msg += '\n'
msg += 'DKCloudCommand.update_file for %s failed\n\tmessage: %s' % (file_to_update, rc.get_message())
rc.set_message(msg)
return rc
else:
if len(msg) != 0:
msg += '\n'
msg += 'DKCloudCommand.update_file for %s succeeded' % file_to_update
rc.set_message(msg)
return rc | def function[update_file, parameter[dk_api, kitchen, recipe_name, message, files_to_update_param]]:
constant[
reutrns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_update_param: string -- file system directory where the recipe file lives
:rtype: string
]
variable[rc] assign[=] call[name[DKReturnCode], parameter[]]
if <ast.BoolOp object at 0x7da20e7486d0> begin[:]
variable[s] assign[=] constant[ERROR: DKCloudCommandRunner bad input parameters]
call[name[rc].set, parameter[name[rc].DK_FAIL, name[s]]]
return[name[rc]]
if call[name[isinstance], parameter[name[files_to_update_param], name[basestring]]] begin[:]
variable[files_to_update] assign[=] list[[<ast.Name object at 0x7da20e7484f0>]]
variable[msg] assign[=] constant[]
for taget[name[file_to_update]] in starred[name[files_to_update]] begin[:]
<ast.Try object at 0x7da18bcca800>
variable[rc] assign[=] call[name[dk_api].update_file, parameter[name[kitchen], name[recipe_name], name[message], name[file_to_update], name[file_contents]]]
if <ast.UnaryOp object at 0x7da1b23440a0> begin[:]
if compare[call[name[len], parameter[name[msg]]] not_equal[!=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b2346620>
<ast.AugAssign object at 0x7da1b2346d10>
call[name[rc].set_message, parameter[name[msg]]]
return[name[rc]]
call[name[rc].set_message, parameter[name[msg]]]
return[name[rc]] | keyword[def] identifier[update_file] ( identifier[dk_api] , identifier[kitchen] , identifier[recipe_name] , identifier[message] , identifier[files_to_update_param] ):
literal[string]
identifier[rc] = identifier[DKReturnCode] ()
keyword[if] identifier[kitchen] keyword[is] keyword[None] keyword[or] identifier[recipe_name] keyword[is] keyword[None] keyword[or] identifier[message] keyword[is] keyword[None] keyword[or] identifier[files_to_update_param] keyword[is] keyword[None] :
identifier[s] = literal[string]
identifier[rc] . identifier[set] ( identifier[rc] . identifier[DK_FAIL] , identifier[s] )
keyword[return] identifier[rc]
keyword[if] identifier[isinstance] ( identifier[files_to_update_param] , identifier[basestring] ):
identifier[files_to_update] =[ identifier[files_to_update_param] ]
keyword[else] :
identifier[files_to_update] = identifier[files_to_update_param]
identifier[msg] = literal[string]
keyword[for] identifier[file_to_update] keyword[in] identifier[files_to_update] :
keyword[try] :
keyword[with] identifier[open] ( identifier[file_to_update] , literal[string] ) keyword[as] identifier[f] :
identifier[file_contents] = identifier[f] . identifier[read] ()
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[len] ( identifier[msg] )!= literal[int] :
identifier[msg] += literal[string]
identifier[msg] += literal[string] %( identifier[str] ( identifier[e] ))
identifier[rc] . identifier[set] ( identifier[rc] . identifier[DK_FAIL] , identifier[msg] )
keyword[return] identifier[rc]
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[if] identifier[len] ( identifier[msg] )!= literal[int] :
identifier[msg] += literal[string]
identifier[msg] += literal[string] % identifier[e] . identifier[message]
identifier[rc] . identifier[set] ( identifier[rc] . identifier[DK_FAIL] , identifier[msg] )
keyword[return] identifier[rc]
identifier[rc] = identifier[dk_api] . identifier[update_file] ( identifier[kitchen] , identifier[recipe_name] , identifier[message] , identifier[file_to_update] , identifier[file_contents] )
keyword[if] keyword[not] identifier[rc] . identifier[ok] ():
keyword[if] identifier[len] ( identifier[msg] )!= literal[int] :
identifier[msg] += literal[string]
identifier[msg] += literal[string] %( identifier[file_to_update] , identifier[rc] . identifier[get_message] ())
identifier[rc] . identifier[set_message] ( identifier[msg] )
keyword[return] identifier[rc]
keyword[else] :
keyword[if] identifier[len] ( identifier[msg] )!= literal[int] :
identifier[msg] += literal[string]
identifier[msg] += literal[string] % identifier[file_to_update]
identifier[rc] . identifier[set_message] ( identifier[msg] )
keyword[return] identifier[rc] | def update_file(dk_api, kitchen, recipe_name, message, files_to_update_param):
"""
reutrns a string.
:param dk_api: -- api object
:param kitchen: string
:param recipe_name: string -- kitchen name, string
:param message: string message -- commit message, string
:param files_to_update_param: string -- file system directory where the recipe file lives
:rtype: string
"""
rc = DKReturnCode()
if kitchen is None or recipe_name is None or message is None or (files_to_update_param is None):
s = 'ERROR: DKCloudCommandRunner bad input parameters'
rc.set(rc.DK_FAIL, s)
return rc # depends on [control=['if'], data=[]]
# Take a simple string or an array
if isinstance(files_to_update_param, basestring):
files_to_update = [files_to_update_param] # depends on [control=['if'], data=[]]
else:
files_to_update = files_to_update_param
msg = ''
for file_to_update in files_to_update:
try:
with open(file_to_update, 'r') as f:
file_contents = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError as e:
if len(msg) != 0:
msg += '\n' # depends on [control=['if'], data=[]]
msg += '%s' % str(e)
rc.set(rc.DK_FAIL, msg)
return rc # depends on [control=['except'], data=['e']]
except ValueError as e:
if len(msg) != 0:
msg += '\n' # depends on [control=['if'], data=[]]
msg += 'ERROR: %s' % e.message
rc.set(rc.DK_FAIL, msg)
return rc # depends on [control=['except'], data=['e']]
rc = dk_api.update_file(kitchen, recipe_name, message, file_to_update, file_contents)
if not rc.ok():
if len(msg) != 0:
msg += '\n' # depends on [control=['if'], data=[]]
msg += 'DKCloudCommand.update_file for %s failed\n\tmessage: %s' % (file_to_update, rc.get_message())
rc.set_message(msg)
return rc # depends on [control=['if'], data=[]]
else:
if len(msg) != 0:
msg += '\n' # depends on [control=['if'], data=[]]
msg += 'DKCloudCommand.update_file for %s succeeded' % file_to_update # depends on [control=['for'], data=['file_to_update']]
rc.set_message(msg)
return rc |
def execution_engine_model_changed(self, model, prop_name, info):
"""Active observation of state machine and show and hide widget. """
if not self._view_initialized:
return
active_sm_id = rafcon.gui.singleton.state_machine_manager_model.state_machine_manager.active_state_machine_id
if active_sm_id is None:
# relieve all state machines that have no active execution and hide the widget
self.disable()
else:
# observe all state machines that have an active execution and show the widget
self.check_configuration() | def function[execution_engine_model_changed, parameter[self, model, prop_name, info]]:
constant[Active observation of state machine and show and hide widget. ]
if <ast.UnaryOp object at 0x7da1b1aa6590> begin[:]
return[None]
variable[active_sm_id] assign[=] name[rafcon].gui.singleton.state_machine_manager_model.state_machine_manager.active_state_machine_id
if compare[name[active_sm_id] is constant[None]] begin[:]
call[name[self].disable, parameter[]] | keyword[def] identifier[execution_engine_model_changed] ( identifier[self] , identifier[model] , identifier[prop_name] , identifier[info] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_view_initialized] :
keyword[return]
identifier[active_sm_id] = identifier[rafcon] . identifier[gui] . identifier[singleton] . identifier[state_machine_manager_model] . identifier[state_machine_manager] . identifier[active_state_machine_id]
keyword[if] identifier[active_sm_id] keyword[is] keyword[None] :
identifier[self] . identifier[disable] ()
keyword[else] :
identifier[self] . identifier[check_configuration] () | def execution_engine_model_changed(self, model, prop_name, info):
"""Active observation of state machine and show and hide widget. """
if not self._view_initialized:
return # depends on [control=['if'], data=[]]
active_sm_id = rafcon.gui.singleton.state_machine_manager_model.state_machine_manager.active_state_machine_id
if active_sm_id is None:
# relieve all state machines that have no active execution and hide the widget
self.disable() # depends on [control=['if'], data=[]]
else:
# observe all state machines that have an active execution and show the widget
self.check_configuration() |
def superReadFile(filepath, **kwargs):
"""
Uses pandas.read_excel (on excel files) and returns a dataframe of the
first sheet (unless sheet is specified in kwargs)
Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of
the data. One function to read almost all types of data files.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
ext = os.path.splitext(filepath)[1].lower()
if ext in ['.xlsx', '.xls']:
df = pd.read_excel(filepath, **kwargs)
elif ext in ['.pkl', '.p', '.pickle', '.pk']:
df = pd.read_pickle(filepath)
else:
# Assume it's a text-like file and try to read it.
try:
df = superReadText(filepath, **kwargs)
except Exception as e:
# TODO: Make this trace back better? Custom Exception? Raise original?
raise Exception("Error reading file: {}".format(e))
return df | def function[superReadFile, parameter[filepath]]:
constant[
Uses pandas.read_excel (on excel files) and returns a dataframe of the
first sheet (unless sheet is specified in kwargs)
Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of
the data. One function to read almost all types of data files.
]
if call[name[isinstance], parameter[name[filepath], name[pd].DataFrame]] begin[:]
return[name[filepath]]
variable[ext] assign[=] call[call[call[name[os].path.splitext, parameter[name[filepath]]]][constant[1]].lower, parameter[]]
if compare[name[ext] in list[[<ast.Constant object at 0x7da1b07710c0>, <ast.Constant object at 0x7da1b0771810>]]] begin[:]
variable[df] assign[=] call[name[pd].read_excel, parameter[name[filepath]]]
return[name[df]] | keyword[def] identifier[superReadFile] ( identifier[filepath] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[filepath] , identifier[pd] . identifier[DataFrame] ):
keyword[return] identifier[filepath]
identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filepath] )[ literal[int] ]. identifier[lower] ()
keyword[if] identifier[ext] keyword[in] [ literal[string] , literal[string] ]:
identifier[df] = identifier[pd] . identifier[read_excel] ( identifier[filepath] ,** identifier[kwargs] )
keyword[elif] identifier[ext] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[df] = identifier[pd] . identifier[read_pickle] ( identifier[filepath] )
keyword[else] :
keyword[try] :
identifier[df] = identifier[superReadText] ( identifier[filepath] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return] identifier[df] | def superReadFile(filepath, **kwargs):
"""
Uses pandas.read_excel (on excel files) and returns a dataframe of the
first sheet (unless sheet is specified in kwargs)
Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of
the data. One function to read almost all types of data files.
"""
if isinstance(filepath, pd.DataFrame):
return filepath # depends on [control=['if'], data=[]]
ext = os.path.splitext(filepath)[1].lower()
if ext in ['.xlsx', '.xls']:
df = pd.read_excel(filepath, **kwargs) # depends on [control=['if'], data=[]]
elif ext in ['.pkl', '.p', '.pickle', '.pk']:
df = pd.read_pickle(filepath) # depends on [control=['if'], data=[]]
else:
# Assume it's a text-like file and try to read it.
try:
df = superReadText(filepath, **kwargs) # depends on [control=['try'], data=[]]
except Exception as e:
# TODO: Make this trace back better? Custom Exception? Raise original?
raise Exception('Error reading file: {}'.format(e)) # depends on [control=['except'], data=['e']]
return df |
def create_line_generator(self):
    """
    Lazily yield the lines of ``self.file_name``.

    Gzip-compressed files (``*.gz``) are transparently decompressed.  Files
    are opened in text mode; on Python 3 they are decoded with
    ``self.encoding`` (the Python 2 ``gzip`` path cannot take an encoding).
    The handle is always closed — on normal exhaustion, on error, and when
    the consumer abandons the generator (``generator.close()`` raises
    GeneratorExit inside the ``try``, so ``finally`` runs).

    NOTE(review): despite the original comment, empty lines are yielded
    as-is; lines keep their trailing newline characters.
    """
    if self.file_name.endswith(".gz"):
        if sys.version_info.major == 3:
            handle = gzip.open(self.file_name, mode='rt', encoding=self.encoding)
        else:
            # Python 2 gzip.open() has no `encoding` parameter
            handle = gzip.open(self.file_name, mode='rt')
    elif sys.version_info.major == 3:
        # Python 3 native `open` is much faster
        handle = open(self.file_name, mode='r', encoding=self.encoding)
    else:
        # Python 2 needs the codecs package to deal with encoding
        handle = codecs.open(self.file_name, mode='r', encoding=self.encoding)
    try:
        # Iterate lazily instead of readlines(): the old gz branch loaded
        # the entire decompressed file into memory before yielding anything.
        for line in handle:
            yield line
    finally:
        handle.close()
constant[
Creates a generator function yielding lines in the file
Should only yield non-empty lines
]
if call[name[self].file_name.endswith, parameter[constant[.gz]]] begin[:]
if compare[name[sys].version_info.major equal[==] constant[3]] begin[:]
variable[gz] assign[=] call[name[gzip].open, parameter[name[self].file_name]]
for taget[name[line]] in starred[call[name[gz].readlines, parameter[]]] begin[:]
<ast.Yield object at 0x7da204347a90>
call[name[gz].close, parameter[]] | keyword[def] identifier[create_line_generator] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[file_name] . identifier[endswith] ( literal[string] ):
keyword[if] identifier[sys] . identifier[version_info] . identifier[major] == literal[int] :
identifier[gz] = identifier[gzip] . identifier[open] ( identifier[self] . identifier[file_name] , identifier[mode] = literal[string] , identifier[encoding] = identifier[self] . identifier[encoding] )
keyword[else] :
identifier[gz] = identifier[gzip] . identifier[open] ( identifier[self] . identifier[file_name] , identifier[mode] = literal[string] )
keyword[for] identifier[line] keyword[in] identifier[gz] . identifier[readlines] ():
keyword[yield] identifier[line]
identifier[gz] . identifier[close] ()
keyword[else] :
keyword[if] identifier[sys] . identifier[version_info] . identifier[major] == literal[int] :
identifier[file] = identifier[open] ( identifier[self] . identifier[file_name] , identifier[mode] = literal[string] , identifier[encoding] = identifier[self] . identifier[encoding] )
keyword[else] :
identifier[file] = identifier[codecs] . identifier[open] ( identifier[self] . identifier[file_name] , identifier[mode] = literal[string] , identifier[encoding] = identifier[self] . identifier[encoding] )
keyword[for] identifier[line] keyword[in] identifier[file] :
keyword[yield] identifier[line]
identifier[file] . identifier[close] () | def create_line_generator(self):
"""
Creates a generator function yielding lines in the file
Should only yield non-empty lines
"""
if self.file_name.endswith('.gz'):
if sys.version_info.major == 3:
gz = gzip.open(self.file_name, mode='rt', encoding=self.encoding) # depends on [control=['if'], data=[]]
else:
gz = gzip.open(self.file_name, mode='rt')
for line in gz.readlines():
yield line # depends on [control=['for'], data=['line']]
gz.close() # depends on [control=['if'], data=[]]
else:
if sys.version_info.major == 3:
# Python 3 native `open` is much faster
file = open(self.file_name, mode='r', encoding=self.encoding) # depends on [control=['if'], data=[]]
else:
# Python 2 needs the codecs package to deal with encoding
file = codecs.open(self.file_name, mode='r', encoding=self.encoding)
for line in file:
yield line # depends on [control=['for'], data=['line']]
file.close() |
def close_stale_prs(self, update, pull_request, scheduled):
    """
    Close stale pull requests that `pull_request` supersedes, leaving a
    comment linking to the new PR, and remove them from
    ``self.pull_requests``.

    A stale PR is one that:
    - Is not merged
    - Is not closed
    - Has no commits (except the bot commit)

    Does nothing without a bot token or when `pull_request` is the
    initial PR.

    :param update: the update the new pull request was created for
    :param pull_request: the freshly created pull request
    :param scheduled: True when this run was triggered by a schedule
    """
    if not self.bot_token or pull_request.is_initial:
        # Without a bot token we can't close anything, and the initial
        # PR never supersedes an earlier one.
        return
    prefix = self.config.pr_prefix
    new_title = pull_request.canonical_title(prefix)
    stale = []
    for candidate in self.pull_requests:
        title_differs = candidate.canonical_title(prefix) != new_title
        should_close = False
        if scheduled and pull_request.is_scheduled:
            # A new scheduled PR supersedes any still-open scheduled PR,
            # and also any open update PR if the user switched from
            # per-update PRs to scheduled ones.
            should_close = (candidate.is_open and title_differs
                            and (candidate.is_scheduled or candidate.is_update))
        elif pull_request.is_update:
            # A new update PR only supersedes an open update PR for the
            # same requirement.  Guard against the race where multiple
            # updates with more than one target version would close each
            # other's PRs.
            should_close = (
                candidate.is_update
                and candidate.is_open
                and title_differs
                and candidate.get_requirement(prefix) == update.requirement.key
                and not self.has_conflicting_update(update)
            )
        if should_close and self.is_bot_the_only_committer(pr=candidate):
            logger.info("Closing stale PR {} for {}".format(candidate.title,
                                                            pull_request.title))
            self.provider.close_pull_request(
                bot_repo=self.bot_repo,
                user_repo=self.user_repo,
                pull_request=candidate,
                comment="Closing this in favor of #{}".format(
                    pull_request.number),
                prefix=self.config.branch_prefix
            )
            candidate.state = "closed"
            stale.append(candidate)
    for candidate in stale:
        self.pull_requests.remove(candidate)
constant[
Closes stale pull requests for the given update, links to the new pull request and deletes
the stale branch.
A stale PR is a PR that:
- Is not merged
- Is not closed
- Has no commits (except the bot commit)
:param update:
:param pull_request:
]
variable[closed] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b07f9720> begin[:]
for taget[name[pr]] in starred[name[self].pull_requests] begin[:]
variable[close_pr] assign[=] constant[False]
variable[same_title] assign[=] compare[call[name[pr].canonical_title, parameter[name[self].config.pr_prefix]] equal[==] call[name[pull_request].canonical_title, parameter[name[self].config.pr_prefix]]]
if <ast.BoolOp object at 0x7da1b07fae00> begin[:]
if <ast.BoolOp object at 0x7da1b07fa5c0> begin[:]
if <ast.BoolOp object at 0x7da1b07f87c0> begin[:]
variable[close_pr] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b07fb610> begin[:]
call[name[logger].info, parameter[call[constant[Closing stale PR {} for {}].format, parameter[name[pr].title, name[pull_request].title]]]]
call[name[self].provider.close_pull_request, parameter[]]
name[pr].state assign[=] constant[closed]
call[name[closed].append, parameter[name[pr]]]
for taget[name[closed_pr]] in starred[name[closed]] begin[:]
call[name[self].pull_requests.remove, parameter[name[closed_pr]]] | keyword[def] identifier[close_stale_prs] ( identifier[self] , identifier[update] , identifier[pull_request] , identifier[scheduled] ):
literal[string]
identifier[closed] =[]
keyword[if] identifier[self] . identifier[bot_token] keyword[and] keyword[not] identifier[pull_request] . identifier[is_initial] :
keyword[for] identifier[pr] keyword[in] identifier[self] . identifier[pull_requests] :
identifier[close_pr] = keyword[False]
identifier[same_title] = identifier[pr] . identifier[canonical_title] ( identifier[self] . identifier[config] . identifier[pr_prefix] )== identifier[pull_request] . identifier[canonical_title] ( identifier[self] . identifier[config] . identifier[pr_prefix] )
keyword[if] identifier[scheduled] keyword[and] identifier[pull_request] . identifier[is_scheduled] :
keyword[if] identifier[pr] . identifier[is_open] keyword[and] keyword[not] identifier[same_title] :
keyword[if] identifier[pr] . identifier[is_scheduled] keyword[or] identifier[pr] . identifier[is_update] :
identifier[close_pr] = keyword[True]
keyword[elif] identifier[pull_request] . identifier[is_update] :
keyword[if] identifier[pr] . identifier[is_update] keyword[and] identifier[pr] . identifier[is_open] keyword[and] keyword[not] identifier[same_title] keyword[and] identifier[pr] . identifier[get_requirement] ( identifier[self] . identifier[config] . identifier[pr_prefix] )== identifier[update] . identifier[requirement] . identifier[key] :
keyword[if] keyword[not] identifier[self] . identifier[has_conflicting_update] ( identifier[update] ):
identifier[close_pr] = keyword[True]
keyword[if] identifier[close_pr] keyword[and] identifier[self] . identifier[is_bot_the_only_committer] ( identifier[pr] = identifier[pr] ):
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[pr] . identifier[title] , identifier[pull_request] . identifier[title] ))
identifier[self] . identifier[provider] . identifier[close_pull_request] (
identifier[bot_repo] = identifier[self] . identifier[bot_repo] ,
identifier[user_repo] = identifier[self] . identifier[user_repo] ,
identifier[pull_request] = identifier[pr] ,
identifier[comment] = literal[string] . identifier[format] (
identifier[pull_request] . identifier[number] ),
identifier[prefix] = identifier[self] . identifier[config] . identifier[branch_prefix]
)
identifier[pr] . identifier[state] = literal[string]
identifier[closed] . identifier[append] ( identifier[pr] )
keyword[for] identifier[closed_pr] keyword[in] identifier[closed] :
identifier[self] . identifier[pull_requests] . identifier[remove] ( identifier[closed_pr] ) | def close_stale_prs(self, update, pull_request, scheduled):
"""
Closes stale pull requests for the given update, links to the new pull request and deletes
the stale branch.
A stale PR is a PR that:
- Is not merged
- Is not closed
- Has no commits (except the bot commit)
:param update:
:param pull_request:
"""
closed = []
if self.bot_token and (not pull_request.is_initial):
for pr in self.pull_requests:
close_pr = False
same_title = pr.canonical_title(self.config.pr_prefix) == pull_request.canonical_title(self.config.pr_prefix)
if scheduled and pull_request.is_scheduled:
# check that the PR is open and the title does not match
if pr.is_open and (not same_title):
# we want to close the previous scheduled PR if it is not merged yet
# and we want to close all previous updates if the user choose to
# switch to a scheduled update
if pr.is_scheduled or pr.is_update:
close_pr = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif pull_request.is_update:
# check that, the pr is an update, is open, the titles are not equal and that
# the requirement matches
if pr.is_update and pr.is_open and (not same_title) and (pr.get_requirement(self.config.pr_prefix) == update.requirement.key):
# there's a possible race condition where multiple updates with more than
# one target version conflict with each other (closing each others PRs).
# Check that's not the case here
if not self.has_conflicting_update(update):
close_pr = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if close_pr and self.is_bot_the_only_committer(pr=pr):
logger.info('Closing stale PR {} for {}'.format(pr.title, pull_request.title))
self.provider.close_pull_request(bot_repo=self.bot_repo, user_repo=self.user_repo, pull_request=pr, comment='Closing this in favor of #{}'.format(pull_request.number), prefix=self.config.branch_prefix)
pr.state = 'closed'
closed.append(pr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pr']] # depends on [control=['if'], data=[]]
for closed_pr in closed:
self.pull_requests.remove(closed_pr) # depends on [control=['for'], data=['closed_pr']] |
def fanout_message(connections, payload):
    """
    Broadcast *payload* (JSON-serialised) to every connected websocket
    client in *connections*.

    Delivery is best-effort: a failure on one connection is logged at
    debug level and does not stop delivery to the remaining clients.
    This is a ``yield from``-style coroutine and must be driven by an
    event loop.
    """
    for conn in connections:
        try:
            yield from conn.send(json.dumps(payload))
        except Exception as e:
            # BUG FIX: `logger.debug('could not send', e)` passed `e` as a
            # %-format argument to a message with no placeholder, which
            # broke record formatting.  Use lazy %-style formatting.
            logger.debug('could not send: %s', e)
constant[
Distributes payload (message) to all connected ws clients
]
for taget[name[conn]] in starred[name[connections]] begin[:]
<ast.Try object at 0x7da204344eb0> | keyword[def] identifier[fanout_message] ( identifier[connections] , identifier[payload] ):
literal[string]
keyword[for] identifier[conn] keyword[in] identifier[connections] :
keyword[try] :
keyword[yield] keyword[from] identifier[conn] . identifier[send] ( identifier[json] . identifier[dumps] ( identifier[payload] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[e] ) | def fanout_message(connections, payload):
"""
Distributes payload (message) to all connected ws clients
"""
for conn in connections:
try:
yield from conn.send(json.dumps(payload)) # depends on [control=['try'], data=[]]
except Exception as e:
logger.debug('could not send', e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['conn']] |
def get_list(list_type=None,
             search_term=None,
             page=None,
             page_size=None,
             sort_by=None):
    '''
    Returns a list of domains for the particular user as a list of objects
    offset by ``page`` length of ``page_size``
    list_type : ALL
        One of ``ALL``, ``EXPIRING``, ``EXPIRED``
    search_term
        Keyword to look for on the domain list (truncated to 70 characters)
    page : 1
        Number of result page to return
    page_size : 20
        Number of domains to be listed per page (minimum: ``10``, maximum:
        ``100``)
    sort_by
        One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
        ``CREATEDATE``, or ``CREATEDATE_DESC``
    CLI Example:
    .. code-block:: bash
        salt 'my-minion' namecheap_domains.get_list
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')

    if list_type is not None:
        if list_type not in ('ALL', 'EXPIRING', 'EXPIRED'):
            log.error('Invalid option for list_type')
            raise Exception('Invalid option for list_type')
        opts['ListType'] = list_type

    if search_term is not None:
        # The API only accepts search terms up to 70 characters long.
        if len(search_term) > 70:
            log.warning('search_term trimmed to first 70 characters')
            search_term = search_term[:70]
        opts['SearchTerm'] = search_term

    if page is not None:
        opts['Page'] = page

    if page_size is not None:
        if page_size > 100 or page_size < 10:
            # BUG FIX: this previously logged/raised 'Invalid option for
            # page', masking which argument was actually rejected.
            log.error('Invalid option for page_size')
            raise Exception('Invalid option for page_size')
        opts['PageSize'] = page_size

    if sort_by is not None:
        if sort_by not in ('NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC',
                           'CREATEDATE', 'CREATEDATE_DESC'):
            log.error('Invalid option for sort_by')
            raise Exception('Invalid option for sort_by')
        opts['SortBy'] = sort_by

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        # Request failed (already logged by the helper) — no domains.
        return []

    domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
    return [salt.utils.namecheap.atts_to_dict(d)
            for d in domainresult.getElementsByTagName("Domain")]
constant[
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type : ALL
One of ``ALL``, ``EXPIRING``, ``EXPIRED``
search_term
Keyword to look for on the domain list
page : 1
Number of result page to return
page_size : 20
Number of domains to be listed per page (minimum: ``10``, maximum:
``100``)
sort_by
One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
``CREATEDATE``, or ``CREATEDATE_DESC``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
]
variable[opts] assign[=] call[name[salt].utils.namecheap.get_opts, parameter[constant[namecheap.domains.getList]]]
if compare[name[list_type] is_not constant[None]] begin[:]
if compare[name[list_type] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da204621810>, <ast.Constant object at 0x7da204622fe0>, <ast.Constant object at 0x7da204620fd0>]]] begin[:]
call[name[log].error, parameter[constant[Invalid option for list_type]]]
<ast.Raise object at 0x7da204620ac0>
call[name[opts]][constant[ListType]] assign[=] name[list_type]
if compare[name[search_term] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[name[search_term]]] greater[>] constant[70]] begin[:]
call[name[log].warning, parameter[constant[search_term trimmed to first 70 characters]]]
variable[search_term] assign[=] call[name[search_term]][<ast.Slice object at 0x7da20c76d030>]
call[name[opts]][constant[SearchTerm]] assign[=] name[search_term]
if compare[name[page] is_not constant[None]] begin[:]
call[name[opts]][constant[Page]] assign[=] name[page]
if compare[name[page_size] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18bc73460> begin[:]
call[name[log].error, parameter[constant[Invalid option for page]]]
<ast.Raise object at 0x7da18bc73070>
call[name[opts]][constant[PageSize]] assign[=] name[page_size]
if compare[name[sort_by] is_not constant[None]] begin[:]
if compare[name[sort_by] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18bc72ec0>, <ast.Constant object at 0x7da18bc727a0>, <ast.Constant object at 0x7da18bc71900>, <ast.Constant object at 0x7da18bc70520>, <ast.Constant object at 0x7da18bc70af0>, <ast.Constant object at 0x7da18bc712d0>]]] begin[:]
call[name[log].error, parameter[constant[Invalid option for sort_by]]]
<ast.Raise object at 0x7da18bc73670>
call[name[opts]][constant[SortBy]] assign[=] name[sort_by]
variable[response_xml] assign[=] call[name[salt].utils.namecheap.get_request, parameter[name[opts]]]
if compare[name[response_xml] is constant[None]] begin[:]
return[list[[]]]
variable[domainresult] assign[=] call[call[name[response_xml].getElementsByTagName, parameter[constant[DomainGetListResult]]]][constant[0]]
variable[domains] assign[=] list[[]]
for taget[name[d]] in starred[call[name[domainresult].getElementsByTagName, parameter[constant[Domain]]]] begin[:]
call[name[domains].append, parameter[call[name[salt].utils.namecheap.atts_to_dict, parameter[name[d]]]]]
return[name[domains]] | keyword[def] identifier[get_list] ( identifier[list_type] = keyword[None] ,
identifier[search_term] = keyword[None] ,
identifier[page] = keyword[None] ,
identifier[page_size] = keyword[None] ,
identifier[sort_by] = keyword[None] ):
literal[string]
identifier[opts] = identifier[salt] . identifier[utils] . identifier[namecheap] . identifier[get_opts] ( literal[string] )
keyword[if] identifier[list_type] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[list_type] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[log] . identifier[error] ( literal[string] )
keyword[raise] identifier[Exception] ( literal[string] )
identifier[opts] [ literal[string] ]= identifier[list_type]
keyword[if] identifier[search_term] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[search_term] )> literal[int] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[search_term] = identifier[search_term] [ literal[int] : literal[int] ]
identifier[opts] [ literal[string] ]= identifier[search_term]
keyword[if] identifier[page] keyword[is] keyword[not] keyword[None] :
identifier[opts] [ literal[string] ]= identifier[page]
keyword[if] identifier[page_size] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[page_size] > literal[int] keyword[or] identifier[page_size] < literal[int] :
identifier[log] . identifier[error] ( literal[string] )
keyword[raise] identifier[Exception] ( literal[string] )
identifier[opts] [ literal[string] ]= identifier[page_size]
keyword[if] identifier[sort_by] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[sort_by] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[log] . identifier[error] ( literal[string] )
keyword[raise] identifier[Exception] ( literal[string] )
identifier[opts] [ literal[string] ]= identifier[sort_by]
identifier[response_xml] = identifier[salt] . identifier[utils] . identifier[namecheap] . identifier[get_request] ( identifier[opts] )
keyword[if] identifier[response_xml] keyword[is] keyword[None] :
keyword[return] []
identifier[domainresult] = identifier[response_xml] . identifier[getElementsByTagName] ( literal[string] )[ literal[int] ]
identifier[domains] =[]
keyword[for] identifier[d] keyword[in] identifier[domainresult] . identifier[getElementsByTagName] ( literal[string] ):
identifier[domains] . identifier[append] ( identifier[salt] . identifier[utils] . identifier[namecheap] . identifier[atts_to_dict] ( identifier[d] ))
keyword[return] identifier[domains] | def get_list(list_type=None, search_term=None, page=None, page_size=None, sort_by=None):
"""
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type : ALL
One of ``ALL``, ``EXPIRING``, ``EXPIRED``
search_term
Keyword to look for on the domain list
page : 1
Number of result page to return
page_size : 20
Number of domains to be listed per page (minimum: ``10``, maximum:
``100``)
sort_by
One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
``CREATEDATE``, or ``CREATEDATE_DESC``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
"""
opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')
if list_type is not None:
if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:
log.error('Invalid option for list_type')
raise Exception('Invalid option for list_type') # depends on [control=['if'], data=[]]
opts['ListType'] = list_type # depends on [control=['if'], data=['list_type']]
if search_term is not None:
if len(search_term) > 70:
log.warning('search_term trimmed to first 70 characters')
search_term = search_term[0:70] # depends on [control=['if'], data=[]]
opts['SearchTerm'] = search_term # depends on [control=['if'], data=['search_term']]
if page is not None:
opts['Page'] = page # depends on [control=['if'], data=['page']]
if page_size is not None:
if page_size > 100 or page_size < 10:
log.error('Invalid option for page')
raise Exception('Invalid option for page') # depends on [control=['if'], data=[]]
opts['PageSize'] = page_size # depends on [control=['if'], data=['page_size']]
if sort_by is not None:
if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:
log.error('Invalid option for sort_by')
raise Exception('Invalid option for sort_by') # depends on [control=['if'], data=[]]
opts['SortBy'] = sort_by # depends on [control=['if'], data=['sort_by']]
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return [] # depends on [control=['if'], data=[]]
domainresult = response_xml.getElementsByTagName('DomainGetListResult')[0]
domains = []
for d in domainresult.getElementsByTagName('Domain'):
domains.append(salt.utils.namecheap.atts_to_dict(d)) # depends on [control=['for'], data=['d']]
return domains |
def add(self, item, count=1):
    '''
    When we receive stream of data, we add them in the chunk
    which has limit on the no. of items that it will store.
    >>> s = StreamCounter(5,5)
    >>> data_stream = ['a','b','c','d']
    >>> for item in data_stream:
    ...     s.add(item)
    >>> s.chunk_size
    5
    >>> s.n_items_seen
    4
    >>> s.n_chunk_items_seen
    4
    >>> s.n_chunks
    0
    >>> from pprint import pprint
    >>> pprint(s.chunked_counts.get(s.n_chunks, {}))
    {'a': 1, 'b': 1, 'c': 1, 'd': 1}
    >>> s.counts_total
    4
    >>> data_stream = ['a','b','c','d','e','f','g','e']
    >>> for item in data_stream:
    ...     s.add(item)
    >>> s.chunk_size
    5
    >>> s.n_items_seen
    12
    >>> s.n_chunk_items_seen
    2
    >>> s.n_chunks
    2
    >>> s.chunked_counts.get(s.n_chunks, {})
    {'g': 1, 'e': 1}
    '''
    self.n_items_seen += count
    self.n_chunk_items_seen += count
    # Counter dict of the chunk currently being filled (created on demand).
    current = self.chunked_counts.setdefault(self.n_chunks, {})
    if item not in current:
        # First time this item appears in the current chunk: one more
        # distinct count entry overall.
        self.n_counts += 1
        current[item] = count
    else:
        current[item] += count
    # Once the chunk is full, start a new one.
    if self.n_chunk_items_seen >= self.chunk_size:
        self.n_chunks += 1
        self.n_chunk_items_seen = 0
        # If we exceeded the allowed number of count entries, evict the
        # oldest chunks until we are back under the limit.
        while self.n_counts >= self.max_counts:
            self._drop_oldest_chunk()
constant[
When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1}
]
<ast.AugAssign object at 0x7da1b195f9d0>
<ast.AugAssign object at 0x7da1b195d7e0>
variable[chunk_id] assign[=] name[self].n_chunks
variable[chunk] assign[=] call[name[self].chunked_counts.get, parameter[name[chunk_id], dictionary[[], []]]]
call[name[self].chunked_counts][name[chunk_id]] assign[=] name[chunk]
if compare[name[item] in name[chunk]] begin[:]
<ast.AugAssign object at 0x7da1b195ed10>
if compare[name[self].n_chunk_items_seen greater_or_equal[>=] name[self].chunk_size] begin[:]
<ast.AugAssign object at 0x7da1b195f4c0>
name[self].n_chunk_items_seen assign[=] constant[0]
while compare[name[self].n_counts greater_or_equal[>=] name[self].max_counts] begin[:]
call[name[self]._drop_oldest_chunk, parameter[]] | keyword[def] identifier[add] ( identifier[self] , identifier[item] , identifier[count] = literal[int] ):
literal[string]
identifier[self] . identifier[n_items_seen] += identifier[count]
identifier[self] . identifier[n_chunk_items_seen] += identifier[count]
identifier[chunk_id] = identifier[self] . identifier[n_chunks]
identifier[chunk] = identifier[self] . identifier[chunked_counts] . identifier[get] ( identifier[chunk_id] ,{})
identifier[self] . identifier[chunked_counts] [ identifier[chunk_id] ]= identifier[chunk]
keyword[if] identifier[item] keyword[in] identifier[chunk] :
identifier[chunk] [ identifier[item] ]+= identifier[count]
keyword[else] :
identifier[self] . identifier[n_counts] += literal[int]
identifier[chunk] [ identifier[item] ]= identifier[count]
keyword[if] identifier[self] . identifier[n_chunk_items_seen] >= identifier[self] . identifier[chunk_size] :
identifier[self] . identifier[n_chunks] += literal[int]
identifier[self] . identifier[n_chunk_items_seen] = literal[int]
keyword[while] identifier[self] . identifier[n_counts] >= identifier[self] . identifier[max_counts] :
identifier[self] . identifier[_drop_oldest_chunk] () | def add(self, item, count=1):
"""
When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1}
"""
self.n_items_seen += count
self.n_chunk_items_seen += count
# get current chunk
chunk_id = self.n_chunks
chunk = self.chunked_counts.get(chunk_id, {})
self.chunked_counts[chunk_id] = chunk
# update count in the current chunk counter dict
if item in chunk:
chunk[item] += count # depends on [control=['if'], data=['item', 'chunk']]
else:
self.n_counts += 1
chunk[item] = count
# is the current chunk done?
if self.n_chunk_items_seen >= self.chunk_size:
self.n_chunks += 1
self.n_chunk_items_seen = 0 # depends on [control=['if'], data=[]]
# In case we reached max capacity in count entries,
# drop oldest chunks until we come back within limit
while self.n_counts >= self.max_counts:
self._drop_oldest_chunk() # depends on [control=['while'], data=[]] |
def _addr(self):
"""
Assign dae addresses for algebraic and state variables.
Addresses are stored in ``self.__dict__[var]``.
``dae.m`` and ``dae.n`` are updated accordingly.
Returns
-------
None
"""
group_by = self._config['address_group_by']
assert not self._flags['address'], "{} address already assigned".format(self._name)
assert group_by in ('element', 'variable')
m0 = self.system.dae.m
n0 = self.system.dae.n
mend = m0 + len(self._algebs) * self.n
nend = n0 + len(self._states) * self.n
if group_by == 'variable':
for idx, item in enumerate(self._algebs):
self.__dict__[item] = list(
range(m0 + idx * self.n, m0 + (idx + 1) * self.n))
for idx, item in enumerate(self._states):
self.__dict__[item] = list(
range(n0 + idx * self.n, n0 + (idx + 1) * self.n))
elif group_by == 'element':
for idx, item in enumerate(self._algebs):
self.__dict__[item] = list(
range(m0 + idx, mend, len(self._algebs)))
for idx, item in enumerate(self._states):
self.__dict__[item] = list(
range(n0 + idx, nend, len(self._states)))
self.system.dae.m = mend
self.system.dae.n = nend
self._flags['address'] = True | def function[_addr, parameter[self]]:
constant[
Assign dae addresses for algebraic and state variables.
Addresses are stored in ``self.__dict__[var]``.
``dae.m`` and ``dae.n`` are updated accordingly.
Returns
-------
None
]
variable[group_by] assign[=] call[name[self]._config][constant[address_group_by]]
assert[<ast.UnaryOp object at 0x7da18c4cd420>]
assert[compare[name[group_by] in tuple[[<ast.Constant object at 0x7da18eb57370>, <ast.Constant object at 0x7da18eb55f90>]]]]
variable[m0] assign[=] name[self].system.dae.m
variable[n0] assign[=] name[self].system.dae.n
variable[mend] assign[=] binary_operation[name[m0] + binary_operation[call[name[len], parameter[name[self]._algebs]] * name[self].n]]
variable[nend] assign[=] binary_operation[name[n0] + binary_operation[call[name[len], parameter[name[self]._states]] * name[self].n]]
if compare[name[group_by] equal[==] constant[variable]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18eb55630>, <ast.Name object at 0x7da18eb56260>]]] in starred[call[name[enumerate], parameter[name[self]._algebs]]] begin[:]
call[name[self].__dict__][name[item]] assign[=] call[name[list], parameter[call[name[range], parameter[binary_operation[name[m0] + binary_operation[name[idx] * name[self].n]], binary_operation[name[m0] + binary_operation[binary_operation[name[idx] + constant[1]] * name[self].n]]]]]]
for taget[tuple[[<ast.Name object at 0x7da18eb55420>, <ast.Name object at 0x7da18eb56ec0>]]] in starred[call[name[enumerate], parameter[name[self]._states]]] begin[:]
call[name[self].__dict__][name[item]] assign[=] call[name[list], parameter[call[name[range], parameter[binary_operation[name[n0] + binary_operation[name[idx] * name[self].n]], binary_operation[name[n0] + binary_operation[binary_operation[name[idx] + constant[1]] * name[self].n]]]]]]
name[self].system.dae.m assign[=] name[mend]
name[self].system.dae.n assign[=] name[nend]
call[name[self]._flags][constant[address]] assign[=] constant[True] | keyword[def] identifier[_addr] ( identifier[self] ):
literal[string]
identifier[group_by] = identifier[self] . identifier[_config] [ literal[string] ]
keyword[assert] keyword[not] identifier[self] . identifier[_flags] [ literal[string] ], literal[string] . identifier[format] ( identifier[self] . identifier[_name] )
keyword[assert] identifier[group_by] keyword[in] ( literal[string] , literal[string] )
identifier[m0] = identifier[self] . identifier[system] . identifier[dae] . identifier[m]
identifier[n0] = identifier[self] . identifier[system] . identifier[dae] . identifier[n]
identifier[mend] = identifier[m0] + identifier[len] ( identifier[self] . identifier[_algebs] )* identifier[self] . identifier[n]
identifier[nend] = identifier[n0] + identifier[len] ( identifier[self] . identifier[_states] )* identifier[self] . identifier[n]
keyword[if] identifier[group_by] == literal[string] :
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_algebs] ):
identifier[self] . identifier[__dict__] [ identifier[item] ]= identifier[list] (
identifier[range] ( identifier[m0] + identifier[idx] * identifier[self] . identifier[n] , identifier[m0] +( identifier[idx] + literal[int] )* identifier[self] . identifier[n] ))
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_states] ):
identifier[self] . identifier[__dict__] [ identifier[item] ]= identifier[list] (
identifier[range] ( identifier[n0] + identifier[idx] * identifier[self] . identifier[n] , identifier[n0] +( identifier[idx] + literal[int] )* identifier[self] . identifier[n] ))
keyword[elif] identifier[group_by] == literal[string] :
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_algebs] ):
identifier[self] . identifier[__dict__] [ identifier[item] ]= identifier[list] (
identifier[range] ( identifier[m0] + identifier[idx] , identifier[mend] , identifier[len] ( identifier[self] . identifier[_algebs] )))
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_states] ):
identifier[self] . identifier[__dict__] [ identifier[item] ]= identifier[list] (
identifier[range] ( identifier[n0] + identifier[idx] , identifier[nend] , identifier[len] ( identifier[self] . identifier[_states] )))
identifier[self] . identifier[system] . identifier[dae] . identifier[m] = identifier[mend]
identifier[self] . identifier[system] . identifier[dae] . identifier[n] = identifier[nend]
identifier[self] . identifier[_flags] [ literal[string] ]= keyword[True] | def _addr(self):
"""
Assign dae addresses for algebraic and state variables.
Addresses are stored in ``self.__dict__[var]``.
``dae.m`` and ``dae.n`` are updated accordingly.
Returns
-------
None
"""
group_by = self._config['address_group_by']
assert not self._flags['address'], '{} address already assigned'.format(self._name)
assert group_by in ('element', 'variable')
m0 = self.system.dae.m
n0 = self.system.dae.n
mend = m0 + len(self._algebs) * self.n
nend = n0 + len(self._states) * self.n
if group_by == 'variable':
for (idx, item) in enumerate(self._algebs):
self.__dict__[item] = list(range(m0 + idx * self.n, m0 + (idx + 1) * self.n)) # depends on [control=['for'], data=[]]
for (idx, item) in enumerate(self._states):
self.__dict__[item] = list(range(n0 + idx * self.n, n0 + (idx + 1) * self.n)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif group_by == 'element':
for (idx, item) in enumerate(self._algebs):
self.__dict__[item] = list(range(m0 + idx, mend, len(self._algebs))) # depends on [control=['for'], data=[]]
for (idx, item) in enumerate(self._states):
self.__dict__[item] = list(range(n0 + idx, nend, len(self._states))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
self.system.dae.m = mend
self.system.dae.n = nend
self._flags['address'] = True |
def matches(cntxt: Context, T: RDFGraph, expr: ShExJ.tripleExpr) -> bool:
"""
**matches**: asserts that a triple expression is matched by a set of triples that come from the neighbourhood of a
node in an RDF graph. The expression `matches(T, expr, m)` indicates that a set of triples `T` can satisfy these
rules:
* expr has semActs and `matches(T, expr, m)` by the remaining rules in this list and the evaluation
of semActs succeeds according to the section below on Semantic Actions.
* expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and T
can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
`matches(Tn, expr, m)` by the remaining rules in this list.
* expr is a OneOf and there is some shape expression se2 in shapeExprs such that a matches(T, se2, m).
* expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
* expr is a TripleConstraint and:
* T is a set of one triple. Let t be the soul triple in T.
* t's predicate equals expr's predicate. Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in `arcsOut`.
* either
* expr has no valueExpr
* or `satisfies(value, valueExpr, G, m).
"""
if isinstance_(expr, ShExJ.tripleExprLabel):
return matchesExpr(cntxt, T, expr)
else:
return matchesCardinality(cntxt, T, expr) and (expr.semActs is None or semActsSatisfied(expr.semActs, cntxt)) | def function[matches, parameter[cntxt, T, expr]]:
constant[
**matches**: asserts that a triple expression is matched by a set of triples that come from the neighbourhood of a
node in an RDF graph. The expression `matches(T, expr, m)` indicates that a set of triples `T` can satisfy these
rules:
* expr has semActs and `matches(T, expr, m)` by the remaining rules in this list and the evaluation
of semActs succeeds according to the section below on Semantic Actions.
* expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and T
can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
`matches(Tn, expr, m)` by the remaining rules in this list.
* expr is a OneOf and there is some shape expression se2 in shapeExprs such that a matches(T, se2, m).
* expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
* expr is a TripleConstraint and:
* T is a set of one triple. Let t be the soul triple in T.
* t's predicate equals expr's predicate. Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in `arcsOut`.
* either
* expr has no valueExpr
* or `satisfies(value, valueExpr, G, m).
]
if call[name[isinstance_], parameter[name[expr], name[ShExJ].tripleExprLabel]] begin[:]
return[call[name[matchesExpr], parameter[name[cntxt], name[T], name[expr]]]] | keyword[def] identifier[matches] ( identifier[cntxt] : identifier[Context] , identifier[T] : identifier[RDFGraph] , identifier[expr] : identifier[ShExJ] . identifier[tripleExpr] )-> identifier[bool] :
literal[string]
keyword[if] identifier[isinstance_] ( identifier[expr] , identifier[ShExJ] . identifier[tripleExprLabel] ):
keyword[return] identifier[matchesExpr] ( identifier[cntxt] , identifier[T] , identifier[expr] )
keyword[else] :
keyword[return] identifier[matchesCardinality] ( identifier[cntxt] , identifier[T] , identifier[expr] ) keyword[and] ( identifier[expr] . identifier[semActs] keyword[is] keyword[None] keyword[or] identifier[semActsSatisfied] ( identifier[expr] . identifier[semActs] , identifier[cntxt] )) | def matches(cntxt: Context, T: RDFGraph, expr: ShExJ.tripleExpr) -> bool:
"""
**matches**: asserts that a triple expression is matched by a set of triples that come from the neighbourhood of a
node in an RDF graph. The expression `matches(T, expr, m)` indicates that a set of triples `T` can satisfy these
rules:
* expr has semActs and `matches(T, expr, m)` by the remaining rules in this list and the evaluation
of semActs succeeds according to the section below on Semantic Actions.
* expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and T
can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
`matches(Tn, expr, m)` by the remaining rules in this list.
* expr is a OneOf and there is some shape expression se2 in shapeExprs such that a matches(T, se2, m).
* expr is an EachOf and there is some partition of T into T1, T2,… such that for every expression
expr1, expr2,… in shapeExprs, matches(Tn, exprn, m).
* expr is a TripleConstraint and:
* T is a set of one triple. Let t be the soul triple in T.
* t's predicate equals expr's predicate. Let value be t's subject if inverse is true, else t's object.
* if inverse is true, t is in arcsIn, else t is in `arcsOut`.
* either
* expr has no valueExpr
* or `satisfies(value, valueExpr, G, m).
"""
if isinstance_(expr, ShExJ.tripleExprLabel):
return matchesExpr(cntxt, T, expr) # depends on [control=['if'], data=[]]
else:
return matchesCardinality(cntxt, T, expr) and (expr.semActs is None or semActsSatisfied(expr.semActs, cntxt)) |
def sendHeartbeat(self):
"""
Posts the current state to the server.
:param serverURL: the URL to ping.
:return:
"""
for name, md in self.cfg.recordingDevices.items():
try:
data = marshal(md, recordingDeviceFields)
data['serviceURL'] = self.cfg.getServiceURL() + API_PREFIX + '/devices/' + name
targetURL = self.serverURL + API_PREFIX + '/devices/' + name
logger.info("Pinging " + targetURL)
resp = self.httpclient.put(targetURL, json=data)
if resp.status_code != 200:
logger.warning("Unable to ping server at " + targetURL + " with " + str(data.keys()) +
", response is " + str(resp.status_code))
else:
logger.info("Pinged server at " + targetURL + " with " + str(data.items()))
except:
logger.exception("Unable to ping server") | def function[sendHeartbeat, parameter[self]]:
constant[
Posts the current state to the server.
:param serverURL: the URL to ping.
:return:
]
for taget[tuple[[<ast.Name object at 0x7da1b0ff80a0>, <ast.Name object at 0x7da1b0ff8b50>]]] in starred[call[name[self].cfg.recordingDevices.items, parameter[]]] begin[:]
<ast.Try object at 0x7da1b0ff9d80> | keyword[def] identifier[sendHeartbeat] ( identifier[self] ):
literal[string]
keyword[for] identifier[name] , identifier[md] keyword[in] identifier[self] . identifier[cfg] . identifier[recordingDevices] . identifier[items] ():
keyword[try] :
identifier[data] = identifier[marshal] ( identifier[md] , identifier[recordingDeviceFields] )
identifier[data] [ literal[string] ]= identifier[self] . identifier[cfg] . identifier[getServiceURL] ()+ identifier[API_PREFIX] + literal[string] + identifier[name]
identifier[targetURL] = identifier[self] . identifier[serverURL] + identifier[API_PREFIX] + literal[string] + identifier[name]
identifier[logger] . identifier[info] ( literal[string] + identifier[targetURL] )
identifier[resp] = identifier[self] . identifier[httpclient] . identifier[put] ( identifier[targetURL] , identifier[json] = identifier[data] )
keyword[if] identifier[resp] . identifier[status_code] != literal[int] :
identifier[logger] . identifier[warning] ( literal[string] + identifier[targetURL] + literal[string] + identifier[str] ( identifier[data] . identifier[keys] ())+
literal[string] + identifier[str] ( identifier[resp] . identifier[status_code] ))
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] + identifier[targetURL] + literal[string] + identifier[str] ( identifier[data] . identifier[items] ()))
keyword[except] :
identifier[logger] . identifier[exception] ( literal[string] ) | def sendHeartbeat(self):
"""
Posts the current state to the server.
:param serverURL: the URL to ping.
:return:
"""
for (name, md) in self.cfg.recordingDevices.items():
try:
data = marshal(md, recordingDeviceFields)
data['serviceURL'] = self.cfg.getServiceURL() + API_PREFIX + '/devices/' + name
targetURL = self.serverURL + API_PREFIX + '/devices/' + name
logger.info('Pinging ' + targetURL)
resp = self.httpclient.put(targetURL, json=data)
if resp.status_code != 200:
logger.warning('Unable to ping server at ' + targetURL + ' with ' + str(data.keys()) + ', response is ' + str(resp.status_code)) # depends on [control=['if'], data=[]]
else:
logger.info('Pinged server at ' + targetURL + ' with ' + str(data.items())) # depends on [control=['try'], data=[]]
except:
logger.exception('Unable to ping server') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] |
def _walk_config(config, modules, f, prefix=''):
"""Recursively walk through a module list.
For every module, calls ``f(config, module, name)`` where
`config` is the configuration scoped to that module, `module`
is the Configurable-like object, and `name` is the complete
path (ending in the module name).
:param dict config: configuration to walk and possibly update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param f: callback function for each module
:param str prefix: prefix name of the config
:return: config
"""
def work_in(parent_config, config_name, prefix, module):
# create_config_tree() needs to have been called by now
# and you should never hit either of these asserts
if config_name not in parent_config:
raise ProgrammerError('{0} not present in configuration'
.format(prefix))
if not isinstance(parent_config[config_name], collections.Mapping):
raise ConfigurationError(
'{0} must be an object configuration'.format(prefix))
# do the work!
f(parent_config[config_name], module, prefix)
return _recurse_config(config, modules, work_in) | def function[_walk_config, parameter[config, modules, f, prefix]]:
constant[Recursively walk through a module list.
For every module, calls ``f(config, module, name)`` where
`config` is the configuration scoped to that module, `module`
is the Configurable-like object, and `name` is the complete
path (ending in the module name).
:param dict config: configuration to walk and possibly update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param f: callback function for each module
:param str prefix: prefix name of the config
:return: config
]
def function[work_in, parameter[parent_config, config_name, prefix, module]]:
if compare[name[config_name] <ast.NotIn object at 0x7da2590d7190> name[parent_config]] begin[:]
<ast.Raise object at 0x7da18eb56c20>
if <ast.UnaryOp object at 0x7da1b2346a70> begin[:]
<ast.Raise object at 0x7da1b23458a0>
call[name[f], parameter[call[name[parent_config]][name[config_name]], name[module], name[prefix]]]
return[call[name[_recurse_config], parameter[name[config], name[modules], name[work_in]]]] | keyword[def] identifier[_walk_config] ( identifier[config] , identifier[modules] , identifier[f] , identifier[prefix] = literal[string] ):
literal[string]
keyword[def] identifier[work_in] ( identifier[parent_config] , identifier[config_name] , identifier[prefix] , identifier[module] ):
keyword[if] identifier[config_name] keyword[not] keyword[in] identifier[parent_config] :
keyword[raise] identifier[ProgrammerError] ( literal[string]
. identifier[format] ( identifier[prefix] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[parent_config] [ identifier[config_name] ], identifier[collections] . identifier[Mapping] ):
keyword[raise] identifier[ConfigurationError] (
literal[string] . identifier[format] ( identifier[prefix] ))
identifier[f] ( identifier[parent_config] [ identifier[config_name] ], identifier[module] , identifier[prefix] )
keyword[return] identifier[_recurse_config] ( identifier[config] , identifier[modules] , identifier[work_in] ) | def _walk_config(config, modules, f, prefix=''):
"""Recursively walk through a module list.
For every module, calls ``f(config, module, name)`` where
`config` is the configuration scoped to that module, `module`
is the Configurable-like object, and `name` is the complete
path (ending in the module name).
:param dict config: configuration to walk and possibly update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param f: callback function for each module
:param str prefix: prefix name of the config
:return: config
"""
def work_in(parent_config, config_name, prefix, module):
# create_config_tree() needs to have been called by now
# and you should never hit either of these asserts
if config_name not in parent_config:
raise ProgrammerError('{0} not present in configuration'.format(prefix)) # depends on [control=['if'], data=[]]
if not isinstance(parent_config[config_name], collections.Mapping):
raise ConfigurationError('{0} must be an object configuration'.format(prefix)) # depends on [control=['if'], data=[]]
# do the work!
f(parent_config[config_name], module, prefix)
return _recurse_config(config, modules, work_in) |
def p_identlist(self, t):
'''identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist
'''
if len(t)==5 :
#print(t[1],t[2],t[3],t[4])
t[0] = t[1]+t[2]+t[3]+t[4]
elif len(t)==4 :
#print(t[1],t[2],t[3])
t[0] = t[1]+t[2]+t[3]
elif len(t)==3 :
#print(t[1],t[2])
t[0] = t[1]+t[2]
elif len(t)==2 :
#print(t[0],t[1])
t[0]=t[1]
else:
print("Syntax error at '",str(t),"'") | def function[p_identlist, parameter[self, t]]:
constant[identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist
]
if compare[call[name[len], parameter[name[t]]] equal[==] constant[5]] begin[:]
call[name[t]][constant[0]] assign[=] binary_operation[binary_operation[binary_operation[call[name[t]][constant[1]] + call[name[t]][constant[2]]] + call[name[t]][constant[3]]] + call[name[t]][constant[4]]] | keyword[def] identifier[p_identlist] ( identifier[self] , identifier[t] ):
literal[string]
keyword[if] identifier[len] ( identifier[t] )== literal[int] :
identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[t] )== literal[int] :
identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[t] )== literal[int] :
identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[t] )== literal[int] :
identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]
keyword[else] :
identifier[print] ( literal[string] , identifier[str] ( identifier[t] ), literal[string] ) | def p_identlist(self, t):
"""identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist
"""
if len(t) == 5:
#print(t[1],t[2],t[3],t[4])
t[0] = t[1] + t[2] + t[3] + t[4] # depends on [control=['if'], data=[]]
elif len(t) == 4:
#print(t[1],t[2],t[3])
t[0] = t[1] + t[2] + t[3] # depends on [control=['if'], data=[]]
elif len(t) == 3:
#print(t[1],t[2])
t[0] = t[1] + t[2] # depends on [control=['if'], data=[]]
elif len(t) == 2:
#print(t[0],t[1])
t[0] = t[1] # depends on [control=['if'], data=[]]
else:
print("Syntax error at '", str(t), "'") |
def format_author_ed(citation_elements):
"""Standardise to (ed.) and (eds.)
e.g. Remove extra space in (ed. )
"""
for el in citation_elements:
if el['type'] == 'AUTH':
el['auth_txt'] = el['auth_txt'].replace('(ed. )', '(ed.)')
el['auth_txt'] = el['auth_txt'].replace('(eds. )', '(eds.)')
return citation_elements | def function[format_author_ed, parameter[citation_elements]]:
constant[Standardise to (ed.) and (eds.)
e.g. Remove extra space in (ed. )
]
for taget[name[el]] in starred[name[citation_elements]] begin[:]
if compare[call[name[el]][constant[type]] equal[==] constant[AUTH]] begin[:]
call[name[el]][constant[auth_txt]] assign[=] call[call[name[el]][constant[auth_txt]].replace, parameter[constant[(ed. )], constant[(ed.)]]]
call[name[el]][constant[auth_txt]] assign[=] call[call[name[el]][constant[auth_txt]].replace, parameter[constant[(eds. )], constant[(eds.)]]]
return[name[citation_elements]] | keyword[def] identifier[format_author_ed] ( identifier[citation_elements] ):
literal[string]
keyword[for] identifier[el] keyword[in] identifier[citation_elements] :
keyword[if] identifier[el] [ literal[string] ]== literal[string] :
identifier[el] [ literal[string] ]= identifier[el] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[el] [ literal[string] ]= identifier[el] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[citation_elements] | def format_author_ed(citation_elements):
"""Standardise to (ed.) and (eds.)
e.g. Remove extra space in (ed. )
"""
for el in citation_elements:
if el['type'] == 'AUTH':
el['auth_txt'] = el['auth_txt'].replace('(ed. )', '(ed.)')
el['auth_txt'] = el['auth_txt'].replace('(eds. )', '(eds.)') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']]
return citation_elements |
def cell_source(cell):
"""Return the source of the current cell, as an array of lines"""
source = cell.source
if source == '':
return ['']
if source.endswith('\n'):
return source.splitlines() + ['']
return source.splitlines() | def function[cell_source, parameter[cell]]:
constant[Return the source of the current cell, as an array of lines]
variable[source] assign[=] name[cell].source
if compare[name[source] equal[==] constant[]] begin[:]
return[list[[<ast.Constant object at 0x7da18f721f60>]]]
if call[name[source].endswith, parameter[constant[
]]] begin[:]
return[binary_operation[call[name[source].splitlines, parameter[]] + list[[<ast.Constant object at 0x7da2041dbcd0>]]]]
return[call[name[source].splitlines, parameter[]]] | keyword[def] identifier[cell_source] ( identifier[cell] ):
literal[string]
identifier[source] = identifier[cell] . identifier[source]
keyword[if] identifier[source] == literal[string] :
keyword[return] [ literal[string] ]
keyword[if] identifier[source] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[source] . identifier[splitlines] ()+[ literal[string] ]
keyword[return] identifier[source] . identifier[splitlines] () | def cell_source(cell):
"""Return the source of the current cell, as an array of lines"""
source = cell.source
if source == '':
return [''] # depends on [control=['if'], data=[]]
if source.endswith('\n'):
return source.splitlines() + [''] # depends on [control=['if'], data=[]]
return source.splitlines() |
def _add(self, hostport):
"""Creates a peer from the hostport and adds it to the peer heap"""
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
on_conn_change=self._update_heap,
)
peer.rank = self.rank_calculator.get_rank(peer)
self._peers[peer.hostport] = peer
self.peer_heap.add_and_shuffle(peer) | def function[_add, parameter[self, hostport]]:
constant[Creates a peer from the hostport and adds it to the peer heap]
variable[peer] assign[=] call[name[self].peer_class, parameter[]]
name[peer].rank assign[=] call[name[self].rank_calculator.get_rank, parameter[name[peer]]]
call[name[self]._peers][name[peer].hostport] assign[=] name[peer]
call[name[self].peer_heap.add_and_shuffle, parameter[name[peer]]] | keyword[def] identifier[_add] ( identifier[self] , identifier[hostport] ):
literal[string]
identifier[peer] = identifier[self] . identifier[peer_class] (
identifier[tchannel] = identifier[self] . identifier[tchannel] ,
identifier[hostport] = identifier[hostport] ,
identifier[on_conn_change] = identifier[self] . identifier[_update_heap] ,
)
identifier[peer] . identifier[rank] = identifier[self] . identifier[rank_calculator] . identifier[get_rank] ( identifier[peer] )
identifier[self] . identifier[_peers] [ identifier[peer] . identifier[hostport] ]= identifier[peer]
identifier[self] . identifier[peer_heap] . identifier[add_and_shuffle] ( identifier[peer] ) | def _add(self, hostport):
"""Creates a peer from the hostport and adds it to the peer heap"""
peer = self.peer_class(tchannel=self.tchannel, hostport=hostport, on_conn_change=self._update_heap)
peer.rank = self.rank_calculator.get_rank(peer)
self._peers[peer.hostport] = peer
self.peer_heap.add_and_shuffle(peer) |
def create_request(version, method, url, headers):
"""Create a HTTP request header."""
# According to my measurements using b''.join is faster that constructing a
# bytearray.
message = []
message.append('{} {} HTTP/{}\r\n'.format(method, url, version))
for name, value in headers:
message.append(name)
message.append(': ')
message.append(value)
message.append('\r\n')
message.append('\r\n')
return s2b(''.join(message)) | def function[create_request, parameter[version, method, url, headers]]:
constant[Create a HTTP request header.]
variable[message] assign[=] list[[]]
call[name[message].append, parameter[call[constant[{} {} HTTP/{}
].format, parameter[name[method], name[url], name[version]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b031f2b0>, <ast.Name object at 0x7da1b031fd00>]]] in starred[name[headers]] begin[:]
call[name[message].append, parameter[name[name]]]
call[name[message].append, parameter[constant[: ]]]
call[name[message].append, parameter[name[value]]]
call[name[message].append, parameter[constant[
]]]
call[name[message].append, parameter[constant[
]]]
return[call[name[s2b], parameter[call[constant[].join, parameter[name[message]]]]]] | keyword[def] identifier[create_request] ( identifier[version] , identifier[method] , identifier[url] , identifier[headers] ):
literal[string]
identifier[message] =[]
identifier[message] . identifier[append] ( literal[string] . identifier[format] ( identifier[method] , identifier[url] , identifier[version] ))
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[headers] :
identifier[message] . identifier[append] ( identifier[name] )
identifier[message] . identifier[append] ( literal[string] )
identifier[message] . identifier[append] ( identifier[value] )
identifier[message] . identifier[append] ( literal[string] )
identifier[message] . identifier[append] ( literal[string] )
keyword[return] identifier[s2b] ( literal[string] . identifier[join] ( identifier[message] )) | def create_request(version, method, url, headers):
"""Create a HTTP request header."""
# According to my measurements using b''.join is faster that constructing a
# bytearray.
message = []
message.append('{} {} HTTP/{}\r\n'.format(method, url, version))
for (name, value) in headers:
message.append(name)
message.append(': ')
message.append(value)
message.append('\r\n') # depends on [control=['for'], data=[]]
message.append('\r\n')
return s2b(''.join(message)) |
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result | def function[file_exists, parameter[pathname]]:
constant[checks that a given file exists]
variable[result] assign[=] constant[1]
<ast.Try object at 0x7da2054a7250>
return[name[result]] | keyword[def] identifier[file_exists] ( identifier[pathname] ):
literal[string]
identifier[result] = literal[int]
keyword[try] :
identifier[file] = identifier[open] ( identifier[pathname] , literal[string] )
identifier[file] . identifier[close] ()
keyword[except] :
identifier[result] = keyword[None]
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[pathname] + literal[string] )
keyword[return] identifier[result] | def file_exists(pathname):
"""checks that a given file exists"""
result = 1
try:
file = open(pathname, 'r')
file.close() # depends on [control=['try'], data=[]]
except:
result = None
sys.stderr.write(pathname + " couldn't be accessed\n") # depends on [control=['except'], data=[]]
return result |
def t_HEXCONSTANT(self, t):
r'0x[0-9A-Fa-f]+'
t.value = int(t.value, 16)
t.type = 'INTCONSTANT'
return t | def function[t_HEXCONSTANT, parameter[self, t]]:
constant[0x[0-9A-Fa-f]+]
name[t].value assign[=] call[name[int], parameter[name[t].value, constant[16]]]
name[t].type assign[=] constant[INTCONSTANT]
return[name[t]] | keyword[def] identifier[t_HEXCONSTANT] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[value] = identifier[int] ( identifier[t] . identifier[value] , literal[int] )
identifier[t] . identifier[type] = literal[string]
keyword[return] identifier[t] | def t_HEXCONSTANT(self, t):
"""0x[0-9A-Fa-f]+"""
t.value = int(t.value, 16)
t.type = 'INTCONSTANT'
return t |
def load_dataset(self, ds_str):
"""
Returns an instantiated instance of either a netCDF file or an SOS
mapped DS object.
:param str ds_str: URL of the resource to load
"""
# If it's a remote URL load it as a remote resource, otherwise treat it
# as a local resource.
pr = urlparse(ds_str)
if pr.netloc:
return self.load_remote_dataset(ds_str)
return self.load_local_dataset(ds_str) | def function[load_dataset, parameter[self, ds_str]]:
constant[
Returns an instantiated instance of either a netCDF file or an SOS
mapped DS object.
:param str ds_str: URL of the resource to load
]
variable[pr] assign[=] call[name[urlparse], parameter[name[ds_str]]]
if name[pr].netloc begin[:]
return[call[name[self].load_remote_dataset, parameter[name[ds_str]]]]
return[call[name[self].load_local_dataset, parameter[name[ds_str]]]] | keyword[def] identifier[load_dataset] ( identifier[self] , identifier[ds_str] ):
literal[string]
identifier[pr] = identifier[urlparse] ( identifier[ds_str] )
keyword[if] identifier[pr] . identifier[netloc] :
keyword[return] identifier[self] . identifier[load_remote_dataset] ( identifier[ds_str] )
keyword[return] identifier[self] . identifier[load_local_dataset] ( identifier[ds_str] ) | def load_dataset(self, ds_str):
"""
Returns an instantiated instance of either a netCDF file or an SOS
mapped DS object.
:param str ds_str: URL of the resource to load
"""
# If it's a remote URL load it as a remote resource, otherwise treat it
# as a local resource.
pr = urlparse(ds_str)
if pr.netloc:
return self.load_remote_dataset(ds_str) # depends on [control=['if'], data=[]]
return self.load_local_dataset(ds_str) |
def clear(self):
"""Clear all waiters.
This method will remove any current scheduled waiter with an
asyncio.CancelledError exception.
"""
for _, waiter in self.waiters():
if isinstance(waiter, asyncio.Future) and not waiter.done():
waiter.set_exception(asyncio.CancelledError())
self._waiters = {} | def function[clear, parameter[self]]:
constant[Clear all waiters.
This method will remove any current scheduled waiter with an
asyncio.CancelledError exception.
]
for taget[tuple[[<ast.Name object at 0x7da18f58dea0>, <ast.Name object at 0x7da18f58e200>]]] in starred[call[name[self].waiters, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18f58e8c0> begin[:]
call[name[waiter].set_exception, parameter[call[name[asyncio].CancelledError, parameter[]]]]
name[self]._waiters assign[=] dictionary[[], []] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
keyword[for] identifier[_] , identifier[waiter] keyword[in] identifier[self] . identifier[waiters] ():
keyword[if] identifier[isinstance] ( identifier[waiter] , identifier[asyncio] . identifier[Future] ) keyword[and] keyword[not] identifier[waiter] . identifier[done] ():
identifier[waiter] . identifier[set_exception] ( identifier[asyncio] . identifier[CancelledError] ())
identifier[self] . identifier[_waiters] ={} | def clear(self):
"""Clear all waiters.
This method will remove any current scheduled waiter with an
asyncio.CancelledError exception.
"""
for (_, waiter) in self.waiters():
if isinstance(waiter, asyncio.Future) and (not waiter.done()):
waiter.set_exception(asyncio.CancelledError()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self._waiters = {} |
def vote_count(self):
"""
Returns the total number of votes cast for this
poll options.
"""
return Vote.objects.filter(
content_type=ContentType.objects.get_for_model(self),
object_id=self.id
).aggregate(Sum('vote'))['vote__sum'] or 0 | def function[vote_count, parameter[self]]:
constant[
Returns the total number of votes cast for this
poll options.
]
return[<ast.BoolOp object at 0x7da1b149ed70>] | keyword[def] identifier[vote_count] ( identifier[self] ):
literal[string]
keyword[return] identifier[Vote] . identifier[objects] . identifier[filter] (
identifier[content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[self] ),
identifier[object_id] = identifier[self] . identifier[id]
). identifier[aggregate] ( identifier[Sum] ( literal[string] ))[ literal[string] ] keyword[or] literal[int] | def vote_count(self):
"""
Returns the total number of votes cast for this
poll options.
"""
return Vote.objects.filter(content_type=ContentType.objects.get_for_model(self), object_id=self.id).aggregate(Sum('vote'))['vote__sum'] or 0 |
def prepare_files(self, target_dir):
"""
Proper version of file needs to be moved to external directory.
Because: 1. local files can differ from commited, 2. we can push man branches
"""
diff_names = self.git_wrapper.get_diff_names(self.remote_sha1, self.local_sha1)
files_modified = diff_names.split('\n')
extensions = LINTERS.keys()
for file_path in files_modified:
extension = file_path.split('.')[-1]
if extension not in extensions:
continue
new_file_path = os.path.join(target_dir, file_path)
new_dirname = os.path.dirname(new_file_path)
if not os.path.isdir(new_dirname):
os.makedirs(new_dirname)
with open(new_file_path, "wb") as fh:
self.git_wrapper.save_content_to_file(file_path, self.local_ref, fh)
yield new_file_path | def function[prepare_files, parameter[self, target_dir]]:
constant[
Proper version of file needs to be moved to external directory.
Because: 1. local files can differ from commited, 2. we can push man branches
]
variable[diff_names] assign[=] call[name[self].git_wrapper.get_diff_names, parameter[name[self].remote_sha1, name[self].local_sha1]]
variable[files_modified] assign[=] call[name[diff_names].split, parameter[constant[
]]]
variable[extensions] assign[=] call[name[LINTERS].keys, parameter[]]
for taget[name[file_path]] in starred[name[files_modified]] begin[:]
variable[extension] assign[=] call[call[name[file_path].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da20c991870>]
if compare[name[extension] <ast.NotIn object at 0x7da2590d7190> name[extensions]] begin[:]
continue
variable[new_file_path] assign[=] call[name[os].path.join, parameter[name[target_dir], name[file_path]]]
variable[new_dirname] assign[=] call[name[os].path.dirname, parameter[name[new_file_path]]]
if <ast.UnaryOp object at 0x7da20c9913f0> begin[:]
call[name[os].makedirs, parameter[name[new_dirname]]]
with call[name[open], parameter[name[new_file_path], constant[wb]]] begin[:]
call[name[self].git_wrapper.save_content_to_file, parameter[name[file_path], name[self].local_ref, name[fh]]]
<ast.Yield object at 0x7da20c993c40> | keyword[def] identifier[prepare_files] ( identifier[self] , identifier[target_dir] ):
literal[string]
identifier[diff_names] = identifier[self] . identifier[git_wrapper] . identifier[get_diff_names] ( identifier[self] . identifier[remote_sha1] , identifier[self] . identifier[local_sha1] )
identifier[files_modified] = identifier[diff_names] . identifier[split] ( literal[string] )
identifier[extensions] = identifier[LINTERS] . identifier[keys] ()
keyword[for] identifier[file_path] keyword[in] identifier[files_modified] :
identifier[extension] = identifier[file_path] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] identifier[extension] keyword[not] keyword[in] identifier[extensions] :
keyword[continue]
identifier[new_file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , identifier[file_path] )
identifier[new_dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[new_file_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[new_dirname] ):
identifier[os] . identifier[makedirs] ( identifier[new_dirname] )
keyword[with] identifier[open] ( identifier[new_file_path] , literal[string] ) keyword[as] identifier[fh] :
identifier[self] . identifier[git_wrapper] . identifier[save_content_to_file] ( identifier[file_path] , identifier[self] . identifier[local_ref] , identifier[fh] )
keyword[yield] identifier[new_file_path] | def prepare_files(self, target_dir):
"""
Proper version of file needs to be moved to external directory.
Because: 1. local files can differ from commited, 2. we can push man branches
"""
diff_names = self.git_wrapper.get_diff_names(self.remote_sha1, self.local_sha1)
files_modified = diff_names.split('\n')
extensions = LINTERS.keys()
for file_path in files_modified:
extension = file_path.split('.')[-1]
if extension not in extensions:
continue # depends on [control=['if'], data=[]]
new_file_path = os.path.join(target_dir, file_path)
new_dirname = os.path.dirname(new_file_path)
if not os.path.isdir(new_dirname):
os.makedirs(new_dirname) # depends on [control=['if'], data=[]]
with open(new_file_path, 'wb') as fh:
self.git_wrapper.save_content_to_file(file_path, self.local_ref, fh) # depends on [control=['with'], data=['fh']]
yield new_file_path # depends on [control=['for'], data=['file_path']] |
def create(pid_type, pid_value, status, object_type, object_uuid):
"""Create new persistent identifier."""
from .models import PersistentIdentifier
if bool(object_type) ^ bool(object_uuid):
raise click.BadParameter('Speficy both or any of --type and --uuid.')
new_pid = PersistentIdentifier.create(
pid_type,
pid_value,
status=status,
object_type=object_type,
object_uuid=object_uuid,
)
db.session.commit()
click.echo(
'{0.pid_type} {0.pid_value} {0.pid_provider}'.format(new_pid)
) | def function[create, parameter[pid_type, pid_value, status, object_type, object_uuid]]:
constant[Create new persistent identifier.]
from relative_module[models] import module[PersistentIdentifier]
if binary_operation[call[name[bool], parameter[name[object_type]]] <ast.BitXor object at 0x7da2590d6b00> call[name[bool], parameter[name[object_uuid]]]] begin[:]
<ast.Raise object at 0x7da18f812050>
variable[new_pid] assign[=] call[name[PersistentIdentifier].create, parameter[name[pid_type], name[pid_value]]]
call[name[db].session.commit, parameter[]]
call[name[click].echo, parameter[call[constant[{0.pid_type} {0.pid_value} {0.pid_provider}].format, parameter[name[new_pid]]]]] | keyword[def] identifier[create] ( identifier[pid_type] , identifier[pid_value] , identifier[status] , identifier[object_type] , identifier[object_uuid] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[PersistentIdentifier]
keyword[if] identifier[bool] ( identifier[object_type] )^ identifier[bool] ( identifier[object_uuid] ):
keyword[raise] identifier[click] . identifier[BadParameter] ( literal[string] )
identifier[new_pid] = identifier[PersistentIdentifier] . identifier[create] (
identifier[pid_type] ,
identifier[pid_value] ,
identifier[status] = identifier[status] ,
identifier[object_type] = identifier[object_type] ,
identifier[object_uuid] = identifier[object_uuid] ,
)
identifier[db] . identifier[session] . identifier[commit] ()
identifier[click] . identifier[echo] (
literal[string] . identifier[format] ( identifier[new_pid] )
) | def create(pid_type, pid_value, status, object_type, object_uuid):
"""Create new persistent identifier."""
from .models import PersistentIdentifier
if bool(object_type) ^ bool(object_uuid):
raise click.BadParameter('Speficy both or any of --type and --uuid.') # depends on [control=['if'], data=[]]
new_pid = PersistentIdentifier.create(pid_type, pid_value, status=status, object_type=object_type, object_uuid=object_uuid)
db.session.commit()
click.echo('{0.pid_type} {0.pid_value} {0.pid_provider}'.format(new_pid)) |
def _get_parent_id_list(self, qualifier_id, hierarchy_id):
"""Returns list of parent id strings for qualifier_id in hierarchy.
Uses memcache if caching is enabled.
"""
if self._caching_enabled():
key = 'parent_id_list_{0}'.format(str(qualifier_id))
# If configured to use memcache as the caching engine, use it.
# Otherwise default to diskcache
caching_engine = 'diskcache'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingEngine@json')
caching_engine = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
if caching_engine == 'memcache':
import memcache
caching_host = '127.0.0.1:11211'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingHostURI@json')
caching_host = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
mc = memcache.Client([caching_host], debug=0)
parent_id_list = mc.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
mc.set(key, parent_id_list)
elif caching_engine == 'diskcache':
import diskcache
with diskcache.Cache('/tmp/dlkit_cache') as cache:
# A little bit non-DRY, since it's almost the same as for memcache above.
# However, for diskcache.Cache, we have to call ".close()" or use a
# ``with`` statement to safeguard calling ".close()", so we keep this
# separate from the memcache implementation.
parent_id_list = cache.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
cache.set(key, parent_id_list)
else:
raise errors.NotFound('The {0} caching engine was not found.'.format(caching_engine))
else:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
return parent_id_list | def function[_get_parent_id_list, parameter[self, qualifier_id, hierarchy_id]]:
constant[Returns list of parent id strings for qualifier_id in hierarchy.
Uses memcache if caching is enabled.
]
if call[name[self]._caching_enabled, parameter[]] begin[:]
variable[key] assign[=] call[constant[parent_id_list_{0}].format, parameter[call[name[str], parameter[name[qualifier_id]]]]]
variable[caching_engine] assign[=] constant[diskcache]
<ast.Try object at 0x7da2054a5240>
if compare[name[caching_engine] equal[==] constant[memcache]] begin[:]
import module[memcache]
variable[caching_host] assign[=] constant[127.0.0.1:11211]
<ast.Try object at 0x7da2054a4f10>
variable[mc] assign[=] call[name[memcache].Client, parameter[list[[<ast.Name object at 0x7da2054a7d60>]]]]
variable[parent_id_list] assign[=] call[name[mc].get, parameter[name[key]]]
if compare[name[parent_id_list] is constant[None]] begin[:]
variable[parent_ids] assign[=] call[call[name[self]._get_hierarchy_session, parameter[name[hierarchy_id]]].get_parents, parameter[name[qualifier_id]]]
variable[parent_id_list] assign[=] <ast.ListComp object at 0x7da20c6c6860>
call[name[mc].set, parameter[name[key], name[parent_id_list]]]
return[name[parent_id_list]] | keyword[def] identifier[_get_parent_id_list] ( identifier[self] , identifier[qualifier_id] , identifier[hierarchy_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_caching_enabled] ():
identifier[key] = literal[string] . identifier[format] ( identifier[str] ( identifier[qualifier_id] ))
identifier[caching_engine] = literal[string]
keyword[try] :
identifier[config] = identifier[self] . identifier[_runtime] . identifier[get_configuration] ()
identifier[parameter_id] = identifier[Id] ( literal[string] )
identifier[caching_engine] = identifier[config] . identifier[get_value_by_parameter] ( identifier[parameter_id] ). identifier[get_string_value] ()
keyword[except] ( identifier[AttributeError] , identifier[KeyError] , identifier[errors] . identifier[NotFound] ):
keyword[pass]
keyword[if] identifier[caching_engine] == literal[string] :
keyword[import] identifier[memcache]
identifier[caching_host] = literal[string]
keyword[try] :
identifier[config] = identifier[self] . identifier[_runtime] . identifier[get_configuration] ()
identifier[parameter_id] = identifier[Id] ( literal[string] )
identifier[caching_host] = identifier[config] . identifier[get_value_by_parameter] ( identifier[parameter_id] ). identifier[get_string_value] ()
keyword[except] ( identifier[AttributeError] , identifier[KeyError] , identifier[errors] . identifier[NotFound] ):
keyword[pass]
identifier[mc] = identifier[memcache] . identifier[Client] ([ identifier[caching_host] ], identifier[debug] = literal[int] )
identifier[parent_id_list] = identifier[mc] . identifier[get] ( identifier[key] )
keyword[if] identifier[parent_id_list] keyword[is] keyword[None] :
identifier[parent_ids] = identifier[self] . identifier[_get_hierarchy_session] ( identifier[hierarchy_id] ). identifier[get_parents] ( identifier[qualifier_id] )
identifier[parent_id_list] =[ identifier[str] ( identifier[parent_id] ) keyword[for] identifier[parent_id] keyword[in] identifier[parent_ids] ]
identifier[mc] . identifier[set] ( identifier[key] , identifier[parent_id_list] )
keyword[elif] identifier[caching_engine] == literal[string] :
keyword[import] identifier[diskcache]
keyword[with] identifier[diskcache] . identifier[Cache] ( literal[string] ) keyword[as] identifier[cache] :
identifier[parent_id_list] = identifier[cache] . identifier[get] ( identifier[key] )
keyword[if] identifier[parent_id_list] keyword[is] keyword[None] :
identifier[parent_ids] = identifier[self] . identifier[_get_hierarchy_session] ( identifier[hierarchy_id] ). identifier[get_parents] ( identifier[qualifier_id] )
identifier[parent_id_list] =[ identifier[str] ( identifier[parent_id] ) keyword[for] identifier[parent_id] keyword[in] identifier[parent_ids] ]
identifier[cache] . identifier[set] ( identifier[key] , identifier[parent_id_list] )
keyword[else] :
keyword[raise] identifier[errors] . identifier[NotFound] ( literal[string] . identifier[format] ( identifier[caching_engine] ))
keyword[else] :
identifier[parent_ids] = identifier[self] . identifier[_get_hierarchy_session] ( identifier[hierarchy_id] ). identifier[get_parents] ( identifier[qualifier_id] )
identifier[parent_id_list] =[ identifier[str] ( identifier[parent_id] ) keyword[for] identifier[parent_id] keyword[in] identifier[parent_ids] ]
keyword[return] identifier[parent_id_list] | def _get_parent_id_list(self, qualifier_id, hierarchy_id):
"""Returns list of parent id strings for qualifier_id in hierarchy.
Uses memcache if caching is enabled.
"""
if self._caching_enabled():
key = 'parent_id_list_{0}'.format(str(qualifier_id))
# If configured to use memcache as the caching engine, use it.
# Otherwise default to diskcache
caching_engine = 'diskcache'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingEngine@json')
caching_engine = config.get_value_by_parameter(parameter_id).get_string_value() # depends on [control=['try'], data=[]]
except (AttributeError, KeyError, errors.NotFound):
pass # depends on [control=['except'], data=[]]
if caching_engine == 'memcache':
import memcache
caching_host = '127.0.0.1:11211'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingHostURI@json')
caching_host = config.get_value_by_parameter(parameter_id).get_string_value() # depends on [control=['try'], data=[]]
except (AttributeError, KeyError, errors.NotFound):
pass # depends on [control=['except'], data=[]]
mc = memcache.Client([caching_host], debug=0)
parent_id_list = mc.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
mc.set(key, parent_id_list) # depends on [control=['if'], data=['parent_id_list']] # depends on [control=['if'], data=[]]
elif caching_engine == 'diskcache':
import diskcache
with diskcache.Cache('/tmp/dlkit_cache') as cache:
# A little bit non-DRY, since it's almost the same as for memcache above.
# However, for diskcache.Cache, we have to call ".close()" or use a
# ``with`` statement to safeguard calling ".close()", so we keep this
# separate from the memcache implementation.
parent_id_list = cache.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
cache.set(key, parent_id_list) # depends on [control=['if'], data=['parent_id_list']] # depends on [control=['with'], data=['cache']] # depends on [control=['if'], data=[]]
else:
raise errors.NotFound('The {0} caching engine was not found.'.format(caching_engine)) # depends on [control=['if'], data=[]]
else:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
return parent_id_list |
def laplace_like(x, mu, tau):
R"""
Laplace (double exponential) log-likelihood.
The Laplace (or double exponential) distribution describes the
difference between two independent, identically distributed exponential
events. It is often used as a heavier-tailed alternative to the normal.
.. math::
f(x \mid \mu, \tau) = \frac{\tau}{2}e^{-\tau |x-\mu|}
:Parameters:
- `x` : :math:`-\infty < x < \infty`
- `mu` : Location parameter :math:`-\infty < mu < \infty`
- `tau` : Scale parameter :math:`\tau > 0`
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = \frac{2}{\tau^2}`
"""
return flib.gamma(np.abs(np.array(x) - mu), 1, tau) - \
np.size(x) * np.log(2) | def function[laplace_like, parameter[x, mu, tau]]:
constant[
Laplace (double exponential) log-likelihood.
The Laplace (or double exponential) distribution describes the
difference between two independent, identically distributed exponential
events. It is often used as a heavier-tailed alternative to the normal.
.. math::
f(x \mid \mu, \tau) = \frac{\tau}{2}e^{-\tau |x-\mu|}
:Parameters:
- `x` : :math:`-\infty < x < \infty`
- `mu` : Location parameter :math:`-\infty < mu < \infty`
- `tau` : Scale parameter :math:`\tau > 0`
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = \frac{2}{\tau^2}`
]
return[binary_operation[call[name[flib].gamma, parameter[call[name[np].abs, parameter[binary_operation[call[name[np].array, parameter[name[x]]] - name[mu]]]], constant[1], name[tau]]] - binary_operation[call[name[np].size, parameter[name[x]]] * call[name[np].log, parameter[constant[2]]]]]] | keyword[def] identifier[laplace_like] ( identifier[x] , identifier[mu] , identifier[tau] ):
literal[string]
keyword[return] identifier[flib] . identifier[gamma] ( identifier[np] . identifier[abs] ( identifier[np] . identifier[array] ( identifier[x] )- identifier[mu] ), literal[int] , identifier[tau] )- identifier[np] . identifier[size] ( identifier[x] )* identifier[np] . identifier[log] ( literal[int] ) | def laplace_like(x, mu, tau):
"""
Laplace (double exponential) log-likelihood.
The Laplace (or double exponential) distribution describes the
difference between two independent, identically distributed exponential
events. It is often used as a heavier-tailed alternative to the normal.
.. math::
f(x \\mid \\mu, \\tau) = \\frac{\\tau}{2}e^{-\\tau |x-\\mu|}
:Parameters:
- `x` : :math:`-\\infty < x < \\infty`
- `mu` : Location parameter :math:`-\\infty < mu < \\infty`
- `tau` : Scale parameter :math:`\\tau > 0`
.. note::
- :math:`E(X) = \\mu`
- :math:`Var(X) = \\frac{2}{\\tau^2}`
"""
return flib.gamma(np.abs(np.array(x) - mu), 1, tau) - np.size(x) * np.log(2) |
def logp_plus_loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
'''
sum = logp_of_set(self.markov_blanket)
if self.verbose > 2:
print_('\t' + self._id +
' Current log-likelihood plus current log-probability', sum)
return sum | def function[logp_plus_loglike, parameter[self]]:
constant[
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
]
variable[sum] assign[=] call[name[logp_of_set], parameter[name[self].markov_blanket]]
if compare[name[self].verbose greater[>] constant[2]] begin[:]
call[name[print_], parameter[binary_operation[binary_operation[constant[ ] + name[self]._id] + constant[ Current log-likelihood plus current log-probability]], name[sum]]]
return[name[sum]] | keyword[def] identifier[logp_plus_loglike] ( identifier[self] ):
literal[string]
identifier[sum] = identifier[logp_of_set] ( identifier[self] . identifier[markov_blanket] )
keyword[if] identifier[self] . identifier[verbose] > literal[int] :
identifier[print_] ( literal[string] + identifier[self] . identifier[_id] +
literal[string] , identifier[sum] )
keyword[return] identifier[sum] | def logp_plus_loglike(self):
"""
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
"""
sum = logp_of_set(self.markov_blanket)
if self.verbose > 2:
print_('\t' + self._id + ' Current log-likelihood plus current log-probability', sum) # depends on [control=['if'], data=[]]
return sum |
async def _reset_vector(self):
"""Initialize the controller's subsystems inside the emulation thread."""
# Send ourselves all of our config variable assignments
config_rpcs = self.config_database.stream_matching(8, self.name)
for rpc in config_rpcs:
await self._device.emulator.await_rpc(*rpc)
config_assignments = self.latch_config_variables()
self._logger.info("Latched config variables at reset for controller: %s", config_assignments)
for system in self._post_config_subsystems:
try:
system.clear_to_reset(config_assignments)
await asyncio.wait_for(system.initialize(), timeout=2.0)
except:
self._logger.exception("Error initializing %s", system)
raise
self._logger.info("Finished clearing controller to reset condition")
# Now reset all of the tiles
for address, _ in self._device.iter_tiles(include_controller=False):
self._logger.info("Sending reset signal to tile at address %d", address)
try:
await self._device.emulator.await_rpc(address, rpcs.RESET)
except TileNotFoundError:
pass
except:
self._logger.exception("Error sending reset signal to tile at address %d", address)
raise
self.initialized.set() | <ast.AsyncFunctionDef object at 0x7da20c76dc60> | keyword[async] keyword[def] identifier[_reset_vector] ( identifier[self] ):
literal[string]
identifier[config_rpcs] = identifier[self] . identifier[config_database] . identifier[stream_matching] ( literal[int] , identifier[self] . identifier[name] )
keyword[for] identifier[rpc] keyword[in] identifier[config_rpcs] :
keyword[await] identifier[self] . identifier[_device] . identifier[emulator] . identifier[await_rpc] (* identifier[rpc] )
identifier[config_assignments] = identifier[self] . identifier[latch_config_variables] ()
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[config_assignments] )
keyword[for] identifier[system] keyword[in] identifier[self] . identifier[_post_config_subsystems] :
keyword[try] :
identifier[system] . identifier[clear_to_reset] ( identifier[config_assignments] )
keyword[await] identifier[asyncio] . identifier[wait_for] ( identifier[system] . identifier[initialize] (), identifier[timeout] = literal[int] )
keyword[except] :
identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] , identifier[system] )
keyword[raise]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
keyword[for] identifier[address] , identifier[_] keyword[in] identifier[self] . identifier[_device] . identifier[iter_tiles] ( identifier[include_controller] = keyword[False] ):
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[address] )
keyword[try] :
keyword[await] identifier[self] . identifier[_device] . identifier[emulator] . identifier[await_rpc] ( identifier[address] , identifier[rpcs] . identifier[RESET] )
keyword[except] identifier[TileNotFoundError] :
keyword[pass]
keyword[except] :
identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] , identifier[address] )
keyword[raise]
identifier[self] . identifier[initialized] . identifier[set] () | async def _reset_vector(self):
"""Initialize the controller's subsystems inside the emulation thread."""
# Send ourselves all of our config variable assignments
config_rpcs = self.config_database.stream_matching(8, self.name)
for rpc in config_rpcs:
await self._device.emulator.await_rpc(*rpc) # depends on [control=['for'], data=['rpc']]
config_assignments = self.latch_config_variables()
self._logger.info('Latched config variables at reset for controller: %s', config_assignments)
for system in self._post_config_subsystems:
try:
system.clear_to_reset(config_assignments)
await asyncio.wait_for(system.initialize(), timeout=2.0) # depends on [control=['try'], data=[]]
except:
self._logger.exception('Error initializing %s', system)
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['system']]
self._logger.info('Finished clearing controller to reset condition')
# Now reset all of the tiles
for (address, _) in self._device.iter_tiles(include_controller=False):
self._logger.info('Sending reset signal to tile at address %d', address)
try:
await self._device.emulator.await_rpc(address, rpcs.RESET) # depends on [control=['try'], data=[]]
except TileNotFoundError:
pass # depends on [control=['except'], data=[]]
except:
self._logger.exception('Error sending reset signal to tile at address %d', address)
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
self.initialized.set() |
def contains_value(self, *values):
"""Asserts that val is a dict and contains the given value or values."""
self._check_dict_like(self.val, check_getitem=False)
if len(values) == 0:
raise ValueError('one or more value args must be given')
missing = []
for v in values:
if v not in self.val.values():
missing.append(v)
if missing:
self._err('Expected <%s> to contain values %s, but did not contain %s.' % (self.val, self._fmt_items(values), self._fmt_items(missing)))
return self | def function[contains_value, parameter[self]]:
constant[Asserts that val is a dict and contains the given value or values.]
call[name[self]._check_dict_like, parameter[name[self].val]]
if compare[call[name[len], parameter[name[values]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b013c130>
variable[missing] assign[=] list[[]]
for taget[name[v]] in starred[name[values]] begin[:]
if compare[name[v] <ast.NotIn object at 0x7da2590d7190> call[name[self].val.values, parameter[]]] begin[:]
call[name[missing].append, parameter[name[v]]]
if name[missing] begin[:]
call[name[self]._err, parameter[binary_operation[constant[Expected <%s> to contain values %s, but did not contain %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b013ce80>, <ast.Call object at 0x7da1b013ceb0>, <ast.Call object at 0x7da1b013fcd0>]]]]]
return[name[self]] | keyword[def] identifier[contains_value] ( identifier[self] ,* identifier[values] ):
literal[string]
identifier[self] . identifier[_check_dict_like] ( identifier[self] . identifier[val] , identifier[check_getitem] = keyword[False] )
keyword[if] identifier[len] ( identifier[values] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[missing] =[]
keyword[for] identifier[v] keyword[in] identifier[values] :
keyword[if] identifier[v] keyword[not] keyword[in] identifier[self] . identifier[val] . identifier[values] ():
identifier[missing] . identifier[append] ( identifier[v] )
keyword[if] identifier[missing] :
identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[self] . identifier[_fmt_items] ( identifier[values] ), identifier[self] . identifier[_fmt_items] ( identifier[missing] )))
keyword[return] identifier[self] | def contains_value(self, *values):
"""Asserts that val is a dict and contains the given value or values."""
self._check_dict_like(self.val, check_getitem=False)
if len(values) == 0:
raise ValueError('one or more value args must be given') # depends on [control=['if'], data=[]]
missing = []
for v in values:
if v not in self.val.values():
missing.append(v) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=['v']]
if missing:
self._err('Expected <%s> to contain values %s, but did not contain %s.' % (self.val, self._fmt_items(values), self._fmt_items(missing))) # depends on [control=['if'], data=[]]
return self |
def eigen(matrix):
""" Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low """
(vals, vecs) = np.linalg.eigh(matrix)
ind = vals.argsort()[::-1]
vals = vals[ind]
vecs = vecs[:, ind]
vals_ = vals.copy()
vals_[vals_ < 0] = 0.
cum_var_exp = np.cumsum(vals_ / vals_.sum())
return Decomp(matrix.copy(), vals, vecs, cum_var_exp) | def function[eigen, parameter[matrix]]:
constant[ Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low ]
<ast.Tuple object at 0x7da20c6ab280> assign[=] call[name[np].linalg.eigh, parameter[name[matrix]]]
variable[ind] assign[=] call[call[name[vals].argsort, parameter[]]][<ast.Slice object at 0x7da20c6abb80>]
variable[vals] assign[=] call[name[vals]][name[ind]]
variable[vecs] assign[=] call[name[vecs]][tuple[[<ast.Slice object at 0x7da20c6a9f30>, <ast.Name object at 0x7da20c6aae90>]]]
variable[vals_] assign[=] call[name[vals].copy, parameter[]]
call[name[vals_]][compare[name[vals_] less[<] constant[0]]] assign[=] constant[0.0]
variable[cum_var_exp] assign[=] call[name[np].cumsum, parameter[binary_operation[name[vals_] / call[name[vals_].sum, parameter[]]]]]
return[call[name[Decomp], parameter[call[name[matrix].copy, parameter[]], name[vals], name[vecs], name[cum_var_exp]]]] | keyword[def] identifier[eigen] ( identifier[matrix] ):
literal[string]
( identifier[vals] , identifier[vecs] )= identifier[np] . identifier[linalg] . identifier[eigh] ( identifier[matrix] )
identifier[ind] = identifier[vals] . identifier[argsort] ()[::- literal[int] ]
identifier[vals] = identifier[vals] [ identifier[ind] ]
identifier[vecs] = identifier[vecs] [:, identifier[ind] ]
identifier[vals_] = identifier[vals] . identifier[copy] ()
identifier[vals_] [ identifier[vals_] < literal[int] ]= literal[int]
identifier[cum_var_exp] = identifier[np] . identifier[cumsum] ( identifier[vals_] / identifier[vals_] . identifier[sum] ())
keyword[return] identifier[Decomp] ( identifier[matrix] . identifier[copy] (), identifier[vals] , identifier[vecs] , identifier[cum_var_exp] ) | def eigen(matrix):
""" Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low """
(vals, vecs) = np.linalg.eigh(matrix)
ind = vals.argsort()[::-1]
vals = vals[ind]
vecs = vecs[:, ind]
vals_ = vals.copy()
vals_[vals_ < 0] = 0.0
cum_var_exp = np.cumsum(vals_ / vals_.sum())
return Decomp(matrix.copy(), vals, vecs, cum_var_exp) |
def from_file(filename, password='', keytype=None):
"""
Returns a new PrivateKey instance with the given attributes.
If keytype is None, we attempt to automatically detect the type.
:type filename: string
:param filename: The key file name.
:type password: string
:param password: The key password.
:type keytype: string
:param keytype: The key type.
:rtype: PrivateKey
:return: The new key.
"""
if keytype is None:
try:
key = RSAKey.from_private_key_file(filename)
keytype = 'rsa'
except SSHException as e:
try:
key = DSSKey.from_private_key_file(filename)
keytype = 'dss'
except SSHException as e:
msg = 'not a recognized private key: ' + repr(filename)
raise ValueError(msg)
key = PrivateKey(keytype)
key.filename = filename
key.password = password
return key | def function[from_file, parameter[filename, password, keytype]]:
constant[
Returns a new PrivateKey instance with the given attributes.
If keytype is None, we attempt to automatically detect the type.
:type filename: string
:param filename: The key file name.
:type password: string
:param password: The key password.
:type keytype: string
:param keytype: The key type.
:rtype: PrivateKey
:return: The new key.
]
if compare[name[keytype] is constant[None]] begin[:]
<ast.Try object at 0x7da1b06519f0>
variable[key] assign[=] call[name[PrivateKey], parameter[name[keytype]]]
name[key].filename assign[=] name[filename]
name[key].password assign[=] name[password]
return[name[key]] | keyword[def] identifier[from_file] ( identifier[filename] , identifier[password] = literal[string] , identifier[keytype] = keyword[None] ):
literal[string]
keyword[if] identifier[keytype] keyword[is] keyword[None] :
keyword[try] :
identifier[key] = identifier[RSAKey] . identifier[from_private_key_file] ( identifier[filename] )
identifier[keytype] = literal[string]
keyword[except] identifier[SSHException] keyword[as] identifier[e] :
keyword[try] :
identifier[key] = identifier[DSSKey] . identifier[from_private_key_file] ( identifier[filename] )
identifier[keytype] = literal[string]
keyword[except] identifier[SSHException] keyword[as] identifier[e] :
identifier[msg] = literal[string] + identifier[repr] ( identifier[filename] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
identifier[key] = identifier[PrivateKey] ( identifier[keytype] )
identifier[key] . identifier[filename] = identifier[filename]
identifier[key] . identifier[password] = identifier[password]
keyword[return] identifier[key] | def from_file(filename, password='', keytype=None):
"""
Returns a new PrivateKey instance with the given attributes.
If keytype is None, we attempt to automatically detect the type.
:type filename: string
:param filename: The key file name.
:type password: string
:param password: The key password.
:type keytype: string
:param keytype: The key type.
:rtype: PrivateKey
:return: The new key.
"""
if keytype is None:
try:
key = RSAKey.from_private_key_file(filename)
keytype = 'rsa' # depends on [control=['try'], data=[]]
except SSHException as e:
try:
key = DSSKey.from_private_key_file(filename)
keytype = 'dss' # depends on [control=['try'], data=[]]
except SSHException as e:
msg = 'not a recognized private key: ' + repr(filename)
raise ValueError(msg) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['keytype']]
key = PrivateKey(keytype)
key.filename = filename
key.password = password
return key |
def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value | def function[digital_write, parameter[pin_num, value, hardware_addr]]:
constant[Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
]
call[call[name[_get_pifacedigital], parameter[name[hardware_addr]]].output_pins][name[pin_num]].value assign[=] name[value] | keyword[def] identifier[digital_write] ( identifier[pin_num] , identifier[value] , identifier[hardware_addr] = literal[int] ):
literal[string]
identifier[_get_pifacedigital] ( identifier[hardware_addr] ). identifier[output_pins] [ identifier[pin_num] ]. identifier[value] = identifier[value] | def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value |
def clear_modules(self):
    """
    Clears the modules snapshot.
    Each cached module object is cleared individually before the
    snapshot dictionary itself is replaced with a fresh empty one.
    """
    modules = self.__moduleDict
    for module in compat.itervalues(modules):
        module.clear()
    # Drop the old mapping entirely rather than emptying it in place.
    self.__moduleDict = dict()
constant[
Clears the modules snapshot.
]
for taget[name[aModule]] in starred[call[name[compat].itervalues, parameter[name[self].__moduleDict]]] begin[:]
call[name[aModule].clear, parameter[]]
name[self].__moduleDict assign[=] call[name[dict], parameter[]] | keyword[def] identifier[clear_modules] ( identifier[self] ):
literal[string]
keyword[for] identifier[aModule] keyword[in] identifier[compat] . identifier[itervalues] ( identifier[self] . identifier[__moduleDict] ):
identifier[aModule] . identifier[clear] ()
identifier[self] . identifier[__moduleDict] = identifier[dict] () | def clear_modules(self):
"""
Clears the modules snapshot.
"""
for aModule in compat.itervalues(self.__moduleDict):
aModule.clear() # depends on [control=['for'], data=['aModule']]
self.__moduleDict = dict() |
def _cache_loc(self, path, saltenv='base', cachedir=None):
    '''
    Return the local location to cache the file, cache dirs will be made
    '''
    base = self.get_cachedir(cachedir)
    dest = salt.utils.path.join(base,
                                'files',
                                saltenv,
                                path)
    destdir = os.path.dirname(dest)
    with salt.utils.files.set_umask(0o077):
        # A stray regular file sitting where the cache directory should
        # be would make os.makedirs below raise OSError, so remove it.
        if os.path.isfile(destdir):
            os.remove(destdir)
        # Create the directory tree; an already-existing directory is
        # fine, any other failure propagates.
        try:
            os.makedirs(destdir)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        yield dest
constant[
Return the local location to cache the file, cache dirs will be made
]
variable[cachedir] assign[=] call[name[self].get_cachedir, parameter[name[cachedir]]]
variable[dest] assign[=] call[name[salt].utils.path.join, parameter[name[cachedir], constant[files], name[saltenv], name[path]]]
variable[destdir] assign[=] call[name[os].path.dirname, parameter[name[dest]]]
with call[name[salt].utils.files.set_umask, parameter[constant[63]]] begin[:]
if call[name[os].path.isfile, parameter[name[destdir]]] begin[:]
call[name[os].remove, parameter[name[destdir]]]
<ast.Try object at 0x7da20c7c89d0>
<ast.Yield object at 0x7da20c7cb3d0> | keyword[def] identifier[_cache_loc] ( identifier[self] , identifier[path] , identifier[saltenv] = literal[string] , identifier[cachedir] = keyword[None] ):
literal[string]
identifier[cachedir] = identifier[self] . identifier[get_cachedir] ( identifier[cachedir] )
identifier[dest] = identifier[salt] . identifier[utils] . identifier[path] . identifier[join] ( identifier[cachedir] ,
literal[string] ,
identifier[saltenv] ,
identifier[path] )
identifier[destdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[dest] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[set_umask] ( literal[int] ):
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[destdir] ):
identifier[os] . identifier[remove] ( identifier[destdir] )
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[destdir] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] != identifier[errno] . identifier[EEXIST] :
keyword[raise]
keyword[yield] identifier[dest] | def _cache_loc(self, path, saltenv='base', cachedir=None):
"""
Return the local location to cache the file, cache dirs will be made
"""
cachedir = self.get_cachedir(cachedir)
dest = salt.utils.path.join(cachedir, 'files', saltenv, path)
destdir = os.path.dirname(dest)
with salt.utils.files.set_umask(63):
# remove destdir if it is a regular file to avoid an OSError when
# running os.makedirs below
if os.path.isfile(destdir):
os.remove(destdir) # depends on [control=['if'], data=[]]
# ensure destdir exists
try:
os.makedirs(destdir) # depends on [control=['try'], data=[]]
except OSError as exc:
if exc.errno != errno.EEXIST: # ignore if it was there already
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']]
yield dest # depends on [control=['with'], data=[]] |
def fix_builtins(override_debug=False):
    """Activate the builtins compatibility.

    Installs Python 2/3 compatibility shims directly into the builtins
    namespace (``print``/``print_``, ``input``, ``sorted``, ``format``,
    ``basestring``, ``IntType``, ``BaseException``, ...) so later code
    can use a single spelling on either interpreter.  Raises
    ``RuntimeError`` if called more than once.

    :param override_debug: accepted for API compatibility; not read in
        this function body.
    """
    # Names collected here are injected into builtins in one final update().
    override_dict = {}
    orig_print = None
    used_print = None
    # Locate the writable builtins namespace: ``__builtins__`` may be the
    # builtins dict itself or the builtins module, depending on context.
    if(__builtins__.__class__ is dict):
        builtins_dict = __builtins__
    else:
        try:
            import builtins
        except ImportError:
            # Python 2 spelling of the builtins module.
            import __builtin__ as builtins
        builtins_dict = builtins.__dict__
    def _deprecated(*args, **kwargs):
        """Report the fact that the called function is deprecated."""
        import traceback
        # Include the caller's source line in the message so the offending
        # call site is easy to find.
        raise DeprecationWarning("the called function is deprecated => " +
                                 traceback.extract_stack(None, 2)[0][3])
    def _print_wrapper(*args, **kwargs):
        # Wraps an existing print that lacks the ``flush`` keyword
        # (Python < 3.3): strip ``flush`` before delegating, then flush
        # the target stream manually if requested.
        flush = kwargs.get("flush", False)
        if "flush" in kwargs:
            del kwargs["flush"]
        orig_print(*args, **kwargs)
        if flush:
            kwargs.get("file", sys.stdout).flush()
    def _print_full(*args, **kwargs):
        # Full reimplementation of print() for interpreters that have no
        # print function at all (Python 2 without __future__ import).
        opt = {"sep": " ", "end": "\n", "file": sys.stdout, "flush": False}
        for key in kwargs:
            if(key in opt):
                opt[key] = kwargs[key]
            else:
                raise TypeError("'"+key+"' is an invalid keyword argument "
                                "for this function")
        opt["file"].write(opt["sep"].join(str(val) for val in args)+opt["end"])
        if opt["flush"]:
            opt["file"].flush()
    def _sorted(my_list):
        # Fallback for missing builtin sorted(): copy, sort in place, return.
        my_list = list(my_list)
        my_list.sort()
        return my_list
    def _format(value, format_spec):
        # Fallback for missing builtin format(): delegate to __format__.
        return value.__format__(format_spec)
    # Guard against double activation: this module's name is stored in
    # builtins as a marker at the end of this function.
    if builtins_dict.get(__name__, False):
        raise RuntimeError(__name__+" already loaded")
    # Exceptions
    if builtins_dict.get("BaseException") is None:
        override_dict["BaseException"] = Exception
    # basestring
    if builtins_dict.get("basestring") is None:
        if builtins_dict.get("bytes") is None:
            import types
            override_dict["basestring"] = types.StringType
        else:
            override_dict["basestring"] = (str, bytes) # It works only when used in isinstance
    # IntType
    if getattr(int, "__str__", None) is None:
        import types
        override_dict["IntType"] = types.IntType
    else:
        override_dict["IntType"] = int # Python >= 2.2
    # str.format support: replace str with an extended class when absent.
    if 'format' not in str.__dict__:
        override_dict["str"] = _Internal.ExtStr
    # Function 'input'
    if builtins_dict.get("raw_input") is not None:
        override_dict["input"] = builtins_dict.get("raw_input")
    # raw_input is always replaced by a stub that raises DeprecationWarning.
    override_dict["raw_input"] = _deprecated
    # Function 'print' (also aliased as print_)
    if sys.version_info >= (3, 3):
        # Native print already supports ``flush``; use it unchanged.
        used_print = builtins_dict.get("print")
    else:
        orig_print = builtins_dict.get("print")
        if orig_print is not None:
            used_print = _print_wrapper
        else:
            used_print = _print_full
    override_dict["print"] = used_print
    override_dict["print_"] = used_print
    # Function 'sorted'
    if builtins_dict.get("sorted") is None:
        override_dict["sorted"] = _sorted
    # Function 'format'
    if builtins_dict.get("format") is None:
        override_dict["format"] = _format
    # Marker preventing a second activation (checked above).
    override_dict[__name__] = True
    builtins_dict.update(override_dict)
    del override_dict
constant[Activate the builtins compatibility.]
variable[override_dict] assign[=] dictionary[[], []]
variable[orig_print] assign[=] constant[None]
variable[used_print] assign[=] constant[None]
if compare[name[__builtins__].__class__ is name[dict]] begin[:]
variable[builtins_dict] assign[=] name[__builtins__]
def function[_deprecated, parameter[]]:
constant[Report the fact that the called function is deprecated.]
import module[traceback]
<ast.Raise object at 0x7da2054a5360>
def function[_print_wrapper, parameter[]]:
variable[flush] assign[=] call[name[kwargs].get, parameter[constant[flush], constant[False]]]
if compare[constant[flush] in name[kwargs]] begin[:]
<ast.Delete object at 0x7da2054a63b0>
call[name[orig_print], parameter[<ast.Starred object at 0x7da2054a64d0>]]
if name[flush] begin[:]
call[call[name[kwargs].get, parameter[constant[file], name[sys].stdout]].flush, parameter[]]
def function[_print_full, parameter[]]:
variable[opt] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6170>, <ast.Constant object at 0x7da2054a7fd0>, <ast.Constant object at 0x7da2054a4940>, <ast.Constant object at 0x7da2054a4d00>], [<ast.Constant object at 0x7da2054a6ad0>, <ast.Constant object at 0x7da2054a5b10>, <ast.Attribute object at 0x7da2054a4a60>, <ast.Constant object at 0x7da2054a6470>]]
for taget[name[key]] in starred[name[kwargs]] begin[:]
if compare[name[key] in name[opt]] begin[:]
call[name[opt]][name[key]] assign[=] call[name[kwargs]][name[key]]
call[call[name[opt]][constant[file]].write, parameter[binary_operation[call[call[name[opt]][constant[sep]].join, parameter[<ast.GeneratorExp object at 0x7da2054a5450>]] + call[name[opt]][constant[end]]]]]
if call[name[opt]][constant[flush]] begin[:]
call[call[name[opt]][constant[file]].flush, parameter[]]
def function[_sorted, parameter[my_list]]:
variable[my_list] assign[=] call[name[list], parameter[name[my_list]]]
call[name[my_list].sort, parameter[]]
return[name[my_list]]
def function[_format, parameter[value, format_spec]]:
return[call[name[value].__format__, parameter[name[format_spec]]]]
if call[name[builtins_dict].get, parameter[name[__name__], constant[False]]] begin[:]
<ast.Raise object at 0x7da207f02a40>
if compare[call[name[builtins_dict].get, parameter[constant[BaseException]]] is constant[None]] begin[:]
call[name[override_dict]][constant[BaseException]] assign[=] name[Exception]
if compare[call[name[builtins_dict].get, parameter[constant[basestring]]] is constant[None]] begin[:]
if compare[call[name[builtins_dict].get, parameter[constant[bytes]]] is constant[None]] begin[:]
import module[types]
call[name[override_dict]][constant[basestring]] assign[=] name[types].StringType
if compare[call[name[getattr], parameter[name[int], constant[__str__], constant[None]]] is constant[None]] begin[:]
import module[types]
call[name[override_dict]][constant[IntType]] assign[=] name[types].IntType
if compare[constant[format] <ast.NotIn object at 0x7da2590d7190> name[str].__dict__] begin[:]
call[name[override_dict]][constant[str]] assign[=] name[_Internal].ExtStr
if compare[call[name[builtins_dict].get, parameter[constant[raw_input]]] is_not constant[None]] begin[:]
call[name[override_dict]][constant[input]] assign[=] call[name[builtins_dict].get, parameter[constant[raw_input]]]
call[name[override_dict]][constant[raw_input]] assign[=] name[_deprecated]
if compare[name[sys].version_info greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da207f02f20>, <ast.Constant object at 0x7da207f022c0>]]] begin[:]
variable[used_print] assign[=] call[name[builtins_dict].get, parameter[constant[print]]]
call[name[override_dict]][constant[print_]] assign[=] name[used_print]
if compare[call[name[builtins_dict].get, parameter[constant[sorted]]] is constant[None]] begin[:]
call[name[override_dict]][constant[sorted]] assign[=] name[_sorted]
if compare[call[name[builtins_dict].get, parameter[constant[format]]] is constant[None]] begin[:]
call[name[override_dict]][constant[format]] assign[=] name[_format]
call[name[override_dict]][name[__name__]] assign[=] constant[True]
call[name[builtins_dict].update, parameter[name[override_dict]]]
<ast.Delete object at 0x7da18bc73580> | keyword[def] identifier[fix_builtins] ( identifier[override_debug] = keyword[False] ):
literal[string]
identifier[override_dict] ={}
identifier[orig_print] = keyword[None]
identifier[used_print] = keyword[None]
keyword[if] ( identifier[__builtins__] . identifier[__class__] keyword[is] identifier[dict] ):
identifier[builtins_dict] = identifier[__builtins__]
keyword[else] :
keyword[try] :
keyword[import] identifier[builtins]
keyword[except] identifier[ImportError] :
keyword[import] identifier[__builtin__] keyword[as] identifier[builtins]
identifier[builtins_dict] = identifier[builtins] . identifier[__dict__]
keyword[def] identifier[_deprecated] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[traceback]
keyword[raise] identifier[DeprecationWarning] ( literal[string] +
identifier[traceback] . identifier[extract_stack] ( keyword[None] , literal[int] )[ literal[int] ][ literal[int] ])
keyword[def] identifier[_print_wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[flush] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[del] identifier[kwargs] [ literal[string] ]
identifier[orig_print] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[flush] :
identifier[kwargs] . identifier[get] ( literal[string] , identifier[sys] . identifier[stdout] ). identifier[flush] ()
keyword[def] identifier[_print_full] (* identifier[args] ,** identifier[kwargs] ):
identifier[opt] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[sys] . identifier[stdout] , literal[string] : keyword[False] }
keyword[for] identifier[key] keyword[in] identifier[kwargs] :
keyword[if] ( identifier[key] keyword[in] identifier[opt] ):
identifier[opt] [ identifier[key] ]= identifier[kwargs] [ identifier[key] ]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] + identifier[key] + literal[string]
literal[string] )
identifier[opt] [ literal[string] ]. identifier[write] ( identifier[opt] [ literal[string] ]. identifier[join] ( identifier[str] ( identifier[val] ) keyword[for] identifier[val] keyword[in] identifier[args] )+ identifier[opt] [ literal[string] ])
keyword[if] identifier[opt] [ literal[string] ]:
identifier[opt] [ literal[string] ]. identifier[flush] ()
keyword[def] identifier[_sorted] ( identifier[my_list] ):
identifier[my_list] = identifier[list] ( identifier[my_list] )
identifier[my_list] . identifier[sort] ()
keyword[return] identifier[my_list]
keyword[def] identifier[_format] ( identifier[value] , identifier[format_spec] ):
keyword[return] identifier[value] . identifier[__format__] ( identifier[format_spec] )
keyword[if] identifier[builtins_dict] . identifier[get] ( identifier[__name__] , keyword[False] ):
keyword[raise] identifier[RuntimeError] ( identifier[__name__] + literal[string] )
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[override_dict] [ literal[string] ]= identifier[Exception]
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
keyword[import] identifier[types]
identifier[override_dict] [ literal[string] ]= identifier[types] . identifier[StringType]
keyword[else] :
identifier[override_dict] [ literal[string] ]=( identifier[str] , identifier[bytes] )
keyword[if] identifier[getattr] ( identifier[int] , literal[string] , keyword[None] ) keyword[is] keyword[None] :
keyword[import] identifier[types]
identifier[override_dict] [ literal[string] ]= identifier[types] . identifier[IntType]
keyword[else] :
identifier[override_dict] [ literal[string] ]= identifier[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[str] . identifier[__dict__] :
identifier[override_dict] [ literal[string] ]= identifier[_Internal] . identifier[ExtStr]
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[override_dict] [ literal[string] ]= identifier[builtins_dict] . identifier[get] ( literal[string] )
identifier[override_dict] [ literal[string] ]= identifier[_deprecated]
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
identifier[used_print] = identifier[builtins_dict] . identifier[get] ( literal[string] )
keyword[else] :
identifier[orig_print] = identifier[builtins_dict] . identifier[get] ( literal[string] )
keyword[if] identifier[orig_print] keyword[is] keyword[not] keyword[None] :
identifier[used_print] = identifier[_print_wrapper]
keyword[else] :
identifier[used_print] = identifier[_print_full]
identifier[override_dict] [ literal[string] ]= identifier[used_print]
identifier[override_dict] [ literal[string] ]= identifier[used_print]
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[override_dict] [ literal[string] ]= identifier[_sorted]
keyword[if] identifier[builtins_dict] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[override_dict] [ literal[string] ]= identifier[_format]
identifier[override_dict] [ identifier[__name__] ]= keyword[True]
identifier[builtins_dict] . identifier[update] ( identifier[override_dict] )
keyword[del] identifier[override_dict] | def fix_builtins(override_debug=False):
"""Activate the builtins compatibility."""
override_dict = {}
orig_print = None
used_print = None
if __builtins__.__class__ is dict:
builtins_dict = __builtins__ # depends on [control=['if'], data=[]]
else:
try:
import builtins # depends on [control=['try'], data=[]]
except ImportError:
import __builtin__ as builtins # depends on [control=['except'], data=[]]
builtins_dict = builtins.__dict__
def _deprecated(*args, **kwargs):
"""Report the fact that the called function is deprecated."""
import traceback
raise DeprecationWarning('the called function is deprecated => ' + traceback.extract_stack(None, 2)[0][3])
def _print_wrapper(*args, **kwargs):
flush = kwargs.get('flush', False)
if 'flush' in kwargs:
del kwargs['flush'] # depends on [control=['if'], data=['kwargs']]
orig_print(*args, **kwargs)
if flush:
kwargs.get('file', sys.stdout).flush() # depends on [control=['if'], data=[]]
def _print_full(*args, **kwargs):
opt = {'sep': ' ', 'end': '\n', 'file': sys.stdout, 'flush': False}
for key in kwargs:
if key in opt:
opt[key] = kwargs[key] # depends on [control=['if'], data=['key', 'opt']]
else:
raise TypeError("'" + key + "' is an invalid keyword argument for this function") # depends on [control=['for'], data=['key']]
opt['file'].write(opt['sep'].join((str(val) for val in args)) + opt['end'])
if opt['flush']:
opt['file'].flush() # depends on [control=['if'], data=[]]
def _sorted(my_list):
my_list = list(my_list)
my_list.sort()
return my_list
def _format(value, format_spec):
return value.__format__(format_spec)
if builtins_dict.get(__name__, False):
raise RuntimeError(__name__ + ' already loaded') # depends on [control=['if'], data=[]]
# Exceptions
if builtins_dict.get('BaseException') is None:
override_dict['BaseException'] = Exception # depends on [control=['if'], data=[]]
# basestring
if builtins_dict.get('basestring') is None:
if builtins_dict.get('bytes') is None:
import types
override_dict['basestring'] = types.StringType # depends on [control=['if'], data=[]]
else:
override_dict['basestring'] = (str, bytes) # It works only when used in isinstance # depends on [control=['if'], data=[]]
# IntType
if getattr(int, '__str__', None) is None:
import types
override_dict['IntType'] = types.IntType # depends on [control=['if'], data=[]]
else:
override_dict['IntType'] = int # Python >= 2.2
if 'format' not in str.__dict__:
override_dict['str'] = _Internal.ExtStr # depends on [control=['if'], data=[]]
# Function 'input'
if builtins_dict.get('raw_input') is not None:
override_dict['input'] = builtins_dict.get('raw_input') # depends on [control=['if'], data=[]]
override_dict['raw_input'] = _deprecated
# Function 'print' (also aliased as print_)
if sys.version_info >= (3, 3):
used_print = builtins_dict.get('print') # depends on [control=['if'], data=[]]
else:
orig_print = builtins_dict.get('print')
if orig_print is not None:
used_print = _print_wrapper # depends on [control=['if'], data=[]]
else:
used_print = _print_full
override_dict['print'] = used_print
override_dict['print_'] = used_print
# Function 'sorted'
if builtins_dict.get('sorted') is None:
override_dict['sorted'] = _sorted # depends on [control=['if'], data=[]]
# Function 'format'
if builtins_dict.get('format') is None:
override_dict['format'] = _format # depends on [control=['if'], data=[]]
override_dict[__name__] = True
builtins_dict.update(override_dict)
del override_dict |
def finalize(self, **kwargs):
    """
    Apply the finishing visual touches to the plot: figure title,
    legend, x-tick spacing and axis labels.  ``loc`` and ``edgecolor``
    keyword arguments override the legend placement and border color.
    """
    # Title names the model whose cross-validation scores are shown.
    self.set_title('Cross Validation Scores for {}'.format(self.name))
    # Legend placement/styling, overridable by the caller.
    self.ax.legend(
        frameon=True,
        loc=kwargs.pop("loc", "best"),
        edgecolor=kwargs.pop("edgecolor", "k"),
    )
    # One major tick per training split on the x axis.
    self.ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    # Axis labels.
    self.ax.set_xlabel('Training Instances')
    self.ax.set_ylabel('Score')
constant[
Add the title, legend, and other visual final touches to the plot.
]
call[name[self].set_title, parameter[call[constant[Cross Validation Scores for {}].format, parameter[name[self].name]]]]
variable[loc] assign[=] call[name[kwargs].pop, parameter[constant[loc], constant[best]]]
variable[edgecolor] assign[=] call[name[kwargs].pop, parameter[constant[edgecolor], constant[k]]]
call[name[self].ax.legend, parameter[]]
call[name[self].ax.xaxis.set_major_locator, parameter[call[name[ticker].MultipleLocator, parameter[constant[1]]]]]
call[name[self].ax.set_xlabel, parameter[constant[Training Instances]]]
call[name[self].ax.set_ylabel, parameter[constant[Score]]] | keyword[def] identifier[finalize] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[set_title] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
identifier[loc] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[edgecolor] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[self] . identifier[ax] . identifier[legend] ( identifier[frameon] = keyword[True] , identifier[loc] = identifier[loc] , identifier[edgecolor] = identifier[edgecolor] )
identifier[self] . identifier[ax] . identifier[xaxis] . identifier[set_major_locator] ( identifier[ticker] . identifier[MultipleLocator] ( literal[int] ))
identifier[self] . identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[self] . identifier[ax] . identifier[set_ylabel] ( literal[string] ) | def finalize(self, **kwargs):
"""
Add the title, legend, and other visual final touches to the plot.
"""
# Set the title of the figure
self.set_title('Cross Validation Scores for {}'.format(self.name))
# Add the legend
loc = kwargs.pop('loc', 'best')
edgecolor = kwargs.pop('edgecolor', 'k')
self.ax.legend(frameon=True, loc=loc, edgecolor=edgecolor)
# set spacing between the x ticks
self.ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
# Set the axis labels
self.ax.set_xlabel('Training Instances')
self.ax.set_ylabel('Score') |
def randmatrix(m, n, random_seed=None):
    """Return an m x n matrix of random values drawn with the Xavier
    Glorot uniform scheme: entries are sampled uniformly from
    [-sqrt(6/(m+n)), sqrt(6/(m+n))]."""
    bound = np.sqrt(6.0 / (m + n))
    # Seed the global NumPy RNG so results are reproducible on request
    # (passing None reseeds from system entropy, matching np.random.seed).
    np.random.seed(random_seed)
    return np.random.uniform(low=-bound, high=bound, size=(m, n))
constant[Creates an m x n matrix of random values drawn using
the Xavier Glorot method.]
variable[val] assign[=] call[name[np].sqrt, parameter[binary_operation[constant[6.0] / binary_operation[name[m] + name[n]]]]]
call[name[np].random.seed, parameter[name[random_seed]]]
return[call[name[np].random.uniform, parameter[<ast.UnaryOp object at 0x7da20e956e60>, name[val]]]] | keyword[def] identifier[randmatrix] ( identifier[m] , identifier[n] , identifier[random_seed] = keyword[None] ):
literal[string]
identifier[val] = identifier[np] . identifier[sqrt] ( literal[int] /( identifier[m] + identifier[n] ))
identifier[np] . identifier[random] . identifier[seed] ( identifier[random_seed] )
keyword[return] identifier[np] . identifier[random] . identifier[uniform] (- identifier[val] , identifier[val] , identifier[size] =( identifier[m] , identifier[n] )) | def randmatrix(m, n, random_seed=None):
"""Creates an m x n matrix of random values drawn using
the Xavier Glorot method."""
val = np.sqrt(6.0 / (m + n))
np.random.seed(random_seed)
return np.random.uniform(-val, val, size=(m, n)) |
def isEnabled( self ):
    """
    Returns whether or not this node is enabled.
    A node that is linked to a layer and flagged to disable with it
    is only considered enabled while that layer is enabled too.
    """
    layer_ok = True
    if self._disableWithLayer and self._layer:
        layer_ok = self._layer.isEnabled()
    return self._enabled and layer_ok
constant[
Returns whether or not this node is enabled.
]
if <ast.BoolOp object at 0x7da18f09d1b0> begin[:]
variable[lenabled] assign[=] call[name[self]._layer.isEnabled, parameter[]]
return[<ast.BoolOp object at 0x7da18f09c190>] | keyword[def] identifier[isEnabled] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_disableWithLayer] keyword[and] identifier[self] . identifier[_layer] ):
identifier[lenabled] = identifier[self] . identifier[_layer] . identifier[isEnabled] ()
keyword[else] :
identifier[lenabled] = keyword[True]
keyword[return] identifier[self] . identifier[_enabled] keyword[and] identifier[lenabled] | def isEnabled(self):
"""
Returns whether or not this node is enabled.
"""
if self._disableWithLayer and self._layer:
lenabled = self._layer.isEnabled() # depends on [control=['if'], data=[]]
else:
lenabled = True
return self._enabled and lenabled |
def ding0_graph_to_routing_specs(graph):
    """ Build data dictionary from graph nodes for routing (translation)
    Args
    ----
    graph: :networkx:`NetworkX Graph Obj< >`
        NetworkX graph object with nodes
    Returns
    -------
    :obj:`dict`
        Data dictionary for routing with keys DEPOT, BRANCH_KIND,
        BRANCH_TYPE, V_LEVEL, NODE_COORD_SECTION, DEMAND, MATRIX and
        IS_AGGREGATED.
    See Also
    --------
    ding0.grid.mv_grid.models.models.Graph : for keys of return dict
    """
    # Power factor used to convert peak load into apparent power demand.
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')

    specs = {}
    nodes_demands = {}
    nodes_pos = {}
    nodes_agg = {}

    # Satellites are only included in routing when the grid district
    # consists exclusively of satellite/aggregated load areas.
    satellites_only = not any(
        isinstance(n, LVLoadAreaCentreDing0)
        and not n.lv_load_area.is_satellite
        and not n.lv_load_area.is_aggregated
        for n in graph.nodes()
    )

    for n in graph.nodes():
        name = str(n)
        if isinstance(n, LVLoadAreaCentreDing0):
            # Only major stations join the MV ring (satellites too when
            # the district contains nothing but satellites).
            if not n.lv_load_area.is_satellite or satellites_only:
                # Demand is cast to int for performance and so that
                # allocation followed by deallocation cannot leave a
                # demand < 0 through rounding errors.
                nodes_demands[name] = int(n.lv_load_area.peak_load / cos_phi_load)
                nodes_pos[name] = (n.geo_data.x, n.geo_data.y)
                nodes_agg[name] = bool(n.lv_load_area.is_aggregated)
        elif isinstance(n, MVStationDing0):
            # The MV station acts as the routing depot with zero demand.
            nodes_demands[name] = 0
            nodes_pos[name] = (n.geo_data.x, n.geo_data.y)
            specs['DEPOT'] = name
            specs['BRANCH_KIND'] = n.grid.default_branch_kind
            specs['BRANCH_TYPE'] = n.grid.default_branch_type
            specs['V_LEVEL'] = n.grid.v_level

    specs['NODE_COORD_SECTION'] = nodes_pos
    specs['DEMAND'] = nodes_demands
    specs['MATRIX'] = calc_geo_dist_matrix_vincenty(nodes_pos)
    specs['IS_AGGREGATED'] = nodes_agg
    return specs
constant[ Build data dictionary from graph nodes for routing (translation)
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
Returns
-------
:obj:`dict`
Data dictionary for routing.
See Also
--------
ding0.grid.mv_grid.models.models.Graph : for keys of return dict
]
variable[cos_phi_load] assign[=] call[name[cfg_ding0].get, parameter[constant[assumptions], constant[cos_phi_load]]]
variable[specs] assign[=] dictionary[[], []]
variable[nodes_demands] assign[=] dictionary[[], []]
variable[nodes_pos] assign[=] dictionary[[], []]
variable[nodes_agg] assign[=] dictionary[[], []]
variable[satellites_only] assign[=] constant[True]
for taget[name[node]] in starred[call[name[graph].nodes, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[node], name[LVLoadAreaCentreDing0]]] begin[:]
if <ast.BoolOp object at 0x7da1b05917b0> begin[:]
variable[satellites_only] assign[=] constant[False]
for taget[name[node]] in starred[call[name[graph].nodes, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[node], name[LVLoadAreaCentreDing0]]] begin[:]
if <ast.BoolOp object at 0x7da1b0590310> begin[:]
call[name[nodes_demands]][call[name[str], parameter[name[node]]]] assign[=] call[name[int], parameter[binary_operation[name[node].lv_load_area.peak_load / name[cos_phi_load]]]]
call[name[nodes_pos]][call[name[str], parameter[name[node]]]] assign[=] tuple[[<ast.Attribute object at 0x7da1b05925f0>, <ast.Attribute object at 0x7da1b0593070>]]
if name[node].lv_load_area.is_aggregated begin[:]
call[name[nodes_agg]][call[name[str], parameter[name[node]]]] assign[=] constant[True]
call[name[specs]][constant[NODE_COORD_SECTION]] assign[=] name[nodes_pos]
call[name[specs]][constant[DEMAND]] assign[=] name[nodes_demands]
call[name[specs]][constant[MATRIX]] assign[=] call[name[calc_geo_dist_matrix_vincenty], parameter[name[nodes_pos]]]
call[name[specs]][constant[IS_AGGREGATED]] assign[=] name[nodes_agg]
return[name[specs]] | keyword[def] identifier[ding0_graph_to_routing_specs] ( identifier[graph] ):
literal[string]
identifier[cos_phi_load] = identifier[cfg_ding0] . identifier[get] ( literal[string] , literal[string] )
identifier[specs] ={}
identifier[nodes_demands] ={}
identifier[nodes_pos] ={}
identifier[nodes_agg] ={}
identifier[satellites_only] = keyword[True]
keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[nodes] ():
keyword[if] identifier[isinstance] ( identifier[node] , identifier[LVLoadAreaCentreDing0] ):
keyword[if] keyword[not] identifier[node] . identifier[lv_load_area] . identifier[is_satellite] keyword[and] keyword[not] identifier[node] . identifier[lv_load_area] . identifier[is_aggregated] :
identifier[satellites_only] = keyword[False]
keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[nodes] ():
keyword[if] identifier[isinstance] ( identifier[node] , identifier[LVLoadAreaCentreDing0] ):
keyword[if] keyword[not] identifier[node] . identifier[lv_load_area] . identifier[is_satellite] keyword[or] identifier[satellites_only] :
identifier[nodes_demands] [ identifier[str] ( identifier[node] )]= identifier[int] ( identifier[node] . identifier[lv_load_area] . identifier[peak_load] / identifier[cos_phi_load] )
identifier[nodes_pos] [ identifier[str] ( identifier[node] )]=( identifier[node] . identifier[geo_data] . identifier[x] , identifier[node] . identifier[geo_data] . identifier[y] )
keyword[if] identifier[node] . identifier[lv_load_area] . identifier[is_aggregated] :
identifier[nodes_agg] [ identifier[str] ( identifier[node] )]= keyword[True]
keyword[else] :
identifier[nodes_agg] [ identifier[str] ( identifier[node] )]= keyword[False]
keyword[elif] identifier[isinstance] ( identifier[node] , identifier[MVStationDing0] ):
identifier[nodes_demands] [ identifier[str] ( identifier[node] )]= literal[int]
identifier[nodes_pos] [ identifier[str] ( identifier[node] )]=( identifier[node] . identifier[geo_data] . identifier[x] , identifier[node] . identifier[geo_data] . identifier[y] )
identifier[specs] [ literal[string] ]= identifier[str] ( identifier[node] )
identifier[specs] [ literal[string] ]= identifier[node] . identifier[grid] . identifier[default_branch_kind]
identifier[specs] [ literal[string] ]= identifier[node] . identifier[grid] . identifier[default_branch_type]
identifier[specs] [ literal[string] ]= identifier[node] . identifier[grid] . identifier[v_level]
identifier[specs] [ literal[string] ]= identifier[nodes_pos]
identifier[specs] [ literal[string] ]= identifier[nodes_demands]
identifier[specs] [ literal[string] ]= identifier[calc_geo_dist_matrix_vincenty] ( identifier[nodes_pos] )
identifier[specs] [ literal[string] ]= identifier[nodes_agg]
keyword[return] identifier[specs] | def ding0_graph_to_routing_specs(graph):
""" Build data dictionary from graph nodes for routing (translation)
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
Returns
-------
:obj:`dict`
Data dictionary for routing.
See Also
--------
ding0.grid.mv_grid.models.models.Graph : for keys of return dict
"""
# get power factor for loads
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
specs = {}
nodes_demands = {}
nodes_pos = {}
nodes_agg = {}
# check if there are only load areas of type aggregated and satellite
# -> treat satellites as normal load areas (allow for routing)
satellites_only = True
for node in graph.nodes():
if isinstance(node, LVLoadAreaCentreDing0):
if not node.lv_load_area.is_satellite and (not node.lv_load_area.is_aggregated):
satellites_only = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
for node in graph.nodes():
# station is LV station
if isinstance(node, LVLoadAreaCentreDing0):
# only major stations are connected via MV ring
# (satellites in case of there're only satellites in grid district)
if not node.lv_load_area.is_satellite or satellites_only:
# get demand and position of node
# convert node's demand to int for performance purposes and to avoid that node
# allocation with subsequent deallocation results in demand<0 due to rounding errors.
nodes_demands[str(node)] = int(node.lv_load_area.peak_load / cos_phi_load)
nodes_pos[str(node)] = (node.geo_data.x, node.geo_data.y)
# get aggregation flag
if node.lv_load_area.is_aggregated:
nodes_agg[str(node)] = True # depends on [control=['if'], data=[]]
else:
nodes_agg[str(node)] = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# station is MV station
elif isinstance(node, MVStationDing0):
nodes_demands[str(node)] = 0
nodes_pos[str(node)] = (node.geo_data.x, node.geo_data.y)
specs['DEPOT'] = str(node)
specs['BRANCH_KIND'] = node.grid.default_branch_kind
specs['BRANCH_TYPE'] = node.grid.default_branch_type
specs['V_LEVEL'] = node.grid.v_level # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
specs['NODE_COORD_SECTION'] = nodes_pos
specs['DEMAND'] = nodes_demands
specs['MATRIX'] = calc_geo_dist_matrix_vincenty(nodes_pos)
specs['IS_AGGREGATED'] = nodes_agg
return specs |
def rank_permutation(r, n):
    """Return the permutation of {0, .., n-1} with lexicographic rank r.

    :param r: rank, an integer with 0 <= r < n!
    :param n: size of the permuted set
    :returns: the permutation as a list of n integers
    :beware: computation with big numbers
    :complexity: `O(n^2)`
    """
    weight = 1                      # (n-1)! : weight of the leading digit
    for k in range(2, n):
        weight *= k
    remaining = list(range(n))      # digits not yet placed, ascending
    perm = []
    for position in range(n):
        # Decompose r = idx * weight + remainder; idx picks the next digit.
        idx, r = divmod(r, weight)
        perm.append(remaining.pop(idx))
        if position != n - 1:
            weight //= n - 1 - position     # weight of the next position
    return perm
constant[Given r and n find the permutation of {0,..,n-1} with rank according to lexicographical order equal to r
:param r n: integers with 0 ≤ r < n!
:returns: permutation p as a list of n integers
:beware: computation with big numbers
:complexity: `O(n^2)`
]
variable[fact] assign[=] constant[1]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], name[n]]]] begin[:]
<ast.AugAssign object at 0x7da20c993010>
variable[digits] assign[=] call[name[list], parameter[call[name[range], parameter[name[n]]]]]
variable[p] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
variable[q] assign[=] binary_operation[name[r] <ast.FloorDiv object at 0x7da2590d6bc0> name[fact]]
<ast.AugAssign object at 0x7da18bcc8e80>
call[name[p].append, parameter[call[name[digits]][name[q]]]]
<ast.Delete object at 0x7da18bcc93f0>
if compare[name[i] not_equal[!=] binary_operation[name[n] - constant[1]]] begin[:]
<ast.AugAssign object at 0x7da1b07f5ba0>
return[name[p]] | keyword[def] identifier[rank_permutation] ( identifier[r] , identifier[n] ):
literal[string]
identifier[fact] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] ):
identifier[fact] *= identifier[i]
identifier[digits] = identifier[list] ( identifier[range] ( identifier[n] ))
identifier[p] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[q] = identifier[r] // identifier[fact]
identifier[r] %= identifier[fact]
identifier[p] . identifier[append] ( identifier[digits] [ identifier[q] ])
keyword[del] identifier[digits] [ identifier[q] ]
keyword[if] identifier[i] != identifier[n] - literal[int] :
identifier[fact] //=( identifier[n] - literal[int] - identifier[i] )
keyword[return] identifier[p] | def rank_permutation(r, n):
"""Given r and n find the permutation of {0,..,n-1} with rank according to lexicographical order equal to r
:param r n: integers with 0 ≤ r < n!
:returns: permutation p as a list of n integers
:beware: computation with big numbers
:complexity: `O(n^2)`
"""
fact = 1 # compute (n-1) factorial
for i in range(2, n):
fact *= i # depends on [control=['for'], data=['i']]
digits = list(range(n)) # all yet unused digits
p = [] # build permutation
for i in range(n):
q = r // fact # by decomposing r = q * fact + rest
r %= fact
p.append(digits[q])
del digits[q] # remove digit at position q
if i != n - 1:
fact //= n - 1 - i # weight of next digit # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=['i']]
return p |
def cli(env, zone):
    """Delete zone."""
    manager = SoftLayer.DNSManager(env.client)
    zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone')
    # Ask for confirmation unless the user opted out of prompts.
    confirmed = env.skip_confirmations or formatting.no_going_back(zone)
    if not confirmed:
        raise exceptions.CLIAbort("Aborted.")
    manager.delete_zone(zone_id)
constant[Delete zone.]
variable[manager] assign[=] call[name[SoftLayer].DNSManager, parameter[name[env].client]]
variable[zone_id] assign[=] call[name[helpers].resolve_id, parameter[name[manager].resolve_ids, name[zone]]]
if <ast.UnaryOp object at 0x7da20e957d30> begin[:]
<ast.Raise object at 0x7da20e9552a0>
call[name[manager].delete_zone, parameter[name[zone_id]]] | keyword[def] identifier[cli] ( identifier[env] , identifier[zone] ):
literal[string]
identifier[manager] = identifier[SoftLayer] . identifier[DNSManager] ( identifier[env] . identifier[client] )
identifier[zone_id] = identifier[helpers] . identifier[resolve_id] ( identifier[manager] . identifier[resolve_ids] , identifier[zone] , identifier[name] = literal[string] )
keyword[if] keyword[not] ( identifier[env] . identifier[skip_confirmations] keyword[or] identifier[formatting] . identifier[no_going_back] ( identifier[zone] )):
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] )
identifier[manager] . identifier[delete_zone] ( identifier[zone_id] ) | def cli(env, zone):
"""Delete zone."""
manager = SoftLayer.DNSManager(env.client)
zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone')
if not (env.skip_confirmations or formatting.no_going_back(zone)):
raise exceptions.CLIAbort('Aborted.') # depends on [control=['if'], data=[]]
manager.delete_zone(zone_id) |
def file_pour(filepath, block_size=10240, *args, **kwargs):
    """Write physical files from entries."""

    def open_archive(archive_res):
        # Attach the libarchive read handle to the file at *filepath*.
        _LOGGER.debug("Opening from file (file_pour): %s", filepath)
        _archive_read_open_filename(archive_res, filepath, block_size)

    return _pour(open_archive, *args, flags=0, **kwargs)
constant[Write physical files from entries.]
def function[opener, parameter[archive_res]]:
call[name[_LOGGER].debug, parameter[constant[Opening from file (file_pour): %s], name[filepath]]]
call[name[_archive_read_open_filename], parameter[name[archive_res], name[filepath], name[block_size]]]
return[call[name[_pour], parameter[name[opener], <ast.Starred object at 0x7da1b0df41c0>]]] | keyword[def] identifier[file_pour] ( identifier[filepath] , identifier[block_size] = literal[int] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[opener] ( identifier[archive_res] ):
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[filepath] )
identifier[_archive_read_open_filename] ( identifier[archive_res] , identifier[filepath] , identifier[block_size] )
keyword[return] identifier[_pour] ( identifier[opener] ,* identifier[args] , identifier[flags] = literal[int] ,** identifier[kwargs] ) | def file_pour(filepath, block_size=10240, *args, **kwargs):
"""Write physical files from entries."""
def opener(archive_res):
_LOGGER.debug('Opening from file (file_pour): %s', filepath)
_archive_read_open_filename(archive_res, filepath, block_size)
return _pour(opener, *args, flags=0, **kwargs) |
def wrpcap(filename, pkt, *args, **kargs):
    """Dump a list of packets to a pcap file
    gz: set to 1 to save a gzipped capture
    linktype: force linktype value
    endianness: "<" or ">", force endianness"""
    writer = PcapWriter(filename, *args, **kargs)
    with writer as pcap_out:
        pcap_out.write(pkt)
constant[Write a list of packets to a pcap file
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness]
with call[name[PcapWriter], parameter[name[filename], <ast.Starred object at 0x7da1b12abcd0>]] begin[:]
call[name[pcap].write, parameter[name[pkt]]] | keyword[def] identifier[wrpcap] ( identifier[filename] , identifier[pkt] ,* identifier[args] ,** identifier[kargs] ):
literal[string]
keyword[with] identifier[PcapWriter] ( identifier[filename] ,* identifier[args] ,** identifier[kargs] ) keyword[as] identifier[pcap] :
identifier[pcap] . identifier[write] ( identifier[pkt] ) | def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness"""
with PcapWriter(filename, *args, **kargs) as pcap:
pcap.write(pkt) # depends on [control=['with'], data=['pcap']] |
def execute(self, sql, values=()):
    """Execute *sql* for you (or queue it on the current transaction).

    The statement is stripped of surrounding whitespace and reported to
    the debug hook before running.

    :param sql: SQL statement, optionally with placeholders
    :param values: parameters bound to the placeholders
    :returns: the cursor that ran the statement
    """
    statement = sql.strip()
    self._debugActions(statement, values)
    cursor = self.connection.cursor()
    cursor.execute(statement, values)
    return cursor
constant[executes an sql command for you or appends it to the current transacations. returns a cursor]
variable[sql] assign[=] call[name[sql].strip, parameter[]]
call[name[self]._debugActions, parameter[name[sql], name[values]]]
variable[cur] assign[=] call[name[self].connection.cursor, parameter[]]
call[name[cur].execute, parameter[name[sql], name[values]]]
return[name[cur]] | keyword[def] identifier[execute] ( identifier[self] , identifier[sql] , identifier[values] =()):
literal[string]
identifier[sql] = identifier[sql] . identifier[strip] ()
identifier[self] . identifier[_debugActions] ( identifier[sql] , identifier[values] )
identifier[cur] = identifier[self] . identifier[connection] . identifier[cursor] ()
identifier[cur] . identifier[execute] ( identifier[sql] , identifier[values] )
keyword[return] identifier[cur] | def execute(self, sql, values=()):
"""executes an sql command for you or appends it to the current transacations. returns a cursor"""
sql = sql.strip()
self._debugActions(sql, values)
cur = self.connection.cursor()
cur.execute(sql, values)
return cur |
def create(self, **kwargs):
    """
    Creates a new post.
    When the `markdown` property is present, it will be
    automatically converted to `mobiledoc` on v1.+ of the server.
    :param kwargs: The properties of the post
    :return: The created `Post` object
    """
    payload = self._with_markdown(kwargs)
    return super(PostController, self).create(**payload)
constant[
Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object
]
return[call[call[name[super], parameter[name[PostController], name[self]]].create, parameter[]]] | keyword[def] identifier[create] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[super] ( identifier[PostController] , identifier[self] ). identifier[create] (** identifier[self] . identifier[_with_markdown] ( identifier[kwargs] )) | def create(self, **kwargs):
"""
Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object
"""
return super(PostController, self).create(**self._with_markdown(kwargs)) |
def _write(self, context, report_dir, report_name, assets_dir=None,
template=None):
"""Writes the data in `context` in the report's template to
`report_name` in `report_dir`.
If `assets_dir` is supplied, copies all assets for this report
to the specified directory.
If `template` is supplied, uses that template instead of
automatically finding it. This is useful if a single report
generates multiple files using the same template.
:param context: context data to render within the template
:type context: `dict`
:param report_dir: directory to write the report to
:type report_dir: `str`
:param report_name: name of file to write the report to
:type report_name: `str`
:param assets_dir: optional directory to output report assets to
:type assets_dir: `str`
:param template: template to render and output
:type template: `jinja2.Template`
"""
if template is None:
template = self._get_template()
report = template.render(context)
output_file = os.path.join(report_dir, report_name)
with open(output_file, 'w', encoding='utf-8') as fh:
fh.write(report)
if assets_dir:
self._copy_static_assets(assets_dir) | def function[_write, parameter[self, context, report_dir, report_name, assets_dir, template]]:
constant[Writes the data in `context` in the report's template to
`report_name` in `report_dir`.
If `assets_dir` is supplied, copies all assets for this report
to the specified directory.
If `template` is supplied, uses that template instead of
automatically finding it. This is useful if a single report
generates multiple files using the same template.
:param context: context data to render within the template
:type context: `dict`
:param report_dir: directory to write the report to
:type report_dir: `str`
:param report_name: name of file to write the report to
:type report_name: `str`
:param assets_dir: optional directory to output report assets to
:type assets_dir: `str`
:param template: template to render and output
:type template: `jinja2.Template`
]
if compare[name[template] is constant[None]] begin[:]
variable[template] assign[=] call[name[self]._get_template, parameter[]]
variable[report] assign[=] call[name[template].render, parameter[name[context]]]
variable[output_file] assign[=] call[name[os].path.join, parameter[name[report_dir], name[report_name]]]
with call[name[open], parameter[name[output_file], constant[w]]] begin[:]
call[name[fh].write, parameter[name[report]]]
if name[assets_dir] begin[:]
call[name[self]._copy_static_assets, parameter[name[assets_dir]]] | keyword[def] identifier[_write] ( identifier[self] , identifier[context] , identifier[report_dir] , identifier[report_name] , identifier[assets_dir] = keyword[None] ,
identifier[template] = keyword[None] ):
literal[string]
keyword[if] identifier[template] keyword[is] keyword[None] :
identifier[template] = identifier[self] . identifier[_get_template] ()
identifier[report] = identifier[template] . identifier[render] ( identifier[context] )
identifier[output_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[report_dir] , identifier[report_name] )
keyword[with] identifier[open] ( identifier[output_file] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[report] )
keyword[if] identifier[assets_dir] :
identifier[self] . identifier[_copy_static_assets] ( identifier[assets_dir] ) | def _write(self, context, report_dir, report_name, assets_dir=None, template=None):
"""Writes the data in `context` in the report's template to
`report_name` in `report_dir`.
If `assets_dir` is supplied, copies all assets for this report
to the specified directory.
If `template` is supplied, uses that template instead of
automatically finding it. This is useful if a single report
generates multiple files using the same template.
:param context: context data to render within the template
:type context: `dict`
:param report_dir: directory to write the report to
:type report_dir: `str`
:param report_name: name of file to write the report to
:type report_name: `str`
:param assets_dir: optional directory to output report assets to
:type assets_dir: `str`
:param template: template to render and output
:type template: `jinja2.Template`
"""
if template is None:
template = self._get_template() # depends on [control=['if'], data=['template']]
report = template.render(context)
output_file = os.path.join(report_dir, report_name)
with open(output_file, 'w', encoding='utf-8') as fh:
fh.write(report) # depends on [control=['with'], data=['fh']]
if assets_dir:
self._copy_static_assets(assets_dir) # depends on [control=['if'], data=[]] |
def special_parse_process_python_code(sourcecode):
    r"""
    Experimental: round-trip *sourcecode* through two parsers and print the
    regenerated source (astor loses comments; redbaron keeps them).

    pip install redbaron
    http://stackoverflow.com/questions/7456933/python-ast-with-preserved-comments
    CommandLine:
        python -m utool.util_inspect special_parse_process_python_code --show
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_inspect import *  # NOQA
        >>> import utool as ut
        >>> sourcecode = ut.read_from(ut.util_inspect.__file__)
        >>> result = special_parse_process_python_code(sourcecode)
        >>> print(result)
    """
    import ast
    import astor
    #sourcecode = 'from __future__ import print_function\n' + sourcecode
    # ast.parse drops comments; astor regenerates source from the bare tree.
    sourcecode_ = sourcecode.encode('utf8')
    pt = ast.parse(sourcecode_, 'testfile')
    generator = astor.codegen.SourceGenerator(' ' * 4)
    generator.visit(pt)
    resturctured_source = (''.join(generator.result))
    print(resturctured_source)
    # Bare NodeVisitor walk: no handlers registered, so this is a dry run.
    visitor = ast.NodeVisitor()
    visitor.visit(pt)
    import redbaron
    # Parses a FULL syntax tree that keeps block comments.
    baron = redbaron.RedBaron(sourcecode)
    #fst = baron.fst()
    # NOTE(review): index 54 looks like a probe left over from interactive
    # exploration — confirm before relying on it.
    node = (baron.node_list[54])  # NOQA
    # Expression statement is intentionally discarded (inspection leftover).
    [n.type for n in baron.node_list]
constant[
pip install redbaron
http://stackoverflow.com/questions/7456933/python-ast-with-preserved-comments
CommandLine:
python -m utool.util_inspect special_parse_process_python_code --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> sourcecode = ut.read_from(ut.util_inspect.__file__)
>>> result = special_parse_process_python_code(sourcecode)
>>> print(result)
]
import module[ast]
import module[astor]
variable[sourcecode_] assign[=] call[name[sourcecode].encode, parameter[constant[utf8]]]
variable[pt] assign[=] call[name[ast].parse, parameter[name[sourcecode_], constant[testfile]]]
variable[generator] assign[=] call[name[astor].codegen.SourceGenerator, parameter[binary_operation[constant[ ] * constant[4]]]]
call[name[generator].visit, parameter[name[pt]]]
variable[resturctured_source] assign[=] call[constant[].join, parameter[name[generator].result]]
call[name[print], parameter[name[resturctured_source]]]
variable[visitor] assign[=] call[name[ast].NodeVisitor, parameter[]]
call[name[visitor].visit, parameter[name[pt]]]
import module[redbaron]
variable[baron] assign[=] call[name[redbaron].RedBaron, parameter[name[sourcecode]]]
variable[node] assign[=] call[name[baron].node_list][constant[54]]
<ast.ListComp object at 0x7da1b24e9930> | keyword[def] identifier[special_parse_process_python_code] ( identifier[sourcecode] ):
literal[string]
keyword[import] identifier[ast]
keyword[import] identifier[astor]
identifier[sourcecode_] = identifier[sourcecode] . identifier[encode] ( literal[string] )
identifier[pt] = identifier[ast] . identifier[parse] ( identifier[sourcecode_] , literal[string] )
identifier[generator] = identifier[astor] . identifier[codegen] . identifier[SourceGenerator] ( literal[string] * literal[int] )
identifier[generator] . identifier[visit] ( identifier[pt] )
identifier[resturctured_source] =( literal[string] . identifier[join] ( identifier[generator] . identifier[result] ))
identifier[print] ( identifier[resturctured_source] )
identifier[visitor] = identifier[ast] . identifier[NodeVisitor] ()
identifier[visitor] . identifier[visit] ( identifier[pt] )
keyword[import] identifier[redbaron]
identifier[baron] = identifier[redbaron] . identifier[RedBaron] ( identifier[sourcecode] )
identifier[node] =( identifier[baron] . identifier[node_list] [ literal[int] ])
[ identifier[n] . identifier[type] keyword[for] identifier[n] keyword[in] identifier[baron] . identifier[node_list] ] | def special_parse_process_python_code(sourcecode):
"""
pip install redbaron
http://stackoverflow.com/questions/7456933/python-ast-with-preserved-comments
CommandLine:
python -m utool.util_inspect special_parse_process_python_code --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> sourcecode = ut.read_from(ut.util_inspect.__file__)
>>> result = special_parse_process_python_code(sourcecode)
>>> print(result)
"""
import ast
import astor
#sourcecode = 'from __future__ import print_function\n' + sourcecode
sourcecode_ = sourcecode.encode('utf8')
pt = ast.parse(sourcecode_, 'testfile')
generator = astor.codegen.SourceGenerator(' ' * 4)
generator.visit(pt)
resturctured_source = ''.join(generator.result)
print(resturctured_source)
visitor = ast.NodeVisitor()
visitor.visit(pt)
import redbaron
# Pares a FULL syntax tree that keeps blockcomments
baron = redbaron.RedBaron(sourcecode)
#fst = baron.fst()
node = baron.node_list[54] # NOQA
[n.type for n in baron.node_list] |
def is_checkmate(self):
    """Return True if the current position is a checkmate.

    Mate requires the side to move to be in check while having no legal
    move that resolves it.
    """
    if not self.is_check():
        return False
    # A single legal move disproves mate; an exhausted generator proves it.
    for _move in self.generate_legal_moves():
        return False
    return True
constant[Checks if the current position is a checkmate.]
if <ast.UnaryOp object at 0x7da1b03fa650> begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b03fbd90> | keyword[def] identifier[is_checkmate] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_check] ():
keyword[return] keyword[False]
keyword[try] :
identifier[next] ( identifier[self] . identifier[generate_legal_moves] (). identifier[__iter__] ())
keyword[return] keyword[False]
keyword[except] identifier[StopIteration] :
keyword[return] keyword[True] | def is_checkmate(self):
"""Checks if the current position is a checkmate."""
if not self.is_check():
return False # depends on [control=['if'], data=[]]
try:
next(self.generate_legal_moves().__iter__())
return False # depends on [control=['try'], data=[]]
except StopIteration:
return True # depends on [control=['except'], data=[]] |
def pre_process(self, xout, y0, params=()):
    """ Transforms input to internal values, used internally. """
    # Each pre-processor maps (xout, y0, params) -> (xout, y0, params).
    for transform in self.pre_processors:
        xout, y0, params = transform(xout, y0, params)
    np = self.numpy
    return [np.atleast_1d(value) for value in (xout, y0, params)]
constant[ Transforms input to internal values, used internally. ]
for taget[name[pre_processor]] in starred[name[self].pre_processors] begin[:]
<ast.Tuple object at 0x7da207f01b40> assign[=] call[name[pre_processor], parameter[name[xout], name[y0], name[params]]]
return[<ast.ListComp object at 0x7da207f02230>] | keyword[def] identifier[pre_process] ( identifier[self] , identifier[xout] , identifier[y0] , identifier[params] =()):
literal[string]
keyword[for] identifier[pre_processor] keyword[in] identifier[self] . identifier[pre_processors] :
identifier[xout] , identifier[y0] , identifier[params] = identifier[pre_processor] ( identifier[xout] , identifier[y0] , identifier[params] )
keyword[return] [ identifier[self] . identifier[numpy] . identifier[atleast_1d] ( identifier[arr] ) keyword[for] identifier[arr] keyword[in] ( identifier[xout] , identifier[y0] , identifier[params] )] | def pre_process(self, xout, y0, params=()):
""" Transforms input to internal values, used internally. """
for pre_processor in self.pre_processors:
(xout, y0, params) = pre_processor(xout, y0, params) # depends on [control=['for'], data=['pre_processor']]
return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)] |
def DisableCronJob(self, cronjob_id):
    """Disables a cronjob.

    Raises:
      db.UnknownCronJobError: if no cron job with that id exists.
    """
    job = self.cronjobs.get(cronjob_id)
    if job is not None:
        job.enabled = False
    else:
        raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id)
constant[Disables a cronjob.]
variable[job] assign[=] call[name[self].cronjobs.get, parameter[name[cronjob_id]]]
if compare[name[job] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1d92080>
name[job].enabled assign[=] constant[False] | keyword[def] identifier[DisableCronJob] ( identifier[self] , identifier[cronjob_id] ):
literal[string]
identifier[job] = identifier[self] . identifier[cronjobs] . identifier[get] ( identifier[cronjob_id] )
keyword[if] identifier[job] keyword[is] keyword[None] :
keyword[raise] identifier[db] . identifier[UnknownCronJobError] ( literal[string] % identifier[cronjob_id] )
identifier[job] . identifier[enabled] = keyword[False] | def DisableCronJob(self, cronjob_id):
"""Disables a cronjob."""
job = self.cronjobs.get(cronjob_id)
if job is None:
raise db.UnknownCronJobError('Cron job %s not known.' % cronjob_id) # depends on [control=['if'], data=[]]
job.enabled = False |
def render(self, at):
    """Draw the gauge state at time *at* onto ``self.surf`` and return it.

    Layout: full-width background strip, a color-coded fill bar whose
    width is the current/max ratio, the textual "current/max" value on
    the left and, when the gauge is moving, its signed velocity per
    second on the right.
    """
    # draw bg
    surf = self.surf
    surf.fill(BASE3)
    bg = pygame.Surface((self.size[0], self.bar_height))
    bg.fill(BASE2)
    surf.blit(bg, (0, 0))
    # draw bar; the color encodes how full the gauge currently is
    ratio = self.gauge.get(at) / float(self.gauge.max(at))
    if ratio > 1:
        bar_color = BLUE
        ratio = 1  # clamp overfill so the bar stays inside the widget
    elif ratio == 1:
        bar_color = CYAN
    elif ratio > 0.3:
        bar_color = GREEN
    elif ratio > 0.1:
        bar_color = YELLOW
    elif ratio > 0:
        bar_color = ORANGE
    # NOTE(review): bar_color stays unbound for ratio <= 0; safe only
    # because the drawing below is guarded by the same `ratio > 0` test.
    if ratio > 0:
        bar = pygame.Surface((int(self.size[0] * ratio), self.bar_height))
        bar.fill(bar_color)
        surf.blit(bar, (0, 0))
    # write current state as "current/max" on the left
    text = font.render('{0}/{1}'.format(
        int(self.gauge.get(at)), self.gauge.max(at)), True, BASE1)
    surf.blit(text, (10, font.get_height() / 2))
    # write velocity, right-aligned: green while rising, red while falling
    speed = self.gauge.velocity(at)
    if speed != 0:
        text = font.render('{0:+.2f}/s'.format(speed), True,
                           GREEN if speed > 0 else RED)
        surf.blit(text, (surf.get_width() - text.get_width() - 10,
                         font.get_height() / 2))
    '''
    try:
        move_in = self.gauge.momenta[0].move_in(self.gauge, at)
    except (AttributeError, IndexError):
        pass
    else:
        if move_in:
            move_in = math.ceil(move_in)
            text = font.render('{0:02.0f}:{1:02.0f}'.format(
                move_in / 60, move_in % 60), True, text_colors[1])
            surf.blit(text, (surf.get_width() - text.get_width() - 10,
                             font.get_height() / 2))
    '''
    return surf
variable[surf] assign[=] name[self].surf
call[name[surf].fill, parameter[name[BASE3]]]
variable[bg] assign[=] call[name[pygame].Surface, parameter[tuple[[<ast.Subscript object at 0x7da1b25d7310>, <ast.Attribute object at 0x7da1b25d5ed0>]]]]
call[name[bg].fill, parameter[name[BASE2]]]
call[name[surf].blit, parameter[name[bg], tuple[[<ast.Constant object at 0x7da1b25d4c40>, <ast.Constant object at 0x7da1b25d50c0>]]]]
variable[ratio] assign[=] binary_operation[call[name[self].gauge.get, parameter[name[at]]] / call[name[float], parameter[call[name[self].gauge.max, parameter[name[at]]]]]]
if compare[name[ratio] greater[>] constant[1]] begin[:]
variable[bar_color] assign[=] name[BLUE]
variable[ratio] assign[=] constant[1]
if compare[name[ratio] greater[>] constant[0]] begin[:]
variable[bar] assign[=] call[name[pygame].Surface, parameter[tuple[[<ast.Call object at 0x7da18f00e200>, <ast.Attribute object at 0x7da18f00e650>]]]]
call[name[bar].fill, parameter[name[bar_color]]]
call[name[surf].blit, parameter[name[bar], tuple[[<ast.Constant object at 0x7da18f00c250>, <ast.Constant object at 0x7da18f00d750>]]]]
variable[text] assign[=] call[name[font].render, parameter[call[constant[{0}/{1}].format, parameter[call[name[int], parameter[call[name[self].gauge.get, parameter[name[at]]]]], call[name[self].gauge.max, parameter[name[at]]]]], constant[True], name[BASE1]]]
call[name[surf].blit, parameter[name[text], tuple[[<ast.Constant object at 0x7da1b2651ea0>, <ast.BinOp object at 0x7da1b2651900>]]]]
variable[speed] assign[=] call[name[self].gauge.velocity, parameter[name[at]]]
if compare[name[speed] not_equal[!=] constant[0]] begin[:]
variable[text] assign[=] call[name[font].render, parameter[call[constant[{0:+.2f}/s].format, parameter[name[speed]]], constant[True], <ast.IfExp object at 0x7da1b25d6200>]]
call[name[surf].blit, parameter[name[text], tuple[[<ast.BinOp object at 0x7da1b25d6620>, <ast.BinOp object at 0x7da1b25d5060>]]]]
constant[
try:
move_in = self.gauge.momenta[0].move_in(self.gauge, at)
except (AttributeError, IndexError):
pass
else:
if move_in:
move_in = math.ceil(move_in)
text = font.render('{0:02.0f}:{1:02.0f}'.format(
move_in / 60, move_in % 60), True, text_colors[1])
surf.blit(text, (surf.get_width() - text.get_width() - 10,
font.get_height() / 2))
]
return[name[surf]] | keyword[def] identifier[render] ( identifier[self] , identifier[at] ):
identifier[surf] = identifier[self] . identifier[surf]
identifier[surf] . identifier[fill] ( identifier[BASE3] )
identifier[bg] = identifier[pygame] . identifier[Surface] (( identifier[self] . identifier[size] [ literal[int] ], identifier[self] . identifier[bar_height] ))
identifier[bg] . identifier[fill] ( identifier[BASE2] )
identifier[surf] . identifier[blit] ( identifier[bg] ,( literal[int] , literal[int] ))
identifier[ratio] = identifier[self] . identifier[gauge] . identifier[get] ( identifier[at] )/ identifier[float] ( identifier[self] . identifier[gauge] . identifier[max] ( identifier[at] ))
keyword[if] identifier[ratio] > literal[int] :
identifier[bar_color] = identifier[BLUE]
identifier[ratio] = literal[int]
keyword[elif] identifier[ratio] == literal[int] :
identifier[bar_color] = identifier[CYAN]
keyword[elif] identifier[ratio] > literal[int] :
identifier[bar_color] = identifier[GREEN]
keyword[elif] identifier[ratio] > literal[int] :
identifier[bar_color] = identifier[YELLOW]
keyword[elif] identifier[ratio] > literal[int] :
identifier[bar_color] = identifier[ORANGE]
keyword[if] identifier[ratio] > literal[int] :
identifier[bar] = identifier[pygame] . identifier[Surface] (( identifier[int] ( identifier[self] . identifier[size] [ literal[int] ]* identifier[ratio] ), identifier[self] . identifier[bar_height] ))
identifier[bar] . identifier[fill] ( identifier[bar_color] )
identifier[surf] . identifier[blit] ( identifier[bar] ,( literal[int] , literal[int] ))
identifier[text] = identifier[font] . identifier[render] ( literal[string] . identifier[format] (
identifier[int] ( identifier[self] . identifier[gauge] . identifier[get] ( identifier[at] )), identifier[self] . identifier[gauge] . identifier[max] ( identifier[at] )), keyword[True] , identifier[BASE1] )
identifier[surf] . identifier[blit] ( identifier[text] ,( literal[int] , identifier[font] . identifier[get_height] ()/ literal[int] ))
identifier[speed] = identifier[self] . identifier[gauge] . identifier[velocity] ( identifier[at] )
keyword[if] identifier[speed] != literal[int] :
identifier[text] = identifier[font] . identifier[render] ( literal[string] . identifier[format] ( identifier[speed] ), keyword[True] ,
identifier[GREEN] keyword[if] identifier[speed] > literal[int] keyword[else] identifier[RED] )
identifier[surf] . identifier[blit] ( identifier[text] ,( identifier[surf] . identifier[get_width] ()- identifier[text] . identifier[get_width] ()- literal[int] ,
identifier[font] . identifier[get_height] ()/ literal[int] ))
literal[string]
keyword[return] identifier[surf] | def render(self, at):
# draw bg
surf = self.surf
surf.fill(BASE3)
bg = pygame.Surface((self.size[0], self.bar_height))
bg.fill(BASE2)
surf.blit(bg, (0, 0))
# draw bar
ratio = self.gauge.get(at) / float(self.gauge.max(at))
if ratio > 1:
bar_color = BLUE
ratio = 1 # depends on [control=['if'], data=['ratio']]
elif ratio == 1:
bar_color = CYAN # depends on [control=['if'], data=[]]
elif ratio > 0.3:
bar_color = GREEN # depends on [control=['if'], data=[]]
elif ratio > 0.1:
bar_color = YELLOW # depends on [control=['if'], data=[]]
elif ratio > 0:
bar_color = ORANGE # depends on [control=['if'], data=[]]
if ratio > 0:
bar = pygame.Surface((int(self.size[0] * ratio), self.bar_height))
bar.fill(bar_color)
surf.blit(bar, (0, 0)) # depends on [control=['if'], data=['ratio']]
# write current state
text = font.render('{0}/{1}'.format(int(self.gauge.get(at)), self.gauge.max(at)), True, BASE1)
surf.blit(text, (10, font.get_height() / 2))
# write time recover in
speed = self.gauge.velocity(at)
if speed != 0:
text = font.render('{0:+.2f}/s'.format(speed), True, GREEN if speed > 0 else RED)
surf.blit(text, (surf.get_width() - text.get_width() - 10, font.get_height() / 2)) # depends on [control=['if'], data=['speed']]
"\n try:\n move_in = self.gauge.momenta[0].move_in(self.gauge, at)\n except (AttributeError, IndexError):\n pass\n else:\n if move_in:\n move_in = math.ceil(move_in)\n text = font.render('{0:02.0f}:{1:02.0f}'.format(\n move_in / 60, move_in % 60), True, text_colors[1])\n surf.blit(text, (surf.get_width() - text.get_width() - 10,\n font.get_height() / 2))\n "
return surf |
def get_output_cache_key(self, placeholder_name, instance):
    """
    .. versionadded:: 0.9
    Return the default cache key which is used to store a rendered item.
    By default, this function generates the cache key using :func:`get_output_cache_base_key`.
    """
    cachekey = self.get_output_cache_base_key(placeholder_name, instance)
    # Make the key site-specific when the output differs per site.
    if self.cache_output_per_site:
        cachekey = '{0}-s{1}'.format(cachekey, settings.SITE_ID)
    if not self.cache_output_per_language:
        return cachekey
    # NOTE: Not using self.language_code, but the currently active language.
    # That is what the {% trans %} tags are rendered as after all.
    # The render_placeholder() code can switch the language if needed.
    active_language = get_language()
    if active_language not in self.cache_supported_language_codes:
        active_language = 'unsupported'
    return '{0}.{1}'.format(cachekey, active_language)
constant[
.. versionadded:: 0.9
Return the default cache key which is used to store a rendered item.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
]
variable[cachekey] assign[=] call[name[self].get_output_cache_base_key, parameter[name[placeholder_name], name[instance]]]
if name[self].cache_output_per_site begin[:]
variable[cachekey] assign[=] call[constant[{0}-s{1}].format, parameter[name[cachekey], name[settings].SITE_ID]]
if name[self].cache_output_per_language begin[:]
variable[user_language] assign[=] call[name[get_language], parameter[]]
if compare[name[user_language] <ast.NotIn object at 0x7da2590d7190> name[self].cache_supported_language_codes] begin[:]
variable[user_language] assign[=] constant[unsupported]
variable[cachekey] assign[=] call[constant[{0}.{1}].format, parameter[name[cachekey], name[user_language]]]
return[name[cachekey]] | keyword[def] identifier[get_output_cache_key] ( identifier[self] , identifier[placeholder_name] , identifier[instance] ):
literal[string]
identifier[cachekey] = identifier[self] . identifier[get_output_cache_base_key] ( identifier[placeholder_name] , identifier[instance] )
keyword[if] identifier[self] . identifier[cache_output_per_site] :
identifier[cachekey] = literal[string] . identifier[format] ( identifier[cachekey] , identifier[settings] . identifier[SITE_ID] )
keyword[if] identifier[self] . identifier[cache_output_per_language] :
identifier[user_language] = identifier[get_language] ()
keyword[if] identifier[user_language] keyword[not] keyword[in] identifier[self] . identifier[cache_supported_language_codes] :
identifier[user_language] = literal[string]
identifier[cachekey] = literal[string] . identifier[format] ( identifier[cachekey] , identifier[user_language] )
keyword[return] identifier[cachekey] | def get_output_cache_key(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the default cache key which is used to store a rendered item.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
"""
cachekey = self.get_output_cache_base_key(placeholder_name, instance)
if self.cache_output_per_site:
cachekey = '{0}-s{1}'.format(cachekey, settings.SITE_ID) # depends on [control=['if'], data=[]]
# Append language code
if self.cache_output_per_language:
# NOTE: Not using self.language_code, but using the current language instead.
# That is what the {% trans %} tags are rendered as after all.
# The render_placeholder() code can switch the language if needed.
user_language = get_language()
if user_language not in self.cache_supported_language_codes:
user_language = 'unsupported' # depends on [control=['if'], data=['user_language']]
cachekey = '{0}.{1}'.format(cachekey, user_language) # depends on [control=['if'], data=[]]
return cachekey |
def preparse_iter(self):
    """
    Pre-parse the Dockerfile into (comments, directive, payload) chunks.

    Comments can be anywhere. So break apart the Dockerfile into significant
    lines and any comments that precede them. And if a line is a carryover
    from the previous via an escaped-newline, bring the directive with it.

    Yields dicts with keys:
      * ``directive`` -- the Dockerfile instruction (e.g. ``FROM``, ``RUN``)
      * ``payload``   -- the stripped instruction arguments
      * ``comments``  -- (optional) comment lines preceding the instruction,
        with the leading ``#`` removed
    """
    to_yield = {}
    last_directive = None
    lines_processed = 0
    for line in self.lines_iter():
        # BUGFIX: this counter was never incremented, so the first-line
        # check below could never fire and "# escape=X" was ignored.
        # Counting blank lines too: a parser directive is only valid on
        # the very first line of the file.
        lines_processed += 1
        if not line:
            continue
        if line.startswith(u'#'):
            comment = line.lstrip('#').strip()
            # Parser directives have to precede any instructions.
            if lines_processed == 1 and comment.startswith(u'escape='):
                self.escape_char = comment.split(u'=', 1)[1]
                continue
            to_yield.setdefault('comments', []).append(comment)
        else:
            # last_directive being set means the previous line ended with a
            # newline escape, so this line continues the same instruction.
            if last_directive:
                directive, payload = last_directive, line
            else:
                directive, payload = line.split(u' ', 1)
            if line.endswith(self.escape_char):
                # Drop the trailing escape and remember the directive for
                # the continuation line(s).
                payload = payload.rstrip(self.escape_char)
                last_directive = directive
            else:
                last_directive = None
            to_yield['directive'] = directive
            to_yield['payload'] = payload.strip()
            yield to_yield
            to_yield = {}
constant[
Comments can be anywhere. So break apart the Dockerfile into significant
lines and any comments that precede them. And if a line is a carryover
from the previous via an escaped-newline, bring the directive with it.
]
variable[to_yield] assign[=] dictionary[[], []]
variable[last_directive] assign[=] constant[None]
variable[lines_processed] assign[=] constant[0]
for taget[name[line]] in starred[call[name[self].lines_iter, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc71d80> begin[:]
continue
if call[name[line].startswith, parameter[constant[#]]] begin[:]
variable[comment] assign[=] call[call[name[line].lstrip, parameter[constant[#]]].strip, parameter[]]
if compare[name[lines_processed] equal[==] constant[1]] begin[:]
if call[name[comment].startswith, parameter[constant[escape=]]] begin[:]
name[self].escape_char assign[=] call[call[name[comment].split, parameter[constant[=], constant[1]]]][constant[1]]
continue
call[call[name[to_yield].setdefault, parameter[constant[comments], list[[]]]].append, parameter[name[comment]]] | keyword[def] identifier[preparse_iter] ( identifier[self] ):
literal[string]
identifier[to_yield] ={}
identifier[last_directive] = keyword[None]
identifier[lines_processed] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[lines_iter] ():
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[comment] = identifier[line] . identifier[lstrip] ( literal[string] ). identifier[strip] ()
keyword[if] identifier[lines_processed] == literal[int] :
keyword[if] identifier[comment] . identifier[startswith] ( literal[string] ):
identifier[self] . identifier[escape_char] = identifier[comment] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
keyword[continue]
identifier[to_yield] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( identifier[comment] )
keyword[else] :
keyword[if] identifier[last_directive] :
identifier[directive] , identifier[payload] = identifier[last_directive] , identifier[line]
keyword[else] :
identifier[directive] , identifier[payload] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[line] . identifier[endswith] ( identifier[self] . identifier[escape_char] ):
identifier[payload] = identifier[payload] . identifier[rstrip] ( identifier[self] . identifier[escape_char] )
identifier[last_directive] = identifier[directive]
keyword[else] :
identifier[last_directive] = keyword[None]
identifier[to_yield] [ literal[string] ]= identifier[directive]
identifier[to_yield] [ literal[string] ]= identifier[payload] . identifier[strip] ()
keyword[yield] identifier[to_yield]
identifier[to_yield] ={} | def preparse_iter(self):
"""
Comments can be anywhere. So break apart the Dockerfile into significant
lines and any comments that precede them. And if a line is a carryover
from the previous via an escaped-newline, bring the directive with it.
"""
to_yield = {}
last_directive = None
lines_processed = 0
for line in self.lines_iter():
if not line:
continue # depends on [control=['if'], data=[]]
if line.startswith(u'#'):
comment = line.lstrip('#').strip()
# Directives have to precede any instructions
if lines_processed == 1:
if comment.startswith(u'escape='):
self.escape_char = comment.split(u'=', 1)[1]
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
to_yield.setdefault('comments', []).append(comment) # depends on [control=['if'], data=[]]
else:
# last_directive being set means the previous line ended with a
# newline escape
if last_directive:
(directive, payload) = (last_directive, line) # depends on [control=['if'], data=[]]
else:
(directive, payload) = line.split(u' ', 1)
if line.endswith(self.escape_char):
payload = payload.rstrip(self.escape_char)
last_directive = directive # depends on [control=['if'], data=[]]
else:
last_directive = None
to_yield['directive'] = directive
to_yield['payload'] = payload.strip()
yield to_yield
to_yield = {} # depends on [control=['for'], data=['line']] |
def matrixToMathTransform(matrix):
    """Convert a 6-tuple affine matrix into a ShallowTransform.

    A value that is already a ShallowTransform is passed through unchanged.
    """
    if isinstance(matrix, ShallowTransform):
        return matrix
    decomposed = MathTransform(matrix).decompose()  # (offset, scale, rotation)
    return ShallowTransform(*decomposed)
constant[ Take a 6-tuple and return a ShallowTransform object.]
if call[name[isinstance], parameter[name[matrix], name[ShallowTransform]]] begin[:]
return[name[matrix]]
<ast.Tuple object at 0x7da18bcc8040> assign[=] call[call[name[MathTransform], parameter[name[matrix]]].decompose, parameter[]]
return[call[name[ShallowTransform], parameter[name[off], name[scl], name[rot]]]] | keyword[def] identifier[matrixToMathTransform] ( identifier[matrix] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[matrix] , identifier[ShallowTransform] ):
keyword[return] identifier[matrix]
identifier[off] , identifier[scl] , identifier[rot] = identifier[MathTransform] ( identifier[matrix] ). identifier[decompose] ()
keyword[return] identifier[ShallowTransform] ( identifier[off] , identifier[scl] , identifier[rot] ) | def matrixToMathTransform(matrix):
""" Take a 6-tuple and return a ShallowTransform object."""
if isinstance(matrix, ShallowTransform):
return matrix # depends on [control=['if'], data=[]]
(off, scl, rot) = MathTransform(matrix).decompose()
return ShallowTransform(off, scl, rot) |
def prune_tour_worker(arg):
    """ Worker thread for CLMFile.prune_tour()

    Scores the candidate sub-tour and returns the tour id together with
    log10 of the score drop (floored at -9 for negligible drops).
    """
    from .chic import score_evaluate_M
    tour_id, subtour, full_score, sizes, matrix = arg
    subtour_score, = score_evaluate_M(subtour, sizes, matrix)
    drop = full_score - subtour_score
    if drop > 1e-9:
        return tour_id, np.log10(drop)
    return tour_id, -9
constant[ Worker thread for CLMFile.prune_tour()
]
from relative_module[chic] import module[score_evaluate_M]
<ast.Tuple object at 0x7da2041da410> assign[=] name[arg]
<ast.Tuple object at 0x7da2041d80a0> assign[=] call[name[score_evaluate_M], parameter[name[stour], name[active_sizes], name[M]]]
variable[delta_score] assign[=] binary_operation[name[tour_score] - name[stour_score]]
variable[log10d] assign[=] <ast.IfExp object at 0x7da2041d9cc0>
return[tuple[[<ast.Name object at 0x7da2041da620>, <ast.Name object at 0x7da2041dbf40>]]] | keyword[def] identifier[prune_tour_worker] ( identifier[arg] ):
literal[string]
keyword[from] . identifier[chic] keyword[import] identifier[score_evaluate_M]
identifier[t] , identifier[stour] , identifier[tour_score] , identifier[active_sizes] , identifier[M] = identifier[arg]
identifier[stour_score] ,= identifier[score_evaluate_M] ( identifier[stour] , identifier[active_sizes] , identifier[M] )
identifier[delta_score] = identifier[tour_score] - identifier[stour_score]
identifier[log10d] = identifier[np] . identifier[log10] ( identifier[delta_score] ) keyword[if] identifier[delta_score] > literal[int] keyword[else] - literal[int]
keyword[return] identifier[t] , identifier[log10d] | def prune_tour_worker(arg):
""" Worker thread for CLMFile.prune_tour()
"""
from .chic import score_evaluate_M
(t, stour, tour_score, active_sizes, M) = arg
(stour_score,) = score_evaluate_M(stour, active_sizes, M)
delta_score = tour_score - stour_score
log10d = np.log10(delta_score) if delta_score > 1e-09 else -9
return (t, log10d) |
def listFileArray(self, **kwargs):
    """
    API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or non-wildcarded lfn list is required.
    The combination of a non-wildcarded dataset or block_name with an wildcarded logical_file_name is supported.
    * For lumi_list the following two json formats are supported:
        - [a1, a2, a3,]
        - [[a,b], [c, d],]
    * lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
    * If lumi_list is provided run only run_num=single-run-number is allowed.
    * When run_num=1, one has to provide logical_file_name.
    * When lfn list is present, no run or lumi list is allowed.
    :param logical_file_name: logical_file_name of the file, Max length 1000.
    :type logical_file_name: str, list
    :param dataset: dataset
    :type dataset: str
    :param block_name: block name
    :type block_name: str
    :param release_version: release version
    :type release_version: str
    :param pset_hash: parameter set hash
    :type pset_hash: str
    :param app_name: Name of the application
    :type app_name: str
    :param output_module_label: name of the used output module
    :type output_module_label: str
    :param run_num: run , run ranges, and run list, Max list length 1000.
    :type run_num: int, list, string
    :param origin_site_name: site where the file was created
    :type origin_site_name: str
    :param lumi_list: List containing luminosity sections, Max length 1000.
    :type lumi_list: list
    :param detail: Get detailed information about a file
    :type detail: bool
    :param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1.
    :type validFileOnly: int
    :param sumOverLumi: 0 or 1. default=0. When sumOverLumi = 1 and run_num is given , it will count the event by lumi; No list inputs are allowed whtn sumOverLumi=1.
    :type sumOverLumi: int
    :returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
    :rtype: list of dicts
    """
    validParameters = ['dataset', 'block_name', 'logical_file_name',
                       'release_version', 'pset_hash', 'app_name',
                       'output_module_label', 'run_num',
                       'origin_site_name', 'lumi_list', 'detail',
                       'validFileOnly', 'sumOverLumi']
    requiredParameters = {'multiple': ['dataset', 'block_name', 'logical_file_name']}
    # set defaults
    if 'detail' not in kwargs:
        kwargs['detail'] = False
    checkInputParameter(method="listFileArray", parameters=kwargs.keys(), validParameters=validParameters,
                        requiredParameters=requiredParameters)
    # In order to protect the DB and make sure the query can return within 300
    # seconds, the length of logical file name, lumi and run num lists is
    # limited to 1000. These numbers may be adjusted later if needed.
    # YG May-20-2015.
    # CMS has all MC data with run_num=1. It almost is a full table scan if
    # run_num=1 without lfn, so an lfn is required when run_num=1.
    # YG Jan 14, 2016
    if 'logical_file_name' in kwargs and isinstance(kwargs['logical_file_name'], list)\
            and len(kwargs['logical_file_name']) > 1:
        if 'run_num' in kwargs and isinstance(kwargs['run_num'], list) and len(kwargs['run_num']) > 1:
            raise dbsClientException('Invalid input', 'files API does not supprt two lists: run_num and lfn. ')
        elif 'lumi_list' in kwargs and kwargs['lumi_list'] and len(kwargs['lumi_list']) > 1:
            raise dbsClientException('Invalid input', 'files API does not supprt two lists: lumi_lis and lfn. ')
    elif 'lumi_list' in kwargs and kwargs['lumi_list']:
        # Lumi filtering is only meaningful for one specific run.
        if 'run_num' not in kwargs or not kwargs['run_num'] or kwargs['run_num'] == -1:
            raise dbsClientException('Invalid input', 'When Lumi section is present, a single run is required. ')
    else:
        if 'run_num' in kwargs:
            if isinstance(kwargs['run_num'], list):
                if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
                    raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 when no lumi.')
            else:
                if kwargs['run_num'] == 1 or kwargs['run_num'] == '1':
                    raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 when no lumi.')
    # check if no lfn is given, but run_num=1 is used for searching
    if ('logical_file_name' not in kwargs or not kwargs['logical_file_name']) and 'run_num' in kwargs:
        if isinstance(kwargs['run_num'], list):
            if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
                raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 without logical_file_name.')
        else:
            if kwargs['run_num'] == 1 or kwargs['run_num'] == '1':
                raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 without logical_file_name.')
    results = []
    mykey = None
    split_lumi_list = []
    max_list_len = 1000  # this number is defined in the DBS server
    # NOTE: iteritems() was Python-2-only; items() behaves identically here
    # (kwargs is only re-assigned after this loop completes).
    for key, value in list(kwargs.items()):
        if key == 'lumi_list' and isinstance(kwargs['lumi_list'], list)\
                and kwargs['lumi_list'] and isinstance(kwargs['lumi_list'][0], list):
            # lumi_list given as [begin, end] ranges: split into chunks of
            # at most max_list_len lumis each.
            chunk_len = 0  # number of lumis in the chunk being built
            sm = []        # current chunk of [begin, end] ranges
            for i in kwargs['lumi_list']:
                # A single range longer than max_list_len becomes chunks of
                # its own.  BUGFIX: the comparison was '<', which let a
                # residual range of max_list_len+1 lumis through.
                while i[0] + max_list_len <= i[1]:
                    split_lumi_list.append([[i[0], i[0] + max_list_len - 1]])
                    i[0] = i[0] + max_list_len
                chunk_len += (i[1] - i[0] + 1)
                if chunk_len <= max_list_len:
                    sm.append([i[0], i[1]])
                else:
                    # Current chunk is full: flush it and start a new one.
                    split_lumi_list.append(sm)
                    sm = [[i[0], i[1]]]
                    # BUGFIX: the counter was never reset here, so every
                    # chunk after the first overflow held a single range.
                    chunk_len = i[1] - i[0] + 1
            if sm:
                split_lumi_list.append(sm)
        elif key in ('logical_file_name', 'run_num', 'lumi_list') and isinstance(value, list) and len(value) > max_list_len:
            mykey = key
    if mykey:
        # Slice the oversized list and query the server chunk by chunk.
        sourcelist = kwargs[mykey][:]
        for chunk in slicedIterator(sourcelist, max_list_len):
            kwargs[mykey] = chunk
            results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST"))
    elif split_lumi_list:
        for item in split_lumi_list:
            kwargs['lumi_list'] = item
            results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST"))
    else:
        return self.__callServer("fileArray", data=kwargs, callmethod="POST")
    # Deduplicate: keep exactly one dictionary per logical_file_name.
    # list() so Python 2 and 3 both return a concrete list. YG May-26-2015
    return list(dict((v['logical_file_name'], v) for v in results).values())
constant[
API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or non-wildcarded lfn list is required.
The combination of a non-wildcarded dataset or block_name with an wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided run only run_num=single-run-number is allowed.
* When run_num=1, one has to provide logical_file_name.
* When lfn list is present, no run or lumi list is allowed.
:param logical_file_name: logical_file_name of the file, Max length 1000.
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run , run ranges, and run list, Max list length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections, Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1.
:type validFileOnly: int
:param sumOverLumi: 0 or 1. default=0. When sumOverLumi = 1 and run_num is given , it will count the event by lumi; No list inputs are allowed whtn sumOverLumi=1.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
]
variable[validParameters] assign[=] list[[<ast.Constant object at 0x7da18eb57df0>, <ast.Constant object at 0x7da18eb57370>, <ast.Constant object at 0x7da18eb56920>, <ast.Constant object at 0x7da18eb54790>, <ast.Constant object at 0x7da18eb57970>, <ast.Constant object at 0x7da18eb54070>, <ast.Constant object at 0x7da18eb561d0>, <ast.Constant object at 0x7da18eb54220>, <ast.Constant object at 0x7da18eb559f0>, <ast.Constant object at 0x7da18eb545b0>, <ast.Constant object at 0x7da18eb54190>, <ast.Constant object at 0x7da18eb54e20>, <ast.Constant object at 0x7da18eb54490>]]
variable[requiredParameters] assign[=] dictionary[[<ast.Constant object at 0x7da18eb56350>], [<ast.List object at 0x7da18eb542b0>]]
if compare[constant[detail] <ast.NotIn object at 0x7da2590d7190> call[name[kwargs].keys, parameter[]]] begin[:]
call[name[kwargs]][constant[detail]] assign[=] constant[False]
call[name[checkInputParameter], parameter[]]
if <ast.BoolOp object at 0x7da18eb56680> begin[:]
if <ast.BoolOp object at 0x7da18eb56c80> begin[:]
<ast.Raise object at 0x7da18eb55c30>
if <ast.BoolOp object at 0x7da18eb547c0> begin[:]
if call[name[isinstance], parameter[call[name[kwargs]][constant[run_num]], name[list]]] begin[:]
if <ast.BoolOp object at 0x7da18eb55270> begin[:]
<ast.Raise object at 0x7da18eb55f90>
variable[results] assign[=] list[[]]
variable[mykey] assign[=] constant[None]
variable[total_lumi_len] assign[=] constant[0]
variable[split_lumi_list] assign[=] list[[]]
variable[max_list_len] assign[=] constant[1000]
for taget[tuple[[<ast.Name object at 0x7da18eb54640>, <ast.Name object at 0x7da18eb54100>]]] in starred[call[name[kwargs].iteritems, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18eb54910> begin[:]
variable[lapp] assign[=] constant[0]
variable[l] assign[=] constant[0]
variable[sm] assign[=] list[[]]
for taget[name[i]] in starred[call[name[kwargs]][constant[lumi_list]]] begin[:]
while compare[binary_operation[call[name[i]][constant[0]] + name[max_list_len]] less[<] call[name[i]][constant[1]]] begin[:]
call[name[split_lumi_list].append, parameter[list[[<ast.List object at 0x7da18eb57f10>]]]]
call[name[i]][constant[0]] assign[=] binary_operation[call[name[i]][constant[0]] + name[max_list_len]]
if name[sm] begin[:]
call[name[split_lumi_list].append, parameter[name[sm]]]
if name[mykey] begin[:]
variable[sourcelist] assign[=] list[[]]
variable[sourcelist] assign[=] call[call[name[kwargs]][name[mykey]]][<ast.Slice object at 0x7da207f992d0>]
for taget[name[slice]] in starred[call[name[slicedIterator], parameter[name[sourcelist], name[max_list_len]]]] begin[:]
call[name[kwargs]][name[mykey]] assign[=] name[slice]
call[name[results].extend, parameter[call[name[self].__callServer, parameter[constant[fileArray]]]]]
return[call[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20e955db0>]].values, parameter[]]] | keyword[def] identifier[listFileArray] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[validParameters] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[requiredParameters] ={ literal[string] :[ literal[string] , literal[string] , literal[string] ]}
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] . identifier[keys] ():
identifier[kwargs] [ literal[string] ]= keyword[False]
identifier[checkInputParameter] ( identifier[method] = literal[string] , identifier[parameters] = identifier[kwargs] . identifier[keys] (), identifier[validParameters] = identifier[validParameters] ,
identifier[requiredParameters] = identifier[requiredParameters] )
keyword[if] literal[string] keyword[in] identifier[kwargs] . identifier[keys] () keyword[and] identifier[isinstance] ( identifier[kwargs] [ literal[string] ], identifier[list] ) keyword[and] identifier[len] ( identifier[kwargs] [ literal[string] ])> literal[int] :
keyword[if] literal[string] keyword[in] identifier[kwargs] . identifier[keys] () keyword[and] identifier[isinstance] ( identifier[kwargs] [ literal[string] ], identifier[list] ) keyword[and] identifier[len] ( identifier[kwargs] [ literal[string] ])> literal[int] :
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[elif] literal[string] keyword[in] identifier[kwargs] . identifier[keys] () keyword[and] identifier[kwargs] [ literal[string] ] keyword[and] identifier[len] ( identifier[kwargs] [ literal[string] ])> literal[int] :
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[elif] literal[string] keyword[in] identifier[kwargs] . identifier[keys] () keyword[and] identifier[kwargs] [ literal[string] ]:
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] . identifier[keys] () keyword[or] keyword[not] identifier[kwargs] [ literal[string] ] keyword[or] identifier[kwargs] [ literal[string] ]==- literal[int] :
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[kwargs] . identifier[keys] ():
keyword[if] identifier[isinstance] ( identifier[kwargs] [ literal[string] ], identifier[list] ):
keyword[if] literal[int] keyword[in] identifier[kwargs] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[kwargs] [ literal[string] ]:
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[else] :
keyword[if] identifier[kwargs] [ literal[string] ]== literal[int] keyword[or] identifier[kwargs] [ literal[string] ]== literal[string] :
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[kwargs] . identifier[keys] () keyword[or] keyword[not] identifier[kwargs] [ literal[string] ]) keyword[and] literal[string] keyword[in] identifier[kwargs] . identifier[keys] ():
keyword[if] identifier[isinstance] ( identifier[kwargs] [ literal[string] ], identifier[list] ):
keyword[if] literal[int] keyword[in] identifier[kwargs] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[kwargs] [ literal[string] ]:
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
keyword[else] :
keyword[if] identifier[kwargs] [ literal[string] ]== literal[int] keyword[or] identifier[kwargs] [ literal[string] ]== literal[string] :
keyword[raise] identifier[dbsClientException] ( literal[string] , literal[string] )
identifier[results] =[]
identifier[mykey] = keyword[None]
identifier[total_lumi_len] = literal[int]
identifier[split_lumi_list] =[]
identifier[max_list_len] = literal[int]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[iteritems] ():
keyword[if] identifier[key] == literal[string] keyword[and] identifier[isinstance] ( identifier[kwargs] [ literal[string] ], identifier[list] ) keyword[and] identifier[kwargs] [ literal[string] ] keyword[and] identifier[isinstance] ( identifier[kwargs] [ literal[string] ][ literal[int] ], identifier[list] ):
identifier[lapp] = literal[int]
identifier[l] = literal[int]
identifier[sm] =[]
keyword[for] identifier[i] keyword[in] identifier[kwargs] [ literal[string] ]:
keyword[while] identifier[i] [ literal[int] ]+ identifier[max_list_len] < identifier[i] [ literal[int] ]:
identifier[split_lumi_list] . identifier[append] ([[ identifier[i] [ literal[int] ], identifier[i] [ literal[int] ]+ identifier[max_list_len] - literal[int] ]])
identifier[i] [ literal[int] ]= identifier[i] [ literal[int] ]+ identifier[max_list_len]
keyword[else] :
identifier[l] +=( identifier[i] [ literal[int] ]- identifier[i] [ literal[int] ]+ literal[int] )
keyword[if] identifier[l] <= identifier[max_list_len] :
identifier[sm] . identifier[append] ([ identifier[i] [ literal[int] ], identifier[i] [ literal[int] ]])
identifier[lapp] = identifier[l]
keyword[else] :
identifier[split_lumi_list] . identifier[append] ( identifier[sm] )
identifier[sm] =[]
identifier[sm] . identifier[append] ([ identifier[i] [ literal[int] ], identifier[i] [ literal[int] ]])
identifier[lapp] = identifier[i] [ literal[int] ]- identifier[i] [ literal[int] ]+ literal[int]
keyword[if] identifier[sm] :
identifier[split_lumi_list] . identifier[append] ( identifier[sm] )
keyword[elif] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[and] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[and] identifier[len] ( identifier[value] )> identifier[max_list_len] :
identifier[mykey] = identifier[key]
keyword[if] identifier[mykey] :
identifier[sourcelist] =[]
identifier[sourcelist] = identifier[kwargs] [ identifier[mykey] ][:]
keyword[for] identifier[slice] keyword[in] identifier[slicedIterator] ( identifier[sourcelist] , identifier[max_list_len] ):
identifier[kwargs] [ identifier[mykey] ]= identifier[slice]
identifier[results] . identifier[extend] ( identifier[self] . identifier[__callServer] ( literal[string] , identifier[data] = identifier[kwargs] , identifier[callmethod] = literal[string] ))
keyword[elif] identifier[split_lumi_list] :
keyword[for] identifier[item] keyword[in] identifier[split_lumi_list] :
identifier[kwargs] [ literal[string] ]= identifier[item]
identifier[results] . identifier[extend] ( identifier[self] . identifier[__callServer] ( literal[string] , identifier[data] = identifier[kwargs] , identifier[callmethod] = literal[string] ))
keyword[else] :
keyword[return] identifier[self] . identifier[__callServer] ( literal[string] , identifier[data] = identifier[kwargs] , identifier[callmethod] = literal[string] )
keyword[return] identifier[dict] (( identifier[v] [ literal[string] ], identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[results] ). identifier[values] () | def listFileArray(self, **kwargs):
"""
API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or non-wildcarded lfn list is required.
The combination of a non-wildcarded dataset or block_name with an wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided run only run_num=single-run-number is allowed.
* When run_num=1, one has to provide logical_file_name.
* When lfn list is present, no run or lumi list is allowed.
:param logical_file_name: logical_file_name of the file, Max length 1000.
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run , run ranges, and run list, Max list length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections, Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1.
:type validFileOnly: int
:param sumOverLumi: 0 or 1. default=0. When sumOverLumi = 1 and run_num is given , it will count the event by lumi; No list inputs are allowed whtn sumOverLumi=1.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
validParameters = ['dataset', 'block_name', 'logical_file_name', 'release_version', 'pset_hash', 'app_name', 'output_module_label', 'run_num', 'origin_site_name', 'lumi_list', 'detail', 'validFileOnly', 'sumOverLumi']
requiredParameters = {'multiple': ['dataset', 'block_name', 'logical_file_name']}
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False # depends on [control=['if'], data=[]]
checkInputParameter(method='listFileArray', parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) # In order to protect DB and make sure the query can be return in 300 seconds, we limit the length of
# logical file names, lumi and run num to 1000. These number may be adjusted later if
# needed. YG May-20-2015.
# CMS has all MC data with run_num=1. It almost is a full table scan if run_num=1 without lfn. So we will request lfn
# to be present when run_num=1. YG Jan 14, 2016
if 'logical_file_name' in kwargs.keys() and isinstance(kwargs['logical_file_name'], list) and (len(kwargs['logical_file_name']) > 1):
if 'run_num' in kwargs.keys() and isinstance(kwargs['run_num'], list) and (len(kwargs['run_num']) > 1):
raise dbsClientException('Invalid input', 'files API does not supprt two lists: run_num and lfn. ') # depends on [control=['if'], data=[]]
elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list'] and (len(kwargs['lumi_list']) > 1):
raise dbsClientException('Invalid input', 'files API does not supprt two lists: lumi_lis and lfn. ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list']:
if 'run_num' not in kwargs.keys() or not kwargs['run_num'] or kwargs['run_num'] == -1:
raise dbsClientException('Invalid input', 'When Lumi section is present, a single run is required. ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'run_num' in kwargs.keys():
if isinstance(kwargs['run_num'], list):
if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 when no lumi.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif kwargs['run_num'] == 1 or kwargs['run_num'] == '1':
raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 when no lumi.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#check if no lfn is given, but run_num=1 is used for searching
if ('logical_file_name' not in kwargs.keys() or not kwargs['logical_file_name']) and 'run_num' in kwargs.keys():
if isinstance(kwargs['run_num'], list):
if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 without logical_file_name.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif kwargs['run_num'] == 1 or kwargs['run_num'] == '1':
raise dbsClientException('Invalid input', 'files API does not supprt run_num=1 without logical_file_name.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
results = []
mykey = None
total_lumi_len = 0
split_lumi_list = []
max_list_len = 1000 #this number is defined in DBS server
for (key, value) in kwargs.iteritems():
if key == 'lumi_list' and isinstance(kwargs['lumi_list'], list) and kwargs['lumi_list'] and isinstance(kwargs['lumi_list'][0], list):
lapp = 0
l = 0
sm = []
for i in kwargs['lumi_list']:
while i[0] + max_list_len < i[1]:
split_lumi_list.append([[i[0], i[0] + max_list_len - 1]])
i[0] = i[0] + max_list_len # depends on [control=['while'], data=[]]
else:
l += i[1] - i[0] + 1
if l <= max_list_len:
sm.append([i[0], i[1]])
lapp = l #number lumis in sm # depends on [control=['if'], data=['l']]
else:
split_lumi_list.append(sm)
sm = []
sm.append([i[0], i[1]])
lapp = i[1] - i[0] + 1 # depends on [control=['for'], data=['i']]
if sm:
split_lumi_list.append(sm) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key in ('logical_file_name', 'run_num', 'lumi_list') and isinstance(value, list) and (len(value) > max_list_len):
mykey = key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
#
if mykey:
sourcelist = []
#create a new list to slice
sourcelist = kwargs[mykey][:]
for slice in slicedIterator(sourcelist, max_list_len):
kwargs[mykey] = slice
results.extend(self.__callServer('fileArray', data=kwargs, callmethod='POST')) # depends on [control=['for'], data=['slice']] # depends on [control=['if'], data=[]]
elif split_lumi_list:
for item in split_lumi_list:
kwargs['lumi_list'] = item
results.extend(self.__callServer('fileArray', data=kwargs, callmethod='POST')) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
else:
return self.__callServer('fileArray', data=kwargs, callmethod='POST')
#make sure only one dictionary per lfn.
#Make sure this changes when we move to 2.7 or 3.0
#http://stackoverflow.com/questions/11092511/python-list-of-unique-dictionaries
# YG May-26-2015
return dict(((v['logical_file_name'], v) for v in results)).values() |
def motivate(channel, rest):
    """Give someone a karma point along with an encouraging message.

    If ``rest`` is empty, the channel itself is motivated. A trailing
    "for <reason>" clause is stripped so only the recipient is credited.
    """
    if rest:
        target = rest.strip()
        # Keep only the part before a "for <reason>" clause, if present.
        match = re.match(r'^(.+)\s*\bfor\b\s*(.+)$', target)
        if match:
            target = match.group(1).strip()
    else:
        target = channel
    karma.Karma.store.change(target, 1)
    return "you're doing good work, %s!" % target
constant[Motivate someone]
if name[rest] begin[:]
variable[r] assign[=] call[name[rest].strip, parameter[]]
variable[m] assign[=] call[name[re].match, parameter[constant[^(.+)\s*\bfor\b\s*(.+)$], name[r]]]
if name[m] begin[:]
variable[r] assign[=] call[call[call[name[m].groups, parameter[]]][constant[0]].strip, parameter[]]
call[name[karma].Karma.store.change, parameter[name[r], constant[1]]]
return[binary_operation[constant[you're doing good work, %s!] <ast.Mod object at 0x7da2590d6920> name[r]]] | keyword[def] identifier[motivate] ( identifier[channel] , identifier[rest] ):
literal[string]
keyword[if] identifier[rest] :
identifier[r] = identifier[rest] . identifier[strip] ()
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[r] )
keyword[if] identifier[m] :
identifier[r] = identifier[m] . identifier[groups] ()[ literal[int] ]. identifier[strip] ()
keyword[else] :
identifier[r] = identifier[channel]
identifier[karma] . identifier[Karma] . identifier[store] . identifier[change] ( identifier[r] , literal[int] )
keyword[return] literal[string] % identifier[r] | def motivate(channel, rest):
"""Motivate someone"""
if rest:
r = rest.strip()
m = re.match('^(.+)\\s*\\bfor\\b\\s*(.+)$', r)
if m:
r = m.groups()[0].strip() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
r = channel
karma.Karma.store.change(r, 1)
return "you're doing good work, %s!" % r |
def extension_counts(container=None, file_list=None, return_counts=True):
    '''Tally file extensions for the files in a container image.

    :param container: if provided, will use container as image; only
        consulted when ``file_list`` is None
    :param file_list: the complete list of files
    :param return_counts: when True (default), map each extension to a
        count; when False, map each extension to the list of matching files
    :returns: dict keyed by extension (files with none go under
        'no-extension')
    '''
    if file_list is None:
        file_list = get_container_contents(container, split_delim='\n')['all']
    extensions = dict()
    for item in file_list:
        _, ext = os.path.splitext(item)
        # Group files that have no extension under a sentinel key.
        key = ext or 'no-extension'
        if return_counts:
            extensions = update_dict_sum(extensions, key)
        else:
            extensions = update_dict(extensions, key, item)
    return extensions
return extensions | def function[extension_counts, parameter[container, file_list, return_counts]]:
constant[extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True
]
if compare[name[file_list] is constant[None]] begin[:]
variable[file_list] assign[=] call[call[name[get_container_contents], parameter[name[container]]]][constant[all]]
variable[extensions] assign[=] call[name[dict], parameter[]]
for taget[name[item]] in starred[name[file_list]] begin[:]
<ast.Tuple object at 0x7da20c7961d0> assign[=] call[name[os].path.splitext, parameter[name[item]]]
if compare[name[ext] equal[==] constant[]] begin[:]
if compare[name[return_counts] equal[==] constant[False]] begin[:]
variable[extensions] assign[=] call[name[update_dict], parameter[name[extensions], constant[no-extension], name[item]]]
return[name[extensions]] | keyword[def] identifier[extension_counts] ( identifier[container] = keyword[None] , identifier[file_list] = keyword[None] , identifier[return_counts] = keyword[True] ):
literal[string]
keyword[if] identifier[file_list] keyword[is] keyword[None] :
identifier[file_list] = identifier[get_container_contents] ( identifier[container] , identifier[split_delim] = literal[string] )[ literal[string] ]
identifier[extensions] = identifier[dict] ()
keyword[for] identifier[item] keyword[in] identifier[file_list] :
identifier[filename] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[item] )
keyword[if] identifier[ext] == literal[string] :
keyword[if] identifier[return_counts] == keyword[False] :
identifier[extensions] = identifier[update_dict] ( identifier[extensions] , literal[string] , identifier[item] )
keyword[else] :
identifier[extensions] = identifier[update_dict_sum] ( identifier[extensions] , literal[string] )
keyword[else] :
keyword[if] identifier[return_counts] == keyword[False] :
identifier[extensions] = identifier[update_dict] ( identifier[extensions] , identifier[ext] , identifier[item] )
keyword[else] :
identifier[extensions] = identifier[update_dict_sum] ( identifier[extensions] , identifier[ext] )
keyword[return] identifier[extensions] | def extension_counts(container=None, file_list=None, return_counts=True):
"""extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True
"""
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all'] # depends on [control=['if'], data=['file_list']]
extensions = dict()
for item in file_list:
(filename, ext) = os.path.splitext(item)
if ext == '':
if return_counts == False:
extensions = update_dict(extensions, 'no-extension', item) # depends on [control=['if'], data=[]]
else:
extensions = update_dict_sum(extensions, 'no-extension') # depends on [control=['if'], data=[]]
elif return_counts == False:
extensions = update_dict(extensions, ext, item) # depends on [control=['if'], data=[]]
else:
extensions = update_dict_sum(extensions, ext) # depends on [control=['for'], data=['item']]
return extensions |
def default_config_file(self):
        """Absolute path of the default config file shipped inside the PyEMMA package."""
        from os.path import join
        import pyemma
        return join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME)
constant[ default config file living in PyEMMA package ]
import module[os.path] as alias[p]
import module[pyemma]
return[call[name[p].join, parameter[call[name[pyemma].__path__][constant[0]], name[Config].DEFAULT_CONFIG_FILE_NAME]]] | keyword[def] identifier[default_config_file] ( identifier[self] ):
literal[string]
keyword[import] identifier[os] . identifier[path] keyword[as] identifier[p]
keyword[import] identifier[pyemma]
keyword[return] identifier[p] . identifier[join] ( identifier[pyemma] . identifier[__path__] [ literal[int] ], identifier[Config] . identifier[DEFAULT_CONFIG_FILE_NAME] ) | def default_config_file(self):
""" default config file living in PyEMMA package """
import os.path as p
import pyemma
return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME) |
def parse_requirements(filename):
    """Load requirement strings from a pip requirements file.

    Blank lines and comment lines (starting with ``#``) are skipped;
    surrounding whitespace is stripped from each entry.

    :param filename: path to the requirements file
    :returns: list of requirement strings
    """
    with open(filename, 'r') as f:
        # Stream the stripped lines instead of materializing an
        # intermediate list; filter while the file is still open.
        stripped = (line.strip() for line in f)
        return [line for line in stripped
                if line and not line.startswith("#")]
return [line for line in lineiter if line and not line.startswith("#")] | def function[parse_requirements, parameter[filename]]:
constant[ load requirements from a pip requirements file ]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
variable[lineiter] assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1b24bada0>]]
return[<ast.ListComp object at 0x7da1b24bb1c0>] | keyword[def] identifier[parse_requirements] ( identifier[filename] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[lineiter] = identifier[list] ( identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[f] )
keyword[return] [ identifier[line] keyword[for] identifier[line] keyword[in] identifier[lineiter] keyword[if] identifier[line] keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] )] | def parse_requirements(filename):
""" load requirements from a pip requirements file """
with open(filename, 'r') as f:
lineiter = list((line.strip() for line in f)) # depends on [control=['with'], data=['f']]
return [line for line in lineiter if line and (not line.startswith('#'))] |
def fold_joint_sfs(s, n1, n2):
    """Fold a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes, n_chromosomes)
        Joint site frequency spectrum.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int
        Folded joint site frequency spectrum.

    """

    # validate input
    s = asarray_ndim(s, 2)
    assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
    assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'

    # zero-pad rows so every allele count up to n1 is represented
    if s.shape[0] < n1 + 1:
        padded = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
        padded[:s.shape[0]] = s
        s = padded

    # zero-pad columns so every allele count up to n2 is represented
    if s.shape[1] < n2 + 1:
        padded = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
        padded[:, :s.shape[1]] = s
        s = padded

    # fold the four quadrants onto the top-left one
    half1 = (n1 + 1) // 2
    half2 = (n2 + 1) // 2
    n1 = half1 * 2
    n2 = half2 * 2
    top_left = s[:half1, :half2]
    top_right = s[half1:n1, :half2][::-1]
    bottom_left = s[:half1, half2:n2][:, ::-1]
    bottom_right = s[half1:n1, half2:n2][::-1, ::-1]
    return top_left + top_right + bottom_left + bottom_right
constant[Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
]
variable[s] assign[=] call[name[asarray_ndim], parameter[name[s], constant[2]]]
assert[compare[call[name[s].shape][constant[0]] less_or_equal[<=] binary_operation[name[n1] + constant[1]]]]
assert[compare[call[name[s].shape][constant[1]] less_or_equal[<=] binary_operation[name[n2] + constant[1]]]]
if compare[call[name[s].shape][constant[0]] less[<] binary_operation[name[n1] + constant[1]]] begin[:]
variable[sm] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da2041d94b0>, <ast.Subscript object at 0x7da2041da620>]]]]
call[name[sm]][<ast.Slice object at 0x7da2041da200>] assign[=] name[s]
variable[s] assign[=] name[sm]
if compare[call[name[s].shape][constant[1]] less[<] binary_operation[name[n2] + constant[1]]] begin[:]
variable[sn] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da2041d9a20>, <ast.BinOp object at 0x7da2041da350>]]]]
call[name[sn]][tuple[[<ast.Slice object at 0x7da2041dba90>, <ast.Slice object at 0x7da2041d9db0>]]] assign[=] name[s]
variable[s] assign[=] name[sn]
variable[mf] assign[=] binary_operation[binary_operation[name[n1] + constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[nf] assign[=] binary_operation[binary_operation[name[n2] + constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[n1] assign[=] binary_operation[name[mf] * constant[2]]
variable[n2] assign[=] binary_operation[name[nf] * constant[2]]
variable[o] assign[=] binary_operation[binary_operation[binary_operation[call[name[s]][tuple[[<ast.Slice object at 0x7da18fe90220>, <ast.Slice object at 0x7da18fe93970>]]] + call[call[name[s]][tuple[[<ast.Slice object at 0x7da18fe91fc0>, <ast.Slice object at 0x7da18fe900d0>]]]][<ast.Slice object at 0x7da18fe903a0>]] + call[call[name[s]][tuple[[<ast.Slice object at 0x7da18fe90370>, <ast.Slice object at 0x7da18fe93c70>]]]][tuple[[<ast.Slice object at 0x7da18fe934c0>, <ast.Slice object at 0x7da18fe915a0>]]]] + call[call[name[s]][tuple[[<ast.Slice object at 0x7da18fe92680>, <ast.Slice object at 0x7da18fe919f0>]]]][tuple[[<ast.Slice object at 0x7da18fe93250>, <ast.Slice object at 0x7da18fe92b30>]]]]
return[name[o]] | keyword[def] identifier[fold_joint_sfs] ( identifier[s] , identifier[n1] , identifier[n2] ):
literal[string]
identifier[s] = identifier[asarray_ndim] ( identifier[s] , literal[int] )
keyword[assert] identifier[s] . identifier[shape] [ literal[int] ]<= identifier[n1] + literal[int] , literal[string]
keyword[assert] identifier[s] . identifier[shape] [ literal[int] ]<= identifier[n2] + literal[int] , literal[string]
keyword[if] identifier[s] . identifier[shape] [ literal[int] ]< identifier[n1] + literal[int] :
identifier[sm] = identifier[np] . identifier[zeros] (( identifier[n1] + literal[int] , identifier[s] . identifier[shape] [ literal[int] ]), identifier[dtype] = identifier[s] . identifier[dtype] )
identifier[sm] [: identifier[s] . identifier[shape] [ literal[int] ]]= identifier[s]
identifier[s] = identifier[sm]
keyword[if] identifier[s] . identifier[shape] [ literal[int] ]< identifier[n2] + literal[int] :
identifier[sn] = identifier[np] . identifier[zeros] (( identifier[s] . identifier[shape] [ literal[int] ], identifier[n2] + literal[int] ), identifier[dtype] = identifier[s] . identifier[dtype] )
identifier[sn] [:,: identifier[s] . identifier[shape] [ literal[int] ]]= identifier[s]
identifier[s] = identifier[sn]
identifier[mf] =( identifier[n1] + literal[int] )// literal[int]
identifier[nf] =( identifier[n2] + literal[int] )// literal[int]
identifier[n1] = identifier[mf] * literal[int]
identifier[n2] = identifier[nf] * literal[int]
identifier[o] =( identifier[s] [: identifier[mf] ,: identifier[nf] ]+
identifier[s] [ identifier[mf] : identifier[n1] ,: identifier[nf] ][::- literal[int] ]+
identifier[s] [: identifier[mf] , identifier[nf] : identifier[n2] ][:,::- literal[int] ]+
identifier[s] [ identifier[mf] : identifier[n1] , identifier[nf] : identifier[n2] ][::- literal[int] ,::- literal[int] ])
keyword[return] identifier[o] | def fold_joint_sfs(s, n1, n2):
"""Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
"""
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'
# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
sm[:s.shape[0]] = s
s = sm # depends on [control=['if'], data=[]]
# need to check s has all entries up to n
if s.shape[1] < n2 + 1:
sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
sn[:, :s.shape[1]] = s
s = sn # depends on [control=['if'], data=[]]
# fold
mf = (n1 + 1) // 2
nf = (n2 + 1) // 2
n1 = mf * 2
n2 = nf * 2 # top left
# top right
# bottom left
o = s[:mf, :nf] + s[mf:n1, :nf][::-1] + s[:mf, nf:n2][:, ::-1] + s[mf:n1, nf:n2][::-1, ::-1] # bottom right
return o |
def setup_queue(self, queue_name):
"""
Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
self._logger.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name,
durable=True, exclusive=False,
auto_delete=False) | def function[setup_queue, parameter[self, queue_name]]:
constant[
Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
]
call[name[self]._logger.info, parameter[constant[Declaring queue %s], name[queue_name]]]
call[name[self]._channel.queue_declare, parameter[name[self].on_queue_declareok, name[queue_name]]] | keyword[def] identifier[setup_queue] ( identifier[self] , identifier[queue_name] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[queue_name] )
identifier[self] . identifier[_channel] . identifier[queue_declare] ( identifier[self] . identifier[on_queue_declareok] , identifier[queue_name] ,
identifier[durable] = keyword[True] , identifier[exclusive] = keyword[False] ,
identifier[auto_delete] = keyword[False] ) | def setup_queue(self, queue_name):
"""
Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
self._logger.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name, durable=True, exclusive=False, auto_delete=False) |
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes | def function[_get_best_indexes, parameter[logits, n_best_size]]:
constant[Get the n-best logits from a list.]
variable[index_and_score] assign[=] call[name[sorted], parameter[call[name[enumerate], parameter[name[logits]]]]]
variable[best_indexes] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[index_and_score]]]]]] begin[:]
if compare[name[i] greater_or_equal[>=] name[n_best_size]] begin[:]
break
call[name[best_indexes].append, parameter[call[call[name[index_and_score]][name[i]]][constant[0]]]]
return[name[best_indexes]] | keyword[def] identifier[_get_best_indexes] ( identifier[logits] , identifier[n_best_size] ):
literal[string]
identifier[index_and_score] = identifier[sorted] ( identifier[enumerate] ( identifier[logits] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] )
identifier[best_indexes] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[index_and_score] )):
keyword[if] identifier[i] >= identifier[n_best_size] :
keyword[break]
identifier[best_indexes] . identifier[append] ( identifier[index_and_score] [ identifier[i] ][ literal[int] ])
keyword[return] identifier[best_indexes] | def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break # depends on [control=['if'], data=[]]
best_indexes.append(index_and_score[i][0]) # depends on [control=['for'], data=['i']]
return best_indexes |
def split_diff(old, new):
"""
Returns a generator yielding the side-by-side diff of `old` and `new`).
"""
return map(lambda l: l.rstrip(),
icdiff.ConsoleDiff(cols=COLUMNS).make_table(old.splitlines(), new.splitlines())) | def function[split_diff, parameter[old, new]]:
constant[
Returns a generator yielding the side-by-side diff of `old` and `new`).
]
return[call[name[map], parameter[<ast.Lambda object at 0x7da20c6c6710>, call[call[name[icdiff].ConsoleDiff, parameter[]].make_table, parameter[call[name[old].splitlines, parameter[]], call[name[new].splitlines, parameter[]]]]]]] | keyword[def] identifier[split_diff] ( identifier[old] , identifier[new] ):
literal[string]
keyword[return] identifier[map] ( keyword[lambda] identifier[l] : identifier[l] . identifier[rstrip] (),
identifier[icdiff] . identifier[ConsoleDiff] ( identifier[cols] = identifier[COLUMNS] ). identifier[make_table] ( identifier[old] . identifier[splitlines] (), identifier[new] . identifier[splitlines] ())) | def split_diff(old, new):
"""
Returns a generator yielding the side-by-side diff of `old` and `new`).
"""
return map(lambda l: l.rstrip(), icdiff.ConsoleDiff(cols=COLUMNS).make_table(old.splitlines(), new.splitlines())) |
def ipv6_link_local(self, **kwargs):
"""Configure ipv6 link local address on interfaces on vdx switches
Args:
int_type: Interface type on which the ipv6 link local needs to be
configured.
name: 'Ve' or 'loopback' interface name.
rbridge_id (str): rbridge-id for device.
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name` is not passed.
ValueError: if `int_type`, `name` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ipv6_link_local(name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(get=True,name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(delete=True,
... name='500', int_type='ve', rbridge_id='1')
"""
int_type = kwargs.pop('int_type').lower()
ve_name = kwargs.pop('name')
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['loopback', 've']
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
link_args = dict(name=ve_name, rbridge_id=rbridge_id,
int_type=int_type)
method_name = 'rbridge_id_interface_%s_ipv6_ipv6_config_address_' \
'use_link_local_only' % int_type
method_class = self._rbridge
v6_link_local = getattr(method_class, method_name)
config = v6_link_local(**link_args)
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
item = output.data.find('.//{*}use-link-local-only')
if item is not None:
return True
if kwargs.pop('delete', False):
config.find('.//*use-link-local-only').set('operation', 'delete')
return callback(config) | def function[ipv6_link_local, parameter[self]]:
constant[Configure ipv6 link local address on interfaces on vdx switches
Args:
int_type: Interface type on which the ipv6 link local needs to be
configured.
name: 'Ve' or 'loopback' interface name.
rbridge_id (str): rbridge-id for device.
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name` is not passed.
ValueError: if `int_type`, `name` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ipv6_link_local(name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(get=True,name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(delete=True,
... name='500', int_type='ve', rbridge_id='1')
]
variable[int_type] assign[=] call[call[name[kwargs].pop, parameter[constant[int_type]]].lower, parameter[]]
variable[ve_name] assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[rbridge_id] assign[=] call[name[kwargs].pop, parameter[constant[rbridge_id], constant[1]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
variable[valid_int_types] assign[=] list[[<ast.Constant object at 0x7da20e74b040>, <ast.Constant object at 0x7da20e748a90>]]
if compare[name[int_type] <ast.NotIn object at 0x7da2590d7190> name[valid_int_types]] begin[:]
<ast.Raise object at 0x7da20e74be20>
variable[link_args] assign[=] call[name[dict], parameter[]]
variable[method_name] assign[=] binary_operation[constant[rbridge_id_interface_%s_ipv6_ipv6_config_address_use_link_local_only] <ast.Mod object at 0x7da2590d6920> name[int_type]]
variable[method_class] assign[=] name[self]._rbridge
variable[v6_link_local] assign[=] call[name[getattr], parameter[name[method_class], name[method_name]]]
variable[config] assign[=] call[name[v6_link_local], parameter[]]
if call[name[kwargs].pop, parameter[constant[get], constant[False]]] begin[:]
variable[output] assign[=] call[name[callback], parameter[name[config]]]
variable[item] assign[=] call[name[output].data.find, parameter[constant[.//{*}use-link-local-only]]]
if compare[name[item] is_not constant[None]] begin[:]
return[constant[True]]
if call[name[kwargs].pop, parameter[constant[delete], constant[False]]] begin[:]
call[call[name[config].find, parameter[constant[.//*use-link-local-only]]].set, parameter[constant[operation], constant[delete]]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[ipv6_link_local] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[int_type] = identifier[kwargs] . identifier[pop] ( literal[string] ). identifier[lower] ()
identifier[ve_name] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[rbridge_id] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
identifier[valid_int_types] =[ literal[string] , literal[string] ]
keyword[if] identifier[int_type] keyword[not] keyword[in] identifier[valid_int_types] :
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[repr] ( identifier[valid_int_types] ))
identifier[link_args] = identifier[dict] ( identifier[name] = identifier[ve_name] , identifier[rbridge_id] = identifier[rbridge_id] ,
identifier[int_type] = identifier[int_type] )
identifier[method_name] = literal[string] literal[string] % identifier[int_type]
identifier[method_class] = identifier[self] . identifier[_rbridge]
identifier[v6_link_local] = identifier[getattr] ( identifier[method_class] , identifier[method_name] )
identifier[config] = identifier[v6_link_local] (** identifier[link_args] )
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ):
identifier[output] = identifier[callback] ( identifier[config] , identifier[handler] = literal[string] )
identifier[item] = identifier[output] . identifier[data] . identifier[find] ( literal[string] )
keyword[if] identifier[item] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ):
identifier[config] . identifier[find] ( literal[string] ). identifier[set] ( literal[string] , literal[string] )
keyword[return] identifier[callback] ( identifier[config] ) | def ipv6_link_local(self, **kwargs):
"""Configure ipv6 link local address on interfaces on vdx switches
Args:
int_type: Interface type on which the ipv6 link local needs to be
configured.
name: 'Ve' or 'loopback' interface name.
rbridge_id (str): rbridge-id for device.
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name` is not passed.
ValueError: if `int_type`, `name` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ipv6_link_local(name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(get=True,name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(delete=True,
... name='500', int_type='ve', rbridge_id='1')
"""
int_type = kwargs.pop('int_type').lower()
ve_name = kwargs.pop('name')
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['loopback', 've']
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' % repr(valid_int_types)) # depends on [control=['if'], data=['valid_int_types']]
link_args = dict(name=ve_name, rbridge_id=rbridge_id, int_type=int_type)
method_name = 'rbridge_id_interface_%s_ipv6_ipv6_config_address_use_link_local_only' % int_type
method_class = self._rbridge
v6_link_local = getattr(method_class, method_name)
config = v6_link_local(**link_args)
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
item = output.data.find('.//{*}use-link-local-only')
if item is not None:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if kwargs.pop('delete', False):
config.find('.//*use-link-local-only').set('operation', 'delete') # depends on [control=['if'], data=[]]
return callback(config) |
def find_goodness_of_fit(rapid_qout_file, reach_id_file, observed_file,
                         out_analysis_file, daily=False):
    """
    Compare observed streamflow with simulated flows from a RAPID Qout file.

    Computes a suite of goodness-of-fit statistics (percent bias, RMSE,
    MAE, Nash-Sutcliffe efficiency, Kling-Gupta efficiency, ...) for each
    river reach and writes one CSV row per reach.

    Parameters
    ----------
    rapid_qout_file: str
        Path to the RAPID Qout file.
    reach_id_file: str
        Path to file with river reach IDs associated with the RAPID Qout
        file. It is in the format of the RAPID observed flows reach ID file.
    observed_file: str
        Path to input csv with observed flows corresponding to the
        RAPID Qout. It is in the format of the RAPID observed flows file.
    out_analysis_file: str
        Path to the analysis output csv file.
    daily: bool, optional
        If True and the file is CF-Compliant, it will compare the
        *observed_file* with daily average flow from Qout. Default is False.
    """
    reach_ids = np.loadtxt(reach_id_file,
                           delimiter=",", usecols=(0,),
                           ndmin=1, dtype=np.int32)
    qout_nc = RAPIDDataset(rapid_qout_file)
    # One column of observed flows per reach ID, in the same order as the
    # reach ID file (usecols restricts to exactly that many columns).
    observations = np.loadtxt(observed_file,
                              ndmin=2, delimiter=",",
                              usecols=tuple(range(reach_ids.size)))
    header = ["reach_id",
              "percent_bias",
              "abs_percent_bias",
              "rmse",
              "mae",
              "bias",
              "NSE",
              "likelihood",
              "correlation_coeff",
              "index_agreement",
              "KGE"]
    with open(out_analysis_file, 'w') as outcsv:
        writer = csvwriter(outcsv)
        writer.writerow(header)
        # Walk the reach IDs alongside the matching observed-flow columns.
        for reach_id, observed in zip(reach_ids, observations.T):
            simulated = qout_nc.get_qout(reach_id, daily=daily)
            # Trim both series to their common length before comparing.
            common_len = min(len(simulated), len(observed))
            simulated = simulated[:common_len]
            observed = observed[:common_len]
            simulated, observed = filter_nan(simulated, observed)
            writer.writerow([reach_id,
                             pc_bias(simulated, observed),
                             apb(simulated, observed),
                             rmse(simulated, observed),
                             mae(simulated, observed),
                             bias(simulated, observed),
                             NS(simulated, observed),
                             L(simulated, observed),
                             correlation(simulated, observed),
                             index_agreement(simulated, observed),
                             KGE(simulated, observed)[0]])
constant[
Finds the goodness of fit comparing observed streamflow in a rapid Qout
file with simulated flows in a csv file.
Parameters
----------
rapid_qout_file: str
Path to the RAPID Qout file.
reach_id_file: str
ath to file with river reach ID's associate with the RAPID Qout file.
It is in the format of the RAPID observed flows reach ID file.
observed_file: str
Path to input csv with with observed flows corresponding to the
RAPID Qout. It is in the format of the RAPID observed flows file.
out_analysis_file: str
Path to the analysis output csv file.
daily: bool, optional
If True and the file is CF-Compliant, it will compare the
*observed_file* with daily average flow from Qout. Default is False.
Example with CF-Compliant RAPID Qout file:
.. code:: python
import os
from RAPIDpy.postprocess import find_goodness_of_fit
INPUT_DATA_PATH = '/path/to/data'
reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv')
observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv')
cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
'Qout_nasa_lis_3hr_20020830_CF.nc')
cf_out_analysis_file = os.path.join(OUTPUT_DATA_PATH,
'cf_goodness_of_fit_results-daily.csv')
find_goodness_of_fit(cf_input_qout_file,
reach_id_file,
observed_file,
cf_out_analysis_file,
daily=True)
]
variable[reach_id_list] assign[=] call[name[np].loadtxt, parameter[name[reach_id_file]]]
variable[data_nc] assign[=] call[name[RAPIDDataset], parameter[name[rapid_qout_file]]]
variable[observed_table] assign[=] call[name[np].loadtxt, parameter[name[observed_file]]]
with call[name[open], parameter[name[out_analysis_file], constant[w]]] begin[:]
variable[writer] assign[=] call[name[csvwriter], parameter[name[outcsv]]]
call[name[writer].writerow, parameter[list[[<ast.Constant object at 0x7da2044c1000>, <ast.Constant object at 0x7da2044c3d90>, <ast.Constant object at 0x7da2044c12d0>, <ast.Constant object at 0x7da2044c2e00>, <ast.Constant object at 0x7da2044c1e70>, <ast.Constant object at 0x7da2044c0430>, <ast.Constant object at 0x7da2044c03d0>, <ast.Constant object at 0x7da2044c04c0>, <ast.Constant object at 0x7da2044c1f90>, <ast.Constant object at 0x7da2044c0670>, <ast.Constant object at 0x7da2044c2c20>]]]]
for taget[tuple[[<ast.Name object at 0x7da2044c1480>, <ast.Name object at 0x7da2044c3940>]]] in starred[call[name[enumerate], parameter[name[reach_id_list]]]] begin[:]
variable[observed_array] assign[=] call[name[observed_table]][tuple[[<ast.Slice object at 0x7da2044c2290>, <ast.Name object at 0x7da2044c2b00>]]]
variable[simulated_array] assign[=] call[name[data_nc].get_qout, parameter[name[reach_id]]]
variable[simulated_array] assign[=] call[name[simulated_array]][<ast.Slice object at 0x7da2044c18d0>]
variable[observed_array] assign[=] call[name[observed_array]][<ast.Slice object at 0x7da2044c1b70>]
<ast.Tuple object at 0x7da2044c0bb0> assign[=] call[name[filter_nan], parameter[name[simulated_array], name[observed_array]]]
call[name[writer].writerow, parameter[list[[<ast.Name object at 0x7da2044c3880>, <ast.Call object at 0x7da2044c0370>, <ast.Call object at 0x7da2044c2470>, <ast.Call object at 0x7da2044c3b20>, <ast.Call object at 0x7da2044c2c50>, <ast.Call object at 0x7da2044c35b0>, <ast.Call object at 0x7da2044c14e0>, <ast.Call object at 0x7da2044c3160>, <ast.Call object at 0x7da2044c06d0>, <ast.Call object at 0x7da2044c35e0>, <ast.Subscript object at 0x7da20c993d00>]]]] | keyword[def] identifier[find_goodness_of_fit] ( identifier[rapid_qout_file] , identifier[reach_id_file] , identifier[observed_file] ,
identifier[out_analysis_file] , identifier[daily] = keyword[False] ):
literal[string]
identifier[reach_id_list] = identifier[np] . identifier[loadtxt] ( identifier[reach_id_file] ,
identifier[delimiter] = literal[string] , identifier[usecols] =( literal[int] ,),
identifier[ndmin] = literal[int] , identifier[dtype] = identifier[np] . identifier[int32] )
identifier[data_nc] = identifier[RAPIDDataset] ( identifier[rapid_qout_file] )
identifier[observed_table] = identifier[np] . identifier[loadtxt] ( identifier[observed_file] ,
identifier[ndmin] = literal[int] , identifier[delimiter] = literal[string] ,
identifier[usecols] = identifier[tuple] ( identifier[range] ( identifier[reach_id_list] . identifier[size] )))
keyword[with] identifier[open] ( identifier[out_analysis_file] , literal[string] ) keyword[as] identifier[outcsv] :
identifier[writer] = identifier[csvwriter] ( identifier[outcsv] )
identifier[writer] . identifier[writerow] ([ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
keyword[for] identifier[index] , identifier[reach_id] keyword[in] identifier[enumerate] ( identifier[reach_id_list] ):
identifier[observed_array] = identifier[observed_table] [:, identifier[index] ]
identifier[simulated_array] = identifier[data_nc] . identifier[get_qout] ( identifier[reach_id] , identifier[daily] = identifier[daily] )
identifier[simulated_array] = identifier[simulated_array] [: identifier[len] ( identifier[observed_array] )]
identifier[observed_array] = identifier[observed_array] [: identifier[len] ( identifier[simulated_array] )]
identifier[simulated_array] , identifier[observed_array] = identifier[filter_nan] ( identifier[simulated_array] , identifier[observed_array] )
identifier[writer] . identifier[writerow] ([ identifier[reach_id] ,
identifier[pc_bias] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[apb] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[rmse] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[mae] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[bias] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[NS] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[L] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[correlation] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[index_agreement] ( identifier[simulated_array] , identifier[observed_array] ),
identifier[KGE] ( identifier[simulated_array] , identifier[observed_array] )[ literal[int] ]]) | def find_goodness_of_fit(rapid_qout_file, reach_id_file, observed_file, out_analysis_file, daily=False):
"""
Finds the goodness of fit comparing observed streamflow in a rapid Qout
file with simulated flows in a csv file.
Parameters
----------
rapid_qout_file: str
Path to the RAPID Qout file.
reach_id_file: str
ath to file with river reach ID's associate with the RAPID Qout file.
It is in the format of the RAPID observed flows reach ID file.
observed_file: str
Path to input csv with with observed flows corresponding to the
RAPID Qout. It is in the format of the RAPID observed flows file.
out_analysis_file: str
Path to the analysis output csv file.
daily: bool, optional
If True and the file is CF-Compliant, it will compare the
*observed_file* with daily average flow from Qout. Default is False.
Example with CF-Compliant RAPID Qout file:
.. code:: python
import os
from RAPIDpy.postprocess import find_goodness_of_fit
INPUT_DATA_PATH = '/path/to/data'
reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv')
observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv')
cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
'Qout_nasa_lis_3hr_20020830_CF.nc')
cf_out_analysis_file = os.path.join(OUTPUT_DATA_PATH,
'cf_goodness_of_fit_results-daily.csv')
find_goodness_of_fit(cf_input_qout_file,
reach_id_file,
observed_file,
cf_out_analysis_file,
daily=True)
"""
reach_id_list = np.loadtxt(reach_id_file, delimiter=',', usecols=(0,), ndmin=1, dtype=np.int32)
data_nc = RAPIDDataset(rapid_qout_file)
# analyze and write
observed_table = np.loadtxt(observed_file, ndmin=2, delimiter=',', usecols=tuple(range(reach_id_list.size)))
with open(out_analysis_file, 'w') as outcsv:
writer = csvwriter(outcsv)
writer.writerow(['reach_id', 'percent_bias', 'abs_percent_bias', 'rmse', 'mae', 'bias', 'NSE', 'likelihood', 'correlation_coeff', 'index_agreement', 'KGE'])
for (index, reach_id) in enumerate(reach_id_list):
observed_array = observed_table[:, index]
simulated_array = data_nc.get_qout(reach_id, daily=daily)
# make sure they are the same length
simulated_array = simulated_array[:len(observed_array)]
observed_array = observed_array[:len(simulated_array)]
(simulated_array, observed_array) = filter_nan(simulated_array, observed_array)
writer.writerow([reach_id, pc_bias(simulated_array, observed_array), apb(simulated_array, observed_array), rmse(simulated_array, observed_array), mae(simulated_array, observed_array), bias(simulated_array, observed_array), NS(simulated_array, observed_array), L(simulated_array, observed_array), correlation(simulated_array, observed_array), index_agreement(simulated_array, observed_array), KGE(simulated_array, observed_array)[0]]) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['outcsv']] |
def init_app(self,
             app,
             entry_point_group='invenio_admin.views',
             permission_factory=None,
             view_class_factory=protected_adminview_factory,
             index_view_class=AdminIndexView,
             **kwargs):
    """Flask application initialization.
    :param app: The Flask application.
    :param entry_point_group: Name of entry point group to load
        views/models from. (Default: ``'invenio_admin.views'``)
    :param permission_factory: Default permission factory to use when
        protecting an admin view. (Default:
        :func:`~.permissions.admin_permission_factory`)
    :param view_class_factory: Factory for creating admin view classes on
        the fly. Used to protect admin views with authentication and
        authorization. (Default:
        :func:`~.views.protected_adminview_factory`)
    :param index_view_class: Specify administrative interface index page.
        (Default: :class:`flask_admin.base.AdminIndexView`)
    :param kwargs: Passed to :class:`flask_admin.base.Admin`.
    :returns: Extension state.
    """
    self.init_config(app)
    # Fall back to the permission factory configured on the application
    # when none was passed explicitly.
    default_permission_factory = app.config['ADMIN_PERMISSION_FACTORY']
    permission_factory = permission_factory or \
        import_string(default_permission_factory)
    # Create administration app.
    # NOTE: the docstring advertises that extra keyword arguments are
    # forwarded to flask_admin.base.Admin; the previous signature dropped
    # them, so ``**kwargs`` is restored here (backward-compatible).
    admin = Admin(
        app,
        name=app.config['ADMIN_APPNAME'],
        template_mode=app.config['ADMIN_TEMPLATE_MODE'],
        index_view=view_class_factory(index_view_class)(),
        **kwargs
    )
    @app.before_first_request
    def lazy_base_template():
        """Initialize admin base template lazily."""
        # Deferred so other extensions can still set ADMIN_BASE_TEMPLATE
        # during their own initialization.
        base_template = app.config.get('ADMIN_BASE_TEMPLATE')
        if base_template:
            admin.base_template = base_template
    # Create admin state
    state = _AdminState(app, admin, permission_factory, view_class_factory)
    if entry_point_group:
        state.load_entry_point_group(entry_point_group)
    app.extensions['invenio-admin'] = state
    return state
constant[Flask application initialization.
:param app: The Flask application.
:param entry_point_group: Name of entry point group to load
views/models from. (Default: ``'invenio_admin.views'``)
:param permission_factory: Default permission factory to use when
protecting an admin view. (Default:
:func:`~.permissions.admin_permission_factory`)
:param view_class_factory: Factory for creating admin view classes on
the fly. Used to protect admin views with authentication and
authorization. (Default:
:func:`~.views.protected_adminview_factory`)
:param index_view_class: Specify administrative interface index page.
(Default: :class:`flask_admin.base.AdminIndexView`)
:param kwargs: Passed to :class:`flask_admin.base.Admin`.
:returns: Extension state.
]
call[name[self].init_config, parameter[name[app]]]
variable[default_permission_factory] assign[=] call[name[app].config][constant[ADMIN_PERMISSION_FACTORY]]
variable[permission_factory] assign[=] <ast.BoolOp object at 0x7da1b2571120>
variable[admin] assign[=] call[name[Admin], parameter[name[app]]]
def function[lazy_base_template, parameter[]]:
constant[Initialize admin base template lazily.]
variable[base_template] assign[=] call[name[app].config.get, parameter[constant[ADMIN_BASE_TEMPLATE]]]
if name[base_template] begin[:]
name[admin].base_template assign[=] name[base_template]
variable[state] assign[=] call[name[_AdminState], parameter[name[app], name[admin], name[permission_factory], name[view_class_factory]]]
if name[entry_point_group] begin[:]
call[name[state].load_entry_point_group, parameter[name[entry_point_group]]]
call[name[app].extensions][constant[invenio-admin]] assign[=] name[state]
return[name[state]] | keyword[def] identifier[init_app] ( identifier[self] ,
identifier[app] ,
identifier[entry_point_group] = literal[string] ,
identifier[permission_factory] = keyword[None] ,
identifier[view_class_factory] = identifier[protected_adminview_factory] ,
identifier[index_view_class] = identifier[AdminIndexView] ):
literal[string]
identifier[self] . identifier[init_config] ( identifier[app] )
identifier[default_permission_factory] = identifier[app] . identifier[config] [ literal[string] ]
identifier[permission_factory] = identifier[permission_factory] keyword[or] identifier[import_string] ( identifier[default_permission_factory] )
identifier[admin] = identifier[Admin] (
identifier[app] ,
identifier[name] = identifier[app] . identifier[config] [ literal[string] ],
identifier[template_mode] = identifier[app] . identifier[config] [ literal[string] ],
identifier[index_view] = identifier[view_class_factory] ( identifier[index_view_class] )(),
)
@ identifier[app] . identifier[before_first_request]
keyword[def] identifier[lazy_base_template] ():
literal[string]
identifier[base_template] = identifier[app] . identifier[config] . identifier[get] ( literal[string] )
keyword[if] identifier[base_template] :
identifier[admin] . identifier[base_template] = identifier[base_template]
identifier[state] = identifier[_AdminState] ( identifier[app] , identifier[admin] , identifier[permission_factory] , identifier[view_class_factory] )
keyword[if] identifier[entry_point_group] :
identifier[state] . identifier[load_entry_point_group] ( identifier[entry_point_group] )
identifier[app] . identifier[extensions] [ literal[string] ]= identifier[state]
keyword[return] identifier[state] | def init_app(self, app, entry_point_group='invenio_admin.views', permission_factory=None, view_class_factory=protected_adminview_factory, index_view_class=AdminIndexView):
"""Flask application initialization.
:param app: The Flask application.
:param entry_point_group: Name of entry point group to load
views/models from. (Default: ``'invenio_admin.views'``)
:param permission_factory: Default permission factory to use when
protecting an admin view. (Default:
:func:`~.permissions.admin_permission_factory`)
:param view_class_factory: Factory for creating admin view classes on
the fly. Used to protect admin views with authentication and
authorization. (Default:
:func:`~.views.protected_adminview_factory`)
:param index_view_class: Specify administrative interface index page.
(Default: :class:`flask_admin.base.AdminIndexView`)
:param kwargs: Passed to :class:`flask_admin.base.Admin`.
:returns: Extension state.
"""
self.init_config(app)
default_permission_factory = app.config['ADMIN_PERMISSION_FACTORY']
permission_factory = permission_factory or import_string(default_permission_factory)
# Create administration app.
admin = Admin(app, name=app.config['ADMIN_APPNAME'], template_mode=app.config['ADMIN_TEMPLATE_MODE'], index_view=view_class_factory(index_view_class)())
@app.before_first_request
def lazy_base_template():
"""Initialize admin base template lazily."""
base_template = app.config.get('ADMIN_BASE_TEMPLATE')
if base_template:
admin.base_template = base_template # depends on [control=['if'], data=[]]
# Create admin state
state = _AdminState(app, admin, permission_factory, view_class_factory)
if entry_point_group:
state.load_entry_point_group(entry_point_group) # depends on [control=['if'], data=[]]
app.extensions['invenio-admin'] = state
return state |
def get_xy_array(x_segment, y_segment):
    """Pair parallel x and y coordinate sequences element-by-element.

    input: x_segment, y_segment (assumed to be the same length)
    output: xy_segment, ( format: [(x[0], y[0]), (x[1], y[1])]
    """
    # zip replaces the manual enumerate-and-index loop; the sequences are
    # expected to be the same length.
    return list(zip(x_segment, y_segment))
constant[
input: x_segment, y_segment
output: xy_segment, ( format: [(x[0], y[0]), (x[1], y[1])]
]
variable[xy_array] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f00e5c0>, <ast.Name object at 0x7da18f00de10>]]] in starred[call[name[enumerate], parameter[name[x_segment]]]] begin[:]
call[name[xy_array].append, parameter[tuple[[<ast.Name object at 0x7da18f00e620>, <ast.Subscript object at 0x7da18f00dfc0>]]]]
return[name[xy_array]] | keyword[def] identifier[get_xy_array] ( identifier[x_segment] , identifier[y_segment] ):
literal[string]
identifier[xy_array] =[]
keyword[for] identifier[num] , identifier[x] keyword[in] identifier[enumerate] ( identifier[x_segment] ):
identifier[xy_array] . identifier[append] (( identifier[x] , identifier[y_segment] [ identifier[num] ]))
keyword[return] identifier[xy_array] | def get_xy_array(x_segment, y_segment):
"""
input: x_segment, y_segment
output: xy_segment, ( format: [(x[0], y[0]), (x[1], y[1])]
"""
xy_array = []
for (num, x) in enumerate(x_segment):
xy_array.append((x, y_segment[num])) # depends on [control=['for'], data=[]]
return xy_array |
def put(self, url, headers=None, body=None, kwargs=None):
    """Issue an HTTP PUT request through the shared request helper.

    To make a PUT request pass, ``url``
    :param url: ``str``
    :param headers: ``dict``
    :param body: ``object``
    :param kwargs: ``dict``
    """
    # Assemble the request parameters once, then delegate.
    request_args = {
        'method': 'put',
        'url': url,
        'headers': headers,
        'body': body,
        'kwargs': kwargs,
    }
    return self._request(**request_args)
constant[Make a PUT request.
To make a PUT request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict``
]
return[call[name[self]._request, parameter[]]] | keyword[def] identifier[put] ( identifier[self] , identifier[url] , identifier[headers] = keyword[None] , identifier[body] = keyword[None] , identifier[kwargs] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[body] = identifier[body] ,
identifier[kwargs] = identifier[kwargs]
) | def put(self, url, headers=None, body=None, kwargs=None):
"""Make a PUT request.
To make a PUT request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict``
"""
return self._request(method='put', url=url, headers=headers, body=body, kwargs=kwargs) |
def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None,
                                                   use_representatives=False):
    """Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID.

    This does not provide a mapping to residue number, only a mapping to the
    index which then can be mapped to the structure resnum!

    Args:
        resnums (int, list): Residue numbers in the sequence (1-based)
        seqprop (SeqProp): SeqProp object
        structprop (StructProp): StructProp object
        chain_id (str): Chain ID to map to index
        use_representatives (bool): If representative sequence/structure/chain should be used in mapping

    Returns:
        dict: Mapping of resnums to 0-based chain indices; resnums with no
            aligned structure residue are omitted (a warning is logged).

    Raises:
        ValueError: If the needed sequence/structure/chain information is missing.
        KeyError: If the seqprop-to-chain alignment has not been parsed into
            the sequence's letter annotations yet.
    """
    resnums = ssbio.utils.force_list(resnums)
    if use_representatives:
        seqprop = self.representative_sequence
        structprop = self.representative_structure
        chain_id = self.representative_chain
        if not structprop:
            raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
    else:
        if not seqprop or not structprop or not chain_id:
            raise ValueError('Please specify sequence, structure, and chain ID')
    # The alignment annotations are keyed on '<seqprop id>_<structure id>-<chain>'.
    # The representative structure's ID carries a 'REP-' prefix that is stripped
    # so the key matches how the alignment was stored.
    if self.representative_structure:
        if structprop.id == self.representative_structure.id:
            full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
        else:
            full_structure_id = '{}-{}'.format(structprop.id, chain_id)
    else:
        full_structure_id = '{}-{}'.format(structprop.id, chain_id)
    aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
    access_key = '{}_chain_index'.format(aln_id)
    if access_key not in seqprop.letter_annotations:
        raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? '
                       'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
    # Per-residue annotation: position i holds the chain index for sequence
    # residue i+1 (NaN where no structure residue aligns).
    chain_index_mapping = seqprop.letter_annotations[access_key]
    resnum_to_chain_index = {}
    for x in resnums:
        # Shift the 1-based resnum into the annotation list, then shift the
        # stored value down by one to get a 0-based chain index.
        ix = chain_index_mapping[x - 1] - 1
        if np.isnan(ix):
            # NaN marks a sequence residue with no aligned structure residue;
            # skip it rather than emitting a bogus index.
            log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id,
                                                                                              chain_id,
                                                                                              x))
        else:
            resnum_to_chain_index[int(x)] = int(ix)
    return resnum_to_chain_index
constant[Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices
]
variable[resnums] assign[=] call[name[ssbio].utils.force_list, parameter[name[resnums]]]
if name[use_representatives] begin[:]
variable[seqprop] assign[=] name[self].representative_sequence
variable[structprop] assign[=] name[self].representative_structure
variable[chain_id] assign[=] name[self].representative_chain
if <ast.UnaryOp object at 0x7da1b0e47460> begin[:]
<ast.Raise object at 0x7da1b0e450c0>
if name[self].representative_structure begin[:]
if compare[name[structprop].id equal[==] name[self].representative_structure.id] begin[:]
variable[full_structure_id] assign[=] call[call[constant[{}-{}].format, parameter[name[structprop].id, name[chain_id]]].replace, parameter[constant[REP-], constant[]]]
variable[aln_id] assign[=] call[constant[{}_{}].format, parameter[name[seqprop].id, name[full_structure_id]]]
variable[access_key] assign[=] call[constant[{}_chain_index].format, parameter[name[aln_id]]]
if compare[name[access_key] <ast.NotIn object at 0x7da2590d7190> name[seqprop].letter_annotations] begin[:]
<ast.Raise object at 0x7da18c4ce6b0>
variable[chain_index_mapping] assign[=] call[name[seqprop].letter_annotations][name[access_key]]
variable[resnum_to_chain_index] assign[=] dictionary[[], []]
for taget[name[x]] in starred[name[resnums]] begin[:]
variable[ix] assign[=] binary_operation[call[name[chain_index_mapping]][binary_operation[name[x] - constant[1]]] - constant[1]]
if call[name[np].isnan, parameter[name[ix]]] begin[:]
call[name[log].warning, parameter[call[constant[{}-{}, {}: no equivalent residue found in structure sequence].format, parameter[name[structprop].id, name[chain_id], name[x]]]]]
return[name[resnum_to_chain_index]] | keyword[def] identifier[_map_seqprop_resnums_to_structprop_chain_index] ( identifier[self] , identifier[resnums] , identifier[seqprop] = keyword[None] , identifier[structprop] = keyword[None] , identifier[chain_id] = keyword[None] ,
identifier[use_representatives] = keyword[False] ):
literal[string]
identifier[resnums] = identifier[ssbio] . identifier[utils] . identifier[force_list] ( identifier[resnums] )
keyword[if] identifier[use_representatives] :
identifier[seqprop] = identifier[self] . identifier[representative_sequence]
identifier[structprop] = identifier[self] . identifier[representative_structure]
identifier[chain_id] = identifier[self] . identifier[representative_chain]
keyword[if] keyword[not] identifier[structprop] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] keyword[not] identifier[seqprop] keyword[or] keyword[not] identifier[structprop] keyword[or] keyword[not] identifier[chain_id] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[representative_structure] :
keyword[if] identifier[structprop] . identifier[id] == identifier[self] . identifier[representative_structure] . identifier[id] :
identifier[full_structure_id] = literal[string] . identifier[format] ( identifier[structprop] . identifier[id] , identifier[chain_id] ). identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[full_structure_id] = literal[string] . identifier[format] ( identifier[structprop] . identifier[id] , identifier[chain_id] )
keyword[else] :
identifier[full_structure_id] = literal[string] . identifier[format] ( identifier[structprop] . identifier[id] , identifier[chain_id] )
identifier[aln_id] = literal[string] . identifier[format] ( identifier[seqprop] . identifier[id] , identifier[full_structure_id] )
identifier[access_key] = literal[string] . identifier[format] ( identifier[aln_id] )
keyword[if] identifier[access_key] keyword[not] keyword[in] identifier[seqprop] . identifier[letter_annotations] :
keyword[raise] identifier[KeyError] ( literal[string]
literal[string] . identifier[format] ( identifier[access_key] , identifier[aln_id] ))
identifier[chain_index_mapping] = identifier[seqprop] . identifier[letter_annotations] [ identifier[access_key] ]
identifier[resnum_to_chain_index] ={}
keyword[for] identifier[x] keyword[in] identifier[resnums] :
identifier[ix] = identifier[chain_index_mapping] [ identifier[x] - literal[int] ]- literal[int]
keyword[if] identifier[np] . identifier[isnan] ( identifier[ix] ):
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[structprop] . identifier[id] ,
identifier[chain_id] ,
identifier[x] ))
keyword[else] :
identifier[resnum_to_chain_index] [ identifier[int] ( identifier[x] )]= identifier[int] ( identifier[ix] )
keyword[return] identifier[resnum_to_chain_index] | def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None, use_representatives=False):
"""Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not seqprop or not structprop or (not chain_id):
raise ValueError('Please specify sequence, structure, and chain ID') # depends on [control=['if'], data=[]]
if self.representative_structure:
if structprop.id == self.representative_structure.id:
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '') # depends on [control=['if'], data=[]]
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id) # depends on [control=['if'], data=[]]
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if access_key not in seqprop.letter_annotations:
raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id)) # depends on [control=['if'], data=['access_key']]
chain_index_mapping = seqprop.letter_annotations[access_key]
resnum_to_chain_index = {}
for x in resnums:
ix = chain_index_mapping[x - 1] - 1
if np.isnan(ix):
log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id, chain_id, x)) # depends on [control=['if'], data=[]]
else:
resnum_to_chain_index[int(x)] = int(ix) # depends on [control=['for'], data=['x']]
return resnum_to_chain_index |
def csi_wrap(self, value, capname, *args):
    """Wrap *value* in the selected CSI escape, followed by an attribute reset."""
    # Normalize text input to UTF-8 bytes; bytes pass through unchanged.
    encoded = value.encode('utf-8') if isinstance(value, str) else value
    opening = self.csi(capname, *args)
    reset = self.csi('sgr0')
    return opening + encoded + reset
constant[Return a value wrapped in the selected CSI and does a reset.]
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
variable[value] assign[=] call[name[value].encode, parameter[constant[utf-8]]]
return[call[constant[b''].join, parameter[list[[<ast.Call object at 0x7da1b008b370>, <ast.Name object at 0x7da1b008bb50>, <ast.Call object at 0x7da1b0088040>]]]]] | keyword[def] identifier[csi_wrap] ( identifier[self] , identifier[value] , identifier[capname] ,* identifier[args] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[value] = identifier[value] . identifier[encode] ( literal[string] )
keyword[return] literal[string] . identifier[join] ([
identifier[self] . identifier[csi] ( identifier[capname] ,* identifier[args] ),
identifier[value] ,
identifier[self] . identifier[csi] ( literal[string] ),
]) | def csi_wrap(self, value, capname, *args):
"""Return a value wrapped in the selected CSI and does a reset."""
if isinstance(value, str):
value = value.encode('utf-8') # depends on [control=['if'], data=[]]
return b''.join([self.csi(capname, *args), value, self.csi('sgr0')]) |
def print_error(self, error):
    """
    Print more information about an error.
    :type error: GitError
    """
    def emit(*chunks):
        # Every line of the report goes to our stderr stream.
        print(*chunks, file=self.stderr)

    emit(colored(error.message, 'red'))
    if error.stdout or error.stderr:
        emit()
        emit("Here's what git said:")
        emit()
        if error.stdout:
            emit(error.stdout)
        if error.stderr:
            emit(error.stderr)
    if error.details:
        emit()
        emit("Here's what we know:")
        emit(str(error.details))
        emit()
constant[
Print more information about an error.
:type error: GitError
]
call[name[print], parameter[call[name[colored], parameter[name[error].message, constant[red]]]]]
if <ast.BoolOp object at 0x7da2054a6bf0> begin[:]
call[name[print], parameter[]]
call[name[print], parameter[constant[Here's what git said:]]]
call[name[print], parameter[]]
if name[error].stdout begin[:]
call[name[print], parameter[name[error].stdout]]
if name[error].stderr begin[:]
call[name[print], parameter[name[error].stderr]]
if name[error].details begin[:]
call[name[print], parameter[]]
call[name[print], parameter[constant[Here's what we know:]]]
call[name[print], parameter[call[name[str], parameter[name[error].details]]]]
call[name[print], parameter[]] | keyword[def] identifier[print_error] ( identifier[self] , identifier[error] ):
literal[string]
identifier[print] ( identifier[colored] ( identifier[error] . identifier[message] , literal[string] ), identifier[file] = identifier[self] . identifier[stderr] )
keyword[if] identifier[error] . identifier[stdout] keyword[or] identifier[error] . identifier[stderr] :
identifier[print] ( identifier[file] = identifier[self] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[self] . identifier[stderr] )
identifier[print] ( identifier[file] = identifier[self] . identifier[stderr] )
keyword[if] identifier[error] . identifier[stdout] :
identifier[print] ( identifier[error] . identifier[stdout] , identifier[file] = identifier[self] . identifier[stderr] )
keyword[if] identifier[error] . identifier[stderr] :
identifier[print] ( identifier[error] . identifier[stderr] , identifier[file] = identifier[self] . identifier[stderr] )
keyword[if] identifier[error] . identifier[details] :
identifier[print] ( identifier[file] = identifier[self] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[self] . identifier[stderr] )
identifier[print] ( identifier[str] ( identifier[error] . identifier[details] ), identifier[file] = identifier[self] . identifier[stderr] )
identifier[print] ( identifier[file] = identifier[self] . identifier[stderr] ) | def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr) # depends on [control=['if'], data=[]]
if error.stderr:
print(error.stderr, file=self.stderr) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr) # depends on [control=['if'], data=[]] |
def add_floatspin(self, setting):
    '''add a floating point spin control'''
    from wx.lib.agw.floatspin import FloatSpin
    parent_panel = self.panel(setting.tab)
    initial = setting.value
    low, high = setting.range
    spin = FloatSpin(parent_panel, -1,
                     value=initial,
                     min_val=low,
                     max_val=high,
                     increment=setting.increment)
    # Display format and precision are optional on the setting object.
    if setting.format is not None:
        spin.SetFormat(setting.format)
    if setting.digits is not None:
        spin.SetDigits(setting.digits)
    self._add_input(setting, spin, value=initial)
constant[add a floating point spin control]
from relative_module[wx.lib.agw.floatspin] import module[FloatSpin]
variable[tab] assign[=] call[name[self].panel, parameter[name[setting].tab]]
variable[default] assign[=] name[setting].value
<ast.Tuple object at 0x7da1b1629ae0> assign[=] name[setting].range
variable[ctrl] assign[=] call[name[FloatSpin], parameter[name[tab], <ast.UnaryOp object at 0x7da1b1629b70>]]
if compare[name[setting].format is_not constant[None]] begin[:]
call[name[ctrl].SetFormat, parameter[name[setting].format]]
if compare[name[setting].digits is_not constant[None]] begin[:]
call[name[ctrl].SetDigits, parameter[name[setting].digits]]
call[name[self]._add_input, parameter[name[setting], name[ctrl]]] | keyword[def] identifier[add_floatspin] ( identifier[self] , identifier[setting] ):
literal[string]
keyword[from] identifier[wx] . identifier[lib] . identifier[agw] . identifier[floatspin] keyword[import] identifier[FloatSpin]
identifier[tab] = identifier[self] . identifier[panel] ( identifier[setting] . identifier[tab] )
identifier[default] = identifier[setting] . identifier[value]
( identifier[minv] , identifier[maxv] )= identifier[setting] . identifier[range]
identifier[ctrl] = identifier[FloatSpin] ( identifier[tab] ,- literal[int] ,
identifier[value] = identifier[default] ,
identifier[min_val] = identifier[minv] ,
identifier[max_val] = identifier[maxv] ,
identifier[increment] = identifier[setting] . identifier[increment] )
keyword[if] identifier[setting] . identifier[format] keyword[is] keyword[not] keyword[None] :
identifier[ctrl] . identifier[SetFormat] ( identifier[setting] . identifier[format] )
keyword[if] identifier[setting] . identifier[digits] keyword[is] keyword[not] keyword[None] :
identifier[ctrl] . identifier[SetDigits] ( identifier[setting] . identifier[digits] )
identifier[self] . identifier[_add_input] ( identifier[setting] , identifier[ctrl] , identifier[value] = identifier[default] ) | def add_floatspin(self, setting):
"""add a floating point spin control"""
from wx.lib.agw.floatspin import FloatSpin
tab = self.panel(setting.tab)
default = setting.value
(minv, maxv) = setting.range
ctrl = FloatSpin(tab, -1, value=default, min_val=minv, max_val=maxv, increment=setting.increment)
if setting.format is not None:
ctrl.SetFormat(setting.format) # depends on [control=['if'], data=[]]
if setting.digits is not None:
ctrl.SetDigits(setting.digits) # depends on [control=['if'], data=[]]
self._add_input(setting, ctrl, value=default) |
def compile_crystal(datarow, flavor='pmg'):
    """
    Helper method for representing the MPDS crystal structures in two flavors:
    either as a Pymatgen Structure object, or as an ASE Atoms object.

    Attention #1. Disordered structures (e.g. fractional indices in the
    chemical formulae) are not supported by this method, and hence the
    occupancies are not retrieved. Currently it's up to the user to take
    care of that (see e.g. https://doi.org/10.1186/s13321-016-0129-3 etc.).

    Attention #2. Pymatgen and ASE flavors are generally not compatible, e.g.
    primitive vs. crystallographic cell is defaulted, atoms wrapped or
    non-wrapped into the unit cell etc.

    Note, that the crystal structures are not retrieved by default,
    so for them one needs to specify the following fields:
        - cell_abc
        - sg_n
        - basis_noneq
        - els_noneq
    e.g. like this: {'S':['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']}

    Args:
        datarow: (list) Required data to construct crystal structure:
            [cell_abc, sg_n, basis_noneq, els_noneq]
        flavor: (str) Either "pmg", or "ase"

    Returns:
        - if flavor is pmg, returns Pymatgen Structure object
        - if flavor is ase, returns ASE Atoms object
        - None, if the entry carries no atomic basis

    Raises:
        ValueError: if *datarow* is too short to hold the four fields above.
        APIError: if the requested *flavor* (or its backend) is unavailable.
    """
    if not datarow or not datarow[-1]:
        # This is either a P-entry with the cell data, which meets the search
        # criterion, or a 'low quality' structure with no basis (just unit
        # cell parameters).
        return None

    if len(datarow) < 4:
        raise ValueError(
            "Must supply a data row that ends with the entries "
            "'cell_abc', 'sg_n', 'basis_noneq', 'els_noneq'")

    cell_abc = datarow[-4]
    sg_n = int(datarow[-3])
    basis_noneq = datarow[-2]
    els_noneq = _massage_atsymb(datarow[-1])

    if flavor == 'pmg' and use_pmg:
        return Structure.from_spacegroup(
            sg_n,
            Lattice.from_parameters(*cell_abc),
            els_noneq,
            basis_noneq
        )

    if flavor == 'ase' and use_ase:
        # Pair each element symbol with its coordinates directly via zip,
        # instead of indexing a parallel list through enumerate.
        atom_data = [Atom(symbol, tuple(coords))
                     for symbol, coords in zip(els_noneq, basis_noneq)]
        return crystal(
            atom_data,
            spacegroup=sg_n,
            cellpar=cell_abc,
            primitive_cell=True,
            onduplicates='replace'
        )

    raise APIError("Crystal structure treatment unavailable")
constant[
Helper method for representing the MPDS crystal structures in two flavors:
either as a Pymatgen Structure object, or as an ASE Atoms object.
Attention #1. Disordered structures (e.g. fractional indices in the chemical formulae)
are not supported by this method, and hence the occupancies are not retrieved.
Currently it's up to the user to take care of that (see e.g.
https://doi.org/10.1186/s13321-016-0129-3 etc.).
Attention #2. Pymatgen and ASE flavors are generally not compatible, e.g.
primitive vs. crystallographic cell is defaulted,
atoms wrapped or non-wrapped into the unit cell etc.
Note, that the crystal structures are not retrieved by default,
so for them one needs to specify the following fields:
- cell_abc
- sg_n
- basis_noneq
- els_noneq
e.g. like this: {'S':['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']}
Args:
datarow: (list) Required data to construct crystal structure:
[cell_abc, sg_n, basis_noneq, els_noneq]
flavor: (str) Either "pmg", or "ase"
Returns:
- if flavor is pmg, returns Pymatgen Structure object
- if flavor is ase, returns ASE Atoms object
]
if <ast.BoolOp object at 0x7da1b1ec2260> begin[:]
return[constant[None]]
if compare[call[name[len], parameter[name[datarow]]] less[<] constant[4]] begin[:]
<ast.Raise object at 0x7da1b1ec2950>
<ast.Tuple object at 0x7da1b1ec1ff0> assign[=] tuple[[<ast.Subscript object at 0x7da1b1ec1ae0>, <ast.Call object at 0x7da1b1ec2200>, <ast.Subscript object at 0x7da1b1ec3880>, <ast.Call object at 0x7da1b1ec3d00>]]
if <ast.BoolOp object at 0x7da1b1ec1210> begin[:]
return[call[name[Structure].from_spacegroup, parameter[name[sg_n], call[name[Lattice].from_parameters, parameter[<ast.Starred object at 0x7da1b1ec1420>]], name[els_noneq], name[basis_noneq]]]] | keyword[def] identifier[compile_crystal] ( identifier[datarow] , identifier[flavor] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[datarow] keyword[or] keyword[not] identifier[datarow] [- literal[int] ]:
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[datarow] )< literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
identifier[cell_abc] , identifier[sg_n] , identifier[basis_noneq] , identifier[els_noneq] = identifier[datarow] [- literal[int] ], identifier[int] ( identifier[datarow] [- literal[int] ]), identifier[datarow] [- literal[int] ], identifier[_massage_atsymb] ( identifier[datarow] [- literal[int] ])
keyword[if] identifier[flavor] == literal[string] keyword[and] identifier[use_pmg] :
keyword[return] identifier[Structure] . identifier[from_spacegroup] (
identifier[sg_n] ,
identifier[Lattice] . identifier[from_parameters] (* identifier[cell_abc] ),
identifier[els_noneq] ,
identifier[basis_noneq]
)
keyword[elif] identifier[flavor] == literal[string] keyword[and] identifier[use_ase] :
identifier[atom_data] =[]
keyword[for] identifier[num] , identifier[i] keyword[in] identifier[enumerate] ( identifier[basis_noneq] ):
identifier[atom_data] . identifier[append] ( identifier[Atom] ( identifier[els_noneq] [ identifier[num] ], identifier[tuple] ( identifier[i] )))
keyword[return] identifier[crystal] (
identifier[atom_data] ,
identifier[spacegroup] = identifier[sg_n] ,
identifier[cellpar] = identifier[cell_abc] ,
identifier[primitive_cell] = keyword[True] ,
identifier[onduplicates] = literal[string]
)
keyword[else] : keyword[raise] identifier[APIError] ( literal[string] ) | def compile_crystal(datarow, flavor='pmg'):
"""
Helper method for representing the MPDS crystal structures in two flavors:
either as a Pymatgen Structure object, or as an ASE Atoms object.
Attention #1. Disordered structures (e.g. fractional indices in the chemical formulae)
are not supported by this method, and hence the occupancies are not retrieved.
Currently it's up to the user to take care of that (see e.g.
https://doi.org/10.1186/s13321-016-0129-3 etc.).
Attention #2. Pymatgen and ASE flavors are generally not compatible, e.g.
primitive vs. crystallographic cell is defaulted,
atoms wrapped or non-wrapped into the unit cell etc.
Note, that the crystal structures are not retrieved by default,
so for them one needs to specify the following fields:
- cell_abc
- sg_n
- basis_noneq
- els_noneq
e.g. like this: {'S':['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']}
Args:
datarow: (list) Required data to construct crystal structure:
[cell_abc, sg_n, basis_noneq, els_noneq]
flavor: (str) Either "pmg", or "ase"
Returns:
- if flavor is pmg, returns Pymatgen Structure object
- if flavor is ase, returns ASE Atoms object
"""
if not datarow or not datarow[-1]:
# this is either a P-entry with the cell data, which meets the search criterion,
# or a 'low quality' structure with no basis (just unit cell parameters)
return None # depends on [control=['if'], data=[]]
if len(datarow) < 4:
raise ValueError("Must supply a data row that ends with the entries 'cell_abc', 'sg_n', 'basis_noneq', 'els_noneq'") # depends on [control=['if'], data=[]]
(cell_abc, sg_n, basis_noneq, els_noneq) = (datarow[-4], int(datarow[-3]), datarow[-2], _massage_atsymb(datarow[-1]))
if flavor == 'pmg' and use_pmg:
return Structure.from_spacegroup(sg_n, Lattice.from_parameters(*cell_abc), els_noneq, basis_noneq) # depends on [control=['if'], data=[]]
elif flavor == 'ase' and use_ase:
atom_data = []
for (num, i) in enumerate(basis_noneq):
atom_data.append(Atom(els_noneq[num], tuple(i))) # depends on [control=['for'], data=[]]
return crystal(atom_data, spacegroup=sg_n, cellpar=cell_abc, primitive_cell=True, onduplicates='replace') # depends on [control=['if'], data=[]]
else:
raise APIError('Crystal structure treatment unavailable') |
def create_missing(self):
    """Customize the process of auto-generating instance attributes.
    Populate ``template_kind`` if:
    * this template is not a snippet, and
    * the ``template_kind`` instance attribute is unset.
    """
    super(ProvisioningTemplate, self).create_missing()
    # Only explicit ``snippet is False`` counts: an unset snippet (None)
    # must not trigger the default template kind.
    not_snippet = getattr(self, 'snippet', None) is False
    if not_snippet and not hasattr(self, 'template_kind'):
        self.template_kind = TemplateKind(self._server_config, id=1)
constant[Customize the process of auto-generating instance attributes.
Populate ``template_kind`` if:
* this template is not a snippet, and
* the ``template_kind`` instance attribute is unset.
]
call[call[name[super], parameter[name[ProvisioningTemplate], name[self]]].create_missing, parameter[]]
if <ast.BoolOp object at 0x7da20cabc640> begin[:]
name[self].template_kind assign[=] call[name[TemplateKind], parameter[name[self]._server_config]] | keyword[def] identifier[create_missing] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[ProvisioningTemplate] , identifier[self] ). identifier[create_missing] ()
keyword[if] ( identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[is] keyword[False] keyword[and]
keyword[not] identifier[hasattr] ( identifier[self] , literal[string] )):
identifier[self] . identifier[template_kind] = identifier[TemplateKind] ( identifier[self] . identifier[_server_config] , identifier[id] = literal[int] ) | def create_missing(self):
"""Customize the process of auto-generating instance attributes.
Populate ``template_kind`` if:
* this template is not a snippet, and
* the ``template_kind`` instance attribute is unset.
"""
super(ProvisioningTemplate, self).create_missing()
if getattr(self, 'snippet', None) is False and (not hasattr(self, 'template_kind')):
self.template_kind = TemplateKind(self._server_config, id=1) # depends on [control=['if'], data=[]] |
def average_values(self, *args, **kwargs) -> float:
    """Average the actual values of the |Variable| object.

    For 0-dimensional |Variable| objects, the result of method
    |Variable.average_values| equals |Variable.value|. The
    following example shows this for the sloppily defined class
    `SoilMoisture`:

    >>> from hydpy.core.variabletools import Variable
    >>> class SoilMoisture(Variable):
    ...     NDIM = 0
    ...     TYPE = float
    ...     refweigths = None
    ...     availablemasks = None
    ...     __hydpy__connect_variable2subgroup__ = None
    ...     initinfo = None
    >>> sm = SoilMoisture(None)
    >>> sm.value = 200.0
    >>> sm.average_values()
    200.0

    When the dimensionality of this class is increased to one,
    applying method |Variable.average_values| results in the
    following error:

    >>> SoilMoisture.NDIM = 1
    >>> import numpy
    >>> SoilMoisture.shape = (3,)
    >>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
    >>> sm.average_values()
    Traceback (most recent call last):
    ...
    AttributeError: While trying to calculate the mean value \
of variable `soilmoisture`, the following error occurred: Variable \
`soilmoisture` does not define any weighting coefficients.

    So model developers have to define another (in this case
    1-dimensional) |Variable| subclass (usually a |Parameter|
    subclass), and make the relevant object available via property
    |Variable.refweights|:

    >>> class Area(Variable):
    ...     NDIM = 1
    ...     shape = (3,)
    ...     value = numpy.array([1.0, 1.0, 2.0])
    ...     __hydpy__connect_variable2subgroup__ = None
    ...     initinfo = None
    >>> area = Area(None)
    >>> SoilMoisture.refweights = property(lambda self: area)
    >>> sm.average_values()
    400.0

    In the examples above, all single entries of `values` are relevant,
    which is the default case. However, subclasses of |Variable| can
    define an alternative mask, allowing to make some entries
    irrelevant. Assume for example, that our `SoilMoisture` object
    contains three single values, each one associated with a specific
    hydrological response unit (hru). To indicate that soil moisture
    is undefined for the third unit, (maybe because it is a water area),
    we set the third entry of the verification mask to |False|:

    >>> from hydpy.core.masktools import DefaultMask
    >>> class Soil(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([True, True, False])
    >>> SoilMoisture.mask = Soil()
    >>> sm.average_values()
    300.0

    Alternatively, method |Variable.average_values| accepts additional
    masking information as positional or keyword arguments. Therefore,
    the corresponding model must implement some alternative masks,
    which are provided by property |Variable.availablemasks|.
    We mock this property with a new |Masks| object, handling one
    mask for flat soils (only the first hru), one mask for deep soils
    (only the second hru), and one mask for water areas (only the
    third hru):

    >>> class FlatSoil(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([True, False, False])
    >>> class DeepSoil(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([False, True, False])
    >>> class Water(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([False, False, True])
    >>> from hydpy.core import masktools
    >>> class Masks(masktools.Masks):
    ...     CLASSES = (FlatSoil,
    ...                DeepSoil,
    ...                Water)
    >>> SoilMoisture.availablemasks = Masks(None)

    One can pass either the mask classes themselves or their names:

    >>> sm.average_values(sm.availablemasks.flatsoil)
    200.0
    >>> sm.average_values('deepsoil')
    400.0

    Both variants can be combined:

    >>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
    300.0

    The following error happens if the general mask of the variable
    does not contain the given masks:

    >>> sm.average_values('flatsoil', 'water')
    Traceback (most recent call last):
    ...
    ValueError: While trying to calculate the mean value of variable \
`soilmoisture`, the following error occurred: Based on the arguments \
`('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` \
has been determined, which is not a submask of `Soil([ True, True, False])`.

    Applying masks with custom options is also supported. One can change
    the behaviour of the following mask via the argument `complete`:

    >>> class AllOrNothing(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, complete):
    ...         if complete:
    ...             bools = [True, True, True]
    ...         else:
    ...             bools = [False, False, False]
    ...         return cls.array2mask(bools)
    >>> class Masks(Masks):
    ...     CLASSES = (FlatSoil,
    ...                DeepSoil,
    ...                Water,
    ...                AllOrNothing)
    >>> SoilMoisture.availablemasks = Masks(None)

    Again, one can apply the mask class directly (but note that one
    has to pass the relevant variable as the first argument.):

    >>> sm.average_values(   # doctest: +ELLIPSIS
    ...     sm.availablemasks.allornothing(sm, complete=True))
    Traceback (most recent call last):
    ...
    ValueError: While trying to...

    Alternatively, one can pass the mask name as a keyword and pack
    the mask's options into a |dict| object:

    >>> sm.average_values(allornothing={'complete': False})
    nan

    You can combine all variants explained above:

    >>> sm.average_values(
    ...     'deepsoil', flatsoil={}, allornothing={'complete': False})
    300.0
    """
    try:
        # Scalar variables need no weighting: just hand back the value.
        if not self.NDIM:
            return self.value
        # Combine the variable's general mask with any user-supplied
        # positional/keyword mask arguments (see docstring examples).
        mask = self.get_submask(*args, **kwargs)
        if numpy.any(mask):
            # Weighted arithmetic mean over the selected entries only.
            weights = self.refweights[mask]
            return numpy.sum(weights*self[mask])/numpy.sum(weights)
        # An all-False mask selects nothing, so the average is undefined.
        return numpy.nan
    except BaseException:
        # Re-raise with context naming the variable/device that failed.
        objecttools.augment_excmessage(
            f'While trying to calculate the mean value of variable '
            f'{objecttools.devicephrase(self)}')
constant[Average the actual values of the |Variable| object.
For 0-dimensional |Variable| objects, the result of method
|Variable.average_values| equals |Variable.value|. The
following example shows this for the sloppily defined class
`SoilMoisture`:
>>> from hydpy.core.variabletools import Variable
>>> class SoilMoisture(Variable):
... NDIM = 0
... TYPE = float
... refweigths = None
... availablemasks = None
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> sm = SoilMoisture(None)
>>> sm.value = 200.0
>>> sm.average_values()
200.0
When the dimensionality of this class is increased to one,
applying method |Variable.average_values| results in the
following error:
>>> SoilMoisture.NDIM = 1
>>> import numpy
>>> SoilMoisture.shape = (3,)
>>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
>>> sm.average_values()
Traceback (most recent call last):
...
AttributeError: While trying to calculate the mean value of variable `soilmoisture`, the following error occurred: Variable `soilmoisture` does not define any weighting coefficients.
So model developers have to define another (in this case
1-dimensional) |Variable| subclass (usually a |Parameter|
subclass), and make the relevant object available via property
|Variable.refweights|:
>>> class Area(Variable):
... NDIM = 1
... shape = (3,)
... value = numpy.array([1.0, 1.0, 2.0])
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> area = Area(None)
>>> SoilMoisture.refweights = property(lambda self: area)
>>> sm.average_values()
400.0
In the examples above, all single entries of `values` are relevant,
which is the default case. However, subclasses of |Variable| can
define an alternative mask, allowing to make some entries
irrelevant. Assume for example, that our `SoilMoisture` object
contains three single values, each one associated with a specific
hydrological response unit (hru). To indicate that soil moisture
is undefined for the third unit, (maybe because it is a water area),
we set the third entry of the verification mask to |False|:
>>> from hydpy.core.masktools import DefaultMask
>>> class Soil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, True, False])
>>> SoilMoisture.mask = Soil()
>>> sm.average_values()
300.0
Alternatively, method |Variable.average_values| accepts additional
masking information as positional or keyword arguments. Therefore,
the corresponding model must implement some alternative masks,
which are provided by property |Variable.availablemasks|.
We mock this property with a new |Masks| object, handling one
mask for flat soils (only the first hru), one mask for deep soils
(only the second hru), and one mask for water areas (only the
third hru):
>>> class FlatSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, False, False])
>>> class DeepSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, True, False])
>>> class Water(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, False, True])
>>> from hydpy.core import masktools
>>> class Masks(masktools.Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water)
>>> SoilMoisture.availablemasks = Masks(None)
One can pass either the mask classes themselves or their names:
>>> sm.average_values(sm.availablemasks.flatsoil)
200.0
>>> sm.average_values('deepsoil')
400.0
Both variants can be combined:
>>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
300.0
The following error happens if the general mask of the variable
does not contain the given masks:
>>> sm.average_values('flatsoil', 'water')
Traceback (most recent call last):
...
ValueError: While trying to calculate the mean value of variable `soilmoisture`, the following error occurred: Based on the arguments `('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` has been determined, which is not a submask of `Soil([ True, True, False])`.
Applying masks with custom options is also supported. One can change
the behaviour of the following mask via the argument `complete`:
>>> class AllOrNothing(DefaultMask):
... @classmethod
... def new(cls, variable, complete):
... if complete:
... bools = [True, True, True]
... else:
... bools = [False, False, False]
... return cls.array2mask(bools)
>>> class Masks(Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water,
... AllOrNothing)
>>> SoilMoisture.availablemasks = Masks(None)
Again, one can apply the mask class directly (but note that one
has to pass the relevant variable as the first argument.):
>>> sm.average_values( # doctest: +ELLIPSIS
... sm.availablemasks.allornothing(sm, complete=True))
Traceback (most recent call last):
...
ValueError: While trying to...
Alternatively, one can pass the mask name as a keyword and pack
the mask's options into a |dict| object:
>>> sm.average_values(allornothing={'complete': False})
nan
You can combine all variants explained above:
>>> sm.average_values(
... 'deepsoil', flatsoil={}, allornothing={'complete': False})
300.0
]
<ast.Try object at 0x7da18f09f490> | keyword[def] identifier[average_values] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )-> identifier[float] :
literal[string]
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[NDIM] :
keyword[return] identifier[self] . identifier[value]
identifier[mask] = identifier[self] . identifier[get_submask] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[numpy] . identifier[any] ( identifier[mask] ):
identifier[weights] = identifier[self] . identifier[refweights] [ identifier[mask] ]
keyword[return] identifier[numpy] . identifier[sum] ( identifier[weights] * identifier[self] [ identifier[mask] ])/ identifier[numpy] . identifier[sum] ( identifier[weights] )
keyword[return] identifier[numpy] . identifier[nan]
keyword[except] identifier[BaseException] :
identifier[objecttools] . identifier[augment_excmessage] (
literal[string]
literal[string] ) | def average_values(self, *args, **kwargs) -> float:
"""Average the actual values of the |Variable| object.
For 0-dimensional |Variable| objects, the result of method
|Variable.average_values| equals |Variable.value|. The
following example shows this for the sloppily defined class
`SoilMoisture`:
>>> from hydpy.core.variabletools import Variable
>>> class SoilMoisture(Variable):
... NDIM = 0
... TYPE = float
... refweigths = None
... availablemasks = None
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> sm = SoilMoisture(None)
>>> sm.value = 200.0
>>> sm.average_values()
200.0
When the dimensionality of this class is increased to one,
applying method |Variable.average_values| results in the
following error:
>>> SoilMoisture.NDIM = 1
>>> import numpy
>>> SoilMoisture.shape = (3,)
>>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
>>> sm.average_values()
Traceback (most recent call last):
...
AttributeError: While trying to calculate the mean value of variable `soilmoisture`, the following error occurred: Variable `soilmoisture` does not define any weighting coefficients.
So model developers have to define another (in this case
1-dimensional) |Variable| subclass (usually a |Parameter|
subclass), and make the relevant object available via property
|Variable.refweights|:
>>> class Area(Variable):
... NDIM = 1
... shape = (3,)
... value = numpy.array([1.0, 1.0, 2.0])
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> area = Area(None)
>>> SoilMoisture.refweights = property(lambda self: area)
>>> sm.average_values()
400.0
In the examples above, all single entries of `values` are relevant,
which is the default case. However, subclasses of |Variable| can
define an alternative mask, allowing to make some entries
irrelevant. Assume for example, that our `SoilMoisture` object
contains three single values, each one associated with a specific
hydrological response unit (hru). To indicate that soil moisture
is undefined for the third unit, (maybe because it is a water area),
we set the third entry of the verification mask to |False|:
>>> from hydpy.core.masktools import DefaultMask
>>> class Soil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, True, False])
>>> SoilMoisture.mask = Soil()
>>> sm.average_values()
300.0
Alternatively, method |Variable.average_values| accepts additional
masking information as positional or keyword arguments. Therefore,
the corresponding model must implement some alternative masks,
which are provided by property |Variable.availablemasks|.
We mock this property with a new |Masks| object, handling one
mask for flat soils (only the first hru), one mask for deep soils
(only the second hru), and one mask for water areas (only the
third hru):
>>> class FlatSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, False, False])
>>> class DeepSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, True, False])
>>> class Water(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, False, True])
>>> from hydpy.core import masktools
>>> class Masks(masktools.Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water)
>>> SoilMoisture.availablemasks = Masks(None)
One can pass either the mask classes themselves or their names:
>>> sm.average_values(sm.availablemasks.flatsoil)
200.0
>>> sm.average_values('deepsoil')
400.0
Both variants can be combined:
>>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
300.0
The following error happens if the general mask of the variable
does not contain the given masks:
>>> sm.average_values('flatsoil', 'water')
Traceback (most recent call last):
...
ValueError: While trying to calculate the mean value of variable `soilmoisture`, the following error occurred: Based on the arguments `('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` has been determined, which is not a submask of `Soil([ True, True, False])`.
Applying masks with custom options is also supported. One can change
the behaviour of the following mask via the argument `complete`:
>>> class AllOrNothing(DefaultMask):
... @classmethod
... def new(cls, variable, complete):
... if complete:
... bools = [True, True, True]
... else:
... bools = [False, False, False]
... return cls.array2mask(bools)
>>> class Masks(Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water,
... AllOrNothing)
>>> SoilMoisture.availablemasks = Masks(None)
Again, one can apply the mask class directly (but note that one
has to pass the relevant variable as the first argument.):
>>> sm.average_values( # doctest: +ELLIPSIS
... sm.availablemasks.allornothing(sm, complete=True))
Traceback (most recent call last):
...
ValueError: While trying to...
Alternatively, one can pass the mask name as a keyword and pack
the mask's options into a |dict| object:
>>> sm.average_values(allornothing={'complete': False})
nan
You can combine all variants explained above:
>>> sm.average_values(
... 'deepsoil', flatsoil={}, allornothing={'complete': False})
300.0
"""
try:
if not self.NDIM:
return self.value # depends on [control=['if'], data=[]]
mask = self.get_submask(*args, **kwargs)
if numpy.any(mask):
weights = self.refweights[mask]
return numpy.sum(weights * self[mask]) / numpy.sum(weights) # depends on [control=['if'], data=[]]
return numpy.nan # depends on [control=['try'], data=[]]
except BaseException:
objecttools.augment_excmessage(f'While trying to calculate the mean value of variable {objecttools.devicephrase(self)}') # depends on [control=['except'], data=[]] |
def on_lstUnits_itemSelectionChanged(self):
    """Refresh the unit description label and re-enable navigation.

    .. note:: This is an automatic Qt slot
        executed when the unit selection changes.
    """
    # Drop anything derived from the previous selection before reacting.
    self.clear_further_steps()
    selection = self.selected_unit()
    if not selection:
        # No unit is selected yet, so there is nothing to describe.
        return
    # Show the chosen unit's description and let the user move on.
    self.lblDescribeUnit.setText(selection['description'])
    self.parent.pbnNext.setEnabled(True)
constant[Update unit description label and field widgets.
.. note:: This is an automatic Qt slot
executed when the unit selection changes.
]
call[name[self].clear_further_steps, parameter[]]
variable[unit] assign[=] call[name[self].selected_unit, parameter[]]
if <ast.UnaryOp object at 0x7da1b0efb730> begin[:]
return[None]
call[name[self].lblDescribeUnit.setText, parameter[call[name[unit]][constant[description]]]]
call[name[self].parent.pbnNext.setEnabled, parameter[constant[True]]] | keyword[def] identifier[on_lstUnits_itemSelectionChanged] ( identifier[self] ):
literal[string]
identifier[self] . identifier[clear_further_steps] ()
identifier[unit] = identifier[self] . identifier[selected_unit] ()
keyword[if] keyword[not] identifier[unit] :
keyword[return]
identifier[self] . identifier[lblDescribeUnit] . identifier[setText] ( identifier[unit] [ literal[string] ])
identifier[self] . identifier[parent] . identifier[pbnNext] . identifier[setEnabled] ( keyword[True] ) | def on_lstUnits_itemSelectionChanged(self):
"""Update unit description label and field widgets.
.. note:: This is an automatic Qt slot
executed when the unit selection changes.
"""
self.clear_further_steps()
# Set widgets
unit = self.selected_unit()
# Exit if no selection
if not unit:
return # depends on [control=['if'], data=[]]
self.lblDescribeUnit.setText(unit['description'])
# Enable the next button
self.parent.pbnNext.setEnabled(True) |
def learning_curve(model, X, y, ax=None, groups=None,
                   train_sizes=DEFAULT_TRAIN_SIZES, cv=None, scoring=None,
                   exploit_incremental_learning=False, n_jobs=1,
                   pre_dispatch="all", shuffle=False, random_state=None,
                   **kwargs):
    """Quick-method wrapper around the ``LearningCurve`` visualizer.

    Plots training and cross-validation scores against the number of
    training samples, showing how the model learns with experience.
    Equivalent to instantiating ``LearningCurve``, fitting it on
    ``X``/``y``, and calling ``poof()`` in a single step.

    Parameters
    ----------
    model : a scikit-learn estimator
        An object implementing ``fit`` and ``predict`` (classifier,
        regressor, or clusterer) with a valid associated scoring metric.
        The object is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector: n_samples samples by n_features features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ax : matplotlib.Axes object, optional
        The axes object to plot the figure on.
    groups : array-like, with shape (n_samples,)
        Optional group labels for the samples used while splitting the
        dataset into train/test sets.
    train_sizes : array-like, shape (n_ticks,)
        default: ``np.linspace(0.1,1.0,5)``
        Relative (float, as a fraction of the maximum training-set size)
        or absolute (int) numbers of training examples used to generate
        the learning curve.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy: None for the default 3-fold
        CV, an integer number of folds, a CV generator object, or an
        iterable yielding train/test splits. See the scikit-learn
        `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
        for the possible strategies.
    scoring : string, callable or None, optional, default: None
        A string or scorer callable with signature
        ``scorer(estimator, X, y)``. See scikit-learn model evaluation
        documentation for names of possible metrics.
    exploit_incremental_learning : boolean, default: False
        If the estimator supports incremental learning, use it to speed
        up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all); can reduce allocated memory. May be an expression such as
        '2*n_jobs'.
    shuffle : boolean, optional
        Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None (use ``np.random``) for the
        random number generator. Used when ``shuffle`` is True.
    kwargs : dict
        Keyword arguments passed through to ``poof()``, e.g. a path to
        save the figure to.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes that the learning curve were drawn on.
    """
    # Build the visualizer with every option forwarded explicitly.
    visualizer = LearningCurve(
        model,
        ax=ax,
        groups=groups,
        train_sizes=train_sizes,
        cv=cv,
        scoring=scoring,
        exploit_incremental_learning=exploit_incremental_learning,
        n_jobs=n_jobs,
        pre_dispatch=pre_dispatch,
        shuffle=shuffle,
        random_state=random_state,
    )
    # Fit, render, and hand back the axes for further customization.
    visualizer.fit(X, y)
    visualizer.poof(**kwargs)
    return visualizer.ax
constant[
Displays a learning curve based on number of samples vs training and
cross validation scores. The learning curve aims to show how a model
learns and improves with experience.
This helper function is a quick wrapper to utilize the LearningCurve
for one-off analysis.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
train_sizes : array-like, shape (n_ticks,)
default: ``np.linspace(0.1,1.0,5)``
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as
a fraction of the maximum size of the training set, otherwise it is
interpreted as absolute sizes of the training sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
exploit_incremental_learning : boolean, default: False
If the estimator supports incremental learning, this will be used to
speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
based on``train_sizes``.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` is True.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `poof()` method, e.g. can pass a path to save the
figure to.
Returns
-------
ax : matplotlib axes
Returns the axes that the learning curve were drawn on.
]
variable[oz] assign[=] call[name[LearningCurve], parameter[name[model]]]
call[name[oz].fit, parameter[name[X], name[y]]]
call[name[oz].poof, parameter[]]
return[name[oz].ax] | keyword[def] identifier[learning_curve] ( identifier[model] , identifier[X] , identifier[y] , identifier[ax] = keyword[None] , identifier[groups] = keyword[None] ,
identifier[train_sizes] = identifier[DEFAULT_TRAIN_SIZES] , identifier[cv] = keyword[None] , identifier[scoring] = keyword[None] ,
identifier[exploit_incremental_learning] = keyword[False] , identifier[n_jobs] = literal[int] ,
identifier[pre_dispatch] = literal[string] , identifier[shuffle] = keyword[False] , identifier[random_state] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[oz] = identifier[LearningCurve] (
identifier[model] , identifier[ax] = identifier[ax] , identifier[groups] = identifier[groups] , identifier[train_sizes] = identifier[train_sizes] , identifier[cv] = identifier[cv] ,
identifier[scoring] = identifier[scoring] , identifier[n_jobs] = identifier[n_jobs] , identifier[pre_dispatch] = identifier[pre_dispatch] ,
identifier[shuffle] = identifier[shuffle] , identifier[random_state] = identifier[random_state] ,
identifier[exploit_incremental_learning] = identifier[exploit_incremental_learning] ,
)
identifier[oz] . identifier[fit] ( identifier[X] , identifier[y] )
identifier[oz] . identifier[poof] (** identifier[kwargs] )
keyword[return] identifier[oz] . identifier[ax] | def learning_curve(model, X, y, ax=None, groups=None, train_sizes=DEFAULT_TRAIN_SIZES, cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch='all', shuffle=False, random_state=None, **kwargs):
"""
Displays a learning curve based on number of samples vs training and
cross validation scores. The learning curve aims to show how a model
learns and improves with experience.
This helper function is a quick wrapper to utilize the LearningCurve
for one-off analysis.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
train_sizes : array-like, shape (n_ticks,)
default: ``np.linspace(0.1,1.0,5)``
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as
a fraction of the maximum size of the training set, otherwise it is
interpreted as absolute sizes of the training sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
exploit_incremental_learning : boolean, default: False
If the estimator supports incremental learning, this will be used to
speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
based on``train_sizes``.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` is True.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `poof()` method, e.g. can pass a path to save the
figure to.
Returns
-------
ax : matplotlib axes
Returns the axes that the learning curve were drawn on.
"""
# Initialize the visualizer
oz = LearningCurve(model, ax=ax, groups=groups, train_sizes=train_sizes, cv=cv, scoring=scoring, n_jobs=n_jobs, pre_dispatch=pre_dispatch, shuffle=shuffle, random_state=random_state, exploit_incremental_learning=exploit_incremental_learning)
# Fit and poof the visualizer
oz.fit(X, y)
oz.poof(**kwargs)
return oz.ax |
def import_csv(csv_file, **kwargs):
    """Load *csv_file* and verify that all required columns are present.

    Extra keyword arguments are forwarded to ``get_imported_data``.
    """
    imported = get_imported_data(csv_file, **kwargs)
    # Check the mandatory columns before handing the records back.
    _check_required_columns(csv_file, imported.results)
    return imported
constant[Imports data and checks that all required columns are there.]
variable[records] assign[=] call[name[get_imported_data], parameter[name[csv_file]]]
call[name[_check_required_columns], parameter[name[csv_file], name[records].results]]
return[name[records]] | keyword[def] identifier[import_csv] ( identifier[csv_file] ,** identifier[kwargs] ):
literal[string]
identifier[records] = identifier[get_imported_data] ( identifier[csv_file] ,** identifier[kwargs] )
identifier[_check_required_columns] ( identifier[csv_file] , identifier[records] . identifier[results] )
keyword[return] identifier[records] | def import_csv(csv_file, **kwargs):
"""Imports data and checks that all required columns are there."""
records = get_imported_data(csv_file, **kwargs)
_check_required_columns(csv_file, records.results)
return records |
def fillna(self, method="forward", axis=0, maxlen=1):
    """Return a new frame with NAs filled along an axis in one direction.

    :param method: fill direction, ``"forward"`` or ``"backward"``
    :param axis: 0 for columnar-wise or 1 for row-wise fill
    :param maxlen: maximum number of consecutive NAs to fill
    :return: a new frame with the fill applied
    """
    # Validate argument types before constructing the expression.
    assert_is_type(axis, 0, 1)
    assert_is_type(method, str)
    assert_is_type(maxlen, int)
    fill_expr = ExprNode("h2o.fillna", self, method, axis, maxlen)
    return H2OFrame._expr(expr=fill_expr)
constant[
Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length
:param method: ``"forward"`` or ``"backward"``
:param axis: 0 for columnar-wise or 1 for row-wise fill
:param maxlen: Max number of consecutive NA's to fill
:return:
]
call[name[assert_is_type], parameter[name[axis], constant[0], constant[1]]]
call[name[assert_is_type], parameter[name[method], name[str]]]
call[name[assert_is_type], parameter[name[maxlen], name[int]]]
return[call[name[H2OFrame]._expr, parameter[]]] | keyword[def] identifier[fillna] ( identifier[self] , identifier[method] = literal[string] , identifier[axis] = literal[int] , identifier[maxlen] = literal[int] ):
literal[string]
identifier[assert_is_type] ( identifier[axis] , literal[int] , literal[int] )
identifier[assert_is_type] ( identifier[method] , identifier[str] )
identifier[assert_is_type] ( identifier[maxlen] , identifier[int] )
keyword[return] identifier[H2OFrame] . identifier[_expr] ( identifier[expr] = identifier[ExprNode] ( literal[string] , identifier[self] , identifier[method] , identifier[axis] , identifier[maxlen] )) | def fillna(self, method='forward', axis=0, maxlen=1):
"""
Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length
:param method: ``"forward"`` or ``"backward"``
:param axis: 0 for columnar-wise or 1 for row-wise fill
:param maxlen: Max number of consecutive NA's to fill
:return:
"""
assert_is_type(axis, 0, 1)
assert_is_type(method, str)
assert_is_type(maxlen, int)
return H2OFrame._expr(expr=ExprNode('h2o.fillna', self, method, axis, maxlen)) |
def build(self, callable: Callable) -> Callable[..., CaptureResult]:
    """
    Wrap the given callable so that calling it records the configured
    information (return value, and optionally exceptions/stdout/stderr).

    :param callable: the callable to capture information from
    :return: the wrapped callable
    """
    def innermost(*args, **kwargs) -> CaptureResult:
        return CaptureResult(return_value=callable(*args, **kwargs))

    wrapped = innermost
    # Exceptions are wrapped first (innermost) — otherwise the other
    # captured, non-return information would be lost when a raise occurs.
    if self.capture_exceptions:
        exception_wrapper = CaptureWrapBuilder._create_capture_exceptions(self.capture_exceptions)
        wrapped = CaptureWrapBuilder._wrap(wrapped, exception_wrapper)
    if self.capture_stdout:
        wrapped = CaptureWrapBuilder._wrap(wrapped, CaptureWrapBuilder._capture_stdout)
    if self.capture_stderr:
        wrapped = CaptureWrapBuilder._wrap(wrapped, CaptureWrapBuilder._capture_stderr)
    return wrapped
constant[
Build a method that captures the required information when the given callable is called.
:param callable: the callable to capture information from
:return: the wrapped callable
]
def function[capturing_callable, parameter[]]:
return[call[name[CaptureResult], parameter[]]]
if name[self].capture_exceptions begin[:]
variable[exceptions_wrapper] assign[=] call[name[CaptureWrapBuilder]._create_capture_exceptions, parameter[name[self].capture_exceptions]]
variable[capturing_callable] assign[=] call[name[CaptureWrapBuilder]._wrap, parameter[name[capturing_callable], name[exceptions_wrapper]]]
if name[self].capture_stdout begin[:]
variable[capturing_callable] assign[=] call[name[CaptureWrapBuilder]._wrap, parameter[name[capturing_callable], name[CaptureWrapBuilder]._capture_stdout]]
if name[self].capture_stderr begin[:]
variable[capturing_callable] assign[=] call[name[CaptureWrapBuilder]._wrap, parameter[name[capturing_callable], name[CaptureWrapBuilder]._capture_stderr]]
return[name[capturing_callable]] | keyword[def] identifier[build] ( identifier[self] , identifier[callable] : identifier[Callable] )-> identifier[Callable] [..., identifier[CaptureResult] ]:
literal[string]
keyword[def] identifier[capturing_callable] (* identifier[args] ,** identifier[kwargs] )-> identifier[CaptureResult] :
keyword[return] identifier[CaptureResult] ( identifier[return_value] = identifier[callable] (* identifier[args] ,** identifier[kwargs] ))
keyword[if] identifier[self] . identifier[capture_exceptions] :
identifier[exceptions_wrapper] = identifier[CaptureWrapBuilder] . identifier[_create_capture_exceptions] ( identifier[self] . identifier[capture_exceptions] )
identifier[capturing_callable] = identifier[CaptureWrapBuilder] . identifier[_wrap] ( identifier[capturing_callable] , identifier[exceptions_wrapper] )
keyword[if] identifier[self] . identifier[capture_stdout] :
identifier[capturing_callable] = identifier[CaptureWrapBuilder] . identifier[_wrap] ( identifier[capturing_callable] , identifier[CaptureWrapBuilder] . identifier[_capture_stdout] )
keyword[if] identifier[self] . identifier[capture_stderr] :
identifier[capturing_callable] = identifier[CaptureWrapBuilder] . identifier[_wrap] ( identifier[capturing_callable] , identifier[CaptureWrapBuilder] . identifier[_capture_stderr] )
keyword[return] identifier[capturing_callable] | def build(self, callable: Callable) -> Callable[..., CaptureResult]:
"""
Build a method that captures the required information when the given callable is called.
:param callable: the callable to capture information from
:return: the wrapped callable
"""
def capturing_callable(*args, **kwargs) -> CaptureResult:
return CaptureResult(return_value=callable(*args, **kwargs))
if self.capture_exceptions:
# Need to capture exceptions first else other (non-return) captured information will be lost
exceptions_wrapper = CaptureWrapBuilder._create_capture_exceptions(self.capture_exceptions)
capturing_callable = CaptureWrapBuilder._wrap(capturing_callable, exceptions_wrapper) # depends on [control=['if'], data=[]]
if self.capture_stdout:
capturing_callable = CaptureWrapBuilder._wrap(capturing_callable, CaptureWrapBuilder._capture_stdout) # depends on [control=['if'], data=[]]
if self.capture_stderr:
capturing_callable = CaptureWrapBuilder._wrap(capturing_callable, CaptureWrapBuilder._capture_stderr) # depends on [control=['if'], data=[]]
return capturing_callable |
def unflatten(processed, merge_rules):
    """
    Unflattens a processed object into a JSON object.

    ``processed`` maps key paths — tuples whose parts are either plain
    dict keys or ``IdValue`` markers identifying array items — to leaf
    values. Each path is replayed in order, creating intermediate
    ``OrderedDict``s and lists as needed; leaves whose value is ``None``
    are omitted from the result.

    :param processed: mapping from key-path tuples to leaf values
    :param merge_rules: unused in this function — NOTE(review):
        presumably accepted for signature parity with related merge
        helpers; confirm before removing
    :returns: the rebuilt nested object (an ``OrderedDict`` at the root)
    """
    unflattened = OrderedDict()
    for key in processed:
        current_node = unflattened
        for end, part in enumerate(key, 1):
            # If this is a path to an item of an array.
            # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#identifier-merge
            if isinstance(part, IdValue):
                # If the `id` of an object in the array matches, change into it.
                for node in current_node:
                    if isinstance(node, IdDict) and node.identifier == part.identifier:
                        current_node = node
                        break
                # Otherwise, append a new object, and change into it.
                # (The `else` belongs to the `for`: it runs only when no
                # matching identifier was found above.)
                else:
                    new_node = IdDict()
                    new_node.identifier = part.identifier
                    # If the original object had an `id` value, set it.
                    if part.original_value is not None:
                        new_node['id'] = part.original_value
                    current_node.append(new_node)
                    current_node = new_node
                continue
            # Otherwise, this is a path to a property of an object.
            node = current_node.get(part)
            # If this is a path to a node we visited before, change into it. If it's an `id` field, it's already been
            # set to its original value.
            if node is not None:
                current_node = node
                continue
            # If this is a full path, copy the data.
            if len(key) == end:
                # Omit null'ed fields.
                if processed[key] is not None:
                    current_node[part] = processed[key]
                continue
            # If the path is to a new array, start a new array, and change into it.
            if isinstance(key[end], IdValue):
                new_node = []
            # If the path is to a new object, start a new object, and change into it.
            else:
                new_node = OrderedDict()
            current_node[part] = new_node
            current_node = new_node
    return unflattened
return unflattened | def function[unflatten, parameter[processed, merge_rules]]:
constant[
Unflattens a processed object into a JSON object.
]
variable[unflattened] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[key]] in starred[name[processed]] begin[:]
variable[current_node] assign[=] name[unflattened]
for taget[tuple[[<ast.Name object at 0x7da20e9b33a0>, <ast.Name object at 0x7da20e9b1e70>]]] in starred[call[name[enumerate], parameter[name[key], constant[1]]]] begin[:]
if call[name[isinstance], parameter[name[part], name[IdValue]]] begin[:]
for taget[name[node]] in starred[name[current_node]] begin[:]
if <ast.BoolOp object at 0x7da20e9b1690> begin[:]
variable[current_node] assign[=] name[node]
break
continue
variable[node] assign[=] call[name[current_node].get, parameter[name[part]]]
if compare[name[node] is_not constant[None]] begin[:]
variable[current_node] assign[=] name[node]
continue
if compare[call[name[len], parameter[name[key]]] equal[==] name[end]] begin[:]
if compare[call[name[processed]][name[key]] is_not constant[None]] begin[:]
call[name[current_node]][name[part]] assign[=] call[name[processed]][name[key]]
continue
if call[name[isinstance], parameter[call[name[key]][name[end]], name[IdValue]]] begin[:]
variable[new_node] assign[=] list[[]]
call[name[current_node]][name[part]] assign[=] name[new_node]
variable[current_node] assign[=] name[new_node]
return[name[unflattened]] | keyword[def] identifier[unflatten] ( identifier[processed] , identifier[merge_rules] ):
literal[string]
identifier[unflattened] = identifier[OrderedDict] ()
keyword[for] identifier[key] keyword[in] identifier[processed] :
identifier[current_node] = identifier[unflattened]
keyword[for] identifier[end] , identifier[part] keyword[in] identifier[enumerate] ( identifier[key] , literal[int] ):
keyword[if] identifier[isinstance] ( identifier[part] , identifier[IdValue] ):
keyword[for] identifier[node] keyword[in] identifier[current_node] :
keyword[if] identifier[isinstance] ( identifier[node] , identifier[IdDict] ) keyword[and] identifier[node] . identifier[identifier] == identifier[part] . identifier[identifier] :
identifier[current_node] = identifier[node]
keyword[break]
keyword[else] :
identifier[new_node] = identifier[IdDict] ()
identifier[new_node] . identifier[identifier] = identifier[part] . identifier[identifier]
keyword[if] identifier[part] . identifier[original_value] keyword[is] keyword[not] keyword[None] :
identifier[new_node] [ literal[string] ]= identifier[part] . identifier[original_value]
identifier[current_node] . identifier[append] ( identifier[new_node] )
identifier[current_node] = identifier[new_node]
keyword[continue]
identifier[node] = identifier[current_node] . identifier[get] ( identifier[part] )
keyword[if] identifier[node] keyword[is] keyword[not] keyword[None] :
identifier[current_node] = identifier[node]
keyword[continue]
keyword[if] identifier[len] ( identifier[key] )== identifier[end] :
keyword[if] identifier[processed] [ identifier[key] ] keyword[is] keyword[not] keyword[None] :
identifier[current_node] [ identifier[part] ]= identifier[processed] [ identifier[key] ]
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[key] [ identifier[end] ], identifier[IdValue] ):
identifier[new_node] =[]
keyword[else] :
identifier[new_node] = identifier[OrderedDict] ()
identifier[current_node] [ identifier[part] ]= identifier[new_node]
identifier[current_node] = identifier[new_node]
keyword[return] identifier[unflattened] | def unflatten(processed, merge_rules):
"""
Unflattens a processed object into a JSON object.
"""
unflattened = OrderedDict()
for key in processed:
current_node = unflattened
for (end, part) in enumerate(key, 1):
# If this is a path to an item of an array.
# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#identifier-merge
if isinstance(part, IdValue):
# If the `id` of an object in the array matches, change into it.
for node in current_node:
if isinstance(node, IdDict) and node.identifier == part.identifier:
current_node = node
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
else:
# Otherwise, append a new object, and change into it.
new_node = IdDict()
new_node.identifier = part.identifier
# If the original object had an `id` value, set it.
if part.original_value is not None:
new_node['id'] = part.original_value # depends on [control=['if'], data=[]]
current_node.append(new_node)
current_node = new_node
continue # depends on [control=['if'], data=[]]
# Otherwise, this is a path to a property of an object.
node = current_node.get(part)
# If this is a path to a node we visited before, change into it. If it's an `id` field, it's already been
# set to its original value.
if node is not None:
current_node = node
continue # depends on [control=['if'], data=['node']]
# If this is a full path, copy the data.
if len(key) == end:
# Omit null'ed fields.
if processed[key] is not None:
current_node[part] = processed[key] # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
# If the path is to a new array, start a new array, and change into it.
if isinstance(key[end], IdValue):
new_node = [] # depends on [control=['if'], data=[]]
else:
# If the path is to a new object, start a new object, and change into it.
new_node = OrderedDict()
current_node[part] = new_node
current_node = new_node # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['key']]
return unflattened |
def start(self):
    """Launch the server if it is not already running, then report its URL."""
    lines = []
    if self.running():
        lines.append('Server already started')
    else:
        if self._port == 0:
            # No port chosen yet; pick a free one.
            self._port = _port_not_in_use()
        self._process = start_server_background(self._port)
    lines.append('Server running at {}'.format(self.url()))
    print('\n'.join(lines))
constant[Start server if not previously started.]
variable[msg] assign[=] constant[]
if <ast.UnaryOp object at 0x7da1b1454c10> begin[:]
if compare[name[self]._port equal[==] constant[0]] begin[:]
name[self]._port assign[=] call[name[_port_not_in_use], parameter[]]
name[self]._process assign[=] call[name[start_server_background], parameter[name[self]._port]]
<ast.AugAssign object at 0x7da1b14c42e0>
call[name[print], parameter[name[msg]]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[msg] = literal[string]
keyword[if] keyword[not] identifier[self] . identifier[running] ():
keyword[if] identifier[self] . identifier[_port] == literal[int] :
identifier[self] . identifier[_port] = identifier[_port_not_in_use] ()
identifier[self] . identifier[_process] = identifier[start_server_background] ( identifier[self] . identifier[_port] )
keyword[else] :
identifier[msg] = literal[string]
identifier[msg] += literal[string] . identifier[format] ( identifier[self] . identifier[url] ())
identifier[print] ( identifier[msg] ) | def start(self):
"""Start server if not previously started."""
msg = ''
if not self.running():
if self._port == 0:
self._port = _port_not_in_use() # depends on [control=['if'], data=[]]
self._process = start_server_background(self._port) # depends on [control=['if'], data=[]]
else:
msg = 'Server already started\n'
msg += 'Server running at {}'.format(self.url())
print(msg) |
def sync_repo(self, repo_name=None, envs=None, query='/repositories/'):
    """
    Schedule a sync of the repository in the specified environments.

    :param str repo_name: name of the repository to sync
    :param list envs: environments to sync in (defaults to no environments)
    :param str query: base API path for repository resources
    :return: True once every environment accepted the sync request
    :raises: HTTP error via ``raise_for_status`` if any POST is rejected
    """
    # Avoid the shared mutable-default pitfall: use None as the sentinel
    # and create a fresh list per call.
    if envs is None:
        envs = []
    juicer.utils.Log.log_debug(
        "Sync Repo %s In: %s" % (repo_name, ",".join(envs)))
    data = {
        'override_config': {
            'verify_checksum': 'true',
            'verify_size': 'true'
        },
    }
    for env in envs:
        url = "%s%s-%s/actions/sync/" % (query, repo_name, env)
        juicer.utils.Log.log_info("%s:", env)
        _r = self.connectors[env].post(url, data)
        if _r.status_code == Constants.PULP_POST_ACCEPTED:
            juicer.utils.Log.log_info("`%s` sync scheduled" % repo_name)
        else:
            # Surface the HTTP failure rather than silently continuing.
            _r.raise_for_status()
    return True
constant[
Sync repository in specified environments
]
call[name[juicer].utils.Log.log_debug, parameter[binary_operation[constant[Sync Repo %s In: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f58d7e0>, <ast.Call object at 0x7da18f58ca60>]]]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18f58e140>], [<ast.Dict object at 0x7da18f58dc30>]]
for taget[name[env]] in starred[name[envs]] begin[:]
variable[url] assign[=] binary_operation[constant[%s%s-%s/actions/sync/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f58d570>, <ast.Name object at 0x7da18f58d3f0>, <ast.Name object at 0x7da18f58da20>]]]
call[name[juicer].utils.Log.log_info, parameter[constant[%s:], name[env]]]
variable[_r] assign[=] call[call[name[self].connectors][name[env]].post, parameter[name[url], name[data]]]
if compare[name[_r].status_code equal[==] name[Constants].PULP_POST_ACCEPTED] begin[:]
call[name[juicer].utils.Log.log_info, parameter[binary_operation[constant[`%s` sync scheduled] <ast.Mod object at 0x7da2590d6920> name[repo_name]]]]
return[constant[True]] | keyword[def] identifier[sync_repo] ( identifier[self] , identifier[repo_name] = keyword[None] , identifier[envs] =[], identifier[query] = literal[string] ):
literal[string]
identifier[juicer] . identifier[utils] . identifier[Log] . identifier[log_debug] (
literal[string] %( identifier[repo_name] , literal[string] . identifier[join] ( identifier[envs] )))
identifier[data] ={
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string]
},
}
keyword[for] identifier[env] keyword[in] identifier[envs] :
identifier[url] = literal[string] %( identifier[query] , identifier[repo_name] , identifier[env] )
identifier[juicer] . identifier[utils] . identifier[Log] . identifier[log_info] ( literal[string] , identifier[env] )
identifier[_r] = identifier[self] . identifier[connectors] [ identifier[env] ]. identifier[post] ( identifier[url] , identifier[data] )
keyword[if] identifier[_r] . identifier[status_code] == identifier[Constants] . identifier[PULP_POST_ACCEPTED] :
identifier[juicer] . identifier[utils] . identifier[Log] . identifier[log_info] ( literal[string] % identifier[repo_name] )
keyword[else] :
identifier[_r] . identifier[raise_for_status] ()
keyword[return] keyword[True] | def sync_repo(self, repo_name=None, envs=[], query='/repositories/'):
"""
Sync repository in specified environments
"""
juicer.utils.Log.log_debug('Sync Repo %s In: %s' % (repo_name, ','.join(envs)))
data = {'override_config': {'verify_checksum': 'true', 'verify_size': 'true'}}
for env in envs:
url = '%s%s-%s/actions/sync/' % (query, repo_name, env)
juicer.utils.Log.log_info('%s:', env)
_r = self.connectors[env].post(url, data)
if _r.status_code == Constants.PULP_POST_ACCEPTED:
juicer.utils.Log.log_info('`%s` sync scheduled' % repo_name) # depends on [control=['if'], data=[]]
else:
_r.raise_for_status() # depends on [control=['for'], data=['env']]
return True |
def _set_status(self, status, result=None):
    """Log and apply a status transition for this operation.

    :param str status: New status
    :param cdumay_result.Result result: Execution result
    """
    # Build the log line and the structured kafka-style message first so
    # the logger.info call stays readable.
    transition = "{}.SetStatus: {}[{}] status update '{}' -> '{}'".format(
        self.__class__.__name__, self.__class__.path, self.uuid,
        self.status, status
    )
    kmsg = Message(
        self.uuid, entrypoint=self.__class__.path, params=self.params
    ).dump()
    logger.info(transition, extra=dict(kmsg=kmsg))
    return self.set_status(status, result)
constant[ update operation status
:param str status: New status
:param cdumay_result.Result result: Execution result
]
call[name[logger].info, parameter[call[constant[{}.SetStatus: {}[{}] status update '{}' -> '{}'].format, parameter[name[self].__class__.__name__, name[self].__class__.path, name[self].uuid, name[self].status, name[status]]]]]
return[call[name[self].set_status, parameter[name[status], name[result]]]] | keyword[def] identifier[_set_status] ( identifier[self] , identifier[status] , identifier[result] = keyword[None] ):
literal[string]
identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[self] . identifier[__class__] . identifier[__name__] , identifier[self] . identifier[__class__] . identifier[path] , identifier[self] . identifier[uuid] ,
identifier[self] . identifier[status] , identifier[status]
),
identifier[extra] = identifier[dict] (
identifier[kmsg] = identifier[Message] (
identifier[self] . identifier[uuid] , identifier[entrypoint] = identifier[self] . identifier[__class__] . identifier[path] ,
identifier[params] = identifier[self] . identifier[params]
). identifier[dump] ()
)
)
keyword[return] identifier[self] . identifier[set_status] ( identifier[status] , identifier[result] ) | def _set_status(self, status, result=None):
""" update operation status
:param str status: New status
:param cdumay_result.Result result: Execution result
"""
logger.info("{}.SetStatus: {}[{}] status update '{}' -> '{}'".format(self.__class__.__name__, self.__class__.path, self.uuid, self.status, status), extra=dict(kmsg=Message(self.uuid, entrypoint=self.__class__.path, params=self.params).dump()))
return self.set_status(status, result) |
def itervalues(d, **kw):
    """Iterate over a mapping's values, portably across Python 2 and 3."""
    if PY2:
        return d.itervalues(**kw)
    return iter(d.values(**kw))
constant[Return an iterator over the values of a dictionary.]
if <ast.UnaryOp object at 0x7da1b09e95a0> begin[:]
return[call[name[iter], parameter[call[name[d].values, parameter[]]]]]
return[call[name[d].itervalues, parameter[]]] | keyword[def] identifier[itervalues] ( identifier[d] ,** identifier[kw] ):
literal[string]
keyword[if] keyword[not] identifier[PY2] :
keyword[return] identifier[iter] ( identifier[d] . identifier[values] (** identifier[kw] ))
keyword[return] identifier[d] . identifier[itervalues] (** identifier[kw] ) | def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
if not PY2:
return iter(d.values(**kw)) # depends on [control=['if'], data=[]]
return d.itervalues(**kw) |
def text(self):
    """Return the text from self.source, invoking it when it is a provider."""
    source = self.source
    # A plain string is returned as-is; anything else is treated as a
    # zero-argument callable that yields the text.
    return source if isinstance(source, str) else source()
constant[Text received from self.source.]
if call[name[isinstance], parameter[name[self].source, name[str]]] begin[:]
return[name[self].source] | keyword[def] identifier[text] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[source] , identifier[str] ):
keyword[return] identifier[self] . identifier[source]
keyword[else] :
keyword[return] identifier[self] . identifier[source] () | def text(self):
"""Text received from self.source."""
if isinstance(self.source, str):
return self.source # depends on [control=['if'], data=[]]
else:
return self.source() |
def procesa_data_dia(self, key_dia, datos_para_procesar):
    """Process the downloaded data corresponding to day `key_dia`."""
    resultado = pvpc_procesa_datos_dia(
        key_dia, datos_para_procesar, verbose=self.verbose)
    return resultado
constant[Procesa los datos descargados correspondientes a un día `key_dia`.]
return[call[name[pvpc_procesa_datos_dia], parameter[name[key_dia], name[datos_para_procesar]]]] | keyword[def] identifier[procesa_data_dia] ( identifier[self] , identifier[key_dia] , identifier[datos_para_procesar] ):
literal[string]
keyword[return] identifier[pvpc_procesa_datos_dia] ( identifier[key_dia] , identifier[datos_para_procesar] , identifier[verbose] = identifier[self] . identifier[verbose] ) | def procesa_data_dia(self, key_dia, datos_para_procesar):
"""Procesa los datos descargados correspondientes a un día `key_dia`."""
return pvpc_procesa_datos_dia(key_dia, datos_para_procesar, verbose=self.verbose) |
def all(self, domain=None):
    """
    Return the stored messages, copied so callers cannot mutate state.

    :param domain: message domain to look up; when None, messages from
        every domain are returned keyed by domain name.
    :rtype: dict
    :return: messages of the requested domain (empty dict for an unknown
        domain), or a mapping of domain -> messages when `domain` is None
    """
    if domain is None:
        # Shallow-copy each per-domain dict so callers can't mutate ours.
        return {k: dict(v) for k, v in list(self.messages.items())}
    # Unknown domains yield an empty dict rather than raising KeyError.
    return dict(self.messages.get(domain, {}))
constant[
Gets the messages within a given domain.
If domain is None, it returns all messages.
@type id: The
@param id: message id
@rtype: dict
@return: A dict of messages
]
if compare[name[domain] is constant[None]] begin[:]
return[<ast.DictComp object at 0x7da18f09eda0>]
return[call[name[dict], parameter[call[name[self].messages.get, parameter[name[domain], dictionary[[], []]]]]]] | keyword[def] identifier[all] ( identifier[self] , identifier[domain] = keyword[None] ):
literal[string]
keyword[if] identifier[domain] keyword[is] keyword[None] :
keyword[return] { identifier[k] : identifier[dict] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[self] . identifier[messages] . identifier[items] ())}
keyword[return] identifier[dict] ( identifier[self] . identifier[messages] . identifier[get] ( identifier[domain] ,{})) | def all(self, domain=None):
"""
Gets the messages within a given domain.
If domain is None, it returns all messages.
@type id: The
@param id: message id
@rtype: dict
@return: A dict of messages
"""
if domain is None:
return {k: dict(v) for (k, v) in list(self.messages.items())} # depends on [control=['if'], data=[]]
return dict(self.messages.get(domain, {})) |
def count(self):
    """Count elements per RDD.
    Creates a new RDD stream where each RDD has a single entry that
    is the count of the elements.
    :rtype: DStream
    """
    # Each partition collapses to a one-element list holding its size;
    # the reduce then sums those partial counts per RDD.
    counted = self.mapPartitions(lambda partition: [sum(1 for _ in partition)])
    return counted.reduce(operator.add)
constant[Count elements per RDD.
Creates a new RDD stream where each RDD has a single entry that
is the count of the elements.
:rtype: DStream
]
return[call[call[name[self].mapPartitions, parameter[<ast.Lambda object at 0x7da1b08dbaf0>]].reduce, parameter[name[operator].add]]] | keyword[def] identifier[count] ( identifier[self] ):
literal[string]
keyword[return] (
identifier[self]
. identifier[mapPartitions] ( keyword[lambda] identifier[p] :[ identifier[sum] ( literal[int] keyword[for] identifier[_] keyword[in] identifier[p] )])
. identifier[reduce] ( identifier[operator] . identifier[add] )
) | def count(self):
"""Count elements per RDD.
Creates a new RDD stream where each RDD has a single entry that
is the count of the elements.
:rtype: DStream
"""
return self.mapPartitions(lambda p: [sum((1 for _ in p))]).reduce(operator.add) |
def _to_EC_KEY(self):
    """
    Allocate a fresh OpenSSL EC_KEY structure set up for this curve.

    The handle is wrapped with an FFI garbage-collection hook so the
    underlying OpenSSL structure is freed together with the Python object.
    """
    ec_key = self._lib.EC_KEY_new_by_curve_name(self._nid)
    return _ffi.gc(ec_key, _lib.EC_KEY_free)
constant[
Create a new OpenSSL EC_KEY structure initialized to use this curve.
The structure is automatically garbage collected when the Python object
is garbage collected.
]
variable[key] assign[=] call[name[self]._lib.EC_KEY_new_by_curve_name, parameter[name[self]._nid]]
return[call[name[_ffi].gc, parameter[name[key], name[_lib].EC_KEY_free]]] | keyword[def] identifier[_to_EC_KEY] ( identifier[self] ):
literal[string]
identifier[key] = identifier[self] . identifier[_lib] . identifier[EC_KEY_new_by_curve_name] ( identifier[self] . identifier[_nid] )
keyword[return] identifier[_ffi] . identifier[gc] ( identifier[key] , identifier[_lib] . identifier[EC_KEY_free] ) | def _to_EC_KEY(self):
"""
Create a new OpenSSL EC_KEY structure initialized to use this curve.
The structure is automatically garbage collected when the Python object
is garbage collected.
"""
key = self._lib.EC_KEY_new_by_curve_name(self._nid)
return _ffi.gc(key, _lib.EC_KEY_free) |
def proximal(self, sigma):
    """Prox operator of TV. It allows the proximal step length to be a
    vector of positive elements.
    Examples
    --------
    Check that the proximal operator is the identity for sigma=0
    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0)(x)
    >>> (y-x).norm() < 1e-10
    Check that negative functions are mapped to 0
    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0.1)(x)
    >>> y.norm() < 1e-10
    """
    # A zero step length makes the prox the identity map.
    if sigma == 0:
        return odl.IdentityOperator(self.domain)
    else:
        def tv_prox(z, out=None):
            if out is None:
                out = z.space.zero()
            opts = self.prox_options
            # Copy the inputs so neither the caller's sigma nor z is
            # mutated by the in-place rescaling below.
            sigma_ = np.copy(sigma)
            z_ = z.copy()
            if self.strong_convexity > 0:
                # Rescale the step length and argument to account for
                # the strongly convex part of the functional.
                sigma_ /= (1 + sigma * self.strong_convexity)
                z_ /= (1 + sigma * self.strong_convexity)
            if opts['name'] == 'FGP':
                if opts['warmstart']:
                    # Reuse the dual variable from the previous call as
                    # the starting point (stored in prox_options).
                    if opts['p'] is None:
                        opts['p'] = self.grad.range.zero()
                    p = opts['p']
                else:
                    p = self.grad.range.zero()
                sigma_sqrt = np.sqrt(sigma_)
                # Absorb sqrt(sigma) into both the data and the gradient
                # operator before calling the FGP dual solver; the result
                # is rescaled back by sigma_sqrt afterwards.
                z_ /= sigma_sqrt
                grad = sigma_sqrt * self.grad
                # Keep the operator-norm attribute consistent with the
                # scaled gradient (presumably used by fgp_dual for its
                # step size -- confirm against fgp_dual's implementation).
                grad.norm = sigma_sqrt * self.grad.norm
                niter = opts['niter']
                alpha = self.alpha
                out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
                                  self.proj_P, tol=opts['tol'])
                # Undo the sqrt(sigma) scaling applied to z_ above.
                out *= sigma_sqrt
                return out
            else:
                # Only the FGP dual algorithm is implemented so far.
                raise NotImplementedError('Not yet implemented')
        return tv_prox
constant[Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
]
if compare[name[sigma] equal[==] constant[0]] begin[:]
return[call[name[odl].IdentityOperator, parameter[name[self].domain]]] | keyword[def] identifier[proximal] ( identifier[self] , identifier[sigma] ):
literal[string]
keyword[if] identifier[sigma] == literal[int] :
keyword[return] identifier[odl] . identifier[IdentityOperator] ( identifier[self] . identifier[domain] )
keyword[else] :
keyword[def] identifier[tv_prox] ( identifier[z] , identifier[out] = keyword[None] ):
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[z] . identifier[space] . identifier[zero] ()
identifier[opts] = identifier[self] . identifier[prox_options]
identifier[sigma_] = identifier[np] . identifier[copy] ( identifier[sigma] )
identifier[z_] = identifier[z] . identifier[copy] ()
keyword[if] identifier[self] . identifier[strong_convexity] > literal[int] :
identifier[sigma_] /=( literal[int] + identifier[sigma] * identifier[self] . identifier[strong_convexity] )
identifier[z_] /=( literal[int] + identifier[sigma] * identifier[self] . identifier[strong_convexity] )
keyword[if] identifier[opts] [ literal[string] ]== literal[string] :
keyword[if] identifier[opts] [ literal[string] ]:
keyword[if] identifier[opts] [ literal[string] ] keyword[is] keyword[None] :
identifier[opts] [ literal[string] ]= identifier[self] . identifier[grad] . identifier[range] . identifier[zero] ()
identifier[p] = identifier[opts] [ literal[string] ]
keyword[else] :
identifier[p] = identifier[self] . identifier[grad] . identifier[range] . identifier[zero] ()
identifier[sigma_sqrt] = identifier[np] . identifier[sqrt] ( identifier[sigma_] )
identifier[z_] /= identifier[sigma_sqrt]
identifier[grad] = identifier[sigma_sqrt] * identifier[self] . identifier[grad]
identifier[grad] . identifier[norm] = identifier[sigma_sqrt] * identifier[self] . identifier[grad] . identifier[norm]
identifier[niter] = identifier[opts] [ literal[string] ]
identifier[alpha] = identifier[self] . identifier[alpha]
identifier[out] [:]= identifier[fgp_dual] ( identifier[p] , identifier[z_] , identifier[alpha] , identifier[niter] , identifier[grad] , identifier[self] . identifier[proj_C] ,
identifier[self] . identifier[proj_P] , identifier[tol] = identifier[opts] [ literal[string] ])
identifier[out] *= identifier[sigma_sqrt]
keyword[return] identifier[out]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[return] identifier[tv_prox] | def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
"""
if sigma == 0:
return odl.IdentityOperator(self.domain) # depends on [control=['if'], data=[]]
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero() # depends on [control=['if'], data=['out']]
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= 1 + sigma * self.strong_convexity
z_ /= 1 + sigma * self.strong_convexity # depends on [control=['if'], data=[]]
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero() # depends on [control=['if'], data=[]]
p = opts['p'] # depends on [control=['if'], data=[]]
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C, self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Not yet implemented')
return tv_prox |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.