code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_current_word(self, completion=False):
"""Return current word, i.e. word at cursor position"""
ret = self.get_current_word_and_position(completion)
if ret is not None:
return ret[0] | def function[get_current_word, parameter[self, completion]]:
constant[Return current word, i.e. word at cursor position]
variable[ret] assign[=] call[name[self].get_current_word_and_position, parameter[name[completion]]]
if compare[name[ret] is_not constant[None]] begin[:]
return[call[name[ret]][constant[0]]] | keyword[def] identifier[get_current_word] ( identifier[self] , identifier[completion] = keyword[False] ):
literal[string]
identifier[ret] = identifier[self] . identifier[get_current_word_and_position] ( identifier[completion] )
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ret] [ literal[int] ] | def get_current_word(self, completion=False):
"""Return current word, i.e. word at cursor position"""
ret = self.get_current_word_and_position(completion)
if ret is not None:
return ret[0] # depends on [control=['if'], data=['ret']] |
def do_sing(self, arg):
"""Sing a colorful song."""
color_escape = COLORS.get(self.songcolor, Fore.RESET)
self.poutput(arg, color=color_escape) | def function[do_sing, parameter[self, arg]]:
constant[Sing a colorful song.]
variable[color_escape] assign[=] call[name[COLORS].get, parameter[name[self].songcolor, name[Fore].RESET]]
call[name[self].poutput, parameter[name[arg]]] | keyword[def] identifier[do_sing] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[color_escape] = identifier[COLORS] . identifier[get] ( identifier[self] . identifier[songcolor] , identifier[Fore] . identifier[RESET] )
identifier[self] . identifier[poutput] ( identifier[arg] , identifier[color] = identifier[color_escape] ) | def do_sing(self, arg):
"""Sing a colorful song."""
color_escape = COLORS.get(self.songcolor, Fore.RESET)
self.poutput(arg, color=color_escape) |
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and remove overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(self.graph.nodes(), key=lambda n: n.addr if n is not None else 0)
all_plt_stub_addrs = set(itertools.chain.from_iterable(obj.reverse_plt.keys() for obj in self.project.loader.all_objects if isinstance(obj, cle.MetaELF)))
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if a.addr in self.functions and a.addr not in all_plt_stub_addrs and \
not self._addr_hooked_or_syscall(a.addr):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data['jumpkind'] == 'Ijk_Call' for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self._lift(a.addr, size=0x10 - (a.addr % 0x10), opt_level=1)
except SimTranslationError:
continue
nop_length = None
if self._is_noop_block(self.project.arch, block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns)
if nop_length is None or nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and \
not (next_node_addr in self._nodes or next_node_addr in nodes_to_append):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(next_node_addr, next_node_size, self.model,
function_address=next_node_addr,
instruction_addrs=[i for i in a.instruction_addrs
if next_node_addr <= i
< next_node_addr + next_node_size
],
thumb=a.thumb,
byte_string=None if a.byte_string is None else a.byte_string[nop_length:],
block_id=next_node_addr,
)
self.graph.add_node(next_node)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(addr=next_node_addr, size=next_node_size,
base_state=self._base_state)
self.functions._add_node(next_node_addr, snippet)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(sorted_nodes + list(nodes_to_append.values()), key=lambda n: n.addr if n is not None else 0)
removed_nodes = set()
a = None # it always hold the very recent non-removed node
for i in range(len(sorted_nodes)): # pylint:disable=consider-using-enumerate
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and \
(a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(a.addr, b.addr - a.addr, backup_state=self._base_state)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all([ self._is_noop_insn(insn) for insn in block.capstone.insns ]):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len([ _ for _, _, data in self.graph.in_edges([b], data=True) ])
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = b.addr + b.size - new_b_addr # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
dummy_job = CFGJob(new_b_addr, a.function_address, None)
self._scan_block(dummy_job)
continue
# for other cases, we'll let them be for now
a = b | def function[_remove_redundant_overlapping_blocks, parameter[self]]:
constant[
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and remove overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
]
variable[sorted_nodes] assign[=] call[name[sorted], parameter[call[name[self].graph.nodes, parameter[]]]]
variable[all_plt_stub_addrs] assign[=] call[name[set], parameter[call[name[itertools].chain.from_iterable, parameter[<ast.GeneratorExp object at 0x7da1b21e0550>]]]]
variable[nodes_to_append] assign[=] dictionary[[], []]
for taget[name[a]] in starred[name[sorted_nodes]] begin[:]
if <ast.BoolOp object at 0x7da1b21e2710> begin[:]
variable[all_in_edges] assign[=] call[name[self].graph.in_edges, parameter[name[a]]]
if <ast.UnaryOp object at 0x7da1b21e1450> begin[:]
<ast.Try object at 0x7da1b21e3cd0>
variable[nop_length] assign[=] constant[None]
if call[name[self]._is_noop_block, parameter[name[self].project.arch, name[block]]] begin[:]
variable[nop_length] assign[=] name[block].size
if <ast.BoolOp object at 0x7da1b21e17b0> begin[:]
continue
variable[next_node_addr] assign[=] binary_operation[name[a].addr + name[nop_length]]
if <ast.BoolOp object at 0x7da1b21e15d0> begin[:]
variable[next_node_size] assign[=] binary_operation[name[a].size - name[nop_length]]
variable[next_node] assign[=] call[name[CFGNode], parameter[name[next_node_addr], name[next_node_size], name[self].model]]
call[name[self].graph.add_node, parameter[name[next_node]]]
variable[all_out_edges] assign[=] call[name[self].graph.out_edges, parameter[name[a]]]
for taget[tuple[[<ast.Name object at 0x7da1b21e1a20>, <ast.Name object at 0x7da1b21e1cf0>, <ast.Name object at 0x7da1b21e1000>]]] in starred[name[all_out_edges]] begin[:]
call[name[self].graph.add_edge, parameter[name[next_node], name[dst]]]
call[name[nodes_to_append]][name[next_node_addr]] assign[=] name[next_node]
<ast.Try object at 0x7da1b21e20e0>
if name[nodes_to_append] begin[:]
variable[sorted_nodes] assign[=] call[name[sorted], parameter[binary_operation[name[sorted_nodes] + call[name[list], parameter[call[name[nodes_to_append].values, parameter[]]]]]]]
variable[removed_nodes] assign[=] call[name[set], parameter[]]
variable[a] assign[=] constant[None]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sorted_nodes]]]]]] begin[:]
if compare[name[a] is constant[None]] begin[:]
variable[a] assign[=] call[name[sorted_nodes]][constant[0]]
continue
variable[b] assign[=] call[name[sorted_nodes]][name[i]]
if call[name[self]._addr_hooked_or_syscall, parameter[name[b].addr]] begin[:]
continue
if compare[name[b] in name[removed_nodes]] begin[:]
continue
if <ast.BoolOp object at 0x7da20c7cb250> begin[:]
<ast.Try object at 0x7da20c7caa40>
if <ast.BoolOp object at 0x7da20c7c8190> begin[:]
call[name[self]._nodes][name[b].addr] assign[=] name[b]
call[call[name[self]._nodes_by_addr][name[b].addr].append, parameter[name[b]]]
call[name[self]._shrink_node, parameter[name[a], binary_operation[name[b].addr - name[a].addr]]]
variable[a] assign[=] name[b]
continue
variable[all_functions] assign[=] name[self].kb.functions
if compare[binary_operation[name[a].addr + name[a].size] equal[==] binary_operation[name[b].addr + name[b].size]] begin[:]
variable[in_edges] assign[=] call[name[len], parameter[<ast.ListComp object at 0x7da20c7c91b0>]]
if compare[name[in_edges] equal[==] constant[0]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c7cba30>, <ast.Name object at 0x7da20c7c9b40>, <ast.Name object at 0x7da20c7c8df0>]]] in starred[call[name[self].graph.out_edges, parameter[list[[<ast.Name object at 0x7da20c7cb460>]]]]] begin[:]
call[name[self].graph.add_edge, parameter[name[a], name[dst]]]
if compare[name[b].addr in name[self]._nodes] begin[:]
<ast.Delete object at 0x7da20c7cbeb0>
if <ast.BoolOp object at 0x7da20c7cb7c0> begin[:]
call[call[name[self]._nodes_by_addr][name[b].addr].remove, parameter[name[b]]]
call[name[self].graph.remove_node, parameter[name[b]]]
if compare[name[b].addr in name[all_functions]] begin[:]
<ast.Delete object at 0x7da20c7c8a90>
call[name[removed_nodes].add, parameter[name[b]]]
continue
if compare[call[name[b].instruction_addrs][constant[0]] <ast.NotIn object at 0x7da2590d7190> name[a].instruction_addrs] begin[:]
variable[new_b_addr] assign[=] binary_operation[name[a].addr + name[a].size]
variable[new_b_size] assign[=] binary_operation[binary_operation[name[b].addr + name[b].size] - name[new_b_addr]]
if compare[name[b].addr in name[self]._nodes] begin[:]
<ast.Delete object at 0x7da20c7c81f0>
if <ast.BoolOp object at 0x7da20c7c9270> begin[:]
call[call[name[self]._nodes_by_addr][name[b].addr].remove, parameter[name[b]]]
call[name[self].graph.remove_node, parameter[name[b]]]
if compare[name[b].addr in name[all_functions]] begin[:]
<ast.Delete object at 0x7da20c7c9810>
call[name[removed_nodes].add, parameter[name[b]]]
if compare[name[new_b_size] greater[>] constant[0]] begin[:]
variable[dummy_job] assign[=] call[name[CFGJob], parameter[name[new_b_addr], name[a].function_address, constant[None]]]
call[name[self]._scan_block, parameter[name[dummy_job]]]
continue
variable[a] assign[=] name[b] | keyword[def] identifier[_remove_redundant_overlapping_blocks] ( identifier[self] ):
literal[string]
identifier[sorted_nodes] = identifier[sorted] ( identifier[self] . identifier[graph] . identifier[nodes] (), identifier[key] = keyword[lambda] identifier[n] : identifier[n] . identifier[addr] keyword[if] identifier[n] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] )
identifier[all_plt_stub_addrs] = identifier[set] ( identifier[itertools] . identifier[chain] . identifier[from_iterable] ( identifier[obj] . identifier[reverse_plt] . identifier[keys] () keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[project] . identifier[loader] . identifier[all_objects] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[cle] . identifier[MetaELF] )))
identifier[nodes_to_append] ={}
keyword[for] identifier[a] keyword[in] identifier[sorted_nodes] :
keyword[if] identifier[a] . identifier[addr] keyword[in] identifier[self] . identifier[functions] keyword[and] identifier[a] . identifier[addr] keyword[not] keyword[in] identifier[all_plt_stub_addrs] keyword[and] keyword[not] identifier[self] . identifier[_addr_hooked_or_syscall] ( identifier[a] . identifier[addr] ):
identifier[all_in_edges] = identifier[self] . identifier[graph] . identifier[in_edges] ( identifier[a] , identifier[data] = keyword[True] )
keyword[if] keyword[not] identifier[any] ([ identifier[data] [ literal[string] ]== literal[string] keyword[for] identifier[_] , identifier[_] , identifier[data] keyword[in] identifier[all_in_edges] ]):
keyword[try] :
identifier[block] = identifier[self] . identifier[_lift] ( identifier[a] . identifier[addr] , identifier[size] = literal[int] -( identifier[a] . identifier[addr] % literal[int] ), identifier[opt_level] = literal[int] )
keyword[except] identifier[SimTranslationError] :
keyword[continue]
identifier[nop_length] = keyword[None]
keyword[if] identifier[self] . identifier[_is_noop_block] ( identifier[self] . identifier[project] . identifier[arch] , identifier[block] ):
identifier[nop_length] = identifier[block] . identifier[size]
keyword[else] :
identifier[insns] = identifier[block] . identifier[capstone] . identifier[insns]
keyword[if] identifier[insns] :
identifier[nop_length] = identifier[self] . identifier[_get_nop_length] ( identifier[insns] )
keyword[if] identifier[nop_length] keyword[is] keyword[None] keyword[or] identifier[nop_length] <= literal[int] :
keyword[continue]
identifier[next_node_addr] = identifier[a] . identifier[addr] + identifier[nop_length]
keyword[if] identifier[nop_length] < identifier[a] . identifier[size] keyword[and] keyword[not] ( identifier[next_node_addr] keyword[in] identifier[self] . identifier[_nodes] keyword[or] identifier[next_node_addr] keyword[in] identifier[nodes_to_append] ):
identifier[next_node_size] = identifier[a] . identifier[size] - identifier[nop_length]
identifier[next_node] = identifier[CFGNode] ( identifier[next_node_addr] , identifier[next_node_size] , identifier[self] . identifier[model] ,
identifier[function_address] = identifier[next_node_addr] ,
identifier[instruction_addrs] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[a] . identifier[instruction_addrs]
keyword[if] identifier[next_node_addr] <= identifier[i]
< identifier[next_node_addr] + identifier[next_node_size]
],
identifier[thumb] = identifier[a] . identifier[thumb] ,
identifier[byte_string] = keyword[None] keyword[if] identifier[a] . identifier[byte_string] keyword[is] keyword[None] keyword[else] identifier[a] . identifier[byte_string] [ identifier[nop_length] :],
identifier[block_id] = identifier[next_node_addr] ,
)
identifier[self] . identifier[graph] . identifier[add_node] ( identifier[next_node] )
identifier[all_out_edges] = identifier[self] . identifier[graph] . identifier[out_edges] ( identifier[a] , identifier[data] = keyword[True] )
keyword[for] identifier[_] , identifier[dst] , identifier[data] keyword[in] identifier[all_out_edges] :
identifier[self] . identifier[graph] . identifier[add_edge] ( identifier[next_node] , identifier[dst] ,** identifier[data] )
identifier[nodes_to_append] [ identifier[next_node_addr] ]= identifier[next_node]
keyword[try] :
identifier[snippet] = identifier[self] . identifier[_to_snippet] ( identifier[addr] = identifier[next_node_addr] , identifier[size] = identifier[next_node_size] ,
identifier[base_state] = identifier[self] . identifier[_base_state] )
identifier[self] . identifier[functions] . identifier[_add_node] ( identifier[next_node_addr] , identifier[snippet] )
keyword[except] ( identifier[SimEngineError] , identifier[SimMemoryError] ):
keyword[continue]
keyword[if] identifier[nodes_to_append] :
identifier[sorted_nodes] = identifier[sorted] ( identifier[sorted_nodes] + identifier[list] ( identifier[nodes_to_append] . identifier[values] ()), identifier[key] = keyword[lambda] identifier[n] : identifier[n] . identifier[addr] keyword[if] identifier[n] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] )
identifier[removed_nodes] = identifier[set] ()
identifier[a] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sorted_nodes] )):
keyword[if] identifier[a] keyword[is] keyword[None] :
identifier[a] = identifier[sorted_nodes] [ literal[int] ]
keyword[continue]
identifier[b] = identifier[sorted_nodes] [ identifier[i] ]
keyword[if] identifier[self] . identifier[_addr_hooked_or_syscall] ( identifier[b] . identifier[addr] ):
keyword[continue]
keyword[if] identifier[b] keyword[in] identifier[removed_nodes] :
keyword[continue]
keyword[if] identifier[a] . identifier[addr] <= identifier[b] . identifier[addr] keyword[and] ( identifier[a] . identifier[addr] + identifier[a] . identifier[size] > identifier[b] . identifier[addr] ):
keyword[try] :
identifier[block] = identifier[self] . identifier[project] . identifier[factory] . identifier[fresh_block] ( identifier[a] . identifier[addr] , identifier[b] . identifier[addr] - identifier[a] . identifier[addr] , identifier[backup_state] = identifier[self] . identifier[_base_state] )
keyword[except] identifier[SimTranslationError] :
identifier[a] = identifier[b]
keyword[continue]
keyword[if] identifier[block] . identifier[capstone] . identifier[insns] keyword[and] identifier[all] ([ identifier[self] . identifier[_is_noop_insn] ( identifier[insn] ) keyword[for] identifier[insn] keyword[in] identifier[block] . identifier[capstone] . identifier[insns] ]):
identifier[self] . identifier[_nodes] [ identifier[b] . identifier[addr] ]= identifier[b]
identifier[self] . identifier[_nodes_by_addr] [ identifier[b] . identifier[addr] ]. identifier[append] ( identifier[b] )
identifier[self] . identifier[_shrink_node] ( identifier[a] , identifier[b] . identifier[addr] - identifier[a] . identifier[addr] , identifier[remove_function] = keyword[False] )
identifier[a] = identifier[b]
keyword[continue]
identifier[all_functions] = identifier[self] . identifier[kb] . identifier[functions]
keyword[if] identifier[a] . identifier[addr] + identifier[a] . identifier[size] == identifier[b] . identifier[addr] + identifier[b] . identifier[size] :
identifier[in_edges] = identifier[len] ([ identifier[_] keyword[for] identifier[_] , identifier[_] , identifier[data] keyword[in] identifier[self] . identifier[graph] . identifier[in_edges] ([ identifier[b] ], identifier[data] = keyword[True] )])
keyword[if] identifier[in_edges] == literal[int] :
keyword[for] identifier[_] , identifier[dst] , identifier[data] keyword[in] identifier[self] . identifier[graph] . identifier[out_edges] ([ identifier[b] ], identifier[data] = keyword[True] ):
identifier[self] . identifier[graph] . identifier[add_edge] ( identifier[a] , identifier[dst] ,** identifier[data] )
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[self] . identifier[_nodes] :
keyword[del] identifier[self] . identifier[_nodes] [ identifier[b] . identifier[addr] ]
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[self] . identifier[_nodes_by_addr] keyword[and] identifier[b] keyword[in] identifier[self] . identifier[_nodes_by_addr] [ identifier[b] . identifier[addr] ]:
identifier[self] . identifier[_nodes_by_addr] [ identifier[b] . identifier[addr] ]. identifier[remove] ( identifier[b] )
identifier[self] . identifier[graph] . identifier[remove_node] ( identifier[b] )
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[all_functions] :
keyword[del] identifier[all_functions] [ identifier[b] . identifier[addr] ]
identifier[removed_nodes] . identifier[add] ( identifier[b] )
keyword[continue]
keyword[if] identifier[b] . identifier[instruction_addrs] [ literal[int] ] keyword[not] keyword[in] identifier[a] . identifier[instruction_addrs] :
identifier[new_b_addr] = identifier[a] . identifier[addr] + identifier[a] . identifier[size]
identifier[new_b_size] = identifier[b] . identifier[addr] + identifier[b] . identifier[size] - identifier[new_b_addr]
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[self] . identifier[_nodes] :
keyword[del] identifier[self] . identifier[_nodes] [ identifier[b] . identifier[addr] ]
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[self] . identifier[_nodes_by_addr] keyword[and] identifier[b] keyword[in] identifier[self] . identifier[_nodes_by_addr] [ identifier[b] . identifier[addr] ]:
identifier[self] . identifier[_nodes_by_addr] [ identifier[b] . identifier[addr] ]. identifier[remove] ( identifier[b] )
identifier[self] . identifier[graph] . identifier[remove_node] ( identifier[b] )
keyword[if] identifier[b] . identifier[addr] keyword[in] identifier[all_functions] :
keyword[del] identifier[all_functions] [ identifier[b] . identifier[addr] ]
identifier[removed_nodes] . identifier[add] ( identifier[b] )
keyword[if] identifier[new_b_size] > literal[int] :
identifier[dummy_job] = identifier[CFGJob] ( identifier[new_b_addr] , identifier[a] . identifier[function_address] , keyword[None] )
identifier[self] . identifier[_scan_block] ( identifier[dummy_job] )
keyword[continue]
identifier[a] = identifier[b] | def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and remove overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(self.graph.nodes(), key=lambda n: n.addr if n is not None else 0)
all_plt_stub_addrs = set(itertools.chain.from_iterable((obj.reverse_plt.keys() for obj in self.project.loader.all_objects if isinstance(obj, cle.MetaELF))))
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if a.addr in self.functions and a.addr not in all_plt_stub_addrs and (not self._addr_hooked_or_syscall(a.addr)):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data['jumpkind'] == 'Ijk_Call' for (_, _, data) in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self._lift(a.addr, size=16 - a.addr % 16, opt_level=1) # depends on [control=['try'], data=[]]
except SimTranslationError:
continue # depends on [control=['except'], data=[]]
nop_length = None
if self._is_noop_block(self.project.arch, block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size # depends on [control=['if'], data=[]]
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns) # depends on [control=['if'], data=[]]
if nop_length is None or nop_length <= 0:
continue # depends on [control=['if'], data=[]]
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and (not (next_node_addr in self._nodes or next_node_addr in nodes_to_append)):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(next_node_addr, next_node_size, self.model, function_address=next_node_addr, instruction_addrs=[i for i in a.instruction_addrs if next_node_addr <= i < next_node_addr + next_node_size], thumb=a.thumb, byte_string=None if a.byte_string is None else a.byte_string[nop_length:], block_id=next_node_addr)
self.graph.add_node(next_node)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for (_, dst, data) in all_out_edges:
self.graph.add_edge(next_node, dst, **data) # depends on [control=['for'], data=[]]
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(addr=next_node_addr, size=next_node_size, base_state=self._base_state)
self.functions._add_node(next_node_addr, snippet) # depends on [control=['try'], data=[]]
except (SimEngineError, SimMemoryError):
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(sorted_nodes + list(nodes_to_append.values()), key=lambda n: n.addr if n is not None else 0) # depends on [control=['if'], data=[]]
removed_nodes = set()
a = None # it always hold the very recent non-removed node
for i in range(len(sorted_nodes)): # pylint:disable=consider-using-enumerate
if a is None:
a = sorted_nodes[0]
continue # depends on [control=['if'], data=['a']]
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue # depends on [control=['if'], data=[]]
if b in removed_nodes:
# skip all removed nodes
continue # depends on [control=['if'], data=[]]
if a.addr <= b.addr and a.addr + a.size > b.addr:
# They are overlapping
try:
block = self.project.factory.fresh_block(a.addr, b.addr - a.addr, backup_state=self._base_state) # depends on [control=['try'], data=[]]
except SimTranslationError:
a = b
continue # depends on [control=['except'], data=[]]
if block.capstone.insns and all([self._is_noop_insn(insn) for insn in block.capstone.insns]):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue # depends on [control=['if'], data=[]]
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len([_ for (_, _, data) in self.graph.in_edges([b], data=True)])
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for (_, dst, data) in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data) # depends on [control=['for'], data=[]]
if b.addr in self._nodes:
del self._nodes[b.addr] # depends on [control=['if'], data=[]]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b) # depends on [control=['if'], data=[]]
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr] # depends on [control=['if'], data=['all_functions']]
# skip b
removed_nodes.add(b)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = b.addr + b.size - new_b_addr # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr] # depends on [control=['if'], data=[]]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b) # depends on [control=['if'], data=[]]
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr] # depends on [control=['if'], data=['all_functions']]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
dummy_job = CFGJob(new_b_addr, a.function_address, None)
self._scan_block(dummy_job) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# for other cases, we'll let them be for now
a = b # depends on [control=['for'], data=['i']] |
def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
known issues, particular in regards to Springer materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.errors.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
len(data["_atom_site_label"][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(
r'\([0-9]*\)', '',
symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[str(re.findall(r'\D+', symbol_str_lst[
elocc_idx].strip())[1]).replace('<sup>', '')] = \
float('0' + re.findall(r'\.?\d+', symbol_str_lst[
elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(
et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.errors.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ["_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label"]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc":
"_space_group_magn.transform_BNS_Pp_abc"}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.errors.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1/3., 2/3.)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac/comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.errors.append("Some fractional co-ordinates rounded to ideal values to "
"avoid finite precision errors.")
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data | def function[_sanitize_data, parameter[self, data]]:
constant[
Some CIF files do not conform to spec. This function corrects
known issues, particular in regards to Springer materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
]
constant[
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
]
if compare[constant[_atom_site_attached_hydrogens] in call[name[data].data.keys, parameter[]]] begin[:]
variable[attached_hydrogens] assign[=] <ast.ListComp object at 0x7da1b1c936a0>
if compare[call[name[len], parameter[name[attached_hydrogens]]] greater[>] constant[0]] begin[:]
call[name[self].errors.append, parameter[constant[Structure has implicit hydrogens defined, parsed structure unlikely to be suitable for use in calculations unless hydrogens added.]]]
if compare[constant[_atom_site_type_symbol] in call[name[data].data.keys, parameter[]]] begin[:]
variable[idxs_to_remove] assign[=] list[[]]
variable[new_atom_site_label] assign[=] list[[]]
variable[new_atom_site_type_symbol] assign[=] list[[]]
variable[new_atom_site_occupancy] assign[=] list[[]]
variable[new_fract_x] assign[=] list[[]]
variable[new_fract_y] assign[=] list[[]]
variable[new_fract_z] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1c92b90>, <ast.Name object at 0x7da1b1c92b60>]]] in starred[call[name[enumerate], parameter[call[name[data]][constant[_atom_site_label]]]]] begin[:]
if compare[call[name[len], parameter[call[call[call[name[data]][constant[_atom_site_type_symbol]]][name[idx]].split, parameter[constant[ + ]]]]] greater[>] call[name[len], parameter[call[call[call[name[data]][constant[_atom_site_label]]][name[idx]].split, parameter[constant[ + ]]]]]] begin[:]
variable[els_occu] assign[=] dictionary[[], []]
variable[symbol_str] assign[=] call[call[name[data]][constant[_atom_site_type_symbol]]][name[idx]]
variable[symbol_str_lst] assign[=] call[name[symbol_str].split, parameter[constant[ + ]]]
for taget[name[elocc_idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[symbol_str_lst]]]]]] begin[:]
call[name[symbol_str_lst]][name[elocc_idx]] assign[=] call[name[re].sub, parameter[constant[\([0-9]*\)], constant[], call[call[name[symbol_str_lst]][name[elocc_idx]].strip, parameter[]]]]
call[name[els_occu]][call[call[name[str], parameter[call[call[name[re].findall, parameter[constant[\D+], call[call[name[symbol_str_lst]][name[elocc_idx]].strip, parameter[]]]]][constant[1]]]].replace, parameter[constant[<sup>], constant[]]]] assign[=] call[name[float], parameter[binary_operation[constant[0] + call[call[name[re].findall, parameter[constant[\.?\d+], call[call[name[symbol_str_lst]][name[elocc_idx]].strip, parameter[]]]]][constant[1]]]]]
variable[x] assign[=] call[name[str2float], parameter[call[call[name[data]][constant[_atom_site_fract_x]]][name[idx]]]]
variable[y] assign[=] call[name[str2float], parameter[call[call[name[data]][constant[_atom_site_fract_y]]][name[idx]]]]
variable[z] assign[=] call[name[str2float], parameter[call[call[name[data]][constant[_atom_site_fract_z]]][name[idx]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c91330>, <ast.Name object at 0x7da1b1c91300>]]] in starred[call[name[els_occu].items, parameter[]]] begin[:]
call[name[new_atom_site_label].append, parameter[binary_operation[binary_operation[name[et] + constant[_fix]] + call[name[str], parameter[call[name[len], parameter[name[new_atom_site_label]]]]]]]]
call[name[new_atom_site_type_symbol].append, parameter[name[et]]]
call[name[new_atom_site_occupancy].append, parameter[call[name[str], parameter[name[occu]]]]]
call[name[new_fract_x].append, parameter[call[name[str], parameter[name[x]]]]]
call[name[new_fract_y].append, parameter[call[name[str], parameter[name[y]]]]]
call[name[new_fract_z].append, parameter[call[name[str], parameter[name[z]]]]]
call[name[idxs_to_remove].append, parameter[name[idx]]]
for taget[name[original_key]] in starred[name[data].data] begin[:]
if call[name[isinstance], parameter[call[name[data].data][name[original_key]], name[list]]] begin[:]
for taget[name[id]] in starred[call[name[sorted], parameter[name[idxs_to_remove]]]] begin[:]
<ast.Delete object at 0x7da1b1c903a0>
if compare[call[name[len], parameter[name[idxs_to_remove]]] greater[>] constant[0]] begin[:]
call[name[self].errors.append, parameter[constant[Pauling file corrections applied.]]]
<ast.AugAssign object at 0x7da1b1c5bf70>
<ast.AugAssign object at 0x7da1b1c5be50>
<ast.AugAssign object at 0x7da1b1c5bd30>
<ast.AugAssign object at 0x7da1b1c5bc10>
<ast.AugAssign object at 0x7da1b1c5baf0>
<ast.AugAssign object at 0x7da1b1c5b9d0>
constant[
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
]
if call[name[self].feature_flags][constant[magcif]] begin[:]
variable[correct_keys] assign[=] list[[<ast.Constant object at 0x7da1b1c5b6a0>, <ast.Constant object at 0x7da1b1c5b670>, <ast.Constant object at 0x7da1b1c5b640>, <ast.Constant object at 0x7da1b1c5b610>, <ast.Constant object at 0x7da1b1c5b5e0>, <ast.Constant object at 0x7da1b1c5b5b0>, <ast.Constant object at 0x7da1b1c5b580>, <ast.Constant object at 0x7da1b1c5b550>]]
variable[changes_to_make] assign[=] dictionary[[], []]
for taget[name[original_key]] in starred[name[data].data] begin[:]
for taget[name[correct_key]] in starred[name[correct_keys]] begin[:]
variable[trial_key] assign[=] call[constant[_].join, parameter[call[name[correct_key].split, parameter[constant[.]]]]]
variable[test_key] assign[=] call[constant[_].join, parameter[call[name[original_key].split, parameter[constant[.]]]]]
if compare[name[trial_key] equal[==] name[test_key]] begin[:]
call[name[changes_to_make]][name[correct_key]] assign[=] name[original_key]
for taget[tuple[[<ast.Name object at 0x7da1b1c5add0>, <ast.Name object at 0x7da1b1c5ada0>]]] in starred[call[name[changes_to_make].items, parameter[]]] begin[:]
call[name[data].data][name[correct_key]] assign[=] call[name[data].data][name[original_key]]
variable[renamed_keys] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c5aaa0>], [<ast.Constant object at 0x7da1b1c5aa70>]]
variable[changes_to_make] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1c5a950>, <ast.Name object at 0x7da1b1c5a920>]]] in starred[call[name[renamed_keys].items, parameter[]]] begin[:]
if call[name[data].data.get, parameter[name[interim_key]]] begin[:]
call[name[changes_to_make]][name[final_key]] assign[=] name[interim_key]
if compare[call[name[len], parameter[name[changes_to_make]]] greater[>] constant[0]] begin[:]
call[name[self].errors.append, parameter[constant[Keys changed to match new magCIF specification.]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c5a380>, <ast.Name object at 0x7da1b1c5a350>]]] in starred[call[name[changes_to_make].items, parameter[]]] begin[:]
call[name[data].data][name[final_key]] assign[=] call[name[data].data][name[interim_key]]
variable[important_fracs] assign[=] tuple[[<ast.BinOp object at 0x7da1b1c5a050>, <ast.BinOp object at 0x7da1b1c580a0>]]
variable[fracs_to_change] assign[=] dictionary[[], []]
for taget[name[label]] in starred[tuple[[<ast.Constant object at 0x7da1b1c58250>, <ast.Constant object at 0x7da1b1c58280>, <ast.Constant object at 0x7da1b1c582b0>]]] begin[:]
if compare[name[label] in call[name[data].data.keys, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1c58490>, <ast.Name object at 0x7da1b1c584c0>]]] in starred[call[name[enumerate], parameter[call[name[data].data][name[label]]]]] begin[:]
<ast.Try object at 0x7da1b1c58610>
for taget[name[comparison_frac]] in starred[name[important_fracs]] begin[:]
if compare[call[name[abs], parameter[binary_operation[constant[1] - binary_operation[name[frac] / name[comparison_frac]]]]] less[<] constant[0.0001]] begin[:]
call[name[fracs_to_change]][tuple[[<ast.Name object at 0x7da1b1c598a0>, <ast.Name object at 0x7da1b1c59870>]]] assign[=] call[name[str], parameter[name[comparison_frac]]]
if name[fracs_to_change] begin[:]
call[name[self].errors.append, parameter[constant[Some fractional co-ordinates rounded to ideal values to avoid finite precision errors.]]]
for taget[tuple[[<ast.Tuple object at 0x7da1b1c595a0>, <ast.Name object at 0x7da1b1c59510>]]] in starred[call[name[fracs_to_change].items, parameter[]]] begin[:]
call[call[name[data].data][name[label]]][name[idx]] assign[=] name[val]
return[name[data]] | keyword[def] identifier[_sanitize_data] ( identifier[self] , identifier[data] ):
literal[string]
literal[string]
keyword[if] literal[string] keyword[in] identifier[data] . identifier[data] . identifier[keys] ():
identifier[attached_hydrogens] =[ identifier[str2float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[data] . identifier[data] [ literal[string] ]
keyword[if] identifier[str2float] ( identifier[x] )!= literal[int] ]
keyword[if] identifier[len] ( identifier[attached_hydrogens] )> literal[int] :
identifier[self] . identifier[errors] . identifier[append] ( literal[string]
literal[string]
literal[string] )
keyword[if] literal[string] keyword[in] identifier[data] . identifier[data] . identifier[keys] ():
identifier[idxs_to_remove] =[]
identifier[new_atom_site_label] =[]
identifier[new_atom_site_type_symbol] =[]
identifier[new_atom_site_occupancy] =[]
identifier[new_fract_x] =[]
identifier[new_fract_y] =[]
identifier[new_fract_z] =[]
keyword[for] identifier[idx] , identifier[el_row] keyword[in] identifier[enumerate] ( identifier[data] [ literal[string] ]):
keyword[if] identifier[len] ( identifier[data] [ literal[string] ][ identifier[idx] ]. identifier[split] ( literal[string] ))> identifier[len] ( identifier[data] [ literal[string] ][ identifier[idx] ]. identifier[split] ( literal[string] )):
identifier[els_occu] ={}
identifier[symbol_str] = identifier[data] [ literal[string] ][ identifier[idx] ]
identifier[symbol_str_lst] = identifier[symbol_str] . identifier[split] ( literal[string] )
keyword[for] identifier[elocc_idx] keyword[in] identifier[range] ( identifier[len] ( identifier[symbol_str_lst] )):
identifier[symbol_str_lst] [ identifier[elocc_idx] ]= identifier[re] . identifier[sub] (
literal[string] , literal[string] ,
identifier[symbol_str_lst] [ identifier[elocc_idx] ]. identifier[strip] ())
identifier[els_occu] [ identifier[str] ( identifier[re] . identifier[findall] ( literal[string] , identifier[symbol_str_lst] [
identifier[elocc_idx] ]. identifier[strip] ())[ literal[int] ]). identifier[replace] ( literal[string] , literal[string] )]= identifier[float] ( literal[string] + identifier[re] . identifier[findall] ( literal[string] , identifier[symbol_str_lst] [
identifier[elocc_idx] ]. identifier[strip] ())[ literal[int] ])
identifier[x] = identifier[str2float] ( identifier[data] [ literal[string] ][ identifier[idx] ])
identifier[y] = identifier[str2float] ( identifier[data] [ literal[string] ][ identifier[idx] ])
identifier[z] = identifier[str2float] ( identifier[data] [ literal[string] ][ identifier[idx] ])
keyword[for] identifier[et] , identifier[occu] keyword[in] identifier[els_occu] . identifier[items] ():
identifier[new_atom_site_label] . identifier[append] (
identifier[et] + literal[string] + identifier[str] ( identifier[len] ( identifier[new_atom_site_label] )))
identifier[new_atom_site_type_symbol] . identifier[append] ( identifier[et] )
identifier[new_atom_site_occupancy] . identifier[append] ( identifier[str] ( identifier[occu] ))
identifier[new_fract_x] . identifier[append] ( identifier[str] ( identifier[x] ))
identifier[new_fract_y] . identifier[append] ( identifier[str] ( identifier[y] ))
identifier[new_fract_z] . identifier[append] ( identifier[str] ( identifier[z] ))
identifier[idxs_to_remove] . identifier[append] ( identifier[idx] )
keyword[for] identifier[original_key] keyword[in] identifier[data] . identifier[data] :
keyword[if] identifier[isinstance] ( identifier[data] . identifier[data] [ identifier[original_key] ], identifier[list] ):
keyword[for] identifier[id] keyword[in] identifier[sorted] ( identifier[idxs_to_remove] , identifier[reverse] = keyword[True] ):
keyword[del] identifier[data] . identifier[data] [ identifier[original_key] ][ identifier[id] ]
keyword[if] identifier[len] ( identifier[idxs_to_remove] )> literal[int] :
identifier[self] . identifier[errors] . identifier[append] ( literal[string] )
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_atom_site_label]
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_atom_site_type_symbol]
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_atom_site_occupancy]
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_fract_x]
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_fract_y]
identifier[data] . identifier[data] [ literal[string] ]+= identifier[new_fract_z]
literal[string]
keyword[if] identifier[self] . identifier[feature_flags] [ literal[string] ]:
identifier[correct_keys] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[changes_to_make] ={}
keyword[for] identifier[original_key] keyword[in] identifier[data] . identifier[data] :
keyword[for] identifier[correct_key] keyword[in] identifier[correct_keys] :
identifier[trial_key] = literal[string] . identifier[join] ( identifier[correct_key] . identifier[split] ( literal[string] ))
identifier[test_key] = literal[string] . identifier[join] ( identifier[original_key] . identifier[split] ( literal[string] ))
keyword[if] identifier[trial_key] == identifier[test_key] :
identifier[changes_to_make] [ identifier[correct_key] ]= identifier[original_key]
keyword[for] identifier[correct_key] , identifier[original_key] keyword[in] identifier[changes_to_make] . identifier[items] ():
identifier[data] . identifier[data] [ identifier[correct_key] ]= identifier[data] . identifier[data] [ identifier[original_key] ]
identifier[renamed_keys] ={
literal[string] :
literal[string] }
identifier[changes_to_make] ={}
keyword[for] identifier[interim_key] , identifier[final_key] keyword[in] identifier[renamed_keys] . identifier[items] ():
keyword[if] identifier[data] . identifier[data] . identifier[get] ( identifier[interim_key] ):
identifier[changes_to_make] [ identifier[final_key] ]= identifier[interim_key]
keyword[if] identifier[len] ( identifier[changes_to_make] )> literal[int] :
identifier[self] . identifier[errors] . identifier[append] ( literal[string] )
keyword[for] identifier[final_key] , identifier[interim_key] keyword[in] identifier[changes_to_make] . identifier[items] ():
identifier[data] . identifier[data] [ identifier[final_key] ]= identifier[data] . identifier[data] [ identifier[interim_key] ]
identifier[important_fracs] =( literal[int] / literal[int] , literal[int] / literal[int] )
identifier[fracs_to_change] ={}
keyword[for] identifier[label] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[label] keyword[in] identifier[data] . identifier[data] . identifier[keys] ():
keyword[for] identifier[idx] , identifier[frac] keyword[in] identifier[enumerate] ( identifier[data] . identifier[data] [ identifier[label] ]):
keyword[try] :
identifier[frac] = identifier[str2float] ( identifier[frac] )
keyword[except] :
keyword[continue]
keyword[for] identifier[comparison_frac] keyword[in] identifier[important_fracs] :
keyword[if] identifier[abs] ( literal[int] - identifier[frac] / identifier[comparison_frac] )< literal[int] :
identifier[fracs_to_change] [( identifier[label] , identifier[idx] )]= identifier[str] ( identifier[comparison_frac] )
keyword[if] identifier[fracs_to_change] :
identifier[self] . identifier[errors] . identifier[append] ( literal[string]
literal[string] )
keyword[for] ( identifier[label] , identifier[idx] ), identifier[val] keyword[in] identifier[fracs_to_change] . identifier[items] ():
identifier[data] . identifier[data] [ identifier[label] ][ identifier[idx] ]= identifier[val]
keyword[return] identifier[data] | def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
known issues, particular in regards to Springer materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
'\n This part of the code deals with handling formats of data as found in\n CIF files extracted from the Springer Materials/Pauling File\n databases, and that are different from standard ICSD formats.\n '
# check for implicit hydrogens, warn if any present
if '_atom_site_attached_hydrogens' in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens'] if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.errors.append('Structure has implicit hydrogens defined, parsed structure unlikely to be suitable for use in calculations unless hydrogens added.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if '_atom_site_type_symbol' in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for (idx, el_row) in enumerate(data['_atom_site_label']):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data['_atom_site_type_symbol'][idx].split(' + ')) > len(data['_atom_site_label'][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data['_atom_site_type_symbol'][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub('\\([0-9]*\\)', '', symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[str(re.findall('\\D+', symbol_str_lst[elocc_idx].strip())[1]).replace('<sup>', '')] = float('0' + re.findall('\\.?\\d+', symbol_str_lst[elocc_idx].strip())[1]) # depends on [control=['for'], data=['elocc_idx']]
x = str2float(data['_atom_site_fract_x'][idx])
y = str2float(data['_atom_site_fract_y'][idx])
z = str2float(data['_atom_site_fract_z'][idx])
for (et, occu) in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z)) # depends on [control=['for'], data=[]]
idxs_to_remove.append(idx) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id] # depends on [control=['for'], data=['id']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['original_key']]
if len(idxs_to_remove) > 0:
self.errors.append('Pauling file corrections applied.')
data.data['_atom_site_label'] += new_atom_site_label
data.data['_atom_site_type_symbol'] += new_atom_site_type_symbol
data.data['_atom_site_occupancy'] += new_atom_site_occupancy
data.data['_atom_site_fract_x'] += new_fract_x
data.data['_atom_site_fract_y'] += new_fract_y
data.data['_atom_site_fract_z'] += new_fract_z # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
'\n This fixes inconsistencies in naming of several magCIF tags\n as a result of magCIF being in widespread use prior to\n specification being finalized (on advice of Branton Campbell).\n '
if self.feature_flags['magcif']:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ['_space_group_symop_magn_operation.xyz', '_space_group_symop_magn_centering.xyz', '_space_group_magn.name_BNS', '_space_group_magn.number_BNS', '_atom_site_moment_crystalaxis_x', '_atom_site_moment_crystalaxis_y', '_atom_site_moment_crystalaxis_z', '_atom_site_moment_label']
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = '_'.join(correct_key.split('.'))
test_key = '_'.join(original_key.split('.'))
if trial_key == test_key:
changes_to_make[correct_key] = original_key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['correct_key']] # depends on [control=['for'], data=['original_key']]
# make changes
for (correct_key, original_key) in changes_to_make.items():
data.data[correct_key] = data.data[original_key] # depends on [control=['for'], data=[]]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {'_magnetic_space_group.transform_to_standard_Pp_abc': '_space_group_magn.transform_BNS_Pp_abc'}
changes_to_make = {}
for (interim_key, final_key) in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(changes_to_make) > 0:
self.errors.append('Keys changed to match new magCIF specification.') # depends on [control=['if'], data=[]]
for (final_key, interim_key) in changes_to_make.items():
data.data[final_key] = data.data[interim_key] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3.0, 2 / 3.0)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for (idx, frac) in enumerate(data.data[label]):
try:
frac = str2float(frac) # depends on [control=['try'], data=[]]
except:
# co-ordinate might not be defined e.g. '?'
continue # depends on [control=['except'], data=[]]
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 0.0001:
fracs_to_change[label, idx] = str(comparison_frac) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['comparison_frac']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['label']] # depends on [control=['for'], data=['label']]
if fracs_to_change:
self.errors.append('Some fractional co-ordinates rounded to ideal values to avoid finite precision errors.')
for ((label, idx), val) in fracs_to_change.items():
data.data[label][idx] = val # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return data |
def connect(self):
"""
Connects to Redis
"""
logger.info("Connecting to Redis on {host}:{port}...".format(
host=self.host, port=self.port))
super(RedisSubscriber, self).connect()
logger.info("Successfully connected to Redis")
# Subscribe to channel
self.pubsub = self.client.pubsub()
self.pubsub.subscribe(self.channel)
logger.info("Subscribed to [{channel}] Redis channel".format(
channel=self.channel))
# Start listening
t = Thread(target=self.listen)
t.setDaemon(True)
t.start() | def function[connect, parameter[self]]:
constant[
Connects to Redis
]
call[name[logger].info, parameter[call[constant[Connecting to Redis on {host}:{port}...].format, parameter[]]]]
call[call[name[super], parameter[name[RedisSubscriber], name[self]]].connect, parameter[]]
call[name[logger].info, parameter[constant[Successfully connected to Redis]]]
name[self].pubsub assign[=] call[name[self].client.pubsub, parameter[]]
call[name[self].pubsub.subscribe, parameter[name[self].channel]]
call[name[logger].info, parameter[call[constant[Subscribed to [{channel}] Redis channel].format, parameter[]]]]
variable[t] assign[=] call[name[Thread], parameter[]]
call[name[t].setDaemon, parameter[constant[True]]]
call[name[t].start, parameter[]] | keyword[def] identifier[connect] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[host] = identifier[self] . identifier[host] , identifier[port] = identifier[self] . identifier[port] ))
identifier[super] ( identifier[RedisSubscriber] , identifier[self] ). identifier[connect] ()
identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[pubsub] = identifier[self] . identifier[client] . identifier[pubsub] ()
identifier[self] . identifier[pubsub] . identifier[subscribe] ( identifier[self] . identifier[channel] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[channel] = identifier[self] . identifier[channel] ))
identifier[t] = identifier[Thread] ( identifier[target] = identifier[self] . identifier[listen] )
identifier[t] . identifier[setDaemon] ( keyword[True] )
identifier[t] . identifier[start] () | def connect(self):
"""
Connects to Redis
"""
logger.info('Connecting to Redis on {host}:{port}...'.format(host=self.host, port=self.port))
super(RedisSubscriber, self).connect()
logger.info('Successfully connected to Redis')
# Subscribe to channel
self.pubsub = self.client.pubsub()
self.pubsub.subscribe(self.channel)
logger.info('Subscribed to [{channel}] Redis channel'.format(channel=self.channel))
# Start listening
t = Thread(target=self.listen)
t.setDaemon(True)
t.start() |
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature) | def function[register, parameter[self, name, method, method_signature]]:
constant[Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
]
if call[name[inspect].ismethod, parameter[name[method]]] begin[:]
<ast.Raise object at 0x7da18f722c80>
call[name[self]._name_to_method_info][name[name]] assign[=] call[name[MethodInfo], parameter[name[name], name[method], name[method_signature]]] | keyword[def] identifier[register] ( identifier[self] , identifier[name] , identifier[method] , identifier[method_signature] = keyword[None] ):
literal[string]
keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[method] ):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[_name_to_method_info] [ identifier[name] ]= identifier[MethodInfo] ( identifier[name] , identifier[method] , identifier[method_signature] ) | def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception('typedjsonrpc does not support making class methods into endpoints') # depends on [control=['if'], data=[]]
self._name_to_method_info[name] = MethodInfo(name, method, method_signature) |
def loudness(self, gain_db=-10.0, reference_level=65.0):
'''Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
'''
if not is_number(gain_db):
raise ValueError('gain_db must be a number.')
if not is_number(reference_level):
raise ValueError('reference_level must be a number')
if reference_level > 75 or reference_level < 50:
raise ValueError('reference_level must be between 50 and 75')
effect_args = [
'loudness',
'{:f}'.format(gain_db),
'{:f}'.format(reference_level)
]
self.effects.extend(effect_args)
self.effects_log.append('loudness')
return self | def function[loudness, parameter[self, gain_db, reference_level]]:
constant[Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
]
if <ast.UnaryOp object at 0x7da1b021e680> begin[:]
<ast.Raise object at 0x7da20c6ab100>
if <ast.UnaryOp object at 0x7da20c6a9270> begin[:]
<ast.Raise object at 0x7da20c6a9f00>
if <ast.BoolOp object at 0x7da20c6a9210> begin[:]
<ast.Raise object at 0x7da20c6a96f0>
variable[effect_args] assign[=] list[[<ast.Constant object at 0x7da20c6aaef0>, <ast.Call object at 0x7da20c6a8ca0>, <ast.Call object at 0x7da20c6aaa70>]]
call[name[self].effects.extend, parameter[name[effect_args]]]
call[name[self].effects_log.append, parameter[constant[loudness]]]
return[name[self]] | keyword[def] identifier[loudness] ( identifier[self] , identifier[gain_db] =- literal[int] , identifier[reference_level] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[is_number] ( identifier[gain_db] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[is_number] ( identifier[reference_level] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[reference_level] > literal[int] keyword[or] identifier[reference_level] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[effect_args] =[
literal[string] ,
literal[string] . identifier[format] ( identifier[gain_db] ),
literal[string] . identifier[format] ( identifier[reference_level] )
]
identifier[self] . identifier[effects] . identifier[extend] ( identifier[effect_args] )
identifier[self] . identifier[effects_log] . identifier[append] ( literal[string] )
keyword[return] identifier[self] | def loudness(self, gain_db=-10.0, reference_level=65.0):
"""Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
"""
if not is_number(gain_db):
raise ValueError('gain_db must be a number.') # depends on [control=['if'], data=[]]
if not is_number(reference_level):
raise ValueError('reference_level must be a number') # depends on [control=['if'], data=[]]
if reference_level > 75 or reference_level < 50:
raise ValueError('reference_level must be between 50 and 75') # depends on [control=['if'], data=[]]
effect_args = ['loudness', '{:f}'.format(gain_db), '{:f}'.format(reference_level)]
self.effects.extend(effect_args)
self.effects_log.append('loudness')
return self |
def show_context_menu(self, item, mouse_pos=None):
"Open a popup menu with options regarding the selected object"
if item:
d = self.tree.GetItemData(item)
if d:
obj = d.GetData()
if obj:
# highligh and store the selected object:
self.highlight(obj.wx_obj)
self.obj = obj
# make the context menu
menu = wx.Menu()
id_del, id_dup, id_raise, id_lower = [wx.NewId() for i
in range(4)]
menu.Append(id_del, "Delete")
menu.Append(id_dup, "Duplicate")
menu.Append(id_raise, "Bring to Front")
menu.Append(id_lower, "Send to Back")
# make submenu!
sm = wx.Menu()
for ctrl in sorted(obj._meta.valid_children,
key=lambda c:
registry.ALL.index(c._meta.name)):
new_id = wx.NewId()
sm.Append(new_id, ctrl._meta.name)
self.Bind(wx.EVT_MENU,
lambda evt, ctrl=ctrl: self.add_child(ctrl, mouse_pos),
id=new_id)
menu.AppendMenu(wx.NewId(), "Add child", sm)
self.Bind(wx.EVT_MENU, self.delete, id=id_del)
self.Bind(wx.EVT_MENU, self.duplicate, id=id_dup)
self.Bind(wx.EVT_MENU, self.bring_to_front, id=id_raise)
self.Bind(wx.EVT_MENU, self.send_to_back, id=id_lower)
self.PopupMenu(menu)
menu.Destroy()
self.load_object(self.root_obj) | def function[show_context_menu, parameter[self, item, mouse_pos]]:
constant[Open a popup menu with options regarding the selected object]
if name[item] begin[:]
variable[d] assign[=] call[name[self].tree.GetItemData, parameter[name[item]]]
if name[d] begin[:]
variable[obj] assign[=] call[name[d].GetData, parameter[]]
if name[obj] begin[:]
call[name[self].highlight, parameter[name[obj].wx_obj]]
name[self].obj assign[=] name[obj]
variable[menu] assign[=] call[name[wx].Menu, parameter[]]
<ast.Tuple object at 0x7da1b0214700> assign[=] <ast.ListComp object at 0x7da1b02175b0>
call[name[menu].Append, parameter[name[id_del], constant[Delete]]]
call[name[menu].Append, parameter[name[id_dup], constant[Duplicate]]]
call[name[menu].Append, parameter[name[id_raise], constant[Bring to Front]]]
call[name[menu].Append, parameter[name[id_lower], constant[Send to Back]]]
variable[sm] assign[=] call[name[wx].Menu, parameter[]]
for taget[name[ctrl]] in starred[call[name[sorted], parameter[name[obj]._meta.valid_children]]] begin[:]
variable[new_id] assign[=] call[name[wx].NewId, parameter[]]
call[name[sm].Append, parameter[name[new_id], name[ctrl]._meta.name]]
call[name[self].Bind, parameter[name[wx].EVT_MENU, <ast.Lambda object at 0x7da1b02163b0>]]
call[name[menu].AppendMenu, parameter[call[name[wx].NewId, parameter[]], constant[Add child], name[sm]]]
call[name[self].Bind, parameter[name[wx].EVT_MENU, name[self].delete]]
call[name[self].Bind, parameter[name[wx].EVT_MENU, name[self].duplicate]]
call[name[self].Bind, parameter[name[wx].EVT_MENU, name[self].bring_to_front]]
call[name[self].Bind, parameter[name[wx].EVT_MENU, name[self].send_to_back]]
call[name[self].PopupMenu, parameter[name[menu]]]
call[name[menu].Destroy, parameter[]]
call[name[self].load_object, parameter[name[self].root_obj]] | keyword[def] identifier[show_context_menu] ( identifier[self] , identifier[item] , identifier[mouse_pos] = keyword[None] ):
literal[string]
keyword[if] identifier[item] :
identifier[d] = identifier[self] . identifier[tree] . identifier[GetItemData] ( identifier[item] )
keyword[if] identifier[d] :
identifier[obj] = identifier[d] . identifier[GetData] ()
keyword[if] identifier[obj] :
identifier[self] . identifier[highlight] ( identifier[obj] . identifier[wx_obj] )
identifier[self] . identifier[obj] = identifier[obj]
identifier[menu] = identifier[wx] . identifier[Menu] ()
identifier[id_del] , identifier[id_dup] , identifier[id_raise] , identifier[id_lower] =[ identifier[wx] . identifier[NewId] () keyword[for] identifier[i]
keyword[in] identifier[range] ( literal[int] )]
identifier[menu] . identifier[Append] ( identifier[id_del] , literal[string] )
identifier[menu] . identifier[Append] ( identifier[id_dup] , literal[string] )
identifier[menu] . identifier[Append] ( identifier[id_raise] , literal[string] )
identifier[menu] . identifier[Append] ( identifier[id_lower] , literal[string] )
identifier[sm] = identifier[wx] . identifier[Menu] ()
keyword[for] identifier[ctrl] keyword[in] identifier[sorted] ( identifier[obj] . identifier[_meta] . identifier[valid_children] ,
identifier[key] = keyword[lambda] identifier[c] :
identifier[registry] . identifier[ALL] . identifier[index] ( identifier[c] . identifier[_meta] . identifier[name] )):
identifier[new_id] = identifier[wx] . identifier[NewId] ()
identifier[sm] . identifier[Append] ( identifier[new_id] , identifier[ctrl] . identifier[_meta] . identifier[name] )
identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] ,
keyword[lambda] identifier[evt] , identifier[ctrl] = identifier[ctrl] : identifier[self] . identifier[add_child] ( identifier[ctrl] , identifier[mouse_pos] ),
identifier[id] = identifier[new_id] )
identifier[menu] . identifier[AppendMenu] ( identifier[wx] . identifier[NewId] (), literal[string] , identifier[sm] )
identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] , identifier[self] . identifier[delete] , identifier[id] = identifier[id_del] )
identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] , identifier[self] . identifier[duplicate] , identifier[id] = identifier[id_dup] )
identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] , identifier[self] . identifier[bring_to_front] , identifier[id] = identifier[id_raise] )
identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] , identifier[self] . identifier[send_to_back] , identifier[id] = identifier[id_lower] )
identifier[self] . identifier[PopupMenu] ( identifier[menu] )
identifier[menu] . identifier[Destroy] ()
identifier[self] . identifier[load_object] ( identifier[self] . identifier[root_obj] ) | def show_context_menu(self, item, mouse_pos=None):
"""Open a popup menu with options regarding the selected object"""
if item:
d = self.tree.GetItemData(item)
if d:
obj = d.GetData()
if obj:
# highligh and store the selected object:
self.highlight(obj.wx_obj)
self.obj = obj
# make the context menu
menu = wx.Menu()
(id_del, id_dup, id_raise, id_lower) = [wx.NewId() for i in range(4)]
menu.Append(id_del, 'Delete')
menu.Append(id_dup, 'Duplicate')
menu.Append(id_raise, 'Bring to Front')
menu.Append(id_lower, 'Send to Back')
# make submenu!
sm = wx.Menu()
for ctrl in sorted(obj._meta.valid_children, key=lambda c: registry.ALL.index(c._meta.name)):
new_id = wx.NewId()
sm.Append(new_id, ctrl._meta.name)
self.Bind(wx.EVT_MENU, lambda evt, ctrl=ctrl: self.add_child(ctrl, mouse_pos), id=new_id) # depends on [control=['for'], data=['ctrl']]
menu.AppendMenu(wx.NewId(), 'Add child', sm)
self.Bind(wx.EVT_MENU, self.delete, id=id_del)
self.Bind(wx.EVT_MENU, self.duplicate, id=id_dup)
self.Bind(wx.EVT_MENU, self.bring_to_front, id=id_raise)
self.Bind(wx.EVT_MENU, self.send_to_back, id=id_lower)
self.PopupMenu(menu)
menu.Destroy()
self.load_object(self.root_obj) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def root_sa_root_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
enable = ET.SubElement(root, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[root_sa_root_enable, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[root_sa] assign[=] call[name[ET].SubElement, parameter[name[config], constant[root-sa]]]
variable[root] assign[=] call[name[ET].SubElement, parameter[name[root_sa], constant[root]]]
variable[enable] assign[=] call[name[ET].SubElement, parameter[name[root], constant[enable]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[root_sa_root_enable] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[root_sa] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[root] = identifier[ET] . identifier[SubElement] ( identifier[root_sa] , literal[string] )
identifier[enable] = identifier[ET] . identifier[SubElement] ( identifier[root] , literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def root_sa_root_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
root_sa = ET.SubElement(config, 'root-sa', xmlns='urn:brocade.com:mgmt:brocade-aaa')
root = ET.SubElement(root_sa, 'root')
enable = ET.SubElement(root, 'enable')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def downloadURL(url, filename):
"""
Inconditianilly download the URL in a temporary directory.
When finished, the file is moved in the real directory.
Like this an other process will not attempt to extract an inclomplete file.
"""
path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, 'tmp', filename)
path_bviewfile = os.path.join(c.raw_data, c.bview_dir, filename)
try:
f = urlopen(url)
except:
return False
if f.getcode() != 200:
publisher.warning('{} unavailable, code: {}'.format(url, f.getcode()))
return False
try:
with open(path_temp_bviewfile, 'w') as outfile:
outfile.write(f.read())
os.rename(path_temp_bviewfile, path_bviewfile)
except:
os.remove(path_temp_bviewfile)
return False
return True | def function[downloadURL, parameter[url, filename]]:
constant[
Inconditianilly download the URL in a temporary directory.
When finished, the file is moved in the real directory.
Like this an other process will not attempt to extract an inclomplete file.
]
variable[path_temp_bviewfile] assign[=] call[name[os].path.join, parameter[name[c].raw_data, name[c].bview_dir, constant[tmp], name[filename]]]
variable[path_bviewfile] assign[=] call[name[os].path.join, parameter[name[c].raw_data, name[c].bview_dir, name[filename]]]
<ast.Try object at 0x7da1b00179a0>
if compare[call[name[f].getcode, parameter[]] not_equal[!=] constant[200]] begin[:]
call[name[publisher].warning, parameter[call[constant[{} unavailable, code: {}].format, parameter[name[url], call[name[f].getcode, parameter[]]]]]]
return[constant[False]]
<ast.Try object at 0x7da1b0014190>
return[constant[True]] | keyword[def] identifier[downloadURL] ( identifier[url] , identifier[filename] ):
literal[string]
identifier[path_temp_bviewfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[c] . identifier[raw_data] , identifier[c] . identifier[bview_dir] , literal[string] , identifier[filename] )
identifier[path_bviewfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[c] . identifier[raw_data] , identifier[c] . identifier[bview_dir] , identifier[filename] )
keyword[try] :
identifier[f] = identifier[urlopen] ( identifier[url] )
keyword[except] :
keyword[return] keyword[False]
keyword[if] identifier[f] . identifier[getcode] ()!= literal[int] :
identifier[publisher] . identifier[warning] ( literal[string] . identifier[format] ( identifier[url] , identifier[f] . identifier[getcode] ()))
keyword[return] keyword[False]
keyword[try] :
keyword[with] identifier[open] ( identifier[path_temp_bviewfile] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[f] . identifier[read] ())
identifier[os] . identifier[rename] ( identifier[path_temp_bviewfile] , identifier[path_bviewfile] )
keyword[except] :
identifier[os] . identifier[remove] ( identifier[path_temp_bviewfile] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def downloadURL(url, filename):
"""
Inconditianilly download the URL in a temporary directory.
When finished, the file is moved in the real directory.
Like this an other process will not attempt to extract an inclomplete file.
"""
path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, 'tmp', filename)
path_bviewfile = os.path.join(c.raw_data, c.bview_dir, filename)
try:
f = urlopen(url) # depends on [control=['try'], data=[]]
except:
return False # depends on [control=['except'], data=[]]
if f.getcode() != 200:
publisher.warning('{} unavailable, code: {}'.format(url, f.getcode()))
return False # depends on [control=['if'], data=[]]
try:
with open(path_temp_bviewfile, 'w') as outfile:
outfile.write(f.read()) # depends on [control=['with'], data=['outfile']]
os.rename(path_temp_bviewfile, path_bviewfile) # depends on [control=['try'], data=[]]
except:
os.remove(path_temp_bviewfile)
return False # depends on [control=['except'], data=[]]
return True |
def get_report(self, value):
"""Return provided field Python value formatted for use in report filter"""
if self.multiselect:
value = value or []
children = []
for child in value:
children.append(self.cast_to_report(child))
return children
return self.cast_to_report(value) | def function[get_report, parameter[self, value]]:
constant[Return provided field Python value formatted for use in report filter]
if name[self].multiselect begin[:]
variable[value] assign[=] <ast.BoolOp object at 0x7da2044c1c00>
variable[children] assign[=] list[[]]
for taget[name[child]] in starred[name[value]] begin[:]
call[name[children].append, parameter[call[name[self].cast_to_report, parameter[name[child]]]]]
return[name[children]]
return[call[name[self].cast_to_report, parameter[name[value]]]] | keyword[def] identifier[get_report] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[self] . identifier[multiselect] :
identifier[value] = identifier[value] keyword[or] []
identifier[children] =[]
keyword[for] identifier[child] keyword[in] identifier[value] :
identifier[children] . identifier[append] ( identifier[self] . identifier[cast_to_report] ( identifier[child] ))
keyword[return] identifier[children]
keyword[return] identifier[self] . identifier[cast_to_report] ( identifier[value] ) | def get_report(self, value):
"""Return provided field Python value formatted for use in report filter"""
if self.multiselect:
value = value or []
children = []
for child in value:
children.append(self.cast_to_report(child)) # depends on [control=['for'], data=['child']]
return children # depends on [control=['if'], data=[]]
return self.cast_to_report(value) |
def nautical(kilometers=0, meters=0, miles=0, feet=0):
"""
TODO docs.
"""
ret = 0.
if feet:
kilometers += feet / ft(1.)
if miles:
kilometers += km(miles=miles)
if meters:
kilometers += meters / 1000.
ret += kilometers / 1.852
return ret | def function[nautical, parameter[kilometers, meters, miles, feet]]:
constant[
TODO docs.
]
variable[ret] assign[=] constant[0.0]
if name[feet] begin[:]
<ast.AugAssign object at 0x7da2044c19c0>
if name[miles] begin[:]
<ast.AugAssign object at 0x7da2044c01c0>
if name[meters] begin[:]
<ast.AugAssign object at 0x7da2044c3b20>
<ast.AugAssign object at 0x7da2044c24d0>
return[name[ret]] | keyword[def] identifier[nautical] ( identifier[kilometers] = literal[int] , identifier[meters] = literal[int] , identifier[miles] = literal[int] , identifier[feet] = literal[int] ):
literal[string]
identifier[ret] = literal[int]
keyword[if] identifier[feet] :
identifier[kilometers] += identifier[feet] / identifier[ft] ( literal[int] )
keyword[if] identifier[miles] :
identifier[kilometers] += identifier[km] ( identifier[miles] = identifier[miles] )
keyword[if] identifier[meters] :
identifier[kilometers] += identifier[meters] / literal[int]
identifier[ret] += identifier[kilometers] / literal[int]
keyword[return] identifier[ret] | def nautical(kilometers=0, meters=0, miles=0, feet=0):
"""
TODO docs.
"""
ret = 0.0
if feet:
kilometers += feet / ft(1.0) # depends on [control=['if'], data=[]]
if miles:
kilometers += km(miles=miles) # depends on [control=['if'], data=[]]
if meters:
kilometers += meters / 1000.0 # depends on [control=['if'], data=[]]
ret += kilometers / 1.852
return ret |
def _parse_caps_cpu(node):
'''
Parse the <cpu> element of the domain capabilities
'''
result = {}
for mode in node.findall('mode'):
if not mode.get('supported') == 'yes':
continue
name = mode.get('name')
if name == 'host-passthrough':
result[name] = True
elif name == 'host-model':
host_model = {}
model_node = mode.find('model')
if model_node is not None:
model = {
'name': model_node.text
}
vendor_id = model_node.get('vendor_id')
if vendor_id:
model['vendor_id'] = vendor_id
fallback = model_node.get('fallback')
if fallback:
model['fallback'] = fallback
host_model['model'] = model
vendor = mode.find('vendor').text if mode.find('vendor') is not None else None
if vendor:
host_model['vendor'] = vendor
features = {feature.get('name'): feature.get('policy') for feature in mode.findall('feature')}
if features:
host_model['features'] = features
result[name] = host_model
elif name == 'custom':
custom_model = {}
models = {model.text: model.get('usable') for model in mode.findall('model')}
if models:
custom_model['models'] = models
result[name] = custom_model
return result | def function[_parse_caps_cpu, parameter[node]]:
constant[
Parse the <cpu> element of the domain capabilities
]
variable[result] assign[=] dictionary[[], []]
for taget[name[mode]] in starred[call[name[node].findall, parameter[constant[mode]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b21f98d0> begin[:]
continue
variable[name] assign[=] call[name[mode].get, parameter[constant[name]]]
if compare[name[name] equal[==] constant[host-passthrough]] begin[:]
call[name[result]][name[name]] assign[=] constant[True]
return[name[result]] | keyword[def] identifier[_parse_caps_cpu] ( identifier[node] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[mode] keyword[in] identifier[node] . identifier[findall] ( literal[string] ):
keyword[if] keyword[not] identifier[mode] . identifier[get] ( literal[string] )== literal[string] :
keyword[continue]
identifier[name] = identifier[mode] . identifier[get] ( literal[string] )
keyword[if] identifier[name] == literal[string] :
identifier[result] [ identifier[name] ]= keyword[True]
keyword[elif] identifier[name] == literal[string] :
identifier[host_model] ={}
identifier[model_node] = identifier[mode] . identifier[find] ( literal[string] )
keyword[if] identifier[model_node] keyword[is] keyword[not] keyword[None] :
identifier[model] ={
literal[string] : identifier[model_node] . identifier[text]
}
identifier[vendor_id] = identifier[model_node] . identifier[get] ( literal[string] )
keyword[if] identifier[vendor_id] :
identifier[model] [ literal[string] ]= identifier[vendor_id]
identifier[fallback] = identifier[model_node] . identifier[get] ( literal[string] )
keyword[if] identifier[fallback] :
identifier[model] [ literal[string] ]= identifier[fallback]
identifier[host_model] [ literal[string] ]= identifier[model]
identifier[vendor] = identifier[mode] . identifier[find] ( literal[string] ). identifier[text] keyword[if] identifier[mode] . identifier[find] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[if] identifier[vendor] :
identifier[host_model] [ literal[string] ]= identifier[vendor]
identifier[features] ={ identifier[feature] . identifier[get] ( literal[string] ): identifier[feature] . identifier[get] ( literal[string] ) keyword[for] identifier[feature] keyword[in] identifier[mode] . identifier[findall] ( literal[string] )}
keyword[if] identifier[features] :
identifier[host_model] [ literal[string] ]= identifier[features]
identifier[result] [ identifier[name] ]= identifier[host_model]
keyword[elif] identifier[name] == literal[string] :
identifier[custom_model] ={}
identifier[models] ={ identifier[model] . identifier[text] : identifier[model] . identifier[get] ( literal[string] ) keyword[for] identifier[model] keyword[in] identifier[mode] . identifier[findall] ( literal[string] )}
keyword[if] identifier[models] :
identifier[custom_model] [ literal[string] ]= identifier[models]
identifier[result] [ identifier[name] ]= identifier[custom_model]
keyword[return] identifier[result] | def _parse_caps_cpu(node):
"""
Parse the <cpu> element of the domain capabilities
"""
result = {}
for mode in node.findall('mode'):
if not mode.get('supported') == 'yes':
continue # depends on [control=['if'], data=[]]
name = mode.get('name')
if name == 'host-passthrough':
result[name] = True # depends on [control=['if'], data=['name']]
elif name == 'host-model':
host_model = {}
model_node = mode.find('model')
if model_node is not None:
model = {'name': model_node.text}
vendor_id = model_node.get('vendor_id')
if vendor_id:
model['vendor_id'] = vendor_id # depends on [control=['if'], data=[]]
fallback = model_node.get('fallback')
if fallback:
model['fallback'] = fallback # depends on [control=['if'], data=[]]
host_model['model'] = model # depends on [control=['if'], data=['model_node']]
vendor = mode.find('vendor').text if mode.find('vendor') is not None else None
if vendor:
host_model['vendor'] = vendor # depends on [control=['if'], data=[]]
features = {feature.get('name'): feature.get('policy') for feature in mode.findall('feature')}
if features:
host_model['features'] = features # depends on [control=['if'], data=[]]
result[name] = host_model # depends on [control=['if'], data=['name']]
elif name == 'custom':
custom_model = {}
models = {model.text: model.get('usable') for model in mode.findall('model')}
if models:
custom_model['models'] = models # depends on [control=['if'], data=[]]
result[name] = custom_model # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['mode']]
return result |
def start(inqueue, outqueue=None):
"""Starts the listener with incoming and outgoing queues."""
conf.init(), db.init(conf.DbPath)
Listener(inqueue, outqueue).run() | def function[start, parameter[inqueue, outqueue]]:
constant[Starts the listener with incoming and outgoing queues.]
tuple[[<ast.Call object at 0x7da1b2459c90>, <ast.Call object at 0x7da1b2459ff0>]]
call[call[name[Listener], parameter[name[inqueue], name[outqueue]]].run, parameter[]] | keyword[def] identifier[start] ( identifier[inqueue] , identifier[outqueue] = keyword[None] ):
literal[string]
identifier[conf] . identifier[init] (), identifier[db] . identifier[init] ( identifier[conf] . identifier[DbPath] )
identifier[Listener] ( identifier[inqueue] , identifier[outqueue] ). identifier[run] () | def start(inqueue, outqueue=None):
"""Starts the listener with incoming and outgoing queues."""
(conf.init(), db.init(conf.DbPath))
Listener(inqueue, outqueue).run() |
def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if result[1] == 404:
return result
else:
return {'name': targetId, 'data': self._jsonify(result)}, 200
else:
return None, 404
else:
return None, 500 | def function[get, parameter[self, targetId]]:
constant[
Yields the analysed wav data.
:param targetId:
:return:
]
variable[result] assign[=] call[name[self]._targetController.analyse, parameter[name[targetId]]]
if name[result] begin[:]
if compare[call[name[len], parameter[name[result]]] equal[==] constant[2]] begin[:]
if compare[call[name[result]][constant[1]] equal[==] constant[404]] begin[:]
return[name[result]] | keyword[def] identifier[get] ( identifier[self] , identifier[targetId] ):
literal[string]
identifier[result] = identifier[self] . identifier[_targetController] . identifier[analyse] ( identifier[targetId] )
keyword[if] identifier[result] :
keyword[if] identifier[len] ( identifier[result] )== literal[int] :
keyword[if] identifier[result] [ literal[int] ]== literal[int] :
keyword[return] identifier[result]
keyword[else] :
keyword[return] { literal[string] : identifier[targetId] , literal[string] : identifier[self] . identifier[_jsonify] ( identifier[result] )}, literal[int]
keyword[else] :
keyword[return] keyword[None] , literal[int]
keyword[else] :
keyword[return] keyword[None] , literal[int] | def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if result[1] == 404:
return result # depends on [control=['if'], data=[]]
else:
return ({'name': targetId, 'data': self._jsonify(result)}, 200) # depends on [control=['if'], data=[]]
else:
return (None, 404) # depends on [control=['if'], data=[]]
else:
return (None, 500) |
def article(self):
"""
| Comment: Id of the associated article, if present
"""
if self.api and self.article_id:
return self.api._get_article(self.article_id) | def function[article, parameter[self]]:
constant[
| Comment: Id of the associated article, if present
]
if <ast.BoolOp object at 0x7da20c7cbee0> begin[:]
return[call[name[self].api._get_article, parameter[name[self].article_id]]] | keyword[def] identifier[article] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[api] keyword[and] identifier[self] . identifier[article_id] :
keyword[return] identifier[self] . identifier[api] . identifier[_get_article] ( identifier[self] . identifier[article_id] ) | def article(self):
"""
| Comment: Id of the associated article, if present
"""
if self.api and self.article_id:
return self.api._get_article(self.article_id) # depends on [control=['if'], data=[]] |
def set_id(self,this_id):
"""
Set the identifier for the token
@type this_id: string
@param this_id: the identifier
"""
if self.type == 'NAF':
return self.node.set('id',this_id)
elif self.type == 'KAF':
return self.node.set('wid',this_id) | def function[set_id, parameter[self, this_id]]:
constant[
Set the identifier for the token
@type this_id: string
@param this_id: the identifier
]
if compare[name[self].type equal[==] constant[NAF]] begin[:]
return[call[name[self].node.set, parameter[constant[id], name[this_id]]]] | keyword[def] identifier[set_id] ( identifier[self] , identifier[this_id] ):
literal[string]
keyword[if] identifier[self] . identifier[type] == literal[string] :
keyword[return] identifier[self] . identifier[node] . identifier[set] ( literal[string] , identifier[this_id] )
keyword[elif] identifier[self] . identifier[type] == literal[string] :
keyword[return] identifier[self] . identifier[node] . identifier[set] ( literal[string] , identifier[this_id] ) | def set_id(self, this_id):
"""
Set the identifier for the token
@type this_id: string
@param this_id: the identifier
"""
if self.type == 'NAF':
return self.node.set('id', this_id) # depends on [control=['if'], data=[]]
elif self.type == 'KAF':
return self.node.set('wid', this_id) # depends on [control=['if'], data=[]] |
def _commit_timer_stopped(self, lCall):
"""We're shutting down, clean up our looping call..."""
if self._commit_looper is not lCall:
log.warning('_commit_timer_stopped with wrong timer:%s not:%s',
lCall, self._commit_looper)
else:
log.debug('_commit_timer_stopped: %s %s', lCall,
self._commit_looper)
self._commit_looper = None
self._commit_looper_d = None | def function[_commit_timer_stopped, parameter[self, lCall]]:
constant[We're shutting down, clean up our looping call...]
if compare[name[self]._commit_looper is_not name[lCall]] begin[:]
call[name[log].warning, parameter[constant[_commit_timer_stopped with wrong timer:%s not:%s], name[lCall], name[self]._commit_looper]] | keyword[def] identifier[_commit_timer_stopped] ( identifier[self] , identifier[lCall] ):
literal[string]
keyword[if] identifier[self] . identifier[_commit_looper] keyword[is] keyword[not] identifier[lCall] :
identifier[log] . identifier[warning] ( literal[string] ,
identifier[lCall] , identifier[self] . identifier[_commit_looper] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[lCall] ,
identifier[self] . identifier[_commit_looper] )
identifier[self] . identifier[_commit_looper] = keyword[None]
identifier[self] . identifier[_commit_looper_d] = keyword[None] | def _commit_timer_stopped(self, lCall):
"""We're shutting down, clean up our looping call..."""
if self._commit_looper is not lCall:
log.warning('_commit_timer_stopped with wrong timer:%s not:%s', lCall, self._commit_looper) # depends on [control=['if'], data=['lCall']]
else:
log.debug('_commit_timer_stopped: %s %s', lCall, self._commit_looper)
self._commit_looper = None
self._commit_looper_d = None |
def prepare_pids(self):
"""Prepare persistent identifiers."""
self.pids = []
for fetcher in self.pid_fetchers:
val = fetcher(None, self.revisions[-1][1])
if val:
self.pids.append(val) | def function[prepare_pids, parameter[self]]:
constant[Prepare persistent identifiers.]
name[self].pids assign[=] list[[]]
for taget[name[fetcher]] in starred[name[self].pid_fetchers] begin[:]
variable[val] assign[=] call[name[fetcher], parameter[constant[None], call[call[name[self].revisions][<ast.UnaryOp object at 0x7da1b0168f40>]][constant[1]]]]
if name[val] begin[:]
call[name[self].pids.append, parameter[name[val]]] | keyword[def] identifier[prepare_pids] ( identifier[self] ):
literal[string]
identifier[self] . identifier[pids] =[]
keyword[for] identifier[fetcher] keyword[in] identifier[self] . identifier[pid_fetchers] :
identifier[val] = identifier[fetcher] ( keyword[None] , identifier[self] . identifier[revisions] [- literal[int] ][ literal[int] ])
keyword[if] identifier[val] :
identifier[self] . identifier[pids] . identifier[append] ( identifier[val] ) | def prepare_pids(self):
"""Prepare persistent identifiers."""
self.pids = []
for fetcher in self.pid_fetchers:
val = fetcher(None, self.revisions[-1][1])
if val:
self.pids.append(val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fetcher']] |
def ssh_sa_ssh_server_cipher(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
cipher = ET.SubElement(server, "cipher")
cipher.text = kwargs.pop('cipher')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[ssh_sa_ssh_server_cipher, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[ssh_sa] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ssh-sa]]]
variable[ssh] assign[=] call[name[ET].SubElement, parameter[name[ssh_sa], constant[ssh]]]
variable[server] assign[=] call[name[ET].SubElement, parameter[name[ssh], constant[server]]]
variable[cipher] assign[=] call[name[ET].SubElement, parameter[name[server], constant[cipher]]]
name[cipher].text assign[=] call[name[kwargs].pop, parameter[constant[cipher]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[ssh_sa_ssh_server_cipher] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[ssh_sa] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[ssh] = identifier[ET] . identifier[SubElement] ( identifier[ssh_sa] , literal[string] )
identifier[server] = identifier[ET] . identifier[SubElement] ( identifier[ssh] , literal[string] )
identifier[cipher] = identifier[ET] . identifier[SubElement] ( identifier[server] , literal[string] )
identifier[cipher] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def ssh_sa_ssh_server_cipher(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
ssh_sa = ET.SubElement(config, 'ssh-sa', xmlns='urn:brocade.com:mgmt:brocade-sec-services')
ssh = ET.SubElement(ssh_sa, 'ssh')
server = ET.SubElement(ssh, 'server')
cipher = ET.SubElement(server, 'cipher')
cipher.text = kwargs.pop('cipher')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_pkg_version():
"""Get version string by parsing PKG-INFO."""
try:
with open("PKG-INFO", "r") as fp:
rgx = re.compile(r"Version: (\d+)")
for line in fp.readlines():
match = rgx.match(line)
if match:
return match.group(1)
except IOError:
return None | def function[get_pkg_version, parameter[]]:
constant[Get version string by parsing PKG-INFO.]
<ast.Try object at 0x7da1b0f9f190> | keyword[def] identifier[get_pkg_version] ():
literal[string]
keyword[try] :
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fp] :
identifier[rgx] = identifier[re] . identifier[compile] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[fp] . identifier[readlines] ():
identifier[match] = identifier[rgx] . identifier[match] ( identifier[line] )
keyword[if] identifier[match] :
keyword[return] identifier[match] . identifier[group] ( literal[int] )
keyword[except] identifier[IOError] :
keyword[return] keyword[None] | def get_pkg_version():
"""Get version string by parsing PKG-INFO."""
try:
with open('PKG-INFO', 'r') as fp:
rgx = re.compile('Version: (\\d+)')
for line in fp.readlines():
match = rgx.match(line)
if match:
return match.group(1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]]
except IOError:
return None # depends on [control=['except'], data=[]] |
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name | def function[visit_ImportFrom, parameter[self, node]]:
constant[Visit an from-import node.]
variable[line] assign[=] call[name[self]._code_lines][binary_operation[name[node].lineno - constant[1]]]
variable[module_name] assign[=] call[call[call[call[call[name[line].split, parameter[constant[from]]]][constant[1]].split, parameter[constant[import]]]][constant[0]].strip, parameter[]]
for taget[name[name]] in starred[name[node].names] begin[:]
variable[imported_name] assign[=] name[name].name
if name[name].asname begin[:]
variable[imported_name] assign[=] binary_operation[binary_operation[name[name].asname + constant[::]] + name[imported_name]]
call[name[self].imported_names][name[imported_name]] assign[=] name[module_name] | keyword[def] identifier[visit_ImportFrom] ( identifier[self] , identifier[node] ):
literal[string]
identifier[line] = identifier[self] . identifier[_code_lines] [ identifier[node] . identifier[lineno] - literal[int] ]
identifier[module_name] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[for] identifier[name] keyword[in] identifier[node] . identifier[names] :
identifier[imported_name] = identifier[name] . identifier[name]
keyword[if] identifier[name] . identifier[asname] :
identifier[imported_name] = identifier[name] . identifier[asname] + literal[string] + identifier[imported_name]
identifier[self] . identifier[imported_names] [ identifier[imported_name] ]= identifier[module_name] | def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split('from')[1].split('import')[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + '::' + imported_name # depends on [control=['if'], data=[]]
self.imported_names[imported_name] = module_name # depends on [control=['for'], data=['name']] |
def detect_format(self, candidates):
"""
Detects the format of the fileset from a list of possible
candidates. If multiple candidates match the potential files, e.g.
NiFTI-X (see dcm2niix) and NiFTI, then the first matching candidate is
selected.
If a 'format_name' was specified when the fileset was
created then that is used to select between the candidates. Otherwise
the file extensions of the primary path and potential auxiliary files,
or extensions of the files within the directory for directories are
matched against those specified for the file formats
Parameters
----------
candidates : FileFormat
A list of file-formats to select from.
"""
if self._format is not None:
raise ArcanaFileFormatError(
"Format has already been set for {}".format(self))
matches = [c for c in candidates if c.matches(self)]
if not matches:
raise ArcanaFileFormatError(
"None of the candidate file formats ({}) match {}"
.format(', '.join(str(c) for c in candidates), self))
return matches[0] | def function[detect_format, parameter[self, candidates]]:
constant[
Detects the format of the fileset from a list of possible
candidates. If multiple candidates match the potential files, e.g.
NiFTI-X (see dcm2niix) and NiFTI, then the first matching candidate is
selected.
If a 'format_name' was specified when the fileset was
created then that is used to select between the candidates. Otherwise
the file extensions of the primary path and potential auxiliary files,
or extensions of the files within the directory for directories are
matched against those specified for the file formats
Parameters
----------
candidates : FileFormat
A list of file-formats to select from.
]
if compare[name[self]._format is_not constant[None]] begin[:]
<ast.Raise object at 0x7da18bc72c80>
variable[matches] assign[=] <ast.ListComp object at 0x7da18bc71b70>
if <ast.UnaryOp object at 0x7da18bc72a40> begin[:]
<ast.Raise object at 0x7da18bc735e0>
return[call[name[matches]][constant[0]]] | keyword[def] identifier[detect_format] ( identifier[self] , identifier[candidates] ):
literal[string]
keyword[if] identifier[self] . identifier[_format] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ArcanaFileFormatError] (
literal[string] . identifier[format] ( identifier[self] ))
identifier[matches] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[candidates] keyword[if] identifier[c] . identifier[matches] ( identifier[self] )]
keyword[if] keyword[not] identifier[matches] :
keyword[raise] identifier[ArcanaFileFormatError] (
literal[string]
. identifier[format] ( literal[string] . identifier[join] ( identifier[str] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[candidates] ), identifier[self] ))
keyword[return] identifier[matches] [ literal[int] ] | def detect_format(self, candidates):
"""
Detects the format of the fileset from a list of possible
candidates. If multiple candidates match the potential files, e.g.
NiFTI-X (see dcm2niix) and NiFTI, then the first matching candidate is
selected.
If a 'format_name' was specified when the fileset was
created then that is used to select between the candidates. Otherwise
the file extensions of the primary path and potential auxiliary files,
or extensions of the files within the directory for directories are
matched against those specified for the file formats
Parameters
----------
candidates : FileFormat
A list of file-formats to select from.
"""
if self._format is not None:
raise ArcanaFileFormatError('Format has already been set for {}'.format(self)) # depends on [control=['if'], data=[]]
matches = [c for c in candidates if c.matches(self)]
if not matches:
raise ArcanaFileFormatError('None of the candidate file formats ({}) match {}'.format(', '.join((str(c) for c in candidates)), self)) # depends on [control=['if'], data=[]]
return matches[0] |
def _read_xml_db(self):
"""
read metadata from an xml string stored in a DB.
:return: the root element of the xml
:rtype: ElementTree.Element
"""
try:
metadata_str = self.db_io.read_metadata_from_uri(
self.layer_uri, 'xml')
root = ElementTree.fromstring(metadata_str)
return root
except HashNotFoundError:
return None | def function[_read_xml_db, parameter[self]]:
constant[
read metadata from an xml string stored in a DB.
:return: the root element of the xml
:rtype: ElementTree.Element
]
<ast.Try object at 0x7da1b0ca1180> | keyword[def] identifier[_read_xml_db] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[metadata_str] = identifier[self] . identifier[db_io] . identifier[read_metadata_from_uri] (
identifier[self] . identifier[layer_uri] , literal[string] )
identifier[root] = identifier[ElementTree] . identifier[fromstring] ( identifier[metadata_str] )
keyword[return] identifier[root]
keyword[except] identifier[HashNotFoundError] :
keyword[return] keyword[None] | def _read_xml_db(self):
"""
read metadata from an xml string stored in a DB.
:return: the root element of the xml
:rtype: ElementTree.Element
"""
try:
metadata_str = self.db_io.read_metadata_from_uri(self.layer_uri, 'xml')
root = ElementTree.fromstring(metadata_str)
return root # depends on [control=['try'], data=[]]
except HashNotFoundError:
return None # depends on [control=['except'], data=[]] |
def setup_logging(
config_directories=None,
config_file=None,
default_level=logging.INFO,
default_filename="logging.yml"
):
"""Setup logging configuration
"""
config_found = False
config_file_path = None
if config_file:
config_file_path = config_file
if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK):
config_found = True
else:
for directory in config_directories:
if directory is None:
continue
config_file_path = os.path.join(directory, default_filename)
if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK):
config_found = True
break
if config_found:
with open(config_file_path, 'rt') as ymlfile:
config = yaml.safe_load(ymlfile.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level) | def function[setup_logging, parameter[config_directories, config_file, default_level, default_filename]]:
constant[Setup logging configuration
]
variable[config_found] assign[=] constant[False]
variable[config_file_path] assign[=] constant[None]
if name[config_file] begin[:]
variable[config_file_path] assign[=] name[config_file]
if <ast.BoolOp object at 0x7da204566e60> begin[:]
variable[config_found] assign[=] constant[True]
if name[config_found] begin[:]
with call[name[open], parameter[name[config_file_path], constant[rt]]] begin[:]
variable[config] assign[=] call[name[yaml].safe_load, parameter[call[name[ymlfile].read, parameter[]]]]
call[name[logging].config.dictConfig, parameter[name[config]]] | keyword[def] identifier[setup_logging] (
identifier[config_directories] = keyword[None] ,
identifier[config_file] = keyword[None] ,
identifier[default_level] = identifier[logging] . identifier[INFO] ,
identifier[default_filename] = literal[string]
):
literal[string]
identifier[config_found] = keyword[False]
identifier[config_file_path] = keyword[None]
keyword[if] identifier[config_file] :
identifier[config_file_path] = identifier[config_file]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[config_file_path] ) keyword[and] identifier[os] . identifier[access] ( identifier[config_file_path] , identifier[os] . identifier[R_OK] ):
identifier[config_found] = keyword[True]
keyword[else] :
keyword[for] identifier[directory] keyword[in] identifier[config_directories] :
keyword[if] identifier[directory] keyword[is] keyword[None] :
keyword[continue]
identifier[config_file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[default_filename] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[config_file_path] ) keyword[and] identifier[os] . identifier[access] ( identifier[config_file_path] , identifier[os] . identifier[R_OK] ):
identifier[config_found] = keyword[True]
keyword[break]
keyword[if] identifier[config_found] :
keyword[with] identifier[open] ( identifier[config_file_path] , literal[string] ) keyword[as] identifier[ymlfile] :
identifier[config] = identifier[yaml] . identifier[safe_load] ( identifier[ymlfile] . identifier[read] ())
identifier[logging] . identifier[config] . identifier[dictConfig] ( identifier[config] )
keyword[else] :
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[default_level] ) | def setup_logging(config_directories=None, config_file=None, default_level=logging.INFO, default_filename='logging.yml'):
"""Setup logging configuration
"""
config_found = False
config_file_path = None
if config_file:
config_file_path = config_file
if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK):
config_found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for directory in config_directories:
if directory is None:
continue # depends on [control=['if'], data=[]]
config_file_path = os.path.join(directory, default_filename)
if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK):
config_found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['directory']]
if config_found:
with open(config_file_path, 'rt') as ymlfile:
config = yaml.safe_load(ymlfile.read()) # depends on [control=['with'], data=['ymlfile']]
logging.config.dictConfig(config) # depends on [control=['if'], data=[]]
else:
logging.basicConfig(level=default_level) |
def observe(self, body):
"""Compute the `Astrometric` position of a body from this location.
To compute the body's astrometric position, it is first asked
for its position at the time `t` of this position itself. The
distance to the body is then divided by the speed of light to
find how long it takes its light to arrive. Finally, the light
travel time is subtracted from `t` and the body is asked for a
series of increasingly exact positions to learn where it was
when it emitted the light that is now reaching this position.
>>> earth.at(t).observe(mars)
<Astrometric position and velocity at date t>
"""
p, v, t, light_time = body._observe_from_bcrs(self)
astrometric = Astrometric(p, v, t, observer_data=self.observer_data)
astrometric.light_time = light_time
return astrometric | def function[observe, parameter[self, body]]:
constant[Compute the `Astrometric` position of a body from this location.
To compute the body's astrometric position, it is first asked
for its position at the time `t` of this position itself. The
distance to the body is then divided by the speed of light to
find how long it takes its light to arrive. Finally, the light
travel time is subtracted from `t` and the body is asked for a
series of increasingly exact positions to learn where it was
when it emitted the light that is now reaching this position.
>>> earth.at(t).observe(mars)
<Astrometric position and velocity at date t>
]
<ast.Tuple object at 0x7da1b1631450> assign[=] call[name[body]._observe_from_bcrs, parameter[name[self]]]
variable[astrometric] assign[=] call[name[Astrometric], parameter[name[p], name[v], name[t]]]
name[astrometric].light_time assign[=] name[light_time]
return[name[astrometric]] | keyword[def] identifier[observe] ( identifier[self] , identifier[body] ):
literal[string]
identifier[p] , identifier[v] , identifier[t] , identifier[light_time] = identifier[body] . identifier[_observe_from_bcrs] ( identifier[self] )
identifier[astrometric] = identifier[Astrometric] ( identifier[p] , identifier[v] , identifier[t] , identifier[observer_data] = identifier[self] . identifier[observer_data] )
identifier[astrometric] . identifier[light_time] = identifier[light_time]
keyword[return] identifier[astrometric] | def observe(self, body):
"""Compute the `Astrometric` position of a body from this location.
To compute the body's astrometric position, it is first asked
for its position at the time `t` of this position itself. The
distance to the body is then divided by the speed of light to
find how long it takes its light to arrive. Finally, the light
travel time is subtracted from `t` and the body is asked for a
series of increasingly exact positions to learn where it was
when it emitted the light that is now reaching this position.
>>> earth.at(t).observe(mars)
<Astrometric position and velocity at date t>
"""
(p, v, t, light_time) = body._observe_from_bcrs(self)
astrometric = Astrometric(p, v, t, observer_data=self.observer_data)
astrometric.light_time = light_time
return astrometric |
def get_subtree(self, random_state, program=None):
"""Get a random subtree from the program.
Parameters
----------
random_state : RandomState instance
The random number generator.
program : list, optional (default=None)
The flattened tree representation of the program. If None, the
embedded tree in the object will be used.
Returns
-------
start, end : tuple of two ints
The indices of the start and end of the random subtree.
"""
if program is None:
program = self.program
# Choice of crossover points follows Koza's (1992) widely used approach
# of choosing functions 90% of the time and leaves 10% of the time.
probs = np.array([0.9 if isinstance(node, _Function) else 0.1
for node in program])
probs = np.cumsum(probs / probs.sum())
start = np.searchsorted(probs, random_state.uniform())
stack = 1
end = start
while stack > end - start:
node = program[end]
if isinstance(node, _Function):
stack += node.arity
end += 1
return start, end | def function[get_subtree, parameter[self, random_state, program]]:
constant[Get a random subtree from the program.
Parameters
----------
random_state : RandomState instance
The random number generator.
program : list, optional (default=None)
The flattened tree representation of the program. If None, the
embedded tree in the object will be used.
Returns
-------
start, end : tuple of two ints
The indices of the start and end of the random subtree.
]
if compare[name[program] is constant[None]] begin[:]
variable[program] assign[=] name[self].program
variable[probs] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1d760b0>]]
variable[probs] assign[=] call[name[np].cumsum, parameter[binary_operation[name[probs] / call[name[probs].sum, parameter[]]]]]
variable[start] assign[=] call[name[np].searchsorted, parameter[name[probs], call[name[random_state].uniform, parameter[]]]]
variable[stack] assign[=] constant[1]
variable[end] assign[=] name[start]
while compare[name[stack] greater[>] binary_operation[name[end] - name[start]]] begin[:]
variable[node] assign[=] call[name[program]][name[end]]
if call[name[isinstance], parameter[name[node], name[_Function]]] begin[:]
<ast.AugAssign object at 0x7da1b1d77370>
<ast.AugAssign object at 0x7da1b1d76e90>
return[tuple[[<ast.Name object at 0x7da1b1d77790>, <ast.Name object at 0x7da1b1d76cb0>]]] | keyword[def] identifier[get_subtree] ( identifier[self] , identifier[random_state] , identifier[program] = keyword[None] ):
literal[string]
keyword[if] identifier[program] keyword[is] keyword[None] :
identifier[program] = identifier[self] . identifier[program]
identifier[probs] = identifier[np] . identifier[array] ([ literal[int] keyword[if] identifier[isinstance] ( identifier[node] , identifier[_Function] ) keyword[else] literal[int]
keyword[for] identifier[node] keyword[in] identifier[program] ])
identifier[probs] = identifier[np] . identifier[cumsum] ( identifier[probs] / identifier[probs] . identifier[sum] ())
identifier[start] = identifier[np] . identifier[searchsorted] ( identifier[probs] , identifier[random_state] . identifier[uniform] ())
identifier[stack] = literal[int]
identifier[end] = identifier[start]
keyword[while] identifier[stack] > identifier[end] - identifier[start] :
identifier[node] = identifier[program] [ identifier[end] ]
keyword[if] identifier[isinstance] ( identifier[node] , identifier[_Function] ):
identifier[stack] += identifier[node] . identifier[arity]
identifier[end] += literal[int]
keyword[return] identifier[start] , identifier[end] | def get_subtree(self, random_state, program=None):
"""Get a random subtree from the program.
Parameters
----------
random_state : RandomState instance
The random number generator.
program : list, optional (default=None)
The flattened tree representation of the program. If None, the
embedded tree in the object will be used.
Returns
-------
start, end : tuple of two ints
The indices of the start and end of the random subtree.
"""
if program is None:
program = self.program # depends on [control=['if'], data=['program']]
# Choice of crossover points follows Koza's (1992) widely used approach
# of choosing functions 90% of the time and leaves 10% of the time.
probs = np.array([0.9 if isinstance(node, _Function) else 0.1 for node in program])
probs = np.cumsum(probs / probs.sum())
start = np.searchsorted(probs, random_state.uniform())
stack = 1
end = start
while stack > end - start:
node = program[end]
if isinstance(node, _Function):
stack += node.arity # depends on [control=['if'], data=[]]
end += 1 # depends on [control=['while'], data=['stack']]
return (start, end) |
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
vlan = ET.SubElement(vnetwork_dvpgs, "vlan")
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_vnetwork_dvpgs] assign[=] call[name[ET].Element, parameter[constant[get_vnetwork_dvpgs]]]
variable[config] assign[=] name[get_vnetwork_dvpgs]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_vnetwork_dvpgs], constant[output]]]
variable[vnetwork_dvpgs] assign[=] call[name[ET].SubElement, parameter[name[output], constant[vnetwork-dvpgs]]]
variable[vlan] assign[=] call[name[ET].SubElement, parameter[name[vnetwork_dvpgs], constant[vlan]]]
name[vlan].text assign[=] call[name[kwargs].pop, parameter[constant[vlan]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_vnetwork_dvpgs] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_vnetwork_dvpgs]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_vnetwork_dvpgs] , literal[string] )
identifier[vnetwork_dvpgs] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[vlan] = identifier[ET] . identifier[SubElement] ( identifier[vnetwork_dvpgs] , literal[string] )
identifier[vlan] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_vnetwork_dvpgs = ET.Element('get_vnetwork_dvpgs')
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, 'output')
vnetwork_dvpgs = ET.SubElement(output, 'vnetwork-dvpgs')
vlan = ET.SubElement(vnetwork_dvpgs, 'vlan')
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _extract_command_with_args(cmd):
"""Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional argument to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method.
"""
def _isint(value):
try:
int(value)
return True
except ValueError:
return False
equal_sign = cmd.find('=')
if equal_sign == -1:
return cmd, []
command = cmd[0:equal_sign]
args = cmd[equal_sign+1:].split(',')
converted = [x if not _isint(x) else int(x) for x in args]
return command, converted | def function[_extract_command_with_args, parameter[cmd]]:
constant[Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional argument to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method.
]
def function[_isint, parameter[value]]:
<ast.Try object at 0x7da1b26ad7b0>
variable[equal_sign] assign[=] call[name[cmd].find, parameter[constant[=]]]
if compare[name[equal_sign] equal[==] <ast.UnaryOp object at 0x7da1b26ac220>] begin[:]
return[tuple[[<ast.Name object at 0x7da1b26ace80>, <ast.List object at 0x7da1b26ae380>]]]
variable[command] assign[=] call[name[cmd]][<ast.Slice object at 0x7da1b26ae350>]
variable[args] assign[=] call[call[name[cmd]][<ast.Slice object at 0x7da1b26ad240>].split, parameter[constant[,]]]
variable[converted] assign[=] <ast.ListComp object at 0x7da1b26af3a0>
return[tuple[[<ast.Name object at 0x7da1b26aff40>, <ast.Name object at 0x7da1b26acd90>]]] | keyword[def] identifier[_extract_command_with_args] ( identifier[cmd] ):
literal[string]
keyword[def] identifier[_isint] ( identifier[value] ):
keyword[try] :
identifier[int] ( identifier[value] )
keyword[return] keyword[True]
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
identifier[equal_sign] = identifier[cmd] . identifier[find] ( literal[string] )
keyword[if] identifier[equal_sign] ==- literal[int] :
keyword[return] identifier[cmd] ,[]
identifier[command] = identifier[cmd] [ literal[int] : identifier[equal_sign] ]
identifier[args] = identifier[cmd] [ identifier[equal_sign] + literal[int] :]. identifier[split] ( literal[string] )
identifier[converted] =[ identifier[x] keyword[if] keyword[not] identifier[_isint] ( identifier[x] ) keyword[else] identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[args] ]
keyword[return] identifier[command] , identifier[converted] | def _extract_command_with_args(cmd):
"""Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional argument to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method.
"""
def _isint(value):
try:
int(value)
return True # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
equal_sign = cmd.find('=')
if equal_sign == -1:
return (cmd, []) # depends on [control=['if'], data=[]]
command = cmd[0:equal_sign]
args = cmd[equal_sign + 1:].split(',')
converted = [x if not _isint(x) else int(x) for x in args]
return (command, converted) |
def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self
x = self.sum(axis=1) * 0.5
y = x * 0.5
return AudioSamples(y, self.samplerate) | def function[mono, parameter[self]]:
constant[
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
]
if compare[name[self].channels equal[==] constant[1]] begin[:]
return[name[self]]
variable[x] assign[=] binary_operation[call[name[self].sum, parameter[]] * constant[0.5]]
variable[y] assign[=] binary_operation[name[x] * constant[0.5]]
return[call[name[AudioSamples], parameter[name[y], name[self].samplerate]]] | keyword[def] identifier[mono] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[channels] == literal[int] :
keyword[return] identifier[self]
identifier[x] = identifier[self] . identifier[sum] ( identifier[axis] = literal[int] )* literal[int]
identifier[y] = identifier[x] * literal[int]
keyword[return] identifier[AudioSamples] ( identifier[y] , identifier[self] . identifier[samplerate] ) | def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self # depends on [control=['if'], data=[]]
x = self.sum(axis=1) * 0.5
y = x * 0.5
return AudioSamples(y, self.samplerate) |
def _comp_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
masker = _gen_eval_kwargs(op_name).get('masker', False)
def na_op(x, y):
# TODO:
# should have guarantess on what x, y can be type-wise
# Extension Dtypes are not called here
# Checking that cases that were once handled here are no longer
# reachable.
assert not (is_categorical_dtype(y) and not is_scalar(y))
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
elif is_datetimelike_v_numeric(x, y):
return invalid_comparison(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
# we have a datetime/timedelta and may need to convert
assert not needs_i8_conversion(x)
mask = None
if not is_scalar(y) and needs_i8_conversion(y):
mask = isna(x) | isna(y)
y = y.view('i8')
x = x.view('i8')
method = getattr(x, op_name, None)
if method is not None:
with np.errstate(all='ignore'):
result = method(y)
if result is NotImplemented:
return invalid_comparison(x, y, op)
else:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
res_name = get_op_result_name(self, other)
if isinstance(other, list):
# TODO: same for tuples?
other = np.asarray(other)
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries) and not self._indexed_same(other):
raise ValueError("Can only compare identically-labeled "
"Series objects")
elif is_categorical_dtype(self):
# Dispatch to Categorical implementation; pd.CategoricalIndex
# behavior is non-canonical GH#19513
res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
return self._constructor(res_values, index=self.index,
name=res_name)
elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
if (isinstance(other, datetime.date) and
not isinstance(other, datetime.datetime)):
# https://github.com/pandas-dev/pandas/issues/21152
# Compatibility for difference between Series comparison w/
# datetime and date
msg = (
"Comparing Series of datetimes with 'datetime.date'. "
"Currently, the 'datetime.date' is coerced to a "
"datetime. In the future pandas will not coerce, "
"and {future}. "
"To retain the current behavior, "
"convert the 'datetime.date' to a datetime with "
"'pd.Timestamp'."
)
if op in {operator.lt, operator.le, operator.gt, operator.ge}:
future = "a TypeError will be raised"
else:
future = (
"'the values will not compare equal to the "
"'datetime.date'"
)
msg = '\n'.join(textwrap.wrap(msg.format(future=future)))
warnings.warn(msg, FutureWarning, stacklevel=2)
other = pd.Timestamp(other)
res_values = dispatch_to_index_op(op, self, other,
pd.DatetimeIndex)
return self._constructor(res_values, index=self.index,
name=res_name)
elif is_timedelta64_dtype(self):
res_values = dispatch_to_index_op(op, self, other,
pd.TimedeltaIndex)
return self._constructor(res_values, index=self.index,
name=res_name)
elif (is_extension_array_dtype(self) or
(is_extension_array_dtype(other) and not is_scalar(other))):
# Note: the `not is_scalar(other)` condition rules out
# e.g. other == "category"
return dispatch_to_extension_op(op, self, other)
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
# rename is needed in case res_name is None and res_values.name
# is not.
return self._constructor(res_values, index=self.index,
name=res_name).rename(res_name)
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if other.ndim != 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
res_values = na_op(self.values, np.asarray(other))
result = self._constructor(res_values, index=self.index)
# rename is needed in case res_name is None and self.name
# is not.
return result.__finalize__(self).rename(res_name)
elif is_scalar(other) and isna(other):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(len(self), dtype=bool)
else:
res_values = np.zeros(len(self), dtype=bool)
return self._constructor(res_values, index=self.index,
name=res_name, dtype='bool')
else:
values = self.get_values()
with np.errstate(all='ignore'):
res = na_op(values, other)
if is_scalar(res):
raise TypeError('Could not compare {typ} type with Series'
.format(typ=type(other)))
# always return a full value series here
res_values = com.values_from_object(res)
return self._constructor(res_values, index=self.index,
name=res_name, dtype='bool')
wrapper.__name__ = op_name
return wrapper | def function[_comp_method_SERIES, parameter[cls, op, special]]:
constant[
Wrapper function for Series arithmetic operations, to avoid
code duplication.
]
variable[op_name] assign[=] call[name[_get_op_name], parameter[name[op], name[special]]]
variable[masker] assign[=] call[call[name[_gen_eval_kwargs], parameter[name[op_name]]].get, parameter[constant[masker], constant[False]]]
def function[na_op, parameter[x, y]]:
assert[<ast.UnaryOp object at 0x7da18dc04670>]
if call[name[is_object_dtype], parameter[name[x].dtype]] begin[:]
variable[result] assign[=] call[name[_comp_method_OBJECT_ARRAY], parameter[name[op], name[x], name[y]]]
return[name[result]]
def function[wrapper, parameter[self, other, axis]]:
if compare[name[axis] is_not constant[None]] begin[:]
call[name[self]._get_axis_number, parameter[name[axis]]]
variable[res_name] assign[=] call[name[get_op_result_name], parameter[name[self], name[other]]]
if call[name[isinstance], parameter[name[other], name[list]]] begin[:]
variable[other] assign[=] call[name[np].asarray, parameter[name[other]]]
if call[name[isinstance], parameter[name[other], name[ABCDataFrame]]] begin[:]
return[name[NotImplemented]]
name[wrapper].__name__ assign[=] name[op_name]
return[name[wrapper]] | keyword[def] identifier[_comp_method_SERIES] ( identifier[cls] , identifier[op] , identifier[special] ):
literal[string]
identifier[op_name] = identifier[_get_op_name] ( identifier[op] , identifier[special] )
identifier[masker] = identifier[_gen_eval_kwargs] ( identifier[op_name] ). identifier[get] ( literal[string] , keyword[False] )
keyword[def] identifier[na_op] ( identifier[x] , identifier[y] ):
keyword[assert] keyword[not] ( identifier[is_categorical_dtype] ( identifier[y] ) keyword[and] keyword[not] identifier[is_scalar] ( identifier[y] ))
keyword[if] identifier[is_object_dtype] ( identifier[x] . identifier[dtype] ):
identifier[result] = identifier[_comp_method_OBJECT_ARRAY] ( identifier[op] , identifier[x] , identifier[y] )
keyword[elif] identifier[is_datetimelike_v_numeric] ( identifier[x] , identifier[y] ):
keyword[return] identifier[invalid_comparison] ( identifier[x] , identifier[y] , identifier[op] )
keyword[else] :
keyword[assert] keyword[not] identifier[needs_i8_conversion] ( identifier[x] )
identifier[mask] = keyword[None]
keyword[if] keyword[not] identifier[is_scalar] ( identifier[y] ) keyword[and] identifier[needs_i8_conversion] ( identifier[y] ):
identifier[mask] = identifier[isna] ( identifier[x] )| identifier[isna] ( identifier[y] )
identifier[y] = identifier[y] . identifier[view] ( literal[string] )
identifier[x] = identifier[x] . identifier[view] ( literal[string] )
identifier[method] = identifier[getattr] ( identifier[x] , identifier[op_name] , keyword[None] )
keyword[if] identifier[method] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[np] . identifier[errstate] ( identifier[all] = literal[string] ):
identifier[result] = identifier[method] ( identifier[y] )
keyword[if] identifier[result] keyword[is] identifier[NotImplemented] :
keyword[return] identifier[invalid_comparison] ( identifier[x] , identifier[y] , identifier[op] )
keyword[else] :
identifier[result] = identifier[op] ( identifier[x] , identifier[y] )
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] keyword[and] identifier[mask] . identifier[any] ():
identifier[result] [ identifier[mask] ]= identifier[masker]
keyword[return] identifier[result]
keyword[def] identifier[wrapper] ( identifier[self] , identifier[other] , identifier[axis] = keyword[None] ):
keyword[if] identifier[axis] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_get_axis_number] ( identifier[axis] )
identifier[res_name] = identifier[get_op_result_name] ( identifier[self] , identifier[other] )
keyword[if] identifier[isinstance] ( identifier[other] , identifier[list] ):
identifier[other] = identifier[np] . identifier[asarray] ( identifier[other] )
keyword[if] identifier[isinstance] ( identifier[other] , identifier[ABCDataFrame] ):
keyword[return] identifier[NotImplemented]
keyword[elif] identifier[isinstance] ( identifier[other] , identifier[ABCSeries] ) keyword[and] keyword[not] identifier[self] . identifier[_indexed_same] ( identifier[other] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[elif] identifier[is_categorical_dtype] ( identifier[self] ):
identifier[res_values] = identifier[dispatch_to_index_op] ( identifier[op] , identifier[self] , identifier[other] , identifier[pd] . identifier[Categorical] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] )
keyword[elif] identifier[is_datetime64_dtype] ( identifier[self] ) keyword[or] identifier[is_datetime64tz_dtype] ( identifier[self] ):
keyword[if] ( identifier[isinstance] ( identifier[other] , identifier[datetime] . identifier[date] ) keyword[and]
keyword[not] identifier[isinstance] ( identifier[other] , identifier[datetime] . identifier[datetime] )):
identifier[msg] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
keyword[if] identifier[op] keyword[in] { identifier[operator] . identifier[lt] , identifier[operator] . identifier[le] , identifier[operator] . identifier[gt] , identifier[operator] . identifier[ge] }:
identifier[future] = literal[string]
keyword[else] :
identifier[future] =(
literal[string]
literal[string]
)
identifier[msg] = literal[string] . identifier[join] ( identifier[textwrap] . identifier[wrap] ( identifier[msg] . identifier[format] ( identifier[future] = identifier[future] )))
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[FutureWarning] , identifier[stacklevel] = literal[int] )
identifier[other] = identifier[pd] . identifier[Timestamp] ( identifier[other] )
identifier[res_values] = identifier[dispatch_to_index_op] ( identifier[op] , identifier[self] , identifier[other] ,
identifier[pd] . identifier[DatetimeIndex] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] )
keyword[elif] identifier[is_timedelta64_dtype] ( identifier[self] ):
identifier[res_values] = identifier[dispatch_to_index_op] ( identifier[op] , identifier[self] , identifier[other] ,
identifier[pd] . identifier[TimedeltaIndex] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] )
keyword[elif] ( identifier[is_extension_array_dtype] ( identifier[self] ) keyword[or]
( identifier[is_extension_array_dtype] ( identifier[other] ) keyword[and] keyword[not] identifier[is_scalar] ( identifier[other] ))):
keyword[return] identifier[dispatch_to_extension_op] ( identifier[op] , identifier[self] , identifier[other] )
keyword[elif] identifier[isinstance] ( identifier[other] , identifier[ABCSeries] ):
identifier[res_values] = identifier[na_op] ( identifier[self] . identifier[values] , identifier[other] . identifier[values] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] ). identifier[rename] ( identifier[res_name] )
keyword[elif] identifier[isinstance] ( identifier[other] ,( identifier[np] . identifier[ndarray] , identifier[pd] . identifier[Index] )):
keyword[if] identifier[other] . identifier[ndim] != literal[int] keyword[and] identifier[len] ( identifier[self] )!= identifier[len] ( identifier[other] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[res_values] = identifier[na_op] ( identifier[self] . identifier[values] , identifier[np] . identifier[asarray] ( identifier[other] ))
identifier[result] = identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] )
keyword[return] identifier[result] . identifier[__finalize__] ( identifier[self] ). identifier[rename] ( identifier[res_name] )
keyword[elif] identifier[is_scalar] ( identifier[other] ) keyword[and] identifier[isna] ( identifier[other] ):
keyword[if] identifier[op] keyword[is] identifier[operator] . identifier[ne] :
identifier[res_values] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[self] ), identifier[dtype] = identifier[bool] )
keyword[else] :
identifier[res_values] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[self] ), identifier[dtype] = identifier[bool] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] , identifier[dtype] = literal[string] )
keyword[else] :
identifier[values] = identifier[self] . identifier[get_values] ()
keyword[with] identifier[np] . identifier[errstate] ( identifier[all] = literal[string] ):
identifier[res] = identifier[na_op] ( identifier[values] , identifier[other] )
keyword[if] identifier[is_scalar] ( identifier[res] ):
keyword[raise] identifier[TypeError] ( literal[string]
. identifier[format] ( identifier[typ] = identifier[type] ( identifier[other] )))
identifier[res_values] = identifier[com] . identifier[values_from_object] ( identifier[res] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[res_values] , identifier[index] = identifier[self] . identifier[index] ,
identifier[name] = identifier[res_name] , identifier[dtype] = literal[string] )
identifier[wrapper] . identifier[__name__] = identifier[op_name]
keyword[return] identifier[wrapper] | def _comp_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
masker = _gen_eval_kwargs(op_name).get('masker', False)
def na_op(x, y):
# TODO:
# should have guarantess on what x, y can be type-wise
# Extension Dtypes are not called here
# Checking that cases that were once handled here are no longer
# reachable.
assert not (is_categorical_dtype(y) and (not is_scalar(y)))
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y) # depends on [control=['if'], data=[]]
elif is_datetimelike_v_numeric(x, y):
return invalid_comparison(x, y, op) # depends on [control=['if'], data=[]]
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
# we have a datetime/timedelta and may need to convert
assert not needs_i8_conversion(x)
mask = None
if not is_scalar(y) and needs_i8_conversion(y):
mask = isna(x) | isna(y)
y = y.view('i8')
x = x.view('i8') # depends on [control=['if'], data=[]]
method = getattr(x, op_name, None)
if method is not None:
with np.errstate(all='ignore'):
result = method(y) # depends on [control=['with'], data=[]]
if result is NotImplemented:
return invalid_comparison(x, y, op) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['method']]
else:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker # depends on [control=['if'], data=[]]
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis) # depends on [control=['if'], data=['axis']]
res_name = get_op_result_name(self, other)
if isinstance(other, list):
# TODO: same for tuples?
other = np.asarray(other) # depends on [control=['if'], data=[]]
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented # depends on [control=['if'], data=[]]
elif isinstance(other, ABCSeries) and (not self._indexed_same(other)):
raise ValueError('Can only compare identically-labeled Series objects') # depends on [control=['if'], data=[]]
elif is_categorical_dtype(self):
# Dispatch to Categorical implementation; pd.CategoricalIndex
# behavior is non-canonical GH#19513
res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
return self._constructor(res_values, index=self.index, name=res_name) # depends on [control=['if'], data=[]]
elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
if isinstance(other, datetime.date) and (not isinstance(other, datetime.datetime)):
# https://github.com/pandas-dev/pandas/issues/21152
# Compatibility for difference between Series comparison w/
# datetime and date
msg = "Comparing Series of datetimes with 'datetime.date'. Currently, the 'datetime.date' is coerced to a datetime. In the future pandas will not coerce, and {future}. To retain the current behavior, convert the 'datetime.date' to a datetime with 'pd.Timestamp'."
if op in {operator.lt, operator.le, operator.gt, operator.ge}:
future = 'a TypeError will be raised' # depends on [control=['if'], data=[]]
else:
future = "'the values will not compare equal to the 'datetime.date'"
msg = '\n'.join(textwrap.wrap(msg.format(future=future)))
warnings.warn(msg, FutureWarning, stacklevel=2)
other = pd.Timestamp(other) # depends on [control=['if'], data=[]]
res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex)
return self._constructor(res_values, index=self.index, name=res_name) # depends on [control=['if'], data=[]]
elif is_timedelta64_dtype(self):
res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex)
return self._constructor(res_values, index=self.index, name=res_name) # depends on [control=['if'], data=[]]
elif is_extension_array_dtype(self) or (is_extension_array_dtype(other) and (not is_scalar(other))):
# Note: the `not is_scalar(other)` condition rules out
# e.g. other == "category"
return dispatch_to_extension_op(op, self, other) # depends on [control=['if'], data=[]]
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
# rename is needed in case res_name is None and res_values.name
# is not.
return self._constructor(res_values, index=self.index, name=res_name).rename(res_name) # depends on [control=['if'], data=[]]
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if other.ndim != 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare') # depends on [control=['if'], data=[]]
res_values = na_op(self.values, np.asarray(other))
result = self._constructor(res_values, index=self.index)
# rename is needed in case res_name is None and self.name
# is not.
return result.__finalize__(self).rename(res_name) # depends on [control=['if'], data=[]]
elif is_scalar(other) and isna(other):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(len(self), dtype=bool) # depends on [control=['if'], data=[]]
else:
res_values = np.zeros(len(self), dtype=bool)
return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') # depends on [control=['if'], data=[]]
else:
values = self.get_values()
with np.errstate(all='ignore'):
res = na_op(values, other) # depends on [control=['with'], data=[]]
if is_scalar(res):
raise TypeError('Could not compare {typ} type with Series'.format(typ=type(other))) # depends on [control=['if'], data=[]]
# always return a full value series here
res_values = com.values_from_object(res)
return self._constructor(res_values, index=self.index, name=res_name, dtype='bool')
wrapper.__name__ = op_name
return wrapper |
def print_attrs(data_file, node_name='/', which='user', compress=False):
"""Print the HDF5 attributes for `node_name`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
node_name (string): name of the path inside the file to be printed.
Can be either a group or a leaf-node. Default: '/', the root node.
which (string): Valid values are 'user' for user-defined attributes,
'sys' for pytables-specific attributes and 'all' to print both
groups of attributes. Default 'user'.
compress (bool): if True displays at most a line for each attribute.
Default False.
"""
node = data_file.get_node(node_name)
print ('List of attributes for:\n %s\n' % node)
for attr in node._v_attrs._f_list():
print ('\t%s' % attr)
attr_content = repr(node._v_attrs[attr])
if compress:
attr_content = attr_content.split('\n')[0]
print ("\t %s" % attr_content) | def function[print_attrs, parameter[data_file, node_name, which, compress]]:
constant[Print the HDF5 attributes for `node_name`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
node_name (string): name of the path inside the file to be printed.
Can be either a group or a leaf-node. Default: '/', the root node.
which (string): Valid values are 'user' for user-defined attributes,
'sys' for pytables-specific attributes and 'all' to print both
groups of attributes. Default 'user'.
compress (bool): if True displays at most a line for each attribute.
Default False.
]
variable[node] assign[=] call[name[data_file].get_node, parameter[name[node_name]]]
call[name[print], parameter[binary_operation[constant[List of attributes for:
%s
] <ast.Mod object at 0x7da2590d6920> name[node]]]]
for taget[name[attr]] in starred[call[name[node]._v_attrs._f_list, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[attr]]]]
variable[attr_content] assign[=] call[name[repr], parameter[call[name[node]._v_attrs][name[attr]]]]
if name[compress] begin[:]
variable[attr_content] assign[=] call[call[name[attr_content].split, parameter[constant[
]]]][constant[0]]
call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[attr_content]]]] | keyword[def] identifier[print_attrs] ( identifier[data_file] , identifier[node_name] = literal[string] , identifier[which] = literal[string] , identifier[compress] = keyword[False] ):
literal[string]
identifier[node] = identifier[data_file] . identifier[get_node] ( identifier[node_name] )
identifier[print] ( literal[string] % identifier[node] )
keyword[for] identifier[attr] keyword[in] identifier[node] . identifier[_v_attrs] . identifier[_f_list] ():
identifier[print] ( literal[string] % identifier[attr] )
identifier[attr_content] = identifier[repr] ( identifier[node] . identifier[_v_attrs] [ identifier[attr] ])
keyword[if] identifier[compress] :
identifier[attr_content] = identifier[attr_content] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[print] ( literal[string] % identifier[attr_content] ) | def print_attrs(data_file, node_name='/', which='user', compress=False):
"""Print the HDF5 attributes for `node_name`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
node_name (string): name of the path inside the file to be printed.
Can be either a group or a leaf-node. Default: '/', the root node.
which (string): Valid values are 'user' for user-defined attributes,
'sys' for pytables-specific attributes and 'all' to print both
groups of attributes. Default 'user'.
compress (bool): if True displays at most a line for each attribute.
Default False.
"""
node = data_file.get_node(node_name)
print('List of attributes for:\n %s\n' % node)
for attr in node._v_attrs._f_list():
print('\t%s' % attr)
attr_content = repr(node._v_attrs[attr])
if compress:
attr_content = attr_content.split('\n')[0] # depends on [control=['if'], data=[]]
print('\t %s' % attr_content) # depends on [control=['for'], data=['attr']] |
def head(self, uuid):
""" Get one thread."""
url = "%(base)s/%(uuid)s" % {
'base': self.local_base_url,
'uuid': uuid
}
return self.core.head(url) | def function[head, parameter[self, uuid]]:
constant[ Get one thread.]
variable[url] assign[=] binary_operation[constant[%(base)s/%(uuid)s] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b26ae8f0>, <ast.Constant object at 0x7da1b26ac100>], [<ast.Attribute object at 0x7da1b26adf00>, <ast.Name object at 0x7da1b13b8d90>]]]
return[call[name[self].core.head, parameter[name[url]]]] | keyword[def] identifier[head] ( identifier[self] , identifier[uuid] ):
literal[string]
identifier[url] = literal[string] %{
literal[string] : identifier[self] . identifier[local_base_url] ,
literal[string] : identifier[uuid]
}
keyword[return] identifier[self] . identifier[core] . identifier[head] ( identifier[url] ) | def head(self, uuid):
""" Get one thread."""
url = '%(base)s/%(uuid)s' % {'base': self.local_base_url, 'uuid': uuid}
return self.core.head(url) |
def mktns(self, root):
"""Get/create the target namespace."""
tns = root.get("targetNamespace")
prefix = root.findPrefix(tns)
if prefix is None:
log.debug("warning: tns (%s), not mapped to prefix", tns)
prefix = "tns"
return (prefix, tns) | def function[mktns, parameter[self, root]]:
constant[Get/create the target namespace.]
variable[tns] assign[=] call[name[root].get, parameter[constant[targetNamespace]]]
variable[prefix] assign[=] call[name[root].findPrefix, parameter[name[tns]]]
if compare[name[prefix] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[warning: tns (%s), not mapped to prefix], name[tns]]]
variable[prefix] assign[=] constant[tns]
return[tuple[[<ast.Name object at 0x7da20c9923e0>, <ast.Name object at 0x7da20c993520>]]] | keyword[def] identifier[mktns] ( identifier[self] , identifier[root] ):
literal[string]
identifier[tns] = identifier[root] . identifier[get] ( literal[string] )
identifier[prefix] = identifier[root] . identifier[findPrefix] ( identifier[tns] )
keyword[if] identifier[prefix] keyword[is] keyword[None] :
identifier[log] . identifier[debug] ( literal[string] , identifier[tns] )
identifier[prefix] = literal[string]
keyword[return] ( identifier[prefix] , identifier[tns] ) | def mktns(self, root):
"""Get/create the target namespace."""
tns = root.get('targetNamespace')
prefix = root.findPrefix(tns)
if prefix is None:
log.debug('warning: tns (%s), not mapped to prefix', tns)
prefix = 'tns' # depends on [control=['if'], data=['prefix']]
return (prefix, tns) |
async def _buffer_body(self, reader):
"""
Buffers the body of the request
"""
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining)
except asyncio.IncompleteReadError:
raise EOFError() | <ast.AsyncFunctionDef object at 0x7da2044c2fe0> | keyword[async] keyword[def] identifier[_buffer_body] ( identifier[self] , identifier[reader] ):
literal[string]
identifier[remaining] = identifier[int] ( identifier[self] . identifier[headers] . identifier[get] ( literal[string] , literal[int] ))
keyword[if] identifier[remaining] > literal[int] :
keyword[try] :
identifier[self] . identifier[data] = keyword[await] identifier[reader] . identifier[readexactly] ( identifier[remaining] )
keyword[except] identifier[asyncio] . identifier[IncompleteReadError] :
keyword[raise] identifier[EOFError] () | async def _buffer_body(self, reader):
"""
Buffers the body of the request
"""
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining) # depends on [control=['try'], data=[]]
except asyncio.IncompleteReadError:
raise EOFError() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['remaining']] |
def install(force=False, lazy=False):
"""
Download the ANTLR v4 tool jar. (Raises :exception:`OSError` if jar
is already available, unless ``lazy`` is ``True``.)
:param bool force: Force download even if local jar already exists.
:param bool lazy: Don't report an error if local jar already exists and
don't try to download it either.
"""
if exists(antlr_jar_path):
if lazy:
return
if not force:
raise OSError(errno.EEXIST, 'file already exists', antlr_jar_path)
tool_url = config['tool_url']
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
with contextlib.closing(urlopen(tool_url, context=ssl_context)) as response:
tool_jar = response.read()
if not isdir(dirname(antlr_jar_path)):
makedirs(dirname(antlr_jar_path))
with open(antlr_jar_path, mode='wb') as tool_file:
tool_file.write(tool_jar) | def function[install, parameter[force, lazy]]:
constant[
Download the ANTLR v4 tool jar. (Raises :exception:`OSError` if jar
is already available, unless ``lazy`` is ``True``.)
:param bool force: Force download even if local jar already exists.
:param bool lazy: Don't report an error if local jar already exists and
don't try to download it either.
]
if call[name[exists], parameter[name[antlr_jar_path]]] begin[:]
if name[lazy] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b1835b10> begin[:]
<ast.Raise object at 0x7da1b18375b0>
variable[tool_url] assign[=] call[name[config]][constant[tool_url]]
variable[ssl_context] assign[=] call[name[ssl].create_default_context, parameter[]]
with call[name[contextlib].closing, parameter[call[name[urlopen], parameter[name[tool_url]]]]] begin[:]
variable[tool_jar] assign[=] call[name[response].read, parameter[]]
if <ast.UnaryOp object at 0x7da1b1836440> begin[:]
call[name[makedirs], parameter[call[name[dirname], parameter[name[antlr_jar_path]]]]]
with call[name[open], parameter[name[antlr_jar_path]]] begin[:]
call[name[tool_file].write, parameter[name[tool_jar]]] | keyword[def] identifier[install] ( identifier[force] = keyword[False] , identifier[lazy] = keyword[False] ):
literal[string]
keyword[if] identifier[exists] ( identifier[antlr_jar_path] ):
keyword[if] identifier[lazy] :
keyword[return]
keyword[if] keyword[not] identifier[force] :
keyword[raise] identifier[OSError] ( identifier[errno] . identifier[EEXIST] , literal[string] , identifier[antlr_jar_path] )
identifier[tool_url] = identifier[config] [ literal[string] ]
identifier[ssl_context] = identifier[ssl] . identifier[create_default_context] ( identifier[purpose] = identifier[ssl] . identifier[Purpose] . identifier[CLIENT_AUTH] )
keyword[with] identifier[contextlib] . identifier[closing] ( identifier[urlopen] ( identifier[tool_url] , identifier[context] = identifier[ssl_context] )) keyword[as] identifier[response] :
identifier[tool_jar] = identifier[response] . identifier[read] ()
keyword[if] keyword[not] identifier[isdir] ( identifier[dirname] ( identifier[antlr_jar_path] )):
identifier[makedirs] ( identifier[dirname] ( identifier[antlr_jar_path] ))
keyword[with] identifier[open] ( identifier[antlr_jar_path] , identifier[mode] = literal[string] ) keyword[as] identifier[tool_file] :
identifier[tool_file] . identifier[write] ( identifier[tool_jar] ) | def install(force=False, lazy=False):
"""
Download the ANTLR v4 tool jar. (Raises :exception:`OSError` if jar
is already available, unless ``lazy`` is ``True``.)
:param bool force: Force download even if local jar already exists.
:param bool lazy: Don't report an error if local jar already exists and
don't try to download it either.
"""
if exists(antlr_jar_path):
if lazy:
return # depends on [control=['if'], data=[]]
if not force:
raise OSError(errno.EEXIST, 'file already exists', antlr_jar_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
tool_url = config['tool_url']
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
with contextlib.closing(urlopen(tool_url, context=ssl_context)) as response:
tool_jar = response.read() # depends on [control=['with'], data=['response']]
if not isdir(dirname(antlr_jar_path)):
makedirs(dirname(antlr_jar_path)) # depends on [control=['if'], data=[]]
with open(antlr_jar_path, mode='wb') as tool_file:
tool_file.write(tool_jar) # depends on [control=['with'], data=['tool_file']] |
def load(self, raw):
"""Unserialize from raw representation. (Wrapper)
Args:
raw (dict): Raw.
Raises:
ParseException: If there was an error parsing data.
"""
try:
self._load(raw)
except (KeyError, ValueError) as e:
raise_from(exception.ParseException('Parse error in %s' % (type(self)), raw), e) | def function[load, parameter[self, raw]]:
constant[Unserialize from raw representation. (Wrapper)
Args:
raw (dict): Raw.
Raises:
ParseException: If there was an error parsing data.
]
<ast.Try object at 0x7da18ede6d40> | keyword[def] identifier[load] ( identifier[self] , identifier[raw] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_load] ( identifier[raw] )
keyword[except] ( identifier[KeyError] , identifier[ValueError] ) keyword[as] identifier[e] :
identifier[raise_from] ( identifier[exception] . identifier[ParseException] ( literal[string] %( identifier[type] ( identifier[self] )), identifier[raw] ), identifier[e] ) | def load(self, raw):
"""Unserialize from raw representation. (Wrapper)
Args:
raw (dict): Raw.
Raises:
ParseException: If there was an error parsing data.
"""
try:
self._load(raw) # depends on [control=['try'], data=[]]
except (KeyError, ValueError) as e:
raise_from(exception.ParseException('Parse error in %s' % type(self), raw), e) # depends on [control=['except'], data=['e']] |
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val) | def function[expect_err, parameter[self, msg]]:
constant[
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
]
if name[self]._is_ok begin[:]
<ast.Raise object at 0x7da18f09ecb0>
return[call[name[cast], parameter[name[E], name[self]._val]]] | keyword[def] identifier[expect_err] ( identifier[self] , identifier[msg] )-> identifier[E] :
literal[string]
keyword[if] identifier[self] . identifier[_is_ok] :
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[return] identifier[cast] ( identifier[E] , identifier[self] . identifier[_val] ) | def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg) # depends on [control=['if'], data=[]]
return cast(E, self._val) |
def read_attributes(self, attributes=None):
'''
Collect read attributes across reads in this PileupCollection into a
pandas.DataFrame.
Valid attributes are the following properties of a pysam.AlignedSegment
instance. See:
http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment
for the meaning of these attributes.
* cigarstring
* flag
* inferred_length
* is_duplicate
* is_paired
* is_proper_pair
* is_qcfail
* is_read1
* is_read2
* is_reverse
* is_secondary
* is_unmapped
* mapping_quality
* mate_is_reverse
* mate_is_unmapped
* next_reference_id
* next_reference_start
* query_alignment_end
* query_alignment_length
* query_alignment_qualities
* query_alignment_sequence
* query_alignment_start
* query_length
* query_name
* reference_end
* reference_id
* reference_length
* reference_start
* template_length
(Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class
variable, so be careful when modifying it.)
Additionally, for alignment "tags" (arbitrary key values associated
with an alignment), a column of the form "TAG_{tag name}" is
included.
Finally, the column "pysam_alignment_record" gives the underlying
`pysam.AlignedSegment` instances.
Parameters
----------
attributes (optional): list of strings
List of columns to include. If unspecified, all columns are
included in the result.
Returns
----------
pandas.DataFrame of read attributes.
'''
def include(attribute):
return attributes is None or attribute in attributes
reads = self.reads()
possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES)
result = OrderedDict(
(name, [getattr(read, name) for read in reads])
for name in PileupCollection._READ_ATTRIBUTE_NAMES
if include(name))
# Add tag columns.
if reads:
tag_dicts = [dict(x.get_tags()) for x in reads]
tag_keys = set.union(
*[set(item.keys()) for item in tag_dicts])
for tag_key in sorted(tag_keys):
column_name = "TAG_%s" % tag_key
possible_column_names.append(column_name)
if include(column_name):
result[column_name] = [d.get(tag_key) for d in tag_dicts]
# Lastly, we include the underlying pysam alignment record.
possible_column_names.append("pysam_alignment_record")
if include("pysam_alignment_record"):
result["pysam_alignment_record"] = reads
# If particular attributes were requested, check that they're here.
if attributes is not None:
for attribute in attributes:
if attribute not in result:
raise ValueError(
"No such attribute: %s. Valid attributes are: %s"
% (attribute, " ".join(possible_column_names)))
assert set(attributes) == set(result)
return pandas.DataFrame(result) | def function[read_attributes, parameter[self, attributes]]:
constant[
Collect read attributes across reads in this PileupCollection into a
pandas.DataFrame.
Valid attributes are the following properties of a pysam.AlignedSegment
instance. See:
http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment
for the meaning of these attributes.
* cigarstring
* flag
* inferred_length
* is_duplicate
* is_paired
* is_proper_pair
* is_qcfail
* is_read1
* is_read2
* is_reverse
* is_secondary
* is_unmapped
* mapping_quality
* mate_is_reverse
* mate_is_unmapped
* next_reference_id
* next_reference_start
* query_alignment_end
* query_alignment_length
* query_alignment_qualities
* query_alignment_sequence
* query_alignment_start
* query_length
* query_name
* reference_end
* reference_id
* reference_length
* reference_start
* template_length
(Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class
variable, so be careful when modifying it.)
Additionally, for alignment "tags" (arbitrary key values associated
with an alignment), a column of the form "TAG_{tag name}" is
included.
Finally, the column "pysam_alignment_record" gives the underlying
`pysam.AlignedSegment` instances.
Parameters
----------
attributes (optional): list of strings
List of columns to include. If unspecified, all columns are
included in the result.
Returns
----------
pandas.DataFrame of read attributes.
]
def function[include, parameter[attribute]]:
return[<ast.BoolOp object at 0x7da20c6c73a0>]
variable[reads] assign[=] call[name[self].reads, parameter[]]
variable[possible_column_names] assign[=] call[name[list], parameter[name[PileupCollection]._READ_ATTRIBUTE_NAMES]]
variable[result] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da2045655a0>]]
if name[reads] begin[:]
variable[tag_dicts] assign[=] <ast.ListComp object at 0x7da2045672b0>
variable[tag_keys] assign[=] call[name[set].union, parameter[<ast.Starred object at 0x7da204565c60>]]
for taget[name[tag_key]] in starred[call[name[sorted], parameter[name[tag_keys]]]] begin[:]
variable[column_name] assign[=] binary_operation[constant[TAG_%s] <ast.Mod object at 0x7da2590d6920> name[tag_key]]
call[name[possible_column_names].append, parameter[name[column_name]]]
if call[name[include], parameter[name[column_name]]] begin[:]
call[name[result]][name[column_name]] assign[=] <ast.ListComp object at 0x7da204566800>
call[name[possible_column_names].append, parameter[constant[pysam_alignment_record]]]
if call[name[include], parameter[constant[pysam_alignment_record]]] begin[:]
call[name[result]][constant[pysam_alignment_record]] assign[=] name[reads]
if compare[name[attributes] is_not constant[None]] begin[:]
for taget[name[attribute]] in starred[name[attributes]] begin[:]
if compare[name[attribute] <ast.NotIn object at 0x7da2590d7190> name[result]] begin[:]
<ast.Raise object at 0x7da18f812dd0>
assert[compare[call[name[set], parameter[name[attributes]]] equal[==] call[name[set], parameter[name[result]]]]]
return[call[name[pandas].DataFrame, parameter[name[result]]]] | keyword[def] identifier[read_attributes] ( identifier[self] , identifier[attributes] = keyword[None] ):
literal[string]
keyword[def] identifier[include] ( identifier[attribute] ):
keyword[return] identifier[attributes] keyword[is] keyword[None] keyword[or] identifier[attribute] keyword[in] identifier[attributes]
identifier[reads] = identifier[self] . identifier[reads] ()
identifier[possible_column_names] = identifier[list] ( identifier[PileupCollection] . identifier[_READ_ATTRIBUTE_NAMES] )
identifier[result] = identifier[OrderedDict] (
( identifier[name] ,[ identifier[getattr] ( identifier[read] , identifier[name] ) keyword[for] identifier[read] keyword[in] identifier[reads] ])
keyword[for] identifier[name] keyword[in] identifier[PileupCollection] . identifier[_READ_ATTRIBUTE_NAMES]
keyword[if] identifier[include] ( identifier[name] ))
keyword[if] identifier[reads] :
identifier[tag_dicts] =[ identifier[dict] ( identifier[x] . identifier[get_tags] ()) keyword[for] identifier[x] keyword[in] identifier[reads] ]
identifier[tag_keys] = identifier[set] . identifier[union] (
*[ identifier[set] ( identifier[item] . identifier[keys] ()) keyword[for] identifier[item] keyword[in] identifier[tag_dicts] ])
keyword[for] identifier[tag_key] keyword[in] identifier[sorted] ( identifier[tag_keys] ):
identifier[column_name] = literal[string] % identifier[tag_key]
identifier[possible_column_names] . identifier[append] ( identifier[column_name] )
keyword[if] identifier[include] ( identifier[column_name] ):
identifier[result] [ identifier[column_name] ]=[ identifier[d] . identifier[get] ( identifier[tag_key] ) keyword[for] identifier[d] keyword[in] identifier[tag_dicts] ]
identifier[possible_column_names] . identifier[append] ( literal[string] )
keyword[if] identifier[include] ( literal[string] ):
identifier[result] [ literal[string] ]= identifier[reads]
keyword[if] identifier[attributes] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[attribute] keyword[in] identifier[attributes] :
keyword[if] identifier[attribute] keyword[not] keyword[in] identifier[result] :
keyword[raise] identifier[ValueError] (
literal[string]
%( identifier[attribute] , literal[string] . identifier[join] ( identifier[possible_column_names] )))
keyword[assert] identifier[set] ( identifier[attributes] )== identifier[set] ( identifier[result] )
keyword[return] identifier[pandas] . identifier[DataFrame] ( identifier[result] ) | def read_attributes(self, attributes=None):
"""
Collect read attributes across reads in this PileupCollection into a
pandas.DataFrame.
Valid attributes are the following properties of a pysam.AlignedSegment
instance. See:
http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment
for the meaning of these attributes.
* cigarstring
* flag
* inferred_length
* is_duplicate
* is_paired
* is_proper_pair
* is_qcfail
* is_read1
* is_read2
* is_reverse
* is_secondary
* is_unmapped
* mapping_quality
* mate_is_reverse
* mate_is_unmapped
* next_reference_id
* next_reference_start
* query_alignment_end
* query_alignment_length
* query_alignment_qualities
* query_alignment_sequence
* query_alignment_start
* query_length
* query_name
* reference_end
* reference_id
* reference_length
* reference_start
* template_length
(Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class
variable, so be careful when modifying it.)
Additionally, for alignment "tags" (arbitrary key values associated
with an alignment), a column of the form "TAG_{tag name}" is
included.
Finally, the column "pysam_alignment_record" gives the underlying
`pysam.AlignedSegment` instances.
Parameters
----------
attributes (optional): list of strings
List of columns to include. If unspecified, all columns are
included in the result.
Returns
----------
pandas.DataFrame of read attributes.
"""
def include(attribute):
return attributes is None or attribute in attributes
reads = self.reads()
possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES)
result = OrderedDict(((name, [getattr(read, name) for read in reads]) for name in PileupCollection._READ_ATTRIBUTE_NAMES if include(name)))
# Add tag columns.
if reads:
tag_dicts = [dict(x.get_tags()) for x in reads]
tag_keys = set.union(*[set(item.keys()) for item in tag_dicts])
for tag_key in sorted(tag_keys):
column_name = 'TAG_%s' % tag_key
possible_column_names.append(column_name)
if include(column_name):
result[column_name] = [d.get(tag_key) for d in tag_dicts] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag_key']] # depends on [control=['if'], data=[]]
# Lastly, we include the underlying pysam alignment record.
possible_column_names.append('pysam_alignment_record')
if include('pysam_alignment_record'):
result['pysam_alignment_record'] = reads # depends on [control=['if'], data=[]]
# If particular attributes were requested, check that they're here.
if attributes is not None:
for attribute in attributes:
if attribute not in result:
raise ValueError('No such attribute: %s. Valid attributes are: %s' % (attribute, ' '.join(possible_column_names))) # depends on [control=['if'], data=['attribute']] # depends on [control=['for'], data=['attribute']]
assert set(attributes) == set(result) # depends on [control=['if'], data=['attributes']]
return pandas.DataFrame(result) |
def get_dust_log(self, **params):
"""Get log of small amounts exchanged for BNB.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"results": {
"total": 2, //Total counts of exchange
"rows": [
{
"transfered_total": "0.00132256", # Total transfered BNB amount for this exchange.
"service_charge_total": "0.00002699", # Total service charge amount for this exchange.
"tran_id": 4359321,
"logs": [ # Details of this exchange.
{
"tranId": 4359321,
"serviceChargeAmount": "0.000009",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.000441",
"fromAsset": "USDT"
},
{
"tranId": 4359321,
"serviceChargeAmount": "0.00001799",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.00088156",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-03 17:07:04" //The time of this exchange.
},
{
"transfered_total": "0.00058795",
"service_charge_total": "0.000012",
"tran_id": 4357015,
"logs": [ // Details of this exchange.
{
"tranId": 4357015,
"serviceChargeAmount": "0.00001",
"uid": "10000015",
"amount": "0.001",
"operateTime": "2018-05-02 13:52:24",
"transferedAmount": "0.00049",
"fromAsset": "USDT"
},
{
"tranId": 4357015,
"serviceChargeAmount": "0.000002",
"uid": "10000015",
"amount": "0.0001",
"operateTime": "2018-05-02 13:51:11",
"transferedAmount": "0.00009795",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-02 13:51:11"
}
]
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'userAssetDribbletLog.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res | def function[get_dust_log, parameter[self]]:
constant[Get log of small amounts exchanged for BNB.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"results": {
"total": 2, //Total counts of exchange
"rows": [
{
"transfered_total": "0.00132256", # Total transfered BNB amount for this exchange.
"service_charge_total": "0.00002699", # Total service charge amount for this exchange.
"tran_id": 4359321,
"logs": [ # Details of this exchange.
{
"tranId": 4359321,
"serviceChargeAmount": "0.000009",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.000441",
"fromAsset": "USDT"
},
{
"tranId": 4359321,
"serviceChargeAmount": "0.00001799",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.00088156",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-03 17:07:04" //The time of this exchange.
},
{
"transfered_total": "0.00058795",
"service_charge_total": "0.000012",
"tran_id": 4357015,
"logs": [ // Details of this exchange.
{
"tranId": 4357015,
"serviceChargeAmount": "0.00001",
"uid": "10000015",
"amount": "0.001",
"operateTime": "2018-05-02 13:52:24",
"transferedAmount": "0.00049",
"fromAsset": "USDT"
},
{
"tranId": 4357015,
"serviceChargeAmount": "0.000002",
"uid": "10000015",
"amount": "0.0001",
"operateTime": "2018-05-02 13:51:11",
"transferedAmount": "0.00009795",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-02 13:51:11"
}
]
}
}
:raises: BinanceWithdrawException
]
variable[res] assign[=] call[name[self]._request_withdraw_api, parameter[constant[get], constant[userAssetDribbletLog.html], constant[True]]]
if <ast.UnaryOp object at 0x7da20e9b00a0> begin[:]
<ast.Raise object at 0x7da20e9b05e0>
return[name[res]] | keyword[def] identifier[get_dust_log] ( identifier[self] ,** identifier[params] ):
literal[string]
identifier[res] = identifier[self] . identifier[_request_withdraw_api] ( literal[string] , literal[string] , keyword[True] , identifier[data] = identifier[params] )
keyword[if] keyword[not] identifier[res] [ literal[string] ]:
keyword[raise] identifier[BinanceWithdrawException] ( identifier[res] [ literal[string] ])
keyword[return] identifier[res] | def get_dust_log(self, **params):
"""Get log of small amounts exchanged for BNB.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"results": {
"total": 2, //Total counts of exchange
"rows": [
{
"transfered_total": "0.00132256", # Total transfered BNB amount for this exchange.
"service_charge_total": "0.00002699", # Total service charge amount for this exchange.
"tran_id": 4359321,
"logs": [ # Details of this exchange.
{
"tranId": 4359321,
"serviceChargeAmount": "0.000009",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.000441",
"fromAsset": "USDT"
},
{
"tranId": 4359321,
"serviceChargeAmount": "0.00001799",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.00088156",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-03 17:07:04" //The time of this exchange.
},
{
"transfered_total": "0.00058795",
"service_charge_total": "0.000012",
"tran_id": 4357015,
"logs": [ // Details of this exchange.
{
"tranId": 4357015,
"serviceChargeAmount": "0.00001",
"uid": "10000015",
"amount": "0.001",
"operateTime": "2018-05-02 13:52:24",
"transferedAmount": "0.00049",
"fromAsset": "USDT"
},
{
"tranId": 4357015,
"serviceChargeAmount": "0.000002",
"uid": "10000015",
"amount": "0.0001",
"operateTime": "2018-05-02 13:51:11",
"transferedAmount": "0.00009795",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-02 13:51:11"
}
]
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'userAssetDribbletLog.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg']) # depends on [control=['if'], data=[]]
return res |
def is_web_url(string):
"""Check to see if string is an validly-formatted web url."""
assert isinstance(string, basestring)
parsed_url = urllib.parse.urlparse(string)
return (
(
parsed_url.scheme.lower() == 'http'
or parsed_url.scheme.lower() == 'https'
)
and parsed_url.netloc
) | def function[is_web_url, parameter[string]]:
constant[Check to see if string is an validly-formatted web url.]
assert[call[name[isinstance], parameter[name[string], name[basestring]]]]
variable[parsed_url] assign[=] call[name[urllib].parse.urlparse, parameter[name[string]]]
return[<ast.BoolOp object at 0x7da1b025fa00>] | keyword[def] identifier[is_web_url] ( identifier[string] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[string] , identifier[basestring] )
identifier[parsed_url] = identifier[urllib] . identifier[parse] . identifier[urlparse] ( identifier[string] )
keyword[return] (
(
identifier[parsed_url] . identifier[scheme] . identifier[lower] ()== literal[string]
keyword[or] identifier[parsed_url] . identifier[scheme] . identifier[lower] ()== literal[string]
)
keyword[and] identifier[parsed_url] . identifier[netloc]
) | def is_web_url(string):
"""Check to see if string is an validly-formatted web url."""
assert isinstance(string, basestring)
parsed_url = urllib.parse.urlparse(string)
return (parsed_url.scheme.lower() == 'http' or parsed_url.scheme.lower() == 'https') and parsed_url.netloc |
def is_type(obj, typestr_or_type):
"""is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type."""
if typestr_or_type == "all":
return True
if type(typestr_or_type) == types.TypeType:
test_type = typestr_or_type
else:
test_type = typestr2type.get(typestr_or_type, False)
if test_type:
return isinstance(obj, test_type)
return False | def function[is_type, parameter[obj, typestr_or_type]]:
constant[is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type.]
if compare[name[typestr_or_type] equal[==] constant[all]] begin[:]
return[constant[True]]
if compare[call[name[type], parameter[name[typestr_or_type]]] equal[==] name[types].TypeType] begin[:]
variable[test_type] assign[=] name[typestr_or_type]
if name[test_type] begin[:]
return[call[name[isinstance], parameter[name[obj], name[test_type]]]]
return[constant[False]] | keyword[def] identifier[is_type] ( identifier[obj] , identifier[typestr_or_type] ):
literal[string]
keyword[if] identifier[typestr_or_type] == literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[type] ( identifier[typestr_or_type] )== identifier[types] . identifier[TypeType] :
identifier[test_type] = identifier[typestr_or_type]
keyword[else] :
identifier[test_type] = identifier[typestr2type] . identifier[get] ( identifier[typestr_or_type] , keyword[False] )
keyword[if] identifier[test_type] :
keyword[return] identifier[isinstance] ( identifier[obj] , identifier[test_type] )
keyword[return] keyword[False] | def is_type(obj, typestr_or_type):
"""is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type."""
if typestr_or_type == 'all':
return True # depends on [control=['if'], data=[]]
if type(typestr_or_type) == types.TypeType:
test_type = typestr_or_type # depends on [control=['if'], data=[]]
else:
test_type = typestr2type.get(typestr_or_type, False)
if test_type:
return isinstance(obj, test_type) # depends on [control=['if'], data=[]]
return False |
def visit_Set(self, node: ast.Set) -> Set[Any]:
"""Visit the elements and assemble the results into a set."""
result = set(self.visit(node=elt) for elt in node.elts)
self.recomputed_values[node] = result
return result | def function[visit_Set, parameter[self, node]]:
constant[Visit the elements and assemble the results into a set.]
variable[result] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b107aa70>]]
call[name[self].recomputed_values][name[node]] assign[=] name[result]
return[name[result]] | keyword[def] identifier[visit_Set] ( identifier[self] , identifier[node] : identifier[ast] . identifier[Set] )-> identifier[Set] [ identifier[Any] ]:
literal[string]
identifier[result] = identifier[set] ( identifier[self] . identifier[visit] ( identifier[node] = identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[node] . identifier[elts] )
identifier[self] . identifier[recomputed_values] [ identifier[node] ]= identifier[result]
keyword[return] identifier[result] | def visit_Set(self, node: ast.Set) -> Set[Any]:
"""Visit the elements and assemble the results into a set."""
result = set((self.visit(node=elt) for elt in node.elts))
self.recomputed_values[node] = result
return result |
def _channel_exists_and_not_settled(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> bool:
"""Returns if the channel exists and is in a non-settled state"""
try:
channel_state = self._get_channel_state(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
except RaidenRecoverableError:
return False
exists_and_not_settled = (
channel_state > ChannelState.NONEXISTENT and
channel_state < ChannelState.SETTLED
)
return exists_and_not_settled | def function[_channel_exists_and_not_settled, parameter[self, participant1, participant2, block_identifier, channel_identifier]]:
constant[Returns if the channel exists and is in a non-settled state]
<ast.Try object at 0x7da1b19db910>
variable[exists_and_not_settled] assign[=] <ast.BoolOp object at 0x7da1b19d95a0>
return[name[exists_and_not_settled]] | keyword[def] identifier[_channel_exists_and_not_settled] (
identifier[self] ,
identifier[participant1] : identifier[Address] ,
identifier[participant2] : identifier[Address] ,
identifier[block_identifier] : identifier[BlockSpecification] ,
identifier[channel_identifier] : identifier[ChannelID] = keyword[None] ,
)-> identifier[bool] :
literal[string]
keyword[try] :
identifier[channel_state] = identifier[self] . identifier[_get_channel_state] (
identifier[participant1] = identifier[participant1] ,
identifier[participant2] = identifier[participant2] ,
identifier[block_identifier] = identifier[block_identifier] ,
identifier[channel_identifier] = identifier[channel_identifier] ,
)
keyword[except] identifier[RaidenRecoverableError] :
keyword[return] keyword[False]
identifier[exists_and_not_settled] =(
identifier[channel_state] > identifier[ChannelState] . identifier[NONEXISTENT] keyword[and]
identifier[channel_state] < identifier[ChannelState] . identifier[SETTLED]
)
keyword[return] identifier[exists_and_not_settled] | def _channel_exists_and_not_settled(self, participant1: Address, participant2: Address, block_identifier: BlockSpecification, channel_identifier: ChannelID=None) -> bool:
"""Returns if the channel exists and is in a non-settled state"""
try:
channel_state = self._get_channel_state(participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_identifier) # depends on [control=['try'], data=[]]
except RaidenRecoverableError:
return False # depends on [control=['except'], data=[]]
exists_and_not_settled = channel_state > ChannelState.NONEXISTENT and channel_state < ChannelState.SETTLED
return exists_and_not_settled |
def has_implementation(self, number, arch, abi_list=()):
"""
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not an implementation of the syscall is available
"""
name, _, _ = self._canonicalize(number, arch, abi_list)
return super(SimSyscallLibrary, self).has_implementation(name) | def function[has_implementation, parameter[self, number, arch, abi_list]]:
constant[
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not an implementation of the syscall is available
]
<ast.Tuple object at 0x7da18bc71a50> assign[=] call[name[self]._canonicalize, parameter[name[number], name[arch], name[abi_list]]]
return[call[call[name[super], parameter[name[SimSyscallLibrary], name[self]]].has_implementation, parameter[name[name]]]] | keyword[def] identifier[has_implementation] ( identifier[self] , identifier[number] , identifier[arch] , identifier[abi_list] =()):
literal[string]
identifier[name] , identifier[_] , identifier[_] = identifier[self] . identifier[_canonicalize] ( identifier[number] , identifier[arch] , identifier[abi_list] )
keyword[return] identifier[super] ( identifier[SimSyscallLibrary] , identifier[self] ). identifier[has_implementation] ( identifier[name] ) | def has_implementation(self, number, arch, abi_list=()):
"""
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not an implementation of the syscall is available
"""
(name, _, _) = self._canonicalize(number, arch, abi_list)
return super(SimSyscallLibrary, self).has_implementation(name) |
def is_supported():
'''
Check the system for ZFS support
'''
# Check for supported platforms
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
on_supported_platform = False
if salt.utils.platform.is_sunos():
on_supported_platform = True
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
on_supported_platform = True
elif salt.utils.platform.is_darwin() and \
os.path.exists('/Library/Extensions/zfs.kext') and \
os.path.exists('/dev/zfs'):
on_supported_platform = True
# Additional check for the zpool command
return (salt.utils.path.which('zpool') and on_supported_platform) is True | def function[is_supported, parameter[]]:
constant[
Check the system for ZFS support
]
variable[on_supported_platform] assign[=] constant[False]
if call[name[salt].utils.platform.is_sunos, parameter[]] begin[:]
variable[on_supported_platform] assign[=] constant[True]
return[compare[<ast.BoolOp object at 0x7da1b215e6e0> is constant[True]]] | keyword[def] identifier[is_supported] ():
literal[string]
identifier[on_supported_platform] = keyword[False]
keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_sunos] ():
identifier[on_supported_platform] = keyword[True]
keyword[elif] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_freebsd] () keyword[and] identifier[_check_retcode] ( literal[string] ):
identifier[on_supported_platform] = keyword[True]
keyword[elif] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_linux] () keyword[and] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
identifier[on_supported_platform] = keyword[True]
keyword[elif] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_linux] () keyword[and] identifier[salt] . identifier[utils] . identifier[path] . identifier[which] ( literal[string] ):
identifier[on_supported_platform] = keyword[True]
keyword[elif] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_darwin] () keyword[and] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
identifier[on_supported_platform] = keyword[True]
keyword[return] ( identifier[salt] . identifier[utils] . identifier[path] . identifier[which] ( literal[string] ) keyword[and] identifier[on_supported_platform] ) keyword[is] keyword[True] | def is_supported():
"""
Check the system for ZFS support
"""
# Check for supported platforms
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
on_supported_platform = False
if salt.utils.platform.is_sunos():
on_supported_platform = True # depends on [control=['if'], data=[]]
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
on_supported_platform = True # depends on [control=['if'], data=[]]
elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
on_supported_platform = True # depends on [control=['if'], data=[]]
elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
on_supported_platform = True # depends on [control=['if'], data=[]]
elif salt.utils.platform.is_darwin() and os.path.exists('/Library/Extensions/zfs.kext') and os.path.exists('/dev/zfs'):
on_supported_platform = True # depends on [control=['if'], data=[]]
# Additional check for the zpool command
return (salt.utils.path.which('zpool') and on_supported_platform) is True |
def return_markers(self):
"""Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
"""
markers = []
for v in self.mrk['Marker Infos'].values():
if v[0] == 'New Segment':
continue
markers.append({
'name': v[1],
'start': float(v[2]) / self.s_freq,
'end': (float(v[2]) + float(v[3])) / self.s_freq,
})
return markers | def function[return_markers, parameter[self]]:
constant[Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
]
variable[markers] assign[=] list[[]]
for taget[name[v]] in starred[call[call[name[self].mrk][constant[Marker Infos]].values, parameter[]]] begin[:]
if compare[call[name[v]][constant[0]] equal[==] constant[New Segment]] begin[:]
continue
call[name[markers].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0d76a70>, <ast.Constant object at 0x7da1b0d77e80>, <ast.Constant object at 0x7da1b0d75630>], [<ast.Subscript object at 0x7da1b0d766e0>, <ast.BinOp object at 0x7da1b0d771c0>, <ast.BinOp object at 0x7da1b0d760e0>]]]]
return[name[markers]] | keyword[def] identifier[return_markers] ( identifier[self] ):
literal[string]
identifier[markers] =[]
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[mrk] [ literal[string] ]. identifier[values] ():
keyword[if] identifier[v] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[markers] . identifier[append] ({
literal[string] : identifier[v] [ literal[int] ],
literal[string] : identifier[float] ( identifier[v] [ literal[int] ])/ identifier[self] . identifier[s_freq] ,
literal[string] :( identifier[float] ( identifier[v] [ literal[int] ])+ identifier[float] ( identifier[v] [ literal[int] ]))/ identifier[self] . identifier[s_freq] ,
})
keyword[return] identifier[markers] | def return_markers(self):
"""Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
"""
markers = []
for v in self.mrk['Marker Infos'].values():
if v[0] == 'New Segment':
continue # depends on [control=['if'], data=[]]
markers.append({'name': v[1], 'start': float(v[2]) / self.s_freq, 'end': (float(v[2]) + float(v[3])) / self.s_freq}) # depends on [control=['for'], data=['v']]
return markers |
def matches(self, hash_alg, hash_value):
"""
Does our algorithm and hash value match the specified arguments.
:param hash_alg: str: hash algorithm
:param hash_value: str: hash value
:return: boolean
"""
return self.alg == hash_alg and self.value == hash_value | def function[matches, parameter[self, hash_alg, hash_value]]:
constant[
Does our algorithm and hash value match the specified arguments.
:param hash_alg: str: hash algorithm
:param hash_value: str: hash value
:return: boolean
]
return[<ast.BoolOp object at 0x7da18ede77f0>] | keyword[def] identifier[matches] ( identifier[self] , identifier[hash_alg] , identifier[hash_value] ):
literal[string]
keyword[return] identifier[self] . identifier[alg] == identifier[hash_alg] keyword[and] identifier[self] . identifier[value] == identifier[hash_value] | def matches(self, hash_alg, hash_value):
"""
Does our algorithm and hash value match the specified arguments.
:param hash_alg: str: hash algorithm
:param hash_value: str: hash value
:return: boolean
"""
return self.alg == hash_alg and self.value == hash_value |
def get_version(model_instance, version):
"""
try go load from the database one object with specific version
:param model_instance: instance in memory
:param version: version number
:return:
"""
version_field = get_version_fieldname(model_instance)
kwargs = {'pk': model_instance.pk, version_field: version}
return model_instance.__class__.objects.get(**kwargs) | def function[get_version, parameter[model_instance, version]]:
constant[
try go load from the database one object with specific version
:param model_instance: instance in memory
:param version: version number
:return:
]
variable[version_field] assign[=] call[name[get_version_fieldname], parameter[name[model_instance]]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b08d9b70>, <ast.Name object at 0x7da1b08d8a00>], [<ast.Attribute object at 0x7da1b08db1f0>, <ast.Name object at 0x7da1b08d8910>]]
return[call[name[model_instance].__class__.objects.get, parameter[]]] | keyword[def] identifier[get_version] ( identifier[model_instance] , identifier[version] ):
literal[string]
identifier[version_field] = identifier[get_version_fieldname] ( identifier[model_instance] )
identifier[kwargs] ={ literal[string] : identifier[model_instance] . identifier[pk] , identifier[version_field] : identifier[version] }
keyword[return] identifier[model_instance] . identifier[__class__] . identifier[objects] . identifier[get] (** identifier[kwargs] ) | def get_version(model_instance, version):
"""
try go load from the database one object with specific version
:param model_instance: instance in memory
:param version: version number
:return:
"""
version_field = get_version_fieldname(model_instance)
kwargs = {'pk': model_instance.pk, version_field: version}
return model_instance.__class__.objects.get(**kwargs) |
def del_host_downtime(self, downtime_id):
"""Delete a host downtime
Format of the line that triggers function call::
DEL_HOST_DOWNTIME;<downtime_id>
:param downtime_id: downtime id to delete
:type downtime_id: int
:return: None
"""
broks = []
for item in self.daemon.hosts:
if downtime_id in item.downtimes:
broks.extend(item.downtimes[downtime_id].cancel(self.daemon.timeperiods,
self.daemon.hosts,
self.daemon.services))
break
else:
self.send_an_element(make_monitoring_log(
'warning', 'DEL_HOST_DOWNTIME: downtime id: %s does not exist '
'and cannot be deleted.' % downtime_id))
for brok in broks:
self.send_an_element(brok) | def function[del_host_downtime, parameter[self, downtime_id]]:
constant[Delete a host downtime
Format of the line that triggers function call::
DEL_HOST_DOWNTIME;<downtime_id>
:param downtime_id: downtime id to delete
:type downtime_id: int
:return: None
]
variable[broks] assign[=] list[[]]
for taget[name[item]] in starred[name[self].daemon.hosts] begin[:]
if compare[name[downtime_id] in name[item].downtimes] begin[:]
call[name[broks].extend, parameter[call[call[name[item].downtimes][name[downtime_id]].cancel, parameter[name[self].daemon.timeperiods, name[self].daemon.hosts, name[self].daemon.services]]]]
break
for taget[name[brok]] in starred[name[broks]] begin[:]
call[name[self].send_an_element, parameter[name[brok]]] | keyword[def] identifier[del_host_downtime] ( identifier[self] , identifier[downtime_id] ):
literal[string]
identifier[broks] =[]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[daemon] . identifier[hosts] :
keyword[if] identifier[downtime_id] keyword[in] identifier[item] . identifier[downtimes] :
identifier[broks] . identifier[extend] ( identifier[item] . identifier[downtimes] [ identifier[downtime_id] ]. identifier[cancel] ( identifier[self] . identifier[daemon] . identifier[timeperiods] ,
identifier[self] . identifier[daemon] . identifier[hosts] ,
identifier[self] . identifier[daemon] . identifier[services] ))
keyword[break]
keyword[else] :
identifier[self] . identifier[send_an_element] ( identifier[make_monitoring_log] (
literal[string] , literal[string]
literal[string] % identifier[downtime_id] ))
keyword[for] identifier[brok] keyword[in] identifier[broks] :
identifier[self] . identifier[send_an_element] ( identifier[brok] ) | def del_host_downtime(self, downtime_id):
"""Delete a host downtime
Format of the line that triggers function call::
DEL_HOST_DOWNTIME;<downtime_id>
:param downtime_id: downtime id to delete
:type downtime_id: int
:return: None
"""
broks = []
for item in self.daemon.hosts:
if downtime_id in item.downtimes:
broks.extend(item.downtimes[downtime_id].cancel(self.daemon.timeperiods, self.daemon.hosts, self.daemon.services))
break # depends on [control=['if'], data=['downtime_id']] # depends on [control=['for'], data=['item']]
else:
self.send_an_element(make_monitoring_log('warning', 'DEL_HOST_DOWNTIME: downtime id: %s does not exist and cannot be deleted.' % downtime_id))
for brok in broks:
self.send_an_element(brok) # depends on [control=['for'], data=['brok']] |
def call(name,
func,
args=(),
kws=None,
onlyif=None,
unless=None,
creates=None,
output_loglevel='debug',
hide_output=False,
use_vt=False,
**kwargs):
'''
Invoke a pre-defined Python function with arguments specified in the state
declaration. This function is mainly used by the
:mod:`salt.renderers.pydsl` renderer.
The interpretation of ``onlyif`` and ``unless`` arguments are identical to
those of :mod:`cmd.run <salt.states.cmd.run>`, and all other
arguments(``cwd``, ``runas``, ...) allowed by :mod:`cmd.run
<salt.states.cmd.run>` are allowed here, except that their effects apply
only to the commands specified in `onlyif` and `unless` rather than to the
function to be invoked.
In addition, the ``stateful`` argument has no effects here.
The return value of the invoked function will be interpreted as follows.
If it's a dictionary then it will be passed through to the state system,
which expects it to have the usual structure returned by any salt state
function.
Otherwise, the return value (denoted as ``result`` in the code below) is
expected to be a JSON serializable object, and this dictionary is returned:
.. code-block:: python
{
'name': name
'changes': {'retval': result},
'result': True if result is None else bool(result),
'comment': result if isinstance(result, six.string_types) else ''
}
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
cmd_kwargs = {'cwd': kwargs.get('cwd'),
'runas': kwargs.get('user'),
'shell': kwargs.get('shell') or __grains__['shell'],
'env': kwargs.get('env'),
'use_vt': use_vt,
'output_loglevel': output_loglevel,
'hide_output': hide_output,
'umask': kwargs.get('umask')}
cret = mod_run_check(cmd_kwargs, onlyif, unless, creates)
if isinstance(cret, dict):
ret.update(cret)
return ret
if not kws:
kws = {}
result = func(*args, **kws)
if isinstance(result, dict):
ret.update(result)
return ret
else:
# result must be JSON serializable else we get an error
ret['changes'] = {'retval': result}
ret['result'] = True if result is None else bool(result)
if isinstance(result, six.string_types):
ret['comment'] = result
return ret | def function[call, parameter[name, func, args, kws, onlyif, unless, creates, output_loglevel, hide_output, use_vt]]:
constant[
Invoke a pre-defined Python function with arguments specified in the state
declaration. This function is mainly used by the
:mod:`salt.renderers.pydsl` renderer.
The interpretation of ``onlyif`` and ``unless`` arguments are identical to
those of :mod:`cmd.run <salt.states.cmd.run>`, and all other
arguments(``cwd``, ``runas``, ...) allowed by :mod:`cmd.run
<salt.states.cmd.run>` are allowed here, except that their effects apply
only to the commands specified in `onlyif` and `unless` rather than to the
function to be invoked.
In addition, the ``stateful`` argument has no effects here.
The return value of the invoked function will be interpreted as follows.
If it's a dictionary then it will be passed through to the state system,
which expects it to have the usual structure returned by any salt state
function.
Otherwise, the return value (denoted as ``result`` in the code below) is
expected to be a JSON serializable object, and this dictionary is returned:
.. code-block:: python
{
'name': name
'changes': {'retval': result},
'result': True if result is None else bool(result),
'comment': result if isinstance(result, six.string_types) else ''
}
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2010b20>, <ast.Constant object at 0x7da1b2010340>, <ast.Constant object at 0x7da1b2011900>, <ast.Constant object at 0x7da1b2011930>], [<ast.Name object at 0x7da1b2010190>, <ast.Dict object at 0x7da1b20112d0>, <ast.Constant object at 0x7da1b2012980>, <ast.Constant object at 0x7da1b2012200>]]
variable[cmd_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b2011c90>, <ast.Constant object at 0x7da1b2011720>, <ast.Constant object at 0x7da1b2010460>, <ast.Constant object at 0x7da1b2011990>, <ast.Constant object at 0x7da1b20103a0>, <ast.Constant object at 0x7da1b20113f0>, <ast.Constant object at 0x7da1b2010490>, <ast.Constant object at 0x7da1b2010310>], [<ast.Call object at 0x7da1b2011e10>, <ast.Call object at 0x7da1b20104c0>, <ast.BoolOp object at 0x7da1b2013fd0>, <ast.Call object at 0x7da1b2010b80>, <ast.Name object at 0x7da1b2010940>, <ast.Name object at 0x7da1b2010a30>, <ast.Name object at 0x7da1b2010a90>, <ast.Call object at 0x7da1b20107f0>]]
variable[cret] assign[=] call[name[mod_run_check], parameter[name[cmd_kwargs], name[onlyif], name[unless], name[creates]]]
if call[name[isinstance], parameter[name[cret], name[dict]]] begin[:]
call[name[ret].update, parameter[name[cret]]]
return[name[ret]]
if <ast.UnaryOp object at 0x7da1b20105e0> begin[:]
variable[kws] assign[=] dictionary[[], []]
variable[result] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b2012ad0>]]
if call[name[isinstance], parameter[name[result], name[dict]]] begin[:]
call[name[ret].update, parameter[name[result]]]
return[name[ret]] | keyword[def] identifier[call] ( identifier[name] ,
identifier[func] ,
identifier[args] =(),
identifier[kws] = keyword[None] ,
identifier[onlyif] = keyword[None] ,
identifier[unless] = keyword[None] ,
identifier[creates] = keyword[None] ,
identifier[output_loglevel] = literal[string] ,
identifier[hide_output] = keyword[False] ,
identifier[use_vt] = keyword[False] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
identifier[cmd_kwargs] ={ literal[string] : identifier[kwargs] . identifier[get] ( literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] ),
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] identifier[__grains__] [ literal[string] ],
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] ),
literal[string] : identifier[use_vt] ,
literal[string] : identifier[output_loglevel] ,
literal[string] : identifier[hide_output] ,
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] )}
identifier[cret] = identifier[mod_run_check] ( identifier[cmd_kwargs] , identifier[onlyif] , identifier[unless] , identifier[creates] )
keyword[if] identifier[isinstance] ( identifier[cret] , identifier[dict] ):
identifier[ret] . identifier[update] ( identifier[cret] )
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[kws] :
identifier[kws] ={}
identifier[result] = identifier[func] (* identifier[args] ,** identifier[kws] )
keyword[if] identifier[isinstance] ( identifier[result] , identifier[dict] ):
identifier[ret] . identifier[update] ( identifier[result] )
keyword[return] identifier[ret]
keyword[else] :
identifier[ret] [ literal[string] ]={ literal[string] : identifier[result] }
identifier[ret] [ literal[string] ]= keyword[True] keyword[if] identifier[result] keyword[is] keyword[None] keyword[else] identifier[bool] ( identifier[result] )
keyword[if] identifier[isinstance] ( identifier[result] , identifier[six] . identifier[string_types] ):
identifier[ret] [ literal[string] ]= identifier[result]
keyword[return] identifier[ret] | def call(name, func, args=(), kws=None, onlyif=None, unless=None, creates=None, output_loglevel='debug', hide_output=False, use_vt=False, **kwargs):
"""
Invoke a pre-defined Python function with arguments specified in the state
declaration. This function is mainly used by the
:mod:`salt.renderers.pydsl` renderer.
The interpretation of ``onlyif`` and ``unless`` arguments are identical to
those of :mod:`cmd.run <salt.states.cmd.run>`, and all other
arguments(``cwd``, ``runas``, ...) allowed by :mod:`cmd.run
<salt.states.cmd.run>` are allowed here, except that their effects apply
only to the commands specified in `onlyif` and `unless` rather than to the
function to be invoked.
In addition, the ``stateful`` argument has no effects here.
The return value of the invoked function will be interpreted as follows.
If it's a dictionary then it will be passed through to the state system,
which expects it to have the usual structure returned by any salt state
function.
Otherwise, the return value (denoted as ``result`` in the code below) is
expected to be a JSON serializable object, and this dictionary is returned:
.. code-block:: python
{
'name': name
'changes': {'retval': result},
'result': True if result is None else bool(result),
'comment': result if isinstance(result, six.string_types) else ''
}
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
cmd_kwargs = {'cwd': kwargs.get('cwd'), 'runas': kwargs.get('user'), 'shell': kwargs.get('shell') or __grains__['shell'], 'env': kwargs.get('env'), 'use_vt': use_vt, 'output_loglevel': output_loglevel, 'hide_output': hide_output, 'umask': kwargs.get('umask')}
cret = mod_run_check(cmd_kwargs, onlyif, unless, creates)
if isinstance(cret, dict):
ret.update(cret)
return ret # depends on [control=['if'], data=[]]
if not kws:
kws = {} # depends on [control=['if'], data=[]]
result = func(*args, **kws)
if isinstance(result, dict):
ret.update(result)
return ret # depends on [control=['if'], data=[]]
else:
# result must be JSON serializable else we get an error
ret['changes'] = {'retval': result}
ret['result'] = True if result is None else bool(result)
if isinstance(result, six.string_types):
ret['comment'] = result # depends on [control=['if'], data=[]]
return ret |
def undeclared_query_parameters(self):
"""Return undeclared query parameters from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters
:rtype:
list of
:class:`~google.cloud.bigquery.ArrayQueryParameter`,
:class:`~google.cloud.bigquery.ScalarQueryParameter`, or
:class:`~google.cloud.bigquery.StructQueryParameter`
:returns: undeclared parameters, or an empty list if the query has
not yet completed.
"""
parameters = []
undeclared = self._job_statistics().get("undeclaredQueryParameters", ())
for parameter in undeclared:
p_type = parameter["parameterType"]
if "arrayType" in p_type:
klass = ArrayQueryParameter
elif "structTypes" in p_type:
klass = StructQueryParameter
else:
klass = ScalarQueryParameter
parameters.append(klass.from_api_repr(parameter))
return parameters | def function[undeclared_query_parameters, parameter[self]]:
constant[Return undeclared query parameters from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters
:rtype:
list of
:class:`~google.cloud.bigquery.ArrayQueryParameter`,
:class:`~google.cloud.bigquery.ScalarQueryParameter`, or
:class:`~google.cloud.bigquery.StructQueryParameter`
:returns: undeclared parameters, or an empty list if the query has
not yet completed.
]
variable[parameters] assign[=] list[[]]
variable[undeclared] assign[=] call[call[name[self]._job_statistics, parameter[]].get, parameter[constant[undeclaredQueryParameters], tuple[[]]]]
for taget[name[parameter]] in starred[name[undeclared]] begin[:]
variable[p_type] assign[=] call[name[parameter]][constant[parameterType]]
if compare[constant[arrayType] in name[p_type]] begin[:]
variable[klass] assign[=] name[ArrayQueryParameter]
call[name[parameters].append, parameter[call[name[klass].from_api_repr, parameter[name[parameter]]]]]
return[name[parameters]] | keyword[def] identifier[undeclared_query_parameters] ( identifier[self] ):
literal[string]
identifier[parameters] =[]
identifier[undeclared] = identifier[self] . identifier[_job_statistics] (). identifier[get] ( literal[string] ,())
keyword[for] identifier[parameter] keyword[in] identifier[undeclared] :
identifier[p_type] = identifier[parameter] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[p_type] :
identifier[klass] = identifier[ArrayQueryParameter]
keyword[elif] literal[string] keyword[in] identifier[p_type] :
identifier[klass] = identifier[StructQueryParameter]
keyword[else] :
identifier[klass] = identifier[ScalarQueryParameter]
identifier[parameters] . identifier[append] ( identifier[klass] . identifier[from_api_repr] ( identifier[parameter] ))
keyword[return] identifier[parameters] | def undeclared_query_parameters(self):
"""Return undeclared query parameters from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters
:rtype:
list of
:class:`~google.cloud.bigquery.ArrayQueryParameter`,
:class:`~google.cloud.bigquery.ScalarQueryParameter`, or
:class:`~google.cloud.bigquery.StructQueryParameter`
:returns: undeclared parameters, or an empty list if the query has
not yet completed.
"""
parameters = []
undeclared = self._job_statistics().get('undeclaredQueryParameters', ())
for parameter in undeclared:
p_type = parameter['parameterType']
if 'arrayType' in p_type:
klass = ArrayQueryParameter # depends on [control=['if'], data=[]]
elif 'structTypes' in p_type:
klass = StructQueryParameter # depends on [control=['if'], data=[]]
else:
klass = ScalarQueryParameter
parameters.append(klass.from_api_repr(parameter)) # depends on [control=['for'], data=['parameter']]
return parameters |
def set_inlets(self, pores=[], overwrite=False):
r"""
Set the locations from which the invader enters the network
Parameters
----------
pores : array_like
Locations that are initially filled with invader, from which
clusters grow and invade into the network
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False`` (default),
then supplied locations are added to any already existing inlet
locations.
"""
Ps = self._parse_indices(pores)
if np.sum(self['pore.outlets'][Ps]) > 0:
raise Exception('Some inlets are already defined as outlets')
if overwrite:
self['pore.inlets'] = False
self['pore.inlets'][Ps] = True
self['pore.invasion_pressure'][Ps] = 0
self['pore.invasion_sequence'][Ps] = 0 | def function[set_inlets, parameter[self, pores, overwrite]]:
constant[
Set the locations from which the invader enters the network
Parameters
----------
pores : array_like
Locations that are initially filled with invader, from which
clusters grow and invade into the network
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False`` (default),
then supplied locations are added to any already existing inlet
locations.
]
variable[Ps] assign[=] call[name[self]._parse_indices, parameter[name[pores]]]
if compare[call[name[np].sum, parameter[call[call[name[self]][constant[pore.outlets]]][name[Ps]]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da18c4cfb80>
if name[overwrite] begin[:]
call[name[self]][constant[pore.inlets]] assign[=] constant[False]
call[call[name[self]][constant[pore.inlets]]][name[Ps]] assign[=] constant[True]
call[call[name[self]][constant[pore.invasion_pressure]]][name[Ps]] assign[=] constant[0]
call[call[name[self]][constant[pore.invasion_sequence]]][name[Ps]] assign[=] constant[0] | keyword[def] identifier[set_inlets] ( identifier[self] , identifier[pores] =[], identifier[overwrite] = keyword[False] ):
literal[string]
identifier[Ps] = identifier[self] . identifier[_parse_indices] ( identifier[pores] )
keyword[if] identifier[np] . identifier[sum] ( identifier[self] [ literal[string] ][ identifier[Ps] ])> literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[overwrite] :
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[Ps] ]= keyword[True]
identifier[self] [ literal[string] ][ identifier[Ps] ]= literal[int]
identifier[self] [ literal[string] ][ identifier[Ps] ]= literal[int] | def set_inlets(self, pores=[], overwrite=False):
"""
Set the locations from which the invader enters the network
Parameters
----------
pores : array_like
Locations that are initially filled with invader, from which
clusters grow and invade into the network
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False`` (default),
then supplied locations are added to any already existing inlet
locations.
"""
Ps = self._parse_indices(pores)
if np.sum(self['pore.outlets'][Ps]) > 0:
raise Exception('Some inlets are already defined as outlets') # depends on [control=['if'], data=[]]
if overwrite:
self['pore.inlets'] = False # depends on [control=['if'], data=[]]
self['pore.inlets'][Ps] = True
self['pore.invasion_pressure'][Ps] = 0
self['pore.invasion_sequence'][Ps] = 0 |
def start_greedy_ensemble_search(automated_run, session, path):
"""Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble the model in the library that maximizes the ensemmble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
best_ensemble = [] # List containing IDs of best performing ensemble for the last round
secondary_learner = automated_run.base_learner_origin.return_estimator()
secondary_learner.set_params(**module.secondary_learner_hyperparameters)
for i in range(module.max_num_base_learners):
best_score = -float('inf') # Best metric for this round (not in total!)
current_ensemble = best_ensemble[:] # Shallow copy of best ensemble
for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
if base_learner in current_ensemble: # Don't append when learner is already in
continue
current_ensemble.append(base_learner)
# Check if our "best ensemble" already exists
existing_ensemble = session.query(models.StackedEnsemble).\
filter_by(base_learner_origin_id=automated_run.base_learner_origin.id,
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()
if existing_ensemble and existing_ensemble.job_status == 'finished':
score = existing_ensemble.individual_score[module.metric_to_optimize]
elif existing_ensemble and existing_ensemble.job_status != 'finished':
eval_stacked_ensemble(existing_ensemble, session, path)
score = existing_ensemble.individual_score[module.metric_to_optimize]
else:
stacked_ensemble = models.StackedEnsemble(
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learners=current_ensemble,
base_learner_origin=automated_run.base_learner_origin,
job_status='started'
)
session.add(stacked_ensemble)
session.commit()
eval_stacked_ensemble(stacked_ensemble, session, path)
score = stacked_ensemble.individual_score[module.metric_to_optimize]
score = -score if module.invert_metric else score
if best_score < score:
best_score = score
best_ensemble = current_ensemble[:]
current_ensemble.pop() | def function[start_greedy_ensemble_search, parameter[automated_run, session, path]]:
constant[Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble the model in the library that maximizes the ensemmble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
]
variable[module] assign[=] call[name[functions].import_string_code_as_module, parameter[name[automated_run].source]]
assert[compare[name[module].metric_to_optimize in name[automated_run].base_learner_origin.metric_generators]]
variable[best_ensemble] assign[=] list[[]]
variable[secondary_learner] assign[=] call[name[automated_run].base_learner_origin.return_estimator, parameter[]]
call[name[secondary_learner].set_params, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[name[module].max_num_base_learners]]] begin[:]
variable[best_score] assign[=] <ast.UnaryOp object at 0x7da20e960760>
variable[current_ensemble] assign[=] call[name[best_ensemble]][<ast.Slice object at 0x7da20e9600d0>]
for taget[name[base_learner]] in starred[call[call[call[name[session].query, parameter[name[models].BaseLearner]].filter_by, parameter[]].all, parameter[]]] begin[:]
if compare[name[base_learner] in name[current_ensemble]] begin[:]
continue
call[name[current_ensemble].append, parameter[name[base_learner]]]
variable[existing_ensemble] assign[=] call[call[call[name[session].query, parameter[name[models].StackedEnsemble]].filter_by, parameter[]].first, parameter[]]
if <ast.BoolOp object at 0x7da18dc9ad70> begin[:]
variable[score] assign[=] call[name[existing_ensemble].individual_score][name[module].metric_to_optimize]
variable[score] assign[=] <ast.IfExp object at 0x7da18dc9bd60>
if compare[name[best_score] less[<] name[score]] begin[:]
variable[best_score] assign[=] name[score]
variable[best_ensemble] assign[=] call[name[current_ensemble]][<ast.Slice object at 0x7da18dc99900>]
call[name[current_ensemble].pop, parameter[]] | keyword[def] identifier[start_greedy_ensemble_search] ( identifier[automated_run] , identifier[session] , identifier[path] ):
literal[string]
identifier[module] = identifier[functions] . identifier[import_string_code_as_module] ( identifier[automated_run] . identifier[source] )
keyword[assert] identifier[module] . identifier[metric_to_optimize] keyword[in] identifier[automated_run] . identifier[base_learner_origin] . identifier[metric_generators]
identifier[best_ensemble] =[]
identifier[secondary_learner] = identifier[automated_run] . identifier[base_learner_origin] . identifier[return_estimator] ()
identifier[secondary_learner] . identifier[set_params] (** identifier[module] . identifier[secondary_learner_hyperparameters] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[module] . identifier[max_num_base_learners] ):
identifier[best_score] =- identifier[float] ( literal[string] )
identifier[current_ensemble] = identifier[best_ensemble] [:]
keyword[for] identifier[base_learner] keyword[in] identifier[session] . identifier[query] ( identifier[models] . identifier[BaseLearner] ). identifier[filter_by] ( identifier[job_status] = literal[string] ). identifier[all] ():
keyword[if] identifier[base_learner] keyword[in] identifier[current_ensemble] :
keyword[continue]
identifier[current_ensemble] . identifier[append] ( identifier[base_learner] )
identifier[existing_ensemble] = identifier[session] . identifier[query] ( identifier[models] . identifier[StackedEnsemble] ). identifier[filter_by] ( identifier[base_learner_origin_id] = identifier[automated_run] . identifier[base_learner_origin] . identifier[id] ,
identifier[secondary_learner_hyperparameters] = identifier[secondary_learner] . identifier[get_params] (),
identifier[base_learner_ids] = identifier[sorted] ([ identifier[bl] . identifier[id] keyword[for] identifier[bl] keyword[in] identifier[current_ensemble] ])). identifier[first] ()
keyword[if] identifier[existing_ensemble] keyword[and] identifier[existing_ensemble] . identifier[job_status] == literal[string] :
identifier[score] = identifier[existing_ensemble] . identifier[individual_score] [ identifier[module] . identifier[metric_to_optimize] ]
keyword[elif] identifier[existing_ensemble] keyword[and] identifier[existing_ensemble] . identifier[job_status] != literal[string] :
identifier[eval_stacked_ensemble] ( identifier[existing_ensemble] , identifier[session] , identifier[path] )
identifier[score] = identifier[existing_ensemble] . identifier[individual_score] [ identifier[module] . identifier[metric_to_optimize] ]
keyword[else] :
identifier[stacked_ensemble] = identifier[models] . identifier[StackedEnsemble] (
identifier[secondary_learner_hyperparameters] = identifier[secondary_learner] . identifier[get_params] (),
identifier[base_learners] = identifier[current_ensemble] ,
identifier[base_learner_origin] = identifier[automated_run] . identifier[base_learner_origin] ,
identifier[job_status] = literal[string]
)
identifier[session] . identifier[add] ( identifier[stacked_ensemble] )
identifier[session] . identifier[commit] ()
identifier[eval_stacked_ensemble] ( identifier[stacked_ensemble] , identifier[session] , identifier[path] )
identifier[score] = identifier[stacked_ensemble] . identifier[individual_score] [ identifier[module] . identifier[metric_to_optimize] ]
identifier[score] =- identifier[score] keyword[if] identifier[module] . identifier[invert_metric] keyword[else] identifier[score]
keyword[if] identifier[best_score] < identifier[score] :
identifier[best_score] = identifier[score]
identifier[best_ensemble] = identifier[current_ensemble] [:]
identifier[current_ensemble] . identifier[pop] () | def start_greedy_ensemble_search(automated_run, session, path):
"""Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble the model in the library that maximizes the ensemmble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
best_ensemble = [] # List containing IDs of best performing ensemble for the last round
secondary_learner = automated_run.base_learner_origin.return_estimator()
secondary_learner.set_params(**module.secondary_learner_hyperparameters)
for i in range(module.max_num_base_learners):
best_score = -float('inf') # Best metric for this round (not in total!)
current_ensemble = best_ensemble[:] # Shallow copy of best ensemble
for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
if base_learner in current_ensemble: # Don't append when learner is already in
continue # depends on [control=['if'], data=[]]
current_ensemble.append(base_learner)
# Check if our "best ensemble" already exists
existing_ensemble = session.query(models.StackedEnsemble).filter_by(base_learner_origin_id=automated_run.base_learner_origin.id, secondary_learner_hyperparameters=secondary_learner.get_params(), base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()
if existing_ensemble and existing_ensemble.job_status == 'finished':
score = existing_ensemble.individual_score[module.metric_to_optimize] # depends on [control=['if'], data=[]]
elif existing_ensemble and existing_ensemble.job_status != 'finished':
eval_stacked_ensemble(existing_ensemble, session, path)
score = existing_ensemble.individual_score[module.metric_to_optimize] # depends on [control=['if'], data=[]]
else:
stacked_ensemble = models.StackedEnsemble(secondary_learner_hyperparameters=secondary_learner.get_params(), base_learners=current_ensemble, base_learner_origin=automated_run.base_learner_origin, job_status='started')
session.add(stacked_ensemble)
session.commit()
eval_stacked_ensemble(stacked_ensemble, session, path)
score = stacked_ensemble.individual_score[module.metric_to_optimize]
score = -score if module.invert_metric else score
if best_score < score:
best_score = score
best_ensemble = current_ensemble[:] # depends on [control=['if'], data=['best_score', 'score']]
current_ensemble.pop() # depends on [control=['for'], data=['base_learner']] # depends on [control=['for'], data=[]] |
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1 | def function[visit_Extends, parameter[self, node, frame]]:
constant[Calls the extender.]
if <ast.UnaryOp object at 0x7da18bcc9690> begin[:]
call[name[self].fail, parameter[constant[cannot use extend from a non top-level scope], name[node].lineno]]
if compare[name[self].extends_so_far greater[>] constant[0]] begin[:]
if <ast.UnaryOp object at 0x7da18bcc8910> begin[:]
call[name[self].writeline, parameter[constant[if parent_template is not None:]]]
call[name[self].indent, parameter[]]
call[name[self].writeline, parameter[binary_operation[constant[raise TemplateRuntimeError(%r)] <ast.Mod object at 0x7da2590d6920> constant[extended multiple times]]]]
if name[self].has_known_extends begin[:]
<ast.Raise object at 0x7da18bccb940>
call[name[self].writeline, parameter[constant[parent_template = environment.get_template(], name[node]]]
call[name[self].visit, parameter[name[node].template, name[frame]]]
call[name[self].write, parameter[binary_operation[constant[, %r)] <ast.Mod object at 0x7da2590d6920> name[self].name]]]
call[name[self].writeline, parameter[binary_operation[constant[for name, parent_block in parent_template.blocks.%s():] <ast.Mod object at 0x7da2590d6920> name[dict_item_iter]]]]
call[name[self].indent, parameter[]]
call[name[self].writeline, parameter[constant[context.blocks.setdefault(name, []).append(parent_block)]]]
call[name[self].outdent, parameter[]]
if name[frame].rootlevel begin[:]
name[self].has_known_extends assign[=] constant[True]
<ast.AugAssign object at 0x7da1b1e8d930> | keyword[def] identifier[visit_Extends] ( identifier[self] , identifier[node] , identifier[frame] ):
literal[string]
keyword[if] keyword[not] identifier[frame] . identifier[toplevel] :
identifier[self] . identifier[fail] ( literal[string] ,
identifier[node] . identifier[lineno] )
keyword[if] identifier[self] . identifier[extends_so_far] > literal[int] :
keyword[if] keyword[not] identifier[self] . identifier[has_known_extends] :
identifier[self] . identifier[writeline] ( literal[string] )
identifier[self] . identifier[indent] ()
identifier[self] . identifier[writeline] ( literal[string] %
literal[string] )
keyword[if] identifier[self] . identifier[has_known_extends] :
keyword[raise] identifier[CompilerExit] ()
keyword[else] :
identifier[self] . identifier[outdent] ()
identifier[self] . identifier[writeline] ( literal[string] , identifier[node] )
identifier[self] . identifier[visit] ( identifier[node] . identifier[template] , identifier[frame] )
identifier[self] . identifier[write] ( literal[string] % identifier[self] . identifier[name] )
identifier[self] . identifier[writeline] ( literal[string]
literal[string] % identifier[dict_item_iter] )
identifier[self] . identifier[indent] ()
identifier[self] . identifier[writeline] ( literal[string]
literal[string] )
identifier[self] . identifier[outdent] ()
keyword[if] identifier[frame] . identifier[rootlevel] :
identifier[self] . identifier[has_known_extends] = keyword[True]
identifier[self] . identifier[extends_so_far] += literal[int] | def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope', node.lineno) # depends on [control=['if'], data=[]]
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent() # depends on [control=['if'], data=[]]
self.writeline('raise TemplateRuntimeError(%r)' % 'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit() # depends on [control=['if'], data=[]]
else:
self.outdent() # depends on [control=['if'], data=[]]
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True # depends on [control=['if'], data=[]]
# and now we have one more
self.extends_so_far += 1 |
def map_async(self, callback, pass_batch_into_callback=None,
merge_future=None, **q_options):
"""Map a callback function or tasklet over the query results.
This is the asynchronous version of Query.map().
"""
qry = self._fix_namespace()
return tasklets.get_context().map_query(
qry,
callback,
pass_batch_into_callback=pass_batch_into_callback,
options=self._make_options(q_options),
merge_future=merge_future) | def function[map_async, parameter[self, callback, pass_batch_into_callback, merge_future]]:
constant[Map a callback function or tasklet over the query results.
This is the asynchronous version of Query.map().
]
variable[qry] assign[=] call[name[self]._fix_namespace, parameter[]]
return[call[call[name[tasklets].get_context, parameter[]].map_query, parameter[name[qry], name[callback]]]] | keyword[def] identifier[map_async] ( identifier[self] , identifier[callback] , identifier[pass_batch_into_callback] = keyword[None] ,
identifier[merge_future] = keyword[None] ,** identifier[q_options] ):
literal[string]
identifier[qry] = identifier[self] . identifier[_fix_namespace] ()
keyword[return] identifier[tasklets] . identifier[get_context] (). identifier[map_query] (
identifier[qry] ,
identifier[callback] ,
identifier[pass_batch_into_callback] = identifier[pass_batch_into_callback] ,
identifier[options] = identifier[self] . identifier[_make_options] ( identifier[q_options] ),
identifier[merge_future] = identifier[merge_future] ) | def map_async(self, callback, pass_batch_into_callback=None, merge_future=None, **q_options):
"""Map a callback function or tasklet over the query results.
This is the asynchronous version of Query.map().
"""
qry = self._fix_namespace()
return tasklets.get_context().map_query(qry, callback, pass_batch_into_callback=pass_batch_into_callback, options=self._make_options(q_options), merge_future=merge_future) |
def _peek_unicode(
self, is_long
): # type: (bool) -> Tuple[Optional[str], Optional[str]]
"""
Peeks ahead non-intrusively by cloning then restoring the
initial state of the parser.
Returns the unicode value is it's a valid one else None.
"""
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current not in {"u", "U"}:
raise self.parse_error(
InternalParserError, "_peek_unicode() entered on non-unicode value"
)
self.inc() # Dropping prefix
self.mark()
if is_long:
chars = 8
else:
chars = 4
if not self.inc_n(chars):
value, extracted = None, None
else:
extracted = self.extract()
if extracted[0].lower() == "d" and extracted[1].strip("01234567"):
return None, None
try:
value = chr(int(extracted, 16))
except ValueError:
value = None
return value, extracted | def function[_peek_unicode, parameter[self, is_long]]:
constant[
Peeks ahead non-intrusively by cloning then restoring the
initial state of the parser.
Returns the unicode value is it's a valid one else None.
]
with call[name[self]._state, parameter[]] begin[:]
if compare[name[self]._current <ast.NotIn object at 0x7da2590d7190> <ast.Set object at 0x7da1b2067730>] begin[:]
<ast.Raise object at 0x7da1b2064b50>
call[name[self].inc, parameter[]]
call[name[self].mark, parameter[]]
if name[is_long] begin[:]
variable[chars] assign[=] constant[8]
if <ast.UnaryOp object at 0x7da1b20650f0> begin[:]
<ast.Tuple object at 0x7da1b2065870> assign[=] tuple[[<ast.Constant object at 0x7da1b2065840>, <ast.Constant object at 0x7da1b20655a0>]]
return[tuple[[<ast.Name object at 0x7da1b20643a0>, <ast.Name object at 0x7da1b2065ba0>]]] | keyword[def] identifier[_peek_unicode] (
identifier[self] , identifier[is_long]
):
literal[string]
keyword[with] identifier[self] . identifier[_state] ( identifier[save_marker] = keyword[True] , identifier[restore] = keyword[True] ):
keyword[if] identifier[self] . identifier[_current] keyword[not] keyword[in] { literal[string] , literal[string] }:
keyword[raise] identifier[self] . identifier[parse_error] (
identifier[InternalParserError] , literal[string]
)
identifier[self] . identifier[inc] ()
identifier[self] . identifier[mark] ()
keyword[if] identifier[is_long] :
identifier[chars] = literal[int]
keyword[else] :
identifier[chars] = literal[int]
keyword[if] keyword[not] identifier[self] . identifier[inc_n] ( identifier[chars] ):
identifier[value] , identifier[extracted] = keyword[None] , keyword[None]
keyword[else] :
identifier[extracted] = identifier[self] . identifier[extract] ()
keyword[if] identifier[extracted] [ literal[int] ]. identifier[lower] ()== literal[string] keyword[and] identifier[extracted] [ literal[int] ]. identifier[strip] ( literal[string] ):
keyword[return] keyword[None] , keyword[None]
keyword[try] :
identifier[value] = identifier[chr] ( identifier[int] ( identifier[extracted] , literal[int] ))
keyword[except] identifier[ValueError] :
identifier[value] = keyword[None]
keyword[return] identifier[value] , identifier[extracted] | def _peek_unicode(self, is_long): # type: (bool) -> Tuple[Optional[str], Optional[str]]
"\n Peeks ahead non-intrusively by cloning then restoring the\n initial state of the parser.\n\n Returns the unicode value is it's a valid one else None.\n "
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current not in {'u', 'U'}:
raise self.parse_error(InternalParserError, '_peek_unicode() entered on non-unicode value') # depends on [control=['if'], data=[]]
self.inc() # Dropping prefix
self.mark()
if is_long:
chars = 8 # depends on [control=['if'], data=[]]
else:
chars = 4
if not self.inc_n(chars):
(value, extracted) = (None, None) # depends on [control=['if'], data=[]]
else:
extracted = self.extract()
if extracted[0].lower() == 'd' and extracted[1].strip('01234567'):
return (None, None) # depends on [control=['if'], data=[]]
try:
value = chr(int(extracted, 16)) # depends on [control=['try'], data=[]]
except ValueError:
value = None # depends on [control=['except'], data=[]]
return (value, extracted) # depends on [control=['with'], data=[]] |
def rmdir(self, foldername):
""" Delete a folder from the server.
:param foldername: the folder to be deleted.
:type foldername: string
"""
current_folder = self._ftp.pwd()
try:
self.cd(foldername)
except error_perm:
print('550 Delete operation failed folder %s '
'does not exist!' % (foldername,))
else:
self.cd(current_folder)
try:
self._ftp.rmd(foldername)
except error_perm: # folder not empty
self.cd(foldername)
contents = self.ls()
#delete the files
map(self._ftp.delete, contents[0])
#delete the subfolders
map(self.rmdir, contents[1])
self.cd(current_folder)
self._ftp.rmd(foldername) | def function[rmdir, parameter[self, foldername]]:
constant[ Delete a folder from the server.
:param foldername: the folder to be deleted.
:type foldername: string
]
variable[current_folder] assign[=] call[name[self]._ftp.pwd, parameter[]]
<ast.Try object at 0x7da1afeab0a0> | keyword[def] identifier[rmdir] ( identifier[self] , identifier[foldername] ):
literal[string]
identifier[current_folder] = identifier[self] . identifier[_ftp] . identifier[pwd] ()
keyword[try] :
identifier[self] . identifier[cd] ( identifier[foldername] )
keyword[except] identifier[error_perm] :
identifier[print] ( literal[string]
literal[string] %( identifier[foldername] ,))
keyword[else] :
identifier[self] . identifier[cd] ( identifier[current_folder] )
keyword[try] :
identifier[self] . identifier[_ftp] . identifier[rmd] ( identifier[foldername] )
keyword[except] identifier[error_perm] :
identifier[self] . identifier[cd] ( identifier[foldername] )
identifier[contents] = identifier[self] . identifier[ls] ()
identifier[map] ( identifier[self] . identifier[_ftp] . identifier[delete] , identifier[contents] [ literal[int] ])
identifier[map] ( identifier[self] . identifier[rmdir] , identifier[contents] [ literal[int] ])
identifier[self] . identifier[cd] ( identifier[current_folder] )
identifier[self] . identifier[_ftp] . identifier[rmd] ( identifier[foldername] ) | def rmdir(self, foldername):
""" Delete a folder from the server.
:param foldername: the folder to be deleted.
:type foldername: string
"""
current_folder = self._ftp.pwd()
try:
self.cd(foldername) # depends on [control=['try'], data=[]]
except error_perm:
print('550 Delete operation failed folder %s does not exist!' % (foldername,)) # depends on [control=['except'], data=[]]
else:
self.cd(current_folder)
try:
self._ftp.rmd(foldername) # depends on [control=['try'], data=[]]
except error_perm: # folder not empty
self.cd(foldername)
contents = self.ls()
#delete the files
map(self._ftp.delete, contents[0])
#delete the subfolders
map(self.rmdir, contents[1])
self.cd(current_folder)
self._ftp.rmd(foldername) # depends on [control=['except'], data=[]] |
def _canonicalize(self, expression):
"""For example, changes x + y to 1.*x + 1.*y"""
expression = super(Objective, self)._canonicalize(expression)
if isinstance(expression, sympy.Basic):
expression *= 1.
else: # pragma: no cover # symengine
expression = (1. * expression).expand()
return expression | def function[_canonicalize, parameter[self, expression]]:
constant[For example, changes x + y to 1.*x + 1.*y]
variable[expression] assign[=] call[call[name[super], parameter[name[Objective], name[self]]]._canonicalize, parameter[name[expression]]]
if call[name[isinstance], parameter[name[expression], name[sympy].Basic]] begin[:]
<ast.AugAssign object at 0x7da1b0b9fdf0>
return[name[expression]] | keyword[def] identifier[_canonicalize] ( identifier[self] , identifier[expression] ):
literal[string]
identifier[expression] = identifier[super] ( identifier[Objective] , identifier[self] ). identifier[_canonicalize] ( identifier[expression] )
keyword[if] identifier[isinstance] ( identifier[expression] , identifier[sympy] . identifier[Basic] ):
identifier[expression] *= literal[int]
keyword[else] :
identifier[expression] =( literal[int] * identifier[expression] ). identifier[expand] ()
keyword[return] identifier[expression] | def _canonicalize(self, expression):
"""For example, changes x + y to 1.*x + 1.*y"""
expression = super(Objective, self)._canonicalize(expression)
if isinstance(expression, sympy.Basic):
expression *= 1.0 # depends on [control=['if'], data=[]]
else: # pragma: no cover # symengine
expression = (1.0 * expression).expand()
return expression |
def _get_query(self, cursor):
'''
Query tempalte for source Solr, sorts by id by default.
'''
query = {'q':'*:*',
'sort':'id desc',
'rows':self._rows,
'cursorMark':cursor}
if self._date_field:
query['sort'] = "{} asc, id desc".format(self._date_field)
if self._per_shard:
query['distrib'] = 'false'
return query | def function[_get_query, parameter[self, cursor]]:
constant[
Query tempalte for source Solr, sorts by id by default.
]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da1b10d79d0>, <ast.Constant object at 0x7da1b10d42e0>, <ast.Constant object at 0x7da1b10d7820>, <ast.Constant object at 0x7da1b10d47c0>], [<ast.Constant object at 0x7da1b10d74f0>, <ast.Constant object at 0x7da1b10d79a0>, <ast.Attribute object at 0x7da1b10d67d0>, <ast.Name object at 0x7da1b10d58d0>]]
if name[self]._date_field begin[:]
call[name[query]][constant[sort]] assign[=] call[constant[{} asc, id desc].format, parameter[name[self]._date_field]]
if name[self]._per_shard begin[:]
call[name[query]][constant[distrib]] assign[=] constant[false]
return[name[query]] | keyword[def] identifier[_get_query] ( identifier[self] , identifier[cursor] ):
literal[string]
identifier[query] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[_rows] ,
literal[string] : identifier[cursor] }
keyword[if] identifier[self] . identifier[_date_field] :
identifier[query] [ literal[string] ]= literal[string] . identifier[format] ( identifier[self] . identifier[_date_field] )
keyword[if] identifier[self] . identifier[_per_shard] :
identifier[query] [ literal[string] ]= literal[string]
keyword[return] identifier[query] | def _get_query(self, cursor):
"""
Query tempalte for source Solr, sorts by id by default.
"""
query = {'q': '*:*', 'sort': 'id desc', 'rows': self._rows, 'cursorMark': cursor}
if self._date_field:
query['sort'] = '{} asc, id desc'.format(self._date_field) # depends on [control=['if'], data=[]]
if self._per_shard:
query['distrib'] = 'false' # depends on [control=['if'], data=[]]
return query |
def check_install(software=None, quiet=True):
'''check_install will attempt to run the singularity command, and
return True if installed. The command line utils will not run
without this check.
Parameters
==========
software: the software to check if installed
quiet: should we be quiet? (default True)
'''
if software is None:
software = "singularity"
cmd = [software, '--version']
try:
version = run_command(cmd,software)
except: # FileNotFoundError
return False
if version is not None:
if quiet is False and version['return_code'] == 0:
version = version['message']
bot.info("Found %s version %s" % (software.upper(), version))
return True
return False | def function[check_install, parameter[software, quiet]]:
constant[check_install will attempt to run the singularity command, and
return True if installed. The command line utils will not run
without this check.
Parameters
==========
software: the software to check if installed
quiet: should we be quiet? (default True)
]
if compare[name[software] is constant[None]] begin[:]
variable[software] assign[=] constant[singularity]
variable[cmd] assign[=] list[[<ast.Name object at 0x7da1b03e3eb0>, <ast.Constant object at 0x7da1b03e09a0>]]
<ast.Try object at 0x7da1b03e3b80>
if compare[name[version] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b02b9330> begin[:]
variable[version] assign[=] call[name[version]][constant[message]]
call[name[bot].info, parameter[binary_operation[constant[Found %s version %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b05bc5b0>, <ast.Name object at 0x7da1b05bf1c0>]]]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[check_install] ( identifier[software] = keyword[None] , identifier[quiet] = keyword[True] ):
literal[string]
keyword[if] identifier[software] keyword[is] keyword[None] :
identifier[software] = literal[string]
identifier[cmd] =[ identifier[software] , literal[string] ]
keyword[try] :
identifier[version] = identifier[run_command] ( identifier[cmd] , identifier[software] )
keyword[except] :
keyword[return] keyword[False]
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[quiet] keyword[is] keyword[False] keyword[and] identifier[version] [ literal[string] ]== literal[int] :
identifier[version] = identifier[version] [ literal[string] ]
identifier[bot] . identifier[info] ( literal[string] %( identifier[software] . identifier[upper] (), identifier[version] ))
keyword[return] keyword[True]
keyword[return] keyword[False] | def check_install(software=None, quiet=True):
"""check_install will attempt to run the singularity command, and
return True if installed. The command line utils will not run
without this check.
Parameters
==========
software: the software to check if installed
quiet: should we be quiet? (default True)
"""
if software is None:
software = 'singularity' # depends on [control=['if'], data=['software']]
cmd = [software, '--version']
try:
version = run_command(cmd, software) # depends on [control=['try'], data=[]]
except: # FileNotFoundError
return False # depends on [control=['except'], data=[]]
if version is not None:
if quiet is False and version['return_code'] == 0:
version = version['message']
bot.info('Found %s version %s' % (software.upper(), version)) # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=['version']]
return False |
def listBlocksParents(self):
"""
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Mwx length 1000.
:type block_names: list
"""
try :
body = request.body.read()
data = cjson.decode(body)
data = validateJSONInputNoCopy("block", data, read=True)
#Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that
#the API can be finished in 300 second.
# YG Nov-05-2015
max_array_size = 1000
if ( 'block_names' in data.keys() and isinstance(data['block_names'], list) and len(data['block_names'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listBlocksParents is %s." %max_array_size, self.logger.exception)
return self.dbsBlock.listBlockParents(data["block_name"])
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except cjson.DecodeError as de:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (de, traceback.format_exc())
msg = "DBSReaderModel/listBlockParents. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | def function[listBlocksParents, parameter[self]]:
constant[
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Mwx length 1000.
:type block_names: list
]
<ast.Try object at 0x7da18bcc9bd0> | keyword[def] identifier[listBlocksParents] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[body] = identifier[request] . identifier[body] . identifier[read] ()
identifier[data] = identifier[cjson] . identifier[decode] ( identifier[body] )
identifier[data] = identifier[validateJSONInputNoCopy] ( literal[string] , identifier[data] , identifier[read] = keyword[True] )
identifier[max_array_size] = literal[int]
keyword[if] ( literal[string] keyword[in] identifier[data] . identifier[keys] () keyword[and] identifier[isinstance] ( identifier[data] [ literal[string] ], identifier[list] ) keyword[and] identifier[len] ( identifier[data] [ literal[string] ])> identifier[max_array_size] ):
identifier[dbsExceptionHandler] ( literal[string] ,
literal[string] % identifier[max_array_size] , identifier[self] . identifier[logger] . identifier[exception] )
keyword[return] identifier[self] . identifier[dbsBlock] . identifier[listBlockParents] ( identifier[data] [ literal[string] ])
keyword[except] identifier[dbsException] keyword[as] identifier[de] :
identifier[dbsExceptionHandler] ( identifier[de] . identifier[eCode] , identifier[de] . identifier[message] , identifier[self] . identifier[logger] . identifier[exception] , identifier[de] . identifier[serverError] )
keyword[except] identifier[cjson] . identifier[DecodeError] keyword[as] identifier[de] :
identifier[sError] = literal[string] %( identifier[de] , identifier[traceback] . identifier[format_exc] ())
identifier[msg] = literal[string] % identifier[de]
identifier[dbsExceptionHandler] ( literal[string] , identifier[msg] , identifier[self] . identifier[logger] . identifier[exception] , identifier[sError] )
keyword[except] identifier[HTTPError] keyword[as] identifier[he] :
keyword[raise] identifier[he]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[sError] = literal[string] %( identifier[ex] , identifier[traceback] . identifier[format_exc] ())
identifier[dbsExceptionHandler] ( literal[string] , identifier[dbsExceptionCode] [ literal[string] ], identifier[self] . identifier[logger] . identifier[exception] , identifier[sError] ) | def listBlocksParents(self):
"""
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Mwx length 1000.
:type block_names: list
"""
try:
body = request.body.read()
data = cjson.decode(body)
data = validateJSONInputNoCopy('block', data, read=True)
#Because CMSWEB has a 300 seconds responding time. We have to limit the array siz to make sure that
#the API can be finished in 300 second.
# YG Nov-05-2015
max_array_size = 1000
if 'block_names' in data.keys() and isinstance(data['block_names'], list) and (len(data['block_names']) > max_array_size):
dbsExceptionHandler('dbsException-invalid-input', 'The Max list length supported in listBlocksParents is %s.' % max_array_size, self.logger.exception) # depends on [control=['if'], data=[]]
return self.dbsBlock.listBlockParents(data['block_name']) # depends on [control=['try'], data=[]]
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) # depends on [control=['except'], data=['de']]
except cjson.DecodeError as de:
sError = 'DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s' % (de, traceback.format_exc())
msg = 'DBSReaderModel/listBlockParents. %s' % de
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError) # depends on [control=['except'], data=['de']]
except HTTPError as he:
raise he # depends on [control=['except'], data=['he']]
except Exception as ex:
sError = 'DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s' % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) # depends on [control=['except'], data=['ex']] |
def _import_to_py_ast(ctx: GeneratorContext, node: Import) -> GeneratedPyAST:
"""Return a Python AST node for a Basilisp `import*` expression."""
assert node.op == NodeOp.IMPORT
last = None
deps: List[ast.AST] = []
for alias in node.aliases:
safe_name = munge(alias.name)
try:
module = importlib.import_module(safe_name)
if alias.alias is not None:
ctx.add_import(sym.symbol(alias.name), module, sym.symbol(alias.alias))
else:
ctx.add_import(sym.symbol(alias.name), module)
except ModuleNotFoundError as e:
raise ImportError(
f"Python module '{alias.name}' not found", node.form, node
) from e
py_import_alias = (
munge(alias.alias)
if alias.alias is not None
else safe_name.split(".", maxsplit=1)[0]
)
deps.append(
ast.Assign(
targets=[ast.Name(id=py_import_alias, ctx=ast.Store())],
value=ast.Call(
func=_load_attr("builtins.__import__"),
args=[ast.Str(safe_name)],
keywords=[],
),
)
)
last = ast.Name(id=py_import_alias, ctx=ast.Load())
# Note that we add this import to the live running system in the above
# calls to `ctx.add_import`, however, since we compile and cache Python
# bytecode, we need to generate calls to `add_import` for the running
# namespace so when this code is reloaded from the cache, the runtime
# is correctly configured.
deps.append(
ast.Call(
func=_load_attr(f"{_NS_VAR_VALUE}.add_import"),
args=[
ast.Call(
func=_NEW_SYM_FN_NAME, args=[ast.Str(safe_name)], keywords=[]
),
last,
],
keywords=[],
)
)
assert last is not None, "import* node must have at least one import"
return GeneratedPyAST(node=last, dependencies=deps) | def function[_import_to_py_ast, parameter[ctx, node]]:
constant[Return a Python AST node for a Basilisp `import*` expression.]
assert[compare[name[node].op equal[==] name[NodeOp].IMPORT]]
variable[last] assign[=] constant[None]
<ast.AnnAssign object at 0x7da1b0213370>
for taget[name[alias]] in starred[name[node].aliases] begin[:]
variable[safe_name] assign[=] call[name[munge], parameter[name[alias].name]]
<ast.Try object at 0x7da1b0212260>
variable[py_import_alias] assign[=] <ast.IfExp object at 0x7da1b023c8b0>
call[name[deps].append, parameter[call[name[ast].Assign, parameter[]]]]
variable[last] assign[=] call[name[ast].Name, parameter[]]
call[name[deps].append, parameter[call[name[ast].Call, parameter[]]]]
assert[compare[name[last] is_not constant[None]]]
return[call[name[GeneratedPyAST], parameter[]]] | keyword[def] identifier[_import_to_py_ast] ( identifier[ctx] : identifier[GeneratorContext] , identifier[node] : identifier[Import] )-> identifier[GeneratedPyAST] :
literal[string]
keyword[assert] identifier[node] . identifier[op] == identifier[NodeOp] . identifier[IMPORT]
identifier[last] = keyword[None]
identifier[deps] : identifier[List] [ identifier[ast] . identifier[AST] ]=[]
keyword[for] identifier[alias] keyword[in] identifier[node] . identifier[aliases] :
identifier[safe_name] = identifier[munge] ( identifier[alias] . identifier[name] )
keyword[try] :
identifier[module] = identifier[importlib] . identifier[import_module] ( identifier[safe_name] )
keyword[if] identifier[alias] . identifier[alias] keyword[is] keyword[not] keyword[None] :
identifier[ctx] . identifier[add_import] ( identifier[sym] . identifier[symbol] ( identifier[alias] . identifier[name] ), identifier[module] , identifier[sym] . identifier[symbol] ( identifier[alias] . identifier[alias] ))
keyword[else] :
identifier[ctx] . identifier[add_import] ( identifier[sym] . identifier[symbol] ( identifier[alias] . identifier[name] ), identifier[module] )
keyword[except] identifier[ModuleNotFoundError] keyword[as] identifier[e] :
keyword[raise] identifier[ImportError] (
literal[string] , identifier[node] . identifier[form] , identifier[node]
) keyword[from] identifier[e]
identifier[py_import_alias] =(
identifier[munge] ( identifier[alias] . identifier[alias] )
keyword[if] identifier[alias] . identifier[alias] keyword[is] keyword[not] keyword[None]
keyword[else] identifier[safe_name] . identifier[split] ( literal[string] , identifier[maxsplit] = literal[int] )[ literal[int] ]
)
identifier[deps] . identifier[append] (
identifier[ast] . identifier[Assign] (
identifier[targets] =[ identifier[ast] . identifier[Name] ( identifier[id] = identifier[py_import_alias] , identifier[ctx] = identifier[ast] . identifier[Store] ())],
identifier[value] = identifier[ast] . identifier[Call] (
identifier[func] = identifier[_load_attr] ( literal[string] ),
identifier[args] =[ identifier[ast] . identifier[Str] ( identifier[safe_name] )],
identifier[keywords] =[],
),
)
)
identifier[last] = identifier[ast] . identifier[Name] ( identifier[id] = identifier[py_import_alias] , identifier[ctx] = identifier[ast] . identifier[Load] ())
identifier[deps] . identifier[append] (
identifier[ast] . identifier[Call] (
identifier[func] = identifier[_load_attr] ( literal[string] ),
identifier[args] =[
identifier[ast] . identifier[Call] (
identifier[func] = identifier[_NEW_SYM_FN_NAME] , identifier[args] =[ identifier[ast] . identifier[Str] ( identifier[safe_name] )], identifier[keywords] =[]
),
identifier[last] ,
],
identifier[keywords] =[],
)
)
keyword[assert] identifier[last] keyword[is] keyword[not] keyword[None] , literal[string]
keyword[return] identifier[GeneratedPyAST] ( identifier[node] = identifier[last] , identifier[dependencies] = identifier[deps] ) | def _import_to_py_ast(ctx: GeneratorContext, node: Import) -> GeneratedPyAST:
"""Return a Python AST node for a Basilisp `import*` expression."""
assert node.op == NodeOp.IMPORT
last = None
deps: List[ast.AST] = []
for alias in node.aliases:
safe_name = munge(alias.name)
try:
module = importlib.import_module(safe_name)
if alias.alias is not None:
ctx.add_import(sym.symbol(alias.name), module, sym.symbol(alias.alias)) # depends on [control=['if'], data=[]]
else:
ctx.add_import(sym.symbol(alias.name), module) # depends on [control=['try'], data=[]]
except ModuleNotFoundError as e:
raise ImportError(f"Python module '{alias.name}' not found", node.form, node) from e # depends on [control=['except'], data=['e']]
py_import_alias = munge(alias.alias) if alias.alias is not None else safe_name.split('.', maxsplit=1)[0]
deps.append(ast.Assign(targets=[ast.Name(id=py_import_alias, ctx=ast.Store())], value=ast.Call(func=_load_attr('builtins.__import__'), args=[ast.Str(safe_name)], keywords=[])))
last = ast.Name(id=py_import_alias, ctx=ast.Load())
# Note that we add this import to the live running system in the above
# calls to `ctx.add_import`, however, since we compile and cache Python
# bytecode, we need to generate calls to `add_import` for the running
# namespace so when this code is reloaded from the cache, the runtime
# is correctly configured.
deps.append(ast.Call(func=_load_attr(f'{_NS_VAR_VALUE}.add_import'), args=[ast.Call(func=_NEW_SYM_FN_NAME, args=[ast.Str(safe_name)], keywords=[]), last], keywords=[])) # depends on [control=['for'], data=['alias']]
assert last is not None, 'import* node must have at least one import'
return GeneratedPyAST(node=last, dependencies=deps) |
def mtf_resnet_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 32
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_layers", 6)
# Share weights between input and target embeddings
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 32)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("row_blocks", 1)
hparams.add_hparam("col_blocks", 1)
hparams.add_hparam("rows_size", 32)
hparams.add_hparam("cols_size", 32)
# Model-specific parameters
hparams.add_hparam("layer_sizes", [3, 4, 6, 3])
hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512])
hparams.add_hparam("is_cifar", False)
# Variable init
hparams.initializer = "normal_unit_scaling"
hparams.initializer_gain = 2.
# TODO(nikip): Change optimization scheme?
hparams.learning_rate = 0.1
return hparams | def function[mtf_resnet_base, parameter[]]:
constant[Set of hyperparameters.]
variable[hparams] assign[=] call[name[common_hparams].basic_params1, parameter[]]
name[hparams].no_data_parallelism assign[=] constant[True]
name[hparams].use_fixed_batch_size assign[=] constant[True]
name[hparams].batch_size assign[=] constant[32]
name[hparams].max_length assign[=] constant[3072]
name[hparams].hidden_size assign[=] constant[256]
name[hparams].label_smoothing assign[=] constant[0.0]
call[name[hparams].add_hparam, parameter[constant[mesh_shape], constant[batch:8]]]
call[name[hparams].add_hparam, parameter[constant[layout], constant[batch:batch]]]
call[name[hparams].add_hparam, parameter[constant[filter_size], constant[1024]]]
call[name[hparams].add_hparam, parameter[constant[num_layers], constant[6]]]
name[hparams].shared_embedding assign[=] constant[True]
name[hparams].shared_embedding_and_softmax_weights assign[=] constant[True]
name[hparams].optimizer assign[=] constant[Adafactor]
name[hparams].learning_rate_schedule assign[=] constant[rsqrt_decay]
name[hparams].learning_rate_warmup_steps assign[=] constant[10000]
call[name[hparams].add_hparam, parameter[constant[d_kv], constant[32]]]
call[name[hparams].add_hparam, parameter[constant[img_len], constant[32]]]
call[name[hparams].add_hparam, parameter[constant[num_channels], constant[3]]]
call[name[hparams].add_hparam, parameter[constant[row_blocks], constant[1]]]
call[name[hparams].add_hparam, parameter[constant[col_blocks], constant[1]]]
call[name[hparams].add_hparam, parameter[constant[rows_size], constant[32]]]
call[name[hparams].add_hparam, parameter[constant[cols_size], constant[32]]]
call[name[hparams].add_hparam, parameter[constant[layer_sizes], list[[<ast.Constant object at 0x7da1b1e14190>, <ast.Constant object at 0x7da1b1e14460>, <ast.Constant object at 0x7da1b1e146d0>, <ast.Constant object at 0x7da1b1e14730>]]]]
call[name[hparams].add_hparam, parameter[constant[filter_sizes], list[[<ast.Constant object at 0x7da1b1e17eb0>, <ast.Constant object at 0x7da1b1e16a10>, <ast.Constant object at 0x7da1b1e143a0>, <ast.Constant object at 0x7da1b1e14970>, <ast.Constant object at 0x7da1b1e15330>]]]]
call[name[hparams].add_hparam, parameter[constant[is_cifar], constant[False]]]
name[hparams].initializer assign[=] constant[normal_unit_scaling]
name[hparams].initializer_gain assign[=] constant[2.0]
name[hparams].learning_rate assign[=] constant[0.1]
return[name[hparams]] | keyword[def] identifier[mtf_resnet_base] ():
literal[string]
identifier[hparams] = identifier[common_hparams] . identifier[basic_params1] ()
identifier[hparams] . identifier[no_data_parallelism] = keyword[True]
identifier[hparams] . identifier[use_fixed_batch_size] = keyword[True]
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[max_length] = literal[int]
identifier[hparams] . identifier[hidden_size] = literal[int]
identifier[hparams] . identifier[label_smoothing] = literal[int]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[shared_embedding] = keyword[True]
identifier[hparams] . identifier[shared_embedding_and_softmax_weights] = keyword[True]
identifier[hparams] . identifier[optimizer] = literal[string]
identifier[hparams] . identifier[learning_rate_schedule] = literal[string]
identifier[hparams] . identifier[learning_rate_warmup_steps] = literal[int]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] ,[ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[hparams] . identifier[add_hparam] ( literal[string] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[initializer] = literal[string]
identifier[hparams] . identifier[initializer_gain] = literal[int]
identifier[hparams] . identifier[learning_rate] = literal[int]
keyword[return] identifier[hparams] | def mtf_resnet_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 32
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam('mesh_shape', 'batch:8')
hparams.add_hparam('layout', 'batch:batch')
hparams.add_hparam('filter_size', 1024)
hparams.add_hparam('num_layers', 6)
# Share weights between input and target embeddings
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = 'Adafactor'
hparams.learning_rate_schedule = 'rsqrt_decay'
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam('d_kv', 32)
# Image related hparams
hparams.add_hparam('img_len', 32)
hparams.add_hparam('num_channels', 3)
hparams.add_hparam('row_blocks', 1)
hparams.add_hparam('col_blocks', 1)
hparams.add_hparam('rows_size', 32)
hparams.add_hparam('cols_size', 32)
# Model-specific parameters
hparams.add_hparam('layer_sizes', [3, 4, 6, 3])
hparams.add_hparam('filter_sizes', [64, 64, 128, 256, 512])
hparams.add_hparam('is_cifar', False)
# Variable init
hparams.initializer = 'normal_unit_scaling'
hparams.initializer_gain = 2.0
# TODO(nikip): Change optimization scheme?
hparams.learning_rate = 0.1
return hparams |
def get_vocab(docs):
""" Build a DataFrame containing all the words in the docs provided along with their POS tags etc
>>> doc = nlp("Hey Mr. Tangerine Man!")
<BLANKLINE>
...
>>> get_vocab([doc])
word pos tag dep ent_type ent_iob sentiment
0 ! PUNCT . punct O 0.0
1 Hey INTJ UH intj O 0.0
2 Man NOUN NN ROOT PERSON I 0.0
3 Mr. PROPN NNP compound O 0.0
4 Tangerine PROPN NNP compound PERSON B 0.0
"""
if isinstance(docs, spacy.tokens.doc.Doc):
return get_vocab([docs])
vocab = set()
for doc in tqdm(docs):
for tok in doc:
vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment))
# TODO: add ent type info and other flags, e.g. like_url, like_email, etc
return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split()) | def function[get_vocab, parameter[docs]]:
constant[ Build a DataFrame containing all the words in the docs provided along with their POS tags etc
>>> doc = nlp("Hey Mr. Tangerine Man!")
<BLANKLINE>
...
>>> get_vocab([doc])
word pos tag dep ent_type ent_iob sentiment
0 ! PUNCT . punct O 0.0
1 Hey INTJ UH intj O 0.0
2 Man NOUN NN ROOT PERSON I 0.0
3 Mr. PROPN NNP compound O 0.0
4 Tangerine PROPN NNP compound PERSON B 0.0
]
if call[name[isinstance], parameter[name[docs], name[spacy].tokens.doc.Doc]] begin[:]
return[call[name[get_vocab], parameter[list[[<ast.Name object at 0x7da18c4cf760>]]]]]
variable[vocab] assign[=] call[name[set], parameter[]]
for taget[name[doc]] in starred[call[name[tqdm], parameter[name[docs]]]] begin[:]
for taget[name[tok]] in starred[name[doc]] begin[:]
call[name[vocab].add, parameter[tuple[[<ast.Attribute object at 0x7da18c4cee60>, <ast.Attribute object at 0x7da18c4cc880>, <ast.Attribute object at 0x7da18c4ce890>, <ast.Attribute object at 0x7da18c4cc460>, <ast.Attribute object at 0x7da18c4cc820>, <ast.Attribute object at 0x7da18c4cc040>, <ast.Attribute object at 0x7da18c4cf550>]]]]
return[call[name[pd].DataFrame, parameter[call[name[sorted], parameter[name[vocab]]]]]] | keyword[def] identifier[get_vocab] ( identifier[docs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[docs] , identifier[spacy] . identifier[tokens] . identifier[doc] . identifier[Doc] ):
keyword[return] identifier[get_vocab] ([ identifier[docs] ])
identifier[vocab] = identifier[set] ()
keyword[for] identifier[doc] keyword[in] identifier[tqdm] ( identifier[docs] ):
keyword[for] identifier[tok] keyword[in] identifier[doc] :
identifier[vocab] . identifier[add] (( identifier[tok] . identifier[text] , identifier[tok] . identifier[pos_] , identifier[tok] . identifier[tag_] , identifier[tok] . identifier[dep_] , identifier[tok] . identifier[ent_type_] , identifier[tok] . identifier[ent_iob_] , identifier[tok] . identifier[sentiment] ))
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[sorted] ( identifier[vocab] ), identifier[columns] = literal[string] . identifier[split] ()) | def get_vocab(docs):
""" Build a DataFrame containing all the words in the docs provided along with their POS tags etc
>>> doc = nlp("Hey Mr. Tangerine Man!")
<BLANKLINE>
...
>>> get_vocab([doc])
word pos tag dep ent_type ent_iob sentiment
0 ! PUNCT . punct O 0.0
1 Hey INTJ UH intj O 0.0
2 Man NOUN NN ROOT PERSON I 0.0
3 Mr. PROPN NNP compound O 0.0
4 Tangerine PROPN NNP compound PERSON B 0.0
"""
if isinstance(docs, spacy.tokens.doc.Doc):
return get_vocab([docs]) # depends on [control=['if'], data=[]]
vocab = set()
for doc in tqdm(docs):
for tok in doc:
vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment)) # depends on [control=['for'], data=['tok']] # depends on [control=['for'], data=['doc']]
# TODO: add ent type info and other flags, e.g. like_url, like_email, etc
return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split()) |
def collect_env_info():
"""
Returns:
str - a table contains important information about the environment
"""
data = []
data.append(("sys.platform", sys.platform))
data.append(("Python", sys.version.replace("\n", "")))
data.append(("Tensorpack", __git_version__))
data.append(("Numpy", np.__version__))
data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION))
data.append(("TF Compiler Version", tfv1.COMPILER_VERSION))
has_cuda = tf.test.is_built_with_cuda()
data.append(("TF CUDA support", has_cuda))
try:
from tensorflow.python.framework import test_util
data.append(("TF MKL support", test_util.IsMklEnabled()))
except Exception:
pass
try:
from tensorflow.python.framework import test_util
data.append(("TF XLA support", test_util.is_xla_enabled()))
except Exception:
pass
if has_cuda:
data.append(("Nvidia Driver", find_library("nvidia-ml")))
data.append(("CUDA", find_library("cudart")))
data.append(("CUDNN", find_library("cudnn")))
data.append(("NCCL", find_library("nccl")))
# List devices with NVML
data.append(
("CUDA_VISIBLE_DEVICES",
os.environ.get("CUDA_VISIBLE_DEVICES", str(None))))
try:
devs = defaultdict(list)
with NVMLContext() as ctx:
for idx, dev in enumerate(ctx.devices()):
devs[dev.name()].append(str(idx))
for devname, devids in devs.items():
data.append(
("GPU " + ",".join(devids), devname))
except Exception:
data.append(("GPU", "Not found with NVML"))
vram = psutil.virtual_memory()
data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3)))
data.append(("CPU Count", psutil.cpu_count()))
# Other important dependencies:
try:
import horovod
data.append(("horovod", horovod.__version__))
except ImportError:
pass
try:
import cv2
data.append(("cv2", cv2.__version__))
except ImportError:
pass
import msgpack
data.append(("msgpack", ".".join([str(x) for x in msgpack.version])))
has_prctl = True
try:
import prctl
_ = prctl.set_pdeathsig # noqa
except Exception:
has_prctl = False
data.append(("python-prctl", has_prctl))
return tabulate(data) | def function[collect_env_info, parameter[]]:
constant[
Returns:
str - a table contains important information about the environment
]
variable[data] assign[=] list[[]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bcca2c0>, <ast.Attribute object at 0x7da18bcc9cf0>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bccb940>, <ast.Call object at 0x7da18bcc8160>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bcca080>, <ast.Name object at 0x7da18bcc9a50>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bcc89a0>, <ast.Attribute object at 0x7da18bccb400>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bccb3a0>, <ast.BinOp object at 0x7da18bcc9b40>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bcca920>, <ast.Attribute object at 0x7da18bccbbe0>]]]]
variable[has_cuda] assign[=] call[name[tf].test.is_built_with_cuda, parameter[]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bcc9360>, <ast.Name object at 0x7da18bcc83a0>]]]]
<ast.Try object at 0x7da18bccbc70>
<ast.Try object at 0x7da18bcc8e80>
if name[has_cuda] begin[:]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bccb4f0>, <ast.Call object at 0x7da18bcc9d50>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18bccb970>, <ast.Call object at 0x7da18bcc91e0>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede4d90>, <ast.Call object at 0x7da18ede5480>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede5ff0>, <ast.Call object at 0x7da18ede6200>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede62f0>, <ast.Call object at 0x7da18ede69b0>]]]]
<ast.Try object at 0x7da18ede5000>
variable[vram] assign[=] call[name[psutil].virtual_memory, parameter[]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede5960>, <ast.Call object at 0x7da18ede5db0>]]]]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede4670>, <ast.Call object at 0x7da18ede61a0>]]]]
<ast.Try object at 0x7da18ede5150>
<ast.Try object at 0x7da18ede5d80>
import module[msgpack]
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18ede6f50>, <ast.Call object at 0x7da18ede6350>]]]]
variable[has_prctl] assign[=] constant[True]
<ast.Try object at 0x7da18ede47f0>
call[name[data].append, parameter[tuple[[<ast.Constant object at 0x7da18f720250>, <ast.Name object at 0x7da18f7201c0>]]]]
return[call[name[tabulate], parameter[name[data]]]] | keyword[def] identifier[collect_env_info] ():
literal[string]
identifier[data] =[]
identifier[data] . identifier[append] (( literal[string] , identifier[sys] . identifier[platform] ))
identifier[data] . identifier[append] (( literal[string] , identifier[sys] . identifier[version] . identifier[replace] ( literal[string] , literal[string] )))
identifier[data] . identifier[append] (( literal[string] , identifier[__git_version__] ))
identifier[data] . identifier[append] (( literal[string] , identifier[np] . identifier[__version__] ))
identifier[data] . identifier[append] (( literal[string] , identifier[tfv1] . identifier[VERSION] + literal[string] + identifier[tfv1] . identifier[GIT_VERSION] ))
identifier[data] . identifier[append] (( literal[string] , identifier[tfv1] . identifier[COMPILER_VERSION] ))
identifier[has_cuda] = identifier[tf] . identifier[test] . identifier[is_built_with_cuda] ()
identifier[data] . identifier[append] (( literal[string] , identifier[has_cuda] ))
keyword[try] :
keyword[from] identifier[tensorflow] . identifier[python] . identifier[framework] keyword[import] identifier[test_util]
identifier[data] . identifier[append] (( literal[string] , identifier[test_util] . identifier[IsMklEnabled] ()))
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
keyword[from] identifier[tensorflow] . identifier[python] . identifier[framework] keyword[import] identifier[test_util]
identifier[data] . identifier[append] (( literal[string] , identifier[test_util] . identifier[is_xla_enabled] ()))
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] identifier[has_cuda] :
identifier[data] . identifier[append] (( literal[string] , identifier[find_library] ( literal[string] )))
identifier[data] . identifier[append] (( literal[string] , identifier[find_library] ( literal[string] )))
identifier[data] . identifier[append] (( literal[string] , identifier[find_library] ( literal[string] )))
identifier[data] . identifier[append] (( literal[string] , identifier[find_library] ( literal[string] )))
identifier[data] . identifier[append] (
( literal[string] ,
identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[str] ( keyword[None] ))))
keyword[try] :
identifier[devs] = identifier[defaultdict] ( identifier[list] )
keyword[with] identifier[NVMLContext] () keyword[as] identifier[ctx] :
keyword[for] identifier[idx] , identifier[dev] keyword[in] identifier[enumerate] ( identifier[ctx] . identifier[devices] ()):
identifier[devs] [ identifier[dev] . identifier[name] ()]. identifier[append] ( identifier[str] ( identifier[idx] ))
keyword[for] identifier[devname] , identifier[devids] keyword[in] identifier[devs] . identifier[items] ():
identifier[data] . identifier[append] (
( literal[string] + literal[string] . identifier[join] ( identifier[devids] ), identifier[devname] ))
keyword[except] identifier[Exception] :
identifier[data] . identifier[append] (( literal[string] , literal[string] ))
identifier[vram] = identifier[psutil] . identifier[virtual_memory] ()
identifier[data] . identifier[append] (( literal[string] , literal[string] . identifier[format] ( identifier[vram] . identifier[available] / literal[int] ** literal[int] , identifier[vram] . identifier[total] / literal[int] ** literal[int] )))
identifier[data] . identifier[append] (( literal[string] , identifier[psutil] . identifier[cpu_count] ()))
keyword[try] :
keyword[import] identifier[horovod]
identifier[data] . identifier[append] (( literal[string] , identifier[horovod] . identifier[__version__] ))
keyword[except] identifier[ImportError] :
keyword[pass]
keyword[try] :
keyword[import] identifier[cv2]
identifier[data] . identifier[append] (( literal[string] , identifier[cv2] . identifier[__version__] ))
keyword[except] identifier[ImportError] :
keyword[pass]
keyword[import] identifier[msgpack]
identifier[data] . identifier[append] (( literal[string] , literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[msgpack] . identifier[version] ])))
identifier[has_prctl] = keyword[True]
keyword[try] :
keyword[import] identifier[prctl]
identifier[_] = identifier[prctl] . identifier[set_pdeathsig]
keyword[except] identifier[Exception] :
identifier[has_prctl] = keyword[False]
identifier[data] . identifier[append] (( literal[string] , identifier[has_prctl] ))
keyword[return] identifier[tabulate] ( identifier[data] ) | def collect_env_info():
"""
Returns:
str - a table contains important information about the environment
"""
data = []
data.append(('sys.platform', sys.platform))
data.append(('Python', sys.version.replace('\n', '')))
data.append(('Tensorpack', __git_version__))
data.append(('Numpy', np.__version__))
data.append(('TensorFlow', tfv1.VERSION + '/' + tfv1.GIT_VERSION))
data.append(('TF Compiler Version', tfv1.COMPILER_VERSION))
has_cuda = tf.test.is_built_with_cuda()
data.append(('TF CUDA support', has_cuda))
try:
from tensorflow.python.framework import test_util
data.append(('TF MKL support', test_util.IsMklEnabled())) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
try:
from tensorflow.python.framework import test_util
data.append(('TF XLA support', test_util.is_xla_enabled())) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
if has_cuda:
data.append(('Nvidia Driver', find_library('nvidia-ml')))
data.append(('CUDA', find_library('cudart')))
data.append(('CUDNN', find_library('cudnn')))
data.append(('NCCL', find_library('nccl')))
# List devices with NVML
data.append(('CUDA_VISIBLE_DEVICES', os.environ.get('CUDA_VISIBLE_DEVICES', str(None))))
try:
devs = defaultdict(list)
with NVMLContext() as ctx:
for (idx, dev) in enumerate(ctx.devices()):
devs[dev.name()].append(str(idx)) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['ctx']]
for (devname, devids) in devs.items():
data.append(('GPU ' + ','.join(devids), devname)) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
data.append(('GPU', 'Not found with NVML')) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
vram = psutil.virtual_memory()
data.append(('Free RAM', '{:.2f}/{:.2f} GB'.format(vram.available / 1024 ** 3, vram.total / 1024 ** 3)))
data.append(('CPU Count', psutil.cpu_count()))
# Other important dependencies:
try:
import horovod
data.append(('horovod', horovod.__version__)) # depends on [control=['try'], data=[]]
except ImportError:
pass # depends on [control=['except'], data=[]]
try:
import cv2
data.append(('cv2', cv2.__version__)) # depends on [control=['try'], data=[]]
except ImportError:
pass # depends on [control=['except'], data=[]]
import msgpack
data.append(('msgpack', '.'.join([str(x) for x in msgpack.version])))
has_prctl = True
try:
import prctl
_ = prctl.set_pdeathsig # noqa # depends on [control=['try'], data=[]]
except Exception:
has_prctl = False # depends on [control=['except'], data=[]]
data.append(('python-prctl', has_prctl))
return tabulate(data) |
def _execute(self, backend_args, archive_args):
"""Execute a backend of Perceval.
Run the backend of Perceval assigned to this job using the
given arguments. It will raise an `AttributeError` when any of
the required parameters to run the backend are not found.
Other exceptions related to the execution of the backend
will be raised too.
This method will return an iterator of the items fetched
by the backend. These items will include some metadata
related to this job.
It will also be possible to retrieve the items from the
archive setting to `True` the parameter `fetch_from_archive`.
:param backend_args: arguments to execute the backend
:param archive_args: archive arguments
:returns: iterator of items fetched by the backend
:raises AttributeError: raised when any of the required
parameters is not found
"""
if not archive_args or not archive_args['fetch_from_archive']:
return perceval.backend.fetch(self._bklass, backend_args, self.category,
manager=self.archive_manager)
else:
return perceval.backend.fetch_from_archive(self._bklass, backend_args,
self.archive_manager, self.category,
archive_args['archived_after']) | def function[_execute, parameter[self, backend_args, archive_args]]:
constant[Execute a backend of Perceval.
Run the backend of Perceval assigned to this job using the
given arguments. It will raise an `AttributeError` when any of
the required parameters to run the backend are not found.
Other exceptions related to the execution of the backend
will be raised too.
This method will return an iterator of the items fetched
by the backend. These items will include some metadata
related to this job.
It will also be possible to retrieve the items from the
archive setting to `True` the parameter `fetch_from_archive`.
:param backend_args: arguments to execute the backend
:param archive_args: archive arguments
:returns: iterator of items fetched by the backend
:raises AttributeError: raised when any of the required
parameters is not found
]
if <ast.BoolOp object at 0x7da20c6ab940> begin[:]
return[call[name[perceval].backend.fetch, parameter[name[self]._bklass, name[backend_args], name[self].category]]] | keyword[def] identifier[_execute] ( identifier[self] , identifier[backend_args] , identifier[archive_args] ):
literal[string]
keyword[if] keyword[not] identifier[archive_args] keyword[or] keyword[not] identifier[archive_args] [ literal[string] ]:
keyword[return] identifier[perceval] . identifier[backend] . identifier[fetch] ( identifier[self] . identifier[_bklass] , identifier[backend_args] , identifier[self] . identifier[category] ,
identifier[manager] = identifier[self] . identifier[archive_manager] )
keyword[else] :
keyword[return] identifier[perceval] . identifier[backend] . identifier[fetch_from_archive] ( identifier[self] . identifier[_bklass] , identifier[backend_args] ,
identifier[self] . identifier[archive_manager] , identifier[self] . identifier[category] ,
identifier[archive_args] [ literal[string] ]) | def _execute(self, backend_args, archive_args):
"""Execute a backend of Perceval.
Run the backend of Perceval assigned to this job using the
given arguments. It will raise an `AttributeError` when any of
the required parameters to run the backend are not found.
Other exceptions related to the execution of the backend
will be raised too.
This method will return an iterator of the items fetched
by the backend. These items will include some metadata
related to this job.
It will also be possible to retrieve the items from the
archive setting to `True` the parameter `fetch_from_archive`.
:param backend_args: arguments to execute the backend
:param archive_args: archive arguments
:returns: iterator of items fetched by the backend
:raises AttributeError: raised when any of the required
parameters is not found
"""
if not archive_args or not archive_args['fetch_from_archive']:
return perceval.backend.fetch(self._bklass, backend_args, self.category, manager=self.archive_manager) # depends on [control=['if'], data=[]]
else:
return perceval.backend.fetch_from_archive(self._bklass, backend_args, self.archive_manager, self.category, archive_args['archived_after']) |
def run(cmd):
""" Execute a shell command.
Both envoy and python-sh failed me...
commands, although deprecated, feels like the easiest tool to use.
"""
status, output = commands.getstatusoutput(cmd)
if status:
print(output)
return status == 0 | def function[run, parameter[cmd]]:
constant[ Execute a shell command.
Both envoy and python-sh failed me...
commands, although deprecated, feels like the easiest tool to use.
]
<ast.Tuple object at 0x7da1b198c280> assign[=] call[name[commands].getstatusoutput, parameter[name[cmd]]]
if name[status] begin[:]
call[name[print], parameter[name[output]]]
return[compare[name[status] equal[==] constant[0]]] | keyword[def] identifier[run] ( identifier[cmd] ):
literal[string]
identifier[status] , identifier[output] = identifier[commands] . identifier[getstatusoutput] ( identifier[cmd] )
keyword[if] identifier[status] :
identifier[print] ( identifier[output] )
keyword[return] identifier[status] == literal[int] | def run(cmd):
""" Execute a shell command.
Both envoy and python-sh failed me...
commands, although deprecated, feels like the easiest tool to use.
"""
(status, output) = commands.getstatusoutput(cmd)
if status:
print(output) # depends on [control=['if'], data=[]]
return status == 0 |
def check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
log.warning('Wrong permissions for %s, ignoring content', signing_file)
return False
mtime = os.path.getmtime(signing_file)
if self.signing_files.get(signing_file, {}).get('mtime') != mtime:
self.signing_files.setdefault(signing_file, {})['mtime'] = mtime
with salt.utils.files.fopen(signing_file, 'r') as fp_:
self.signing_files[signing_file]['data'] = [
entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith('#')
]
return any(salt.utils.stringutils.expr_match(keyid, line) for line
in self.signing_files[signing_file].get('data', [])) | def function[check_signing_file, parameter[self, keyid, signing_file]]:
constant[
Check a keyid for membership in a signing file
]
if <ast.BoolOp object at 0x7da1b20bbfd0> begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b20ba200> begin[:]
call[name[log].warning, parameter[constant[Wrong permissions for %s, ignoring content], name[signing_file]]]
return[constant[False]]
variable[mtime] assign[=] call[name[os].path.getmtime, parameter[name[signing_file]]]
if compare[call[call[name[self].signing_files.get, parameter[name[signing_file], dictionary[[], []]]].get, parameter[constant[mtime]]] not_equal[!=] name[mtime]] begin[:]
call[call[name[self].signing_files.setdefault, parameter[name[signing_file], dictionary[[], []]]]][constant[mtime]] assign[=] name[mtime]
with call[name[salt].utils.files.fopen, parameter[name[signing_file], constant[r]]] begin[:]
call[call[name[self].signing_files][name[signing_file]]][constant[data]] assign[=] <ast.ListComp object at 0x7da1b20b9e70>
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b20babc0>]]] | keyword[def] identifier[check_signing_file] ( identifier[self] , identifier[keyid] , identifier[signing_file] ):
literal[string]
keyword[if] keyword[not] identifier[signing_file] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[signing_file] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[check_permissions] ( identifier[signing_file] ):
identifier[log] . identifier[warning] ( literal[string] , identifier[signing_file] )
keyword[return] keyword[False]
identifier[mtime] = identifier[os] . identifier[path] . identifier[getmtime] ( identifier[signing_file] )
keyword[if] identifier[self] . identifier[signing_files] . identifier[get] ( identifier[signing_file] ,{}). identifier[get] ( literal[string] )!= identifier[mtime] :
identifier[self] . identifier[signing_files] . identifier[setdefault] ( identifier[signing_file] ,{})[ literal[string] ]= identifier[mtime]
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[signing_file] , literal[string] ) keyword[as] identifier[fp_] :
identifier[self] . identifier[signing_files] [ identifier[signing_file] ][ literal[string] ]=[
identifier[entry] keyword[for] identifier[entry] keyword[in] [ identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[fp_] ] keyword[if] keyword[not] identifier[entry] . identifier[strip] (). identifier[startswith] ( literal[string] )
]
keyword[return] identifier[any] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[expr_match] ( identifier[keyid] , identifier[line] ) keyword[for] identifier[line]
keyword[in] identifier[self] . identifier[signing_files] [ identifier[signing_file] ]. identifier[get] ( literal[string] ,[])) | def check_signing_file(self, keyid, signing_file):
"""
Check a keyid for membership in a signing file
"""
if not signing_file or not os.path.exists(signing_file):
return False # depends on [control=['if'], data=[]]
if not self.check_permissions(signing_file):
log.warning('Wrong permissions for %s, ignoring content', signing_file)
return False # depends on [control=['if'], data=[]]
mtime = os.path.getmtime(signing_file)
if self.signing_files.get(signing_file, {}).get('mtime') != mtime:
self.signing_files.setdefault(signing_file, {})['mtime'] = mtime
with salt.utils.files.fopen(signing_file, 'r') as fp_:
self.signing_files[signing_file]['data'] = [entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith('#')] # depends on [control=['with'], data=['fp_']] # depends on [control=['if'], data=['mtime']]
return any((salt.utils.stringutils.expr_match(keyid, line) for line in self.signing_files[signing_file].get('data', []))) |
def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest() | def function[aes_kdf, parameter[key, rounds, password, keyfile]]:
constant[Set up a context for AES128-ECB encryption to find transformed_key]
variable[cipher] assign[=] call[name[AES].new, parameter[name[key], name[AES].MODE_ECB]]
variable[key_composite] assign[=] call[name[compute_key_composite], parameter[]]
variable[transformed_key] assign[=] name[key_composite]
for taget[name[_]] in starred[call[name[range], parameter[constant[0], name[rounds]]]] begin[:]
variable[transformed_key] assign[=] call[name[cipher].encrypt, parameter[name[transformed_key]]]
return[call[call[name[hashlib].sha256, parameter[name[transformed_key]]].digest, parameter[]]] | keyword[def] identifier[aes_kdf] ( identifier[key] , identifier[rounds] , identifier[password] = keyword[None] , identifier[keyfile] = keyword[None] ):
literal[string]
identifier[cipher] = identifier[AES] . identifier[new] ( identifier[key] , identifier[AES] . identifier[MODE_ECB] )
identifier[key_composite] = identifier[compute_key_composite] (
identifier[password] = identifier[password] ,
identifier[keyfile] = identifier[keyfile]
)
identifier[transformed_key] = identifier[key_composite]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[rounds] ):
identifier[transformed_key] = identifier[cipher] . identifier[encrypt] ( identifier[transformed_key] )
keyword[return] identifier[hashlib] . identifier[sha256] ( identifier[transformed_key] ). identifier[digest] () | def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(password=password, keyfile=keyfile)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key) # depends on [control=['for'], data=[]]
return hashlib.sha256(transformed_key).digest() |
def read_fcs_header_segment(buf, begin=0):
"""
Read HEADER segment of FCS file.
Parameters
----------
buf : file-like object
Buffer containing data to interpret as HEADER segment.
begin : int
Offset (in bytes) to first byte of HEADER segment in `buf`.
Returns
-------
header : namedtuple
Version information and byte offset values of other FCS segments
(see FCS standards for more information) in the following order:
- version : str
- text_begin : int
- text_end : int
- data_begin : int
- data_end : int
- analysis_begin : int
- analysis_end : int
Notes
-----
Blank ANALYSIS segment offsets are converted to zeros.
OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_).
References
----------
.. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,
"Data file standard for flow cytometry. Data File Standards
Committee of the Society for Analytical Cytology," Cytometry vol
11, pp 323-332, 1990, PMID 2340769.
.. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,
J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow
cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997,
PMID 9181300.
.. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry,
version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID
19937951.
"""
fields = [
'version',
'text_begin',
'text_end',
'data_begin',
'data_end',
'analysis_begin',
'analysis_end']
FCSHeader = collections.namedtuple('FCSHeader', fields)
field_values = []
buf.seek(begin)
field_values.append(buf.read(10).decode(encoding).rstrip()) # version
field_values.append(int(buf.read(8))) # text_begin
field_values.append(int(buf.read(8))) # text_end
field_values.append(int(buf.read(8))) # data_begin
field_values.append(int(buf.read(8))) # data_end
fv = buf.read(8).decode(encoding) # analysis_begin
field_values.append(0 if fv == ' '*8 else int(fv))
fv = buf.read(8).decode(encoding) # analysis_end
field_values.append(0 if fv == ' '*8 else int(fv))
header = FCSHeader._make(field_values)
return header | def function[read_fcs_header_segment, parameter[buf, begin]]:
constant[
Read HEADER segment of FCS file.
Parameters
----------
buf : file-like object
Buffer containing data to interpret as HEADER segment.
begin : int
Offset (in bytes) to first byte of HEADER segment in `buf`.
Returns
-------
header : namedtuple
Version information and byte offset values of other FCS segments
(see FCS standards for more information) in the following order:
- version : str
- text_begin : int
- text_end : int
- data_begin : int
- data_end : int
- analysis_begin : int
- analysis_end : int
Notes
-----
Blank ANALYSIS segment offsets are converted to zeros.
OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_).
References
----------
.. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,
"Data file standard for flow cytometry. Data File Standards
Committee of the Society for Analytical Cytology," Cytometry vol
11, pp 323-332, 1990, PMID 2340769.
.. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,
J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow
cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997,
PMID 9181300.
.. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry,
version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID
19937951.
]
variable[fields] assign[=] list[[<ast.Constant object at 0x7da1b1bac280>, <ast.Constant object at 0x7da1b1baf700>, <ast.Constant object at 0x7da1b1badcc0>, <ast.Constant object at 0x7da1b1bad3c0>, <ast.Constant object at 0x7da1b1badb70>, <ast.Constant object at 0x7da1b1bae320>, <ast.Constant object at 0x7da1b1baceb0>]]
variable[FCSHeader] assign[=] call[name[collections].namedtuple, parameter[constant[FCSHeader], name[fields]]]
variable[field_values] assign[=] list[[]]
call[name[buf].seek, parameter[name[begin]]]
call[name[field_values].append, parameter[call[call[call[name[buf].read, parameter[constant[10]]].decode, parameter[name[encoding]]].rstrip, parameter[]]]]
call[name[field_values].append, parameter[call[name[int], parameter[call[name[buf].read, parameter[constant[8]]]]]]]
call[name[field_values].append, parameter[call[name[int], parameter[call[name[buf].read, parameter[constant[8]]]]]]]
call[name[field_values].append, parameter[call[name[int], parameter[call[name[buf].read, parameter[constant[8]]]]]]]
call[name[field_values].append, parameter[call[name[int], parameter[call[name[buf].read, parameter[constant[8]]]]]]]
variable[fv] assign[=] call[call[name[buf].read, parameter[constant[8]]].decode, parameter[name[encoding]]]
call[name[field_values].append, parameter[<ast.IfExp object at 0x7da1b1caec20>]]
variable[fv] assign[=] call[call[name[buf].read, parameter[constant[8]]].decode, parameter[name[encoding]]]
call[name[field_values].append, parameter[<ast.IfExp object at 0x7da1b1cae740>]]
variable[header] assign[=] call[name[FCSHeader]._make, parameter[name[field_values]]]
return[name[header]] | keyword[def] identifier[read_fcs_header_segment] ( identifier[buf] , identifier[begin] = literal[int] ):
literal[string]
identifier[fields] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[FCSHeader] = identifier[collections] . identifier[namedtuple] ( literal[string] , identifier[fields] )
identifier[field_values] =[]
identifier[buf] . identifier[seek] ( identifier[begin] )
identifier[field_values] . identifier[append] ( identifier[buf] . identifier[read] ( literal[int] ). identifier[decode] ( identifier[encoding] ). identifier[rstrip] ())
identifier[field_values] . identifier[append] ( identifier[int] ( identifier[buf] . identifier[read] ( literal[int] )))
identifier[field_values] . identifier[append] ( identifier[int] ( identifier[buf] . identifier[read] ( literal[int] )))
identifier[field_values] . identifier[append] ( identifier[int] ( identifier[buf] . identifier[read] ( literal[int] )))
identifier[field_values] . identifier[append] ( identifier[int] ( identifier[buf] . identifier[read] ( literal[int] )))
identifier[fv] = identifier[buf] . identifier[read] ( literal[int] ). identifier[decode] ( identifier[encoding] )
identifier[field_values] . identifier[append] ( literal[int] keyword[if] identifier[fv] == literal[string] * literal[int] keyword[else] identifier[int] ( identifier[fv] ))
identifier[fv] = identifier[buf] . identifier[read] ( literal[int] ). identifier[decode] ( identifier[encoding] )
identifier[field_values] . identifier[append] ( literal[int] keyword[if] identifier[fv] == literal[string] * literal[int] keyword[else] identifier[int] ( identifier[fv] ))
identifier[header] = identifier[FCSHeader] . identifier[_make] ( identifier[field_values] )
keyword[return] identifier[header] | def read_fcs_header_segment(buf, begin=0):
"""
Read HEADER segment of FCS file.
Parameters
----------
buf : file-like object
Buffer containing data to interpret as HEADER segment.
begin : int
Offset (in bytes) to first byte of HEADER segment in `buf`.
Returns
-------
header : namedtuple
Version information and byte offset values of other FCS segments
(see FCS standards for more information) in the following order:
- version : str
- text_begin : int
- text_end : int
- data_begin : int
- data_end : int
- analysis_begin : int
- analysis_end : int
Notes
-----
Blank ANALYSIS segment offsets are converted to zeros.
OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_).
References
----------
.. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,
"Data file standard for flow cytometry. Data File Standards
Committee of the Society for Analytical Cytology," Cytometry vol
11, pp 323-332, 1990, PMID 2340769.
.. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,
J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow
cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997,
PMID 9181300.
.. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry,
version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID
19937951.
"""
fields = ['version', 'text_begin', 'text_end', 'data_begin', 'data_end', 'analysis_begin', 'analysis_end']
FCSHeader = collections.namedtuple('FCSHeader', fields)
field_values = []
buf.seek(begin)
field_values.append(buf.read(10).decode(encoding).rstrip()) # version
field_values.append(int(buf.read(8))) # text_begin
field_values.append(int(buf.read(8))) # text_end
field_values.append(int(buf.read(8))) # data_begin
field_values.append(int(buf.read(8))) # data_end
fv = buf.read(8).decode(encoding) # analysis_begin
field_values.append(0 if fv == ' ' * 8 else int(fv))
fv = buf.read(8).decode(encoding) # analysis_end
field_values.append(0 if fv == ' ' * 8 else int(fv))
header = FCSHeader._make(field_values)
return header |
def deprecated_method(to_be_removed_in_version, new_method=None):
"""This is a decorator which can be used to mark methods as deprecated. It will result in a warning being emitted
when the function is used.
:param to_be_removed_in_version: Version of this module the decorated method will be removed in.
:type to_be_removed_in_version: str
:param new_method: Method intended to replace the decorated method. This method's docstrings are included in the
decorated method's docstring.
:type new_method: function
:return: Wrapped function that includes a deprecation warning and update docstrings from the replacement method.
:rtype: types.FunctionType
"""
def decorator(method):
deprecation_message = generate_method_deprecation_message(
to_be_removed_in_version=to_be_removed_in_version,
old_method_name=method.__name__,
method_name=new_method.__name__,
module_name=inspect.getmodule(new_method).__name__,
)
@functools.wraps(method)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
message=deprecation_message,
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return method(*args, **kwargs)
if new_method:
new_func.__doc__ = """\
{message}
Docstring content from this method's replacement copied below:
{new_docstring}
""".format(
message=deprecation_message,
new_docstring=dedent(new_method.__doc__),
)
else:
new_func.__doc__ = deprecation_message
return new_func
return decorator | def function[deprecated_method, parameter[to_be_removed_in_version, new_method]]:
constant[This is a decorator which can be used to mark methods as deprecated. It will result in a warning being emitted
when the function is used.
:param to_be_removed_in_version: Version of this module the decorated method will be removed in.
:type to_be_removed_in_version: str
:param new_method: Method intended to replace the decorated method. This method's docstrings are included in the
decorated method's docstring.
:type new_method: function
:return: Wrapped function that includes a deprecation warning and update docstrings from the replacement method.
:rtype: types.FunctionType
]
def function[decorator, parameter[method]]:
variable[deprecation_message] assign[=] call[name[generate_method_deprecation_message], parameter[]]
def function[new_func, parameter[]]:
call[name[warnings].simplefilter, parameter[constant[always], name[DeprecationWarning]]]
call[name[warnings].warn, parameter[]]
call[name[warnings].simplefilter, parameter[constant[default], name[DeprecationWarning]]]
return[call[name[method], parameter[<ast.Starred object at 0x7da2041da260>]]]
if name[new_method] begin[:]
name[new_func].__doc__ assign[=] call[constant[ {message}
Docstring content from this method's replacement copied below:
{new_docstring}
].format, parameter[]]
return[name[new_func]]
return[name[decorator]] | keyword[def] identifier[deprecated_method] ( identifier[to_be_removed_in_version] , identifier[new_method] = keyword[None] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[method] ):
identifier[deprecation_message] = identifier[generate_method_deprecation_message] (
identifier[to_be_removed_in_version] = identifier[to_be_removed_in_version] ,
identifier[old_method_name] = identifier[method] . identifier[__name__] ,
identifier[method_name] = identifier[new_method] . identifier[__name__] ,
identifier[module_name] = identifier[inspect] . identifier[getmodule] ( identifier[new_method] ). identifier[__name__] ,
)
@ identifier[functools] . identifier[wraps] ( identifier[method] )
keyword[def] identifier[new_func] (* identifier[args] ,** identifier[kwargs] ):
identifier[warnings] . identifier[simplefilter] ( literal[string] , identifier[DeprecationWarning] )
identifier[warnings] . identifier[warn] (
identifier[message] = identifier[deprecation_message] ,
identifier[category] = identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int] ,
)
identifier[warnings] . identifier[simplefilter] ( literal[string] , identifier[DeprecationWarning] )
keyword[return] identifier[method] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[new_method] :
identifier[new_func] . identifier[__doc__] = literal[string] . identifier[format] (
identifier[message] = identifier[deprecation_message] ,
identifier[new_docstring] = identifier[dedent] ( identifier[new_method] . identifier[__doc__] ),
)
keyword[else] :
identifier[new_func] . identifier[__doc__] = identifier[deprecation_message]
keyword[return] identifier[new_func]
keyword[return] identifier[decorator] | def deprecated_method(to_be_removed_in_version, new_method=None):
"""This is a decorator which can be used to mark methods as deprecated. It will result in a warning being emitted
when the function is used.
:param to_be_removed_in_version: Version of this module the decorated method will be removed in.
:type to_be_removed_in_version: str
:param new_method: Method intended to replace the decorated method. This method's docstrings are included in the
decorated method's docstring.
:type new_method: function
:return: Wrapped function that includes a deprecation warning and update docstrings from the replacement method.
:rtype: types.FunctionType
"""
def decorator(method):
deprecation_message = generate_method_deprecation_message(to_be_removed_in_version=to_be_removed_in_version, old_method_name=method.__name__, method_name=new_method.__name__, module_name=inspect.getmodule(new_method).__name__)
@functools.wraps(method)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(message=deprecation_message, category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return method(*args, **kwargs)
if new_method:
new_func.__doc__ = " {message}\n Docstring content from this method's replacement copied below:\n {new_docstring}\n ".format(message=deprecation_message, new_docstring=dedent(new_method.__doc__)) # depends on [control=['if'], data=[]]
else:
new_func.__doc__ = deprecation_message
return new_func
return decorator |
def expect_file_to_exist(self, filepath=None, result_format=None, include_config=False,
catch_exceptions=None, meta=None):
"""
Checks to see if a file specified by the user actually exists
Args:
filepath (str or None): \
The filepath to evalutate. If none, will check the currently-configured path object
of this FileDataAsset.
Keyword Args:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
if filepath is not None and os.path.isfile(filepath):
success = True
elif self._path is not None and os.path.isfile(self._path):
success = True
else:
success = False
return {"success":success} | def function[expect_file_to_exist, parameter[self, filepath, result_format, include_config, catch_exceptions, meta]]:
constant[
Checks to see if a file specified by the user actually exists
Args:
filepath (str or None): The filepath to evalutate. If none, will check the currently-configured path object
of this FileDataAsset.
Keyword Args:
result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
]
if <ast.BoolOp object at 0x7da1b17c5ae0> begin[:]
variable[success] assign[=] constant[True]
return[dictionary[[<ast.Constant object at 0x7da1b17bd4e0>], [<ast.Name object at 0x7da1b17bd4b0>]]] | keyword[def] identifier[expect_file_to_exist] ( identifier[self] , identifier[filepath] = keyword[None] , identifier[result_format] = keyword[None] , identifier[include_config] = keyword[False] ,
identifier[catch_exceptions] = keyword[None] , identifier[meta] = keyword[None] ):
literal[string]
keyword[if] identifier[filepath] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filepath] ):
identifier[success] = keyword[True]
keyword[elif] identifier[self] . identifier[_path] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[_path] ):
identifier[success] = keyword[True]
keyword[else] :
identifier[success] = keyword[False]
keyword[return] { literal[string] : identifier[success] } | def expect_file_to_exist(self, filepath=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
"""
Checks to see if a file specified by the user actually exists
Args:
filepath (str or None): The filepath to evalutate. If none, will check the currently-configured path object
of this FileDataAsset.
Keyword Args:
result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
if filepath is not None and os.path.isfile(filepath):
success = True # depends on [control=['if'], data=[]]
elif self._path is not None and os.path.isfile(self._path):
success = True # depends on [control=['if'], data=[]]
else:
success = False
return {'success': success} |
def allocate(self, pool, tenant_id=None, **params):
"""Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
"""
if not tenant_id:
tenant_id = self.request.user.project_id
create_dict = {'floating_network_id': pool,
'tenant_id': tenant_id}
if 'subnet_id' in params:
create_dict['subnet_id'] = params['subnet_id']
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address']
if 'description' in params:
create_dict['description'] = params['description']
if 'dns_domain' in params:
create_dict['dns_domain'] = params['dns_domain']
if 'dns_name' in params:
create_dict['dns_name'] = params['dns_name']
fip = self.client.create_floatingip(
{'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip) | def function[allocate, parameter[self, pool, tenant_id]]:
constant[Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
]
if <ast.UnaryOp object at 0x7da1b18dd900> begin[:]
variable[tenant_id] assign[=] name[self].request.user.project_id
variable[create_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b18dde10>, <ast.Constant object at 0x7da1b18dc580>], [<ast.Name object at 0x7da1b18dd960>, <ast.Name object at 0x7da1b18dcf40>]]
if compare[constant[subnet_id] in name[params]] begin[:]
call[name[create_dict]][constant[subnet_id]] assign[=] call[name[params]][constant[subnet_id]]
if compare[constant[floating_ip_address] in name[params]] begin[:]
call[name[create_dict]][constant[floating_ip_address]] assign[=] call[name[params]][constant[floating_ip_address]]
if compare[constant[description] in name[params]] begin[:]
call[name[create_dict]][constant[description]] assign[=] call[name[params]][constant[description]]
if compare[constant[dns_domain] in name[params]] begin[:]
call[name[create_dict]][constant[dns_domain]] assign[=] call[name[params]][constant[dns_domain]]
if compare[constant[dns_name] in name[params]] begin[:]
call[name[create_dict]][constant[dns_name]] assign[=] call[name[params]][constant[dns_name]]
variable[fip] assign[=] call[call[name[self].client.create_floatingip, parameter[dictionary[[<ast.Constant object at 0x7da1b1984220>], [<ast.Name object at 0x7da1b1987d30>]]]].get, parameter[constant[floatingip]]]
call[name[self]._set_instance_info, parameter[name[fip]]]
return[call[name[FloatingIp], parameter[name[fip]]]] | keyword[def] identifier[allocate] ( identifier[self] , identifier[pool] , identifier[tenant_id] = keyword[None] ,** identifier[params] ):
literal[string]
keyword[if] keyword[not] identifier[tenant_id] :
identifier[tenant_id] = identifier[self] . identifier[request] . identifier[user] . identifier[project_id]
identifier[create_dict] ={ literal[string] : identifier[pool] ,
literal[string] : identifier[tenant_id] }
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[create_dict] [ literal[string] ]= identifier[params] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[create_dict] [ literal[string] ]= identifier[params] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[create_dict] [ literal[string] ]= identifier[params] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[create_dict] [ literal[string] ]= identifier[params] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[create_dict] [ literal[string] ]= identifier[params] [ literal[string] ]
identifier[fip] = identifier[self] . identifier[client] . identifier[create_floatingip] (
{ literal[string] : identifier[create_dict] }). identifier[get] ( literal[string] )
identifier[self] . identifier[_set_instance_info] ( identifier[fip] )
keyword[return] identifier[FloatingIp] ( identifier[fip] ) | def allocate(self, pool, tenant_id=None, **params):
"""Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
"""
if not tenant_id:
tenant_id = self.request.user.project_id # depends on [control=['if'], data=[]]
create_dict = {'floating_network_id': pool, 'tenant_id': tenant_id}
if 'subnet_id' in params:
create_dict['subnet_id'] = params['subnet_id'] # depends on [control=['if'], data=['params']]
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address'] # depends on [control=['if'], data=['params']]
if 'description' in params:
create_dict['description'] = params['description'] # depends on [control=['if'], data=['params']]
if 'dns_domain' in params:
create_dict['dns_domain'] = params['dns_domain'] # depends on [control=['if'], data=['params']]
if 'dns_name' in params:
create_dict['dns_name'] = params['dns_name'] # depends on [control=['if'], data=['params']]
fip = self.client.create_floatingip({'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip) |
def get_width(self, cli, ui_content):
" Width to report to the `Window`. "
# Take the width from the first line.
text = token_list_to_text(self.get_prompt_tokens(cli))
return get_cwidth(text) | def function[get_width, parameter[self, cli, ui_content]]:
constant[ Width to report to the `Window`. ]
variable[text] assign[=] call[name[token_list_to_text], parameter[call[name[self].get_prompt_tokens, parameter[name[cli]]]]]
return[call[name[get_cwidth], parameter[name[text]]]] | keyword[def] identifier[get_width] ( identifier[self] , identifier[cli] , identifier[ui_content] ):
literal[string]
identifier[text] = identifier[token_list_to_text] ( identifier[self] . identifier[get_prompt_tokens] ( identifier[cli] ))
keyword[return] identifier[get_cwidth] ( identifier[text] ) | def get_width(self, cli, ui_content):
""" Width to report to the `Window`. """
# Take the width from the first line.
text = token_list_to_text(self.get_prompt_tokens(cli))
return get_cwidth(text) |
def get_blocks(time=None, pool_name=None, api_code=None):
"""Get a list of blocks for a specific day or mining pool.
Both parameters are optional but at least one is required.
:param int time: time in milliseconds
:param str pool_name: name of the mining pool
:param str api_code: Blockchain.info API code (optional)
:return: an array of :class:`SimpleBlock` objects
"""
resource = 'blocks/{0}?format=json'
if api_code is not None:
resource += '&api_code=' + api_code
if time is not None:
resource = resource.format(time)
elif pool_name is not None:
resource = resource.format(pool_name)
else:
resource = resource.format('')
response = util.call_api(resource)
json_response = json.loads(response)
return [SimpleBlock(b) for b in json_response['blocks']] | def function[get_blocks, parameter[time, pool_name, api_code]]:
constant[Get a list of blocks for a specific day or mining pool.
Both parameters are optional but at least one is required.
:param int time: time in milliseconds
:param str pool_name: name of the mining pool
:param str api_code: Blockchain.info API code (optional)
:return: an array of :class:`SimpleBlock` objects
]
variable[resource] assign[=] constant[blocks/{0}?format=json]
if compare[name[api_code] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1dda620>
if compare[name[time] is_not constant[None]] begin[:]
variable[resource] assign[=] call[name[resource].format, parameter[name[time]]]
variable[response] assign[=] call[name[util].call_api, parameter[name[resource]]]
variable[json_response] assign[=] call[name[json].loads, parameter[name[response]]]
return[<ast.ListComp object at 0x7da1b1ddba60>] | keyword[def] identifier[get_blocks] ( identifier[time] = keyword[None] , identifier[pool_name] = keyword[None] , identifier[api_code] = keyword[None] ):
literal[string]
identifier[resource] = literal[string]
keyword[if] identifier[api_code] keyword[is] keyword[not] keyword[None] :
identifier[resource] += literal[string] + identifier[api_code]
keyword[if] identifier[time] keyword[is] keyword[not] keyword[None] :
identifier[resource] = identifier[resource] . identifier[format] ( identifier[time] )
keyword[elif] identifier[pool_name] keyword[is] keyword[not] keyword[None] :
identifier[resource] = identifier[resource] . identifier[format] ( identifier[pool_name] )
keyword[else] :
identifier[resource] = identifier[resource] . identifier[format] ( literal[string] )
identifier[response] = identifier[util] . identifier[call_api] ( identifier[resource] )
identifier[json_response] = identifier[json] . identifier[loads] ( identifier[response] )
keyword[return] [ identifier[SimpleBlock] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[json_response] [ literal[string] ]] | def get_blocks(time=None, pool_name=None, api_code=None):
"""Get a list of blocks for a specific day or mining pool.
Both parameters are optional but at least one is required.
:param int time: time in milliseconds
:param str pool_name: name of the mining pool
:param str api_code: Blockchain.info API code (optional)
:return: an array of :class:`SimpleBlock` objects
"""
resource = 'blocks/{0}?format=json'
if api_code is not None:
resource += '&api_code=' + api_code # depends on [control=['if'], data=['api_code']]
if time is not None:
resource = resource.format(time) # depends on [control=['if'], data=['time']]
elif pool_name is not None:
resource = resource.format(pool_name) # depends on [control=['if'], data=['pool_name']]
else:
resource = resource.format('')
response = util.call_api(resource)
json_response = json.loads(response)
return [SimpleBlock(b) for b in json_response['blocks']] |
def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in self.config["include_paths"]:
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith(".py"):
continue
module_name = f_name[:-3]
# do not overwrite modules if already found
if module_name in user_modules:
pass
user_modules[module_name] = (include_path, f_name)
return user_modules | def function[get_user_modules, parameter[self]]:
constant[
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
]
variable[user_modules] assign[=] dictionary[[], []]
for taget[name[include_path]] in starred[call[name[self].config][constant[include_paths]]] begin[:]
for taget[name[f_name]] in starred[call[name[sorted], parameter[call[name[os].listdir, parameter[name[include_path]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c7c8940> begin[:]
continue
variable[module_name] assign[=] call[name[f_name]][<ast.Slice object at 0x7da20c9913f0>]
if compare[name[module_name] in name[user_modules]] begin[:]
pass
call[name[user_modules]][name[module_name]] assign[=] tuple[[<ast.Name object at 0x7da20c991210>, <ast.Name object at 0x7da20c990a60>]]
return[name[user_modules]] | keyword[def] identifier[get_user_modules] ( identifier[self] ):
literal[string]
identifier[user_modules] ={}
keyword[for] identifier[include_path] keyword[in] identifier[self] . identifier[config] [ literal[string] ]:
keyword[for] identifier[f_name] keyword[in] identifier[sorted] ( identifier[os] . identifier[listdir] ( identifier[include_path] )):
keyword[if] keyword[not] identifier[f_name] . identifier[endswith] ( literal[string] ):
keyword[continue]
identifier[module_name] = identifier[f_name] [:- literal[int] ]
keyword[if] identifier[module_name] keyword[in] identifier[user_modules] :
keyword[pass]
identifier[user_modules] [ identifier[module_name] ]=( identifier[include_path] , identifier[f_name] )
keyword[return] identifier[user_modules] | def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in self.config['include_paths']:
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith('.py'):
continue # depends on [control=['if'], data=[]]
module_name = f_name[:-3]
# do not overwrite modules if already found
if module_name in user_modules:
pass # depends on [control=['if'], data=[]]
user_modules[module_name] = (include_path, f_name) # depends on [control=['for'], data=['f_name']] # depends on [control=['for'], data=['include_path']]
return user_modules |
def drain_OD(q_plant, T, depth_end, SDR):
"""Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
"""
nu = pc.viscosity_kinematic(T)
K_minor = con.PIPE_ENTRANCE_K_MINOR + con.PIPE_EXIT_K_MINOR + con.EL90_K_MINOR
drain_ID = pc.diam_pipe(q_plant, depth_end, depth_end, nu, mat.PVC_PIPE_ROUGH, K_minor)
drain_ND = pipe.SDR_available_ND(drain_ID, SDR)
return pipe.OD(drain_ND).magnitude | def function[drain_OD, parameter[q_plant, T, depth_end, SDR]]:
constant[Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
]
variable[nu] assign[=] call[name[pc].viscosity_kinematic, parameter[name[T]]]
variable[K_minor] assign[=] binary_operation[binary_operation[name[con].PIPE_ENTRANCE_K_MINOR + name[con].PIPE_EXIT_K_MINOR] + name[con].EL90_K_MINOR]
variable[drain_ID] assign[=] call[name[pc].diam_pipe, parameter[name[q_plant], name[depth_end], name[depth_end], name[nu], name[mat].PVC_PIPE_ROUGH, name[K_minor]]]
variable[drain_ND] assign[=] call[name[pipe].SDR_available_ND, parameter[name[drain_ID], name[SDR]]]
return[call[name[pipe].OD, parameter[name[drain_ND]]].magnitude] | keyword[def] identifier[drain_OD] ( identifier[q_plant] , identifier[T] , identifier[depth_end] , identifier[SDR] ):
literal[string]
identifier[nu] = identifier[pc] . identifier[viscosity_kinematic] ( identifier[T] )
identifier[K_minor] = identifier[con] . identifier[PIPE_ENTRANCE_K_MINOR] + identifier[con] . identifier[PIPE_EXIT_K_MINOR] + identifier[con] . identifier[EL90_K_MINOR]
identifier[drain_ID] = identifier[pc] . identifier[diam_pipe] ( identifier[q_plant] , identifier[depth_end] , identifier[depth_end] , identifier[nu] , identifier[mat] . identifier[PVC_PIPE_ROUGH] , identifier[K_minor] )
identifier[drain_ND] = identifier[pipe] . identifier[SDR_available_ND] ( identifier[drain_ID] , identifier[SDR] )
keyword[return] identifier[pipe] . identifier[OD] ( identifier[drain_ND] ). identifier[magnitude] | def drain_OD(q_plant, T, depth_end, SDR):
"""Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
"""
nu = pc.viscosity_kinematic(T)
K_minor = con.PIPE_ENTRANCE_K_MINOR + con.PIPE_EXIT_K_MINOR + con.EL90_K_MINOR
drain_ID = pc.diam_pipe(q_plant, depth_end, depth_end, nu, mat.PVC_PIPE_ROUGH, K_minor)
drain_ND = pipe.SDR_available_ND(drain_ID, SDR)
return pipe.OD(drain_ND).magnitude |
def lharmonicmean (inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum | def function[lharmonicmean, parameter[inlist]]:
constant[
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
]
variable[sum] assign[=] constant[0]
for taget[name[item]] in starred[name[inlist]] begin[:]
variable[sum] assign[=] binary_operation[name[sum] + binary_operation[constant[1.0] / name[item]]]
return[binary_operation[call[name[len], parameter[name[inlist]]] / name[sum]]] | keyword[def] identifier[lharmonicmean] ( identifier[inlist] ):
literal[string]
identifier[sum] = literal[int]
keyword[for] identifier[item] keyword[in] identifier[inlist] :
identifier[sum] = identifier[sum] + literal[int] / identifier[item]
keyword[return] identifier[len] ( identifier[inlist] )/ identifier[sum] | def lharmonicmean(inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0 / item # depends on [control=['for'], data=['item']]
return len(inlist) / sum |
def clear_cache(temp_dir=None):
"""
Clears any cached info that was exported from the OS trust store. This will
ensure the latest changes are returned from calls to get_list() and
get_path(), but at the expense of re-exporting and parsing all certificates.
:param temp_dir:
The temporary directory to cache the CA certs in on OS X and Windows.
Needs to have secure permissions so other users can not modify the
contents. Must be the same value passed to get_path().
"""
with memory_lock:
_module_values['last_update'] = None
_module_values['certs'] = None
ca_path, temp = _ca_path(temp_dir)
if temp:
with path_lock:
if os.path.exists(ca_path):
os.remove(ca_path) | def function[clear_cache, parameter[temp_dir]]:
constant[
Clears any cached info that was exported from the OS trust store. This will
ensure the latest changes are returned from calls to get_list() and
get_path(), but at the expense of re-exporting and parsing all certificates.
:param temp_dir:
The temporary directory to cache the CA certs in on OS X and Windows.
Needs to have secure permissions so other users can not modify the
contents. Must be the same value passed to get_path().
]
with name[memory_lock] begin[:]
call[name[_module_values]][constant[last_update]] assign[=] constant[None]
call[name[_module_values]][constant[certs]] assign[=] constant[None]
<ast.Tuple object at 0x7da20e961b70> assign[=] call[name[_ca_path], parameter[name[temp_dir]]]
if name[temp] begin[:]
with name[path_lock] begin[:]
if call[name[os].path.exists, parameter[name[ca_path]]] begin[:]
call[name[os].remove, parameter[name[ca_path]]] | keyword[def] identifier[clear_cache] ( identifier[temp_dir] = keyword[None] ):
literal[string]
keyword[with] identifier[memory_lock] :
identifier[_module_values] [ literal[string] ]= keyword[None]
identifier[_module_values] [ literal[string] ]= keyword[None]
identifier[ca_path] , identifier[temp] = identifier[_ca_path] ( identifier[temp_dir] )
keyword[if] identifier[temp] :
keyword[with] identifier[path_lock] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[ca_path] ):
identifier[os] . identifier[remove] ( identifier[ca_path] ) | def clear_cache(temp_dir=None):
"""
Clears any cached info that was exported from the OS trust store. This will
ensure the latest changes are returned from calls to get_list() and
get_path(), but at the expense of re-exporting and parsing all certificates.
:param temp_dir:
The temporary directory to cache the CA certs in on OS X and Windows.
Needs to have secure permissions so other users can not modify the
contents. Must be the same value passed to get_path().
"""
with memory_lock:
_module_values['last_update'] = None
_module_values['certs'] = None # depends on [control=['with'], data=[]]
(ca_path, temp) = _ca_path(temp_dir)
if temp:
with path_lock:
if os.path.exists(ca_path):
os.remove(ca_path) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] |
def lock(self):
"""Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
"""
provider = self.get_provider()
reporter = self.get_reporter()
resolver = resolvelib.Resolver(provider, reporter)
with vistir.cd(self.project.root):
state = resolver.resolve(self.requirements)
traces = trace_graph(state.graph)
hash_cache = HashCache()
for r in state.mapping.values():
if not r.hashes:
r.hashes = get_hashes(hash_cache, r)
set_metadata(
state.mapping, traces,
provider.fetched_dependencies,
provider.collected_requires_pythons,
)
lockfile = plette.Lockfile.with_meta_from(self.project.pipfile)
lockfile["default"] = _collect_derived_entries(
state, traces, self.default_requirements,
)
lockfile["develop"] = _collect_derived_entries(
state, traces, self.develop_requirements,
)
self.project.lockfile = lockfile | def function[lock, parameter[self]]:
constant[Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
]
variable[provider] assign[=] call[name[self].get_provider, parameter[]]
variable[reporter] assign[=] call[name[self].get_reporter, parameter[]]
variable[resolver] assign[=] call[name[resolvelib].Resolver, parameter[name[provider], name[reporter]]]
with call[name[vistir].cd, parameter[name[self].project.root]] begin[:]
variable[state] assign[=] call[name[resolver].resolve, parameter[name[self].requirements]]
variable[traces] assign[=] call[name[trace_graph], parameter[name[state].graph]]
variable[hash_cache] assign[=] call[name[HashCache], parameter[]]
for taget[name[r]] in starred[call[name[state].mapping.values, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1e954b0> begin[:]
name[r].hashes assign[=] call[name[get_hashes], parameter[name[hash_cache], name[r]]]
call[name[set_metadata], parameter[name[state].mapping, name[traces], name[provider].fetched_dependencies, name[provider].collected_requires_pythons]]
variable[lockfile] assign[=] call[name[plette].Lockfile.with_meta_from, parameter[name[self].project.pipfile]]
call[name[lockfile]][constant[default]] assign[=] call[name[_collect_derived_entries], parameter[name[state], name[traces], name[self].default_requirements]]
call[name[lockfile]][constant[develop]] assign[=] call[name[_collect_derived_entries], parameter[name[state], name[traces], name[self].develop_requirements]]
name[self].project.lockfile assign[=] name[lockfile] | keyword[def] identifier[lock] ( identifier[self] ):
literal[string]
identifier[provider] = identifier[self] . identifier[get_provider] ()
identifier[reporter] = identifier[self] . identifier[get_reporter] ()
identifier[resolver] = identifier[resolvelib] . identifier[Resolver] ( identifier[provider] , identifier[reporter] )
keyword[with] identifier[vistir] . identifier[cd] ( identifier[self] . identifier[project] . identifier[root] ):
identifier[state] = identifier[resolver] . identifier[resolve] ( identifier[self] . identifier[requirements] )
identifier[traces] = identifier[trace_graph] ( identifier[state] . identifier[graph] )
identifier[hash_cache] = identifier[HashCache] ()
keyword[for] identifier[r] keyword[in] identifier[state] . identifier[mapping] . identifier[values] ():
keyword[if] keyword[not] identifier[r] . identifier[hashes] :
identifier[r] . identifier[hashes] = identifier[get_hashes] ( identifier[hash_cache] , identifier[r] )
identifier[set_metadata] (
identifier[state] . identifier[mapping] , identifier[traces] ,
identifier[provider] . identifier[fetched_dependencies] ,
identifier[provider] . identifier[collected_requires_pythons] ,
)
identifier[lockfile] = identifier[plette] . identifier[Lockfile] . identifier[with_meta_from] ( identifier[self] . identifier[project] . identifier[pipfile] )
identifier[lockfile] [ literal[string] ]= identifier[_collect_derived_entries] (
identifier[state] , identifier[traces] , identifier[self] . identifier[default_requirements] ,
)
identifier[lockfile] [ literal[string] ]= identifier[_collect_derived_entries] (
identifier[state] , identifier[traces] , identifier[self] . identifier[develop_requirements] ,
)
identifier[self] . identifier[project] . identifier[lockfile] = identifier[lockfile] | def lock(self):
"""Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
"""
provider = self.get_provider()
reporter = self.get_reporter()
resolver = resolvelib.Resolver(provider, reporter)
with vistir.cd(self.project.root):
state = resolver.resolve(self.requirements) # depends on [control=['with'], data=[]]
traces = trace_graph(state.graph)
hash_cache = HashCache()
for r in state.mapping.values():
if not r.hashes:
r.hashes = get_hashes(hash_cache, r) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
set_metadata(state.mapping, traces, provider.fetched_dependencies, provider.collected_requires_pythons)
lockfile = plette.Lockfile.with_meta_from(self.project.pipfile)
lockfile['default'] = _collect_derived_entries(state, traces, self.default_requirements)
lockfile['develop'] = _collect_derived_entries(state, traces, self.develop_requirements)
self.project.lockfile = lockfile |
def delete_metadata_for_uri(self, uri):
"""Delete metadata for a URI in the metadata database.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLITE database for the metadata. If there is an existing
record for the hash, the entire record will be erased.
.. seealso:: write_metadata_for_uri, read_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname=\'osm\' host=localhost
port=5432 user=\'foo\'password=\'bar\' sslmode=disable key=\'id\'
srid=4326```
:type uri: str
"""
hash_value = self.hash_for_datasource(uri)
try:
cursor = self.get_cursor()
# now see if we have any data for our hash
sql = 'delete from metadata where hash = \'' + hash_value + '\';'
cursor.execute(sql)
self.connection.commit()
except sqlite.Error as e:
LOGGER.debug("SQLITE Error %s:" % e.args[0])
self.connection.rollback()
except Exception as e:
LOGGER.debug("Error %s:" % e.args[0])
self.connection.rollback()
raise
finally:
self.close_connection() | def function[delete_metadata_for_uri, parameter[self, uri]]:
constant[Delete metadata for a URI in the metadata database.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLITE database for the metadata. If there is an existing
record for the hash, the entire record will be erased.
.. seealso:: write_metadata_for_uri, read_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname='osm' host=localhost
port=5432 user='foo'password='bar' sslmode=disable key='id'
srid=4326```
:type uri: str
]
variable[hash_value] assign[=] call[name[self].hash_for_datasource, parameter[name[uri]]]
<ast.Try object at 0x7da18fe90a00> | keyword[def] identifier[delete_metadata_for_uri] ( identifier[self] , identifier[uri] ):
literal[string]
identifier[hash_value] = identifier[self] . identifier[hash_for_datasource] ( identifier[uri] )
keyword[try] :
identifier[cursor] = identifier[self] . identifier[get_cursor] ()
identifier[sql] = literal[string] + identifier[hash_value] + literal[string]
identifier[cursor] . identifier[execute] ( identifier[sql] )
identifier[self] . identifier[connection] . identifier[commit] ()
keyword[except] identifier[sqlite] . identifier[Error] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[e] . identifier[args] [ literal[int] ])
identifier[self] . identifier[connection] . identifier[rollback] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[e] . identifier[args] [ literal[int] ])
identifier[self] . identifier[connection] . identifier[rollback] ()
keyword[raise]
keyword[finally] :
identifier[self] . identifier[close_connection] () | def delete_metadata_for_uri(self, uri):
"""Delete metadata for a URI in the metadata database.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLITE database for the metadata. If there is an existing
record for the hash, the entire record will be erased.
.. seealso:: write_metadata_for_uri, read_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname='osm' host=localhost
port=5432 user='foo'password='bar' sslmode=disable key='id'
srid=4326```
:type uri: str
"""
hash_value = self.hash_for_datasource(uri)
try:
cursor = self.get_cursor()
# now see if we have any data for our hash
sql = "delete from metadata where hash = '" + hash_value + "';"
cursor.execute(sql)
self.connection.commit() # depends on [control=['try'], data=[]]
except sqlite.Error as e:
LOGGER.debug('SQLITE Error %s:' % e.args[0])
self.connection.rollback() # depends on [control=['except'], data=['e']]
except Exception as e:
LOGGER.debug('Error %s:' % e.args[0])
self.connection.rollback()
raise # depends on [control=['except'], data=['e']]
finally:
self.close_connection() |
def get_connection(cls, pid, connection):
"""Return the specified :class:`~queries.pool.Connection` from the
pool.
:param str pid: The pool ID
:param connection: The connection to return for
:type connection: psycopg2.extensions.connection
:rtype: queries.pool.Connection
"""
with cls._lock:
return cls._pools[pid].connection_handle(connection) | def function[get_connection, parameter[cls, pid, connection]]:
constant[Return the specified :class:`~queries.pool.Connection` from the
pool.
:param str pid: The pool ID
:param connection: The connection to return for
:type connection: psycopg2.extensions.connection
:rtype: queries.pool.Connection
]
with name[cls]._lock begin[:]
return[call[call[name[cls]._pools][name[pid]].connection_handle, parameter[name[connection]]]] | keyword[def] identifier[get_connection] ( identifier[cls] , identifier[pid] , identifier[connection] ):
literal[string]
keyword[with] identifier[cls] . identifier[_lock] :
keyword[return] identifier[cls] . identifier[_pools] [ identifier[pid] ]. identifier[connection_handle] ( identifier[connection] ) | def get_connection(cls, pid, connection):
"""Return the specified :class:`~queries.pool.Connection` from the
pool.
:param str pid: The pool ID
:param connection: The connection to return for
:type connection: psycopg2.extensions.connection
:rtype: queries.pool.Connection
"""
with cls._lock:
return cls._pools[pid].connection_handle(connection) # depends on [control=['with'], data=[]] |
def modelresource_factory(model, resource_class=ModelResource):
"""
Factory for creating ``ModelResource`` class for given Django model.
"""
attrs = {'model': model}
Meta = type(str('Meta'), (object,), attrs)
class_name = model.__name__ + str('Resource')
class_attrs = {
'Meta': Meta,
}
metaclass = ModelDeclarativeMetaclass
return metaclass(class_name, (resource_class,), class_attrs) | def function[modelresource_factory, parameter[model, resource_class]]:
constant[
Factory for creating ``ModelResource`` class for given Django model.
]
variable[attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d4fe80>], [<ast.Name object at 0x7da1b1d4dc90>]]
variable[Meta] assign[=] call[name[type], parameter[call[name[str], parameter[constant[Meta]]], tuple[[<ast.Name object at 0x7da1b1d4f550>]], name[attrs]]]
variable[class_name] assign[=] binary_operation[name[model].__name__ + call[name[str], parameter[constant[Resource]]]]
variable[class_attrs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d4da50>], [<ast.Name object at 0x7da1b1d4fc40>]]
variable[metaclass] assign[=] name[ModelDeclarativeMetaclass]
return[call[name[metaclass], parameter[name[class_name], tuple[[<ast.Name object at 0x7da1b1d8b340>]], name[class_attrs]]]] | keyword[def] identifier[modelresource_factory] ( identifier[model] , identifier[resource_class] = identifier[ModelResource] ):
literal[string]
identifier[attrs] ={ literal[string] : identifier[model] }
identifier[Meta] = identifier[type] ( identifier[str] ( literal[string] ),( identifier[object] ,), identifier[attrs] )
identifier[class_name] = identifier[model] . identifier[__name__] + identifier[str] ( literal[string] )
identifier[class_attrs] ={
literal[string] : identifier[Meta] ,
}
identifier[metaclass] = identifier[ModelDeclarativeMetaclass]
keyword[return] identifier[metaclass] ( identifier[class_name] ,( identifier[resource_class] ,), identifier[class_attrs] ) | def modelresource_factory(model, resource_class=ModelResource):
"""
Factory for creating ``ModelResource`` class for given Django model.
"""
attrs = {'model': model}
Meta = type(str('Meta'), (object,), attrs)
class_name = model.__name__ + str('Resource')
class_attrs = {'Meta': Meta}
metaclass = ModelDeclarativeMetaclass
return metaclass(class_name, (resource_class,), class_attrs) |
def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc | def function[_maybe_make_unit_desc, parameter[self, unit_desc]]:
constant[Return the UnitDescriptor or convert a string to one.]
if <ast.BoolOp object at 0x7da1b18c3430> begin[:]
variable[unit_desc] assign[=] call[name[units].Unit, parameter[name[unit_desc]]]
if <ast.UnaryOp object at 0x7da1b18c30d0> begin[:]
<ast.Raise object at 0x7da1b18c1b70>
return[name[unit_desc]] | keyword[def] identifier[_maybe_make_unit_desc] ( identifier[self] , identifier[unit_desc] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[unit_desc] , identifier[str] ) keyword[or] identifier[unit_desc] keyword[is] keyword[None] :
identifier[unit_desc] = identifier[units] . identifier[Unit] ( identifier[unit_desc] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[unit_desc] , identifier[units] . identifier[UnitDescriptor] ):
keyword[raise] identifier[TypeError] ( literal[string] %( identifier[self] . identifier[name] ,
identifier[unit_desc] ))
keyword[return] identifier[unit_desc] | def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc) # depends on [control=['if'], data=[]]
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name, unit_desc)) # depends on [control=['if'], data=[]]
return unit_desc |
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href')) | def function[get_pages, parameter[url]]:
constant[
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
]
while constant[True] begin[:]
<ast.Yield object at 0x7da1b10cf3d0>
variable[doc] assign[=] call[call[name[html].parse, parameter[name[url]]].find, parameter[constant[body]]]
variable[links] assign[=] <ast.ListComp object at 0x7da1b10cd690>
if <ast.UnaryOp object at 0x7da1b10cd4b0> begin[:]
break
variable[url] assign[=] call[name[urljoin], parameter[name[url], call[call[name[links]][constant[0]].get, parameter[constant[href]]]]] | keyword[def] identifier[get_pages] ( identifier[url] ):
literal[string]
keyword[while] keyword[True] :
keyword[yield] identifier[url]
identifier[doc] = identifier[html] . identifier[parse] ( identifier[url] ). identifier[find] ( literal[string] )
identifier[links] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[doc] . identifier[findall] ( literal[string] ) keyword[if] identifier[a] . identifier[text] keyword[and] identifier[a] . identifier[text] . identifier[startswith] ( literal[string] )]
keyword[if] keyword[not] identifier[links] :
keyword[break]
identifier[url] = identifier[urljoin] ( identifier[url] , identifier[links] [ literal[int] ]. identifier[get] ( literal[string] )) | def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find('body')
links = [a for a in doc.findall('.//a') if a.text and a.text.startswith('next ')]
if not links:
break # depends on [control=['if'], data=[]]
url = urljoin(url, links[0].get('href')) # depends on [control=['while'], data=[]] |
def generate_templates(self, exercise_questions=False):
"""
Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`.
"""
self.generate_template(channeldir=self.channeldir,
filename=self.channelinfo,
header=CHANNEL_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.contentinfo,
header=CONTENT_INFO_HEADER)
if exercise_questions:
self.generate_template(channeldir=self.channeldir,
filename=self.exercisesinfo,
header=EXERCISE_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.questionsinfo,
header=EXERCISE_QUESTIONS_INFO_HEADER) | def function[generate_templates, parameter[self, exercise_questions]]:
constant[
Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`.
]
call[name[self].generate_template, parameter[]]
call[name[self].generate_template, parameter[]]
if name[exercise_questions] begin[:]
call[name[self].generate_template, parameter[]]
call[name[self].generate_template, parameter[]] | keyword[def] identifier[generate_templates] ( identifier[self] , identifier[exercise_questions] = keyword[False] ):
literal[string]
identifier[self] . identifier[generate_template] ( identifier[channeldir] = identifier[self] . identifier[channeldir] ,
identifier[filename] = identifier[self] . identifier[channelinfo] ,
identifier[header] = identifier[CHANNEL_INFO_HEADER] )
identifier[self] . identifier[generate_template] ( identifier[channeldir] = identifier[self] . identifier[channeldir] ,
identifier[filename] = identifier[self] . identifier[contentinfo] ,
identifier[header] = identifier[CONTENT_INFO_HEADER] )
keyword[if] identifier[exercise_questions] :
identifier[self] . identifier[generate_template] ( identifier[channeldir] = identifier[self] . identifier[channeldir] ,
identifier[filename] = identifier[self] . identifier[exercisesinfo] ,
identifier[header] = identifier[EXERCISE_INFO_HEADER] )
identifier[self] . identifier[generate_template] ( identifier[channeldir] = identifier[self] . identifier[channeldir] ,
identifier[filename] = identifier[self] . identifier[questionsinfo] ,
identifier[header] = identifier[EXERCISE_QUESTIONS_INFO_HEADER] ) | def generate_templates(self, exercise_questions=False):
"""
Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`.
"""
self.generate_template(channeldir=self.channeldir, filename=self.channelinfo, header=CHANNEL_INFO_HEADER)
self.generate_template(channeldir=self.channeldir, filename=self.contentinfo, header=CONTENT_INFO_HEADER)
if exercise_questions:
self.generate_template(channeldir=self.channeldir, filename=self.exercisesinfo, header=EXERCISE_INFO_HEADER)
self.generate_template(channeldir=self.channeldir, filename=self.questionsinfo, header=EXERCISE_QUESTIONS_INFO_HEADER) # depends on [control=['if'], data=[]] |
def nlevenshtein(seq1, seq2, method=1):
"""Compute the normalized Levenshtein distance between `seq1` and `seq2`.
Two normalization methods are provided. For both of them, the normalized
distance will be a float between 0 and 1, where 0 means equal and 1
completely different. The computation obeys the following patterns:
0.0 if seq1 == seq2
1.0 if len(seq1) == 0 or len(seq2) == 0
edit distance / factor otherwise
The `method` parameter specifies which normalization factor should be used.
It can have the value 1 or 2, which correspond to the following:
1: the length of the shortest alignment between the sequences
(that is, the length of the longest sequence)
2: the length of the longest alignment between the sequences
Which normalization factor should be chosen is a matter of taste. The first
one is cheap to compute. The second one is more costly, but it accounts
better than the first one for parallelisms of symbols between the sequences.
For the rationale behind the use of the second method, see:
Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein
Distance", 2004, p. 130 sq, which is available online at:
http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
"""
if seq1 == seq2:
return 0.0
len1, len2 = len(seq1), len(seq2)
if len1 == 0 or len2 == 0:
return 1.0
if len1 < len2: # minimize the arrays size
len1, len2 = len2, len1
seq1, seq2 = seq2, seq1
if method == 1:
return levenshtein(seq1, seq2) / float(len1)
if method != 2:
raise ValueError("expected either 1 or 2 for `method` parameter")
column = array('L', range(len2 + 1))
length = array('L', range(len2 + 1))
for x in range(1, len1 + 1):
column[0] = length[0] = x
last = llast = x - 1
for y in range(1, len2 + 1):
# dist
old = column[y]
ic = column[y - 1] + 1
dc = column[y] + 1
rc = last + (seq1[x - 1] != seq2[y - 1])
column[y] = min(ic, dc, rc)
last = old
# length
lold = length[y]
lic = length[y - 1] + 1 if ic == column[y] else 0
ldc = length[y] + 1 if dc == column[y] else 0
lrc = llast + 1 if rc == column[y] else 0
length[y] = max(ldc, lic, lrc)
llast = lold
return column[y] / float(length[y]) | def function[nlevenshtein, parameter[seq1, seq2, method]]:
constant[Compute the normalized Levenshtein distance between `seq1` and `seq2`.
Two normalization methods are provided. For both of them, the normalized
distance will be a float between 0 and 1, where 0 means equal and 1
completely different. The computation obeys the following patterns:
0.0 if seq1 == seq2
1.0 if len(seq1) == 0 or len(seq2) == 0
edit distance / factor otherwise
The `method` parameter specifies which normalization factor should be used.
It can have the value 1 or 2, which correspond to the following:
1: the length of the shortest alignment between the sequences
(that is, the length of the longest sequence)
2: the length of the longest alignment between the sequences
Which normalization factor should be chosen is a matter of taste. The first
one is cheap to compute. The second one is more costly, but it accounts
better than the first one for parallelisms of symbols between the sequences.
For the rationale behind the use of the second method, see:
Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein
Distance", 2004, p. 130 sq, which is available online at:
http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
]
if compare[name[seq1] equal[==] name[seq2]] begin[:]
return[constant[0.0]]
<ast.Tuple object at 0x7da18f00f6a0> assign[=] tuple[[<ast.Call object at 0x7da18f00f280>, <ast.Call object at 0x7da18f00eb90>]]
if <ast.BoolOp object at 0x7da18f00dde0> begin[:]
return[constant[1.0]]
if compare[name[len1] less[<] name[len2]] begin[:]
<ast.Tuple object at 0x7da18f00c2b0> assign[=] tuple[[<ast.Name object at 0x7da18f00c0d0>, <ast.Name object at 0x7da18f00c7c0>]]
<ast.Tuple object at 0x7da18f00d4b0> assign[=] tuple[[<ast.Name object at 0x7da18f00e290>, <ast.Name object at 0x7da18f00ee90>]]
if compare[name[method] equal[==] constant[1]] begin[:]
return[binary_operation[call[name[levenshtein], parameter[name[seq1], name[seq2]]] / call[name[float], parameter[name[len1]]]]]
if compare[name[method] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da18f00c8e0>
variable[column] assign[=] call[name[array], parameter[constant[L], call[name[range], parameter[binary_operation[name[len2] + constant[1]]]]]]
variable[length] assign[=] call[name[array], parameter[constant[L], call[name[range], parameter[binary_operation[name[len2] + constant[1]]]]]]
for taget[name[x]] in starred[call[name[range], parameter[constant[1], binary_operation[name[len1] + constant[1]]]]] begin[:]
call[name[column]][constant[0]] assign[=] name[x]
variable[last] assign[=] binary_operation[name[x] - constant[1]]
for taget[name[y]] in starred[call[name[range], parameter[constant[1], binary_operation[name[len2] + constant[1]]]]] begin[:]
variable[old] assign[=] call[name[column]][name[y]]
variable[ic] assign[=] binary_operation[call[name[column]][binary_operation[name[y] - constant[1]]] + constant[1]]
variable[dc] assign[=] binary_operation[call[name[column]][name[y]] + constant[1]]
variable[rc] assign[=] binary_operation[name[last] + compare[call[name[seq1]][binary_operation[name[x] - constant[1]]] not_equal[!=] call[name[seq2]][binary_operation[name[y] - constant[1]]]]]
call[name[column]][name[y]] assign[=] call[name[min], parameter[name[ic], name[dc], name[rc]]]
variable[last] assign[=] name[old]
variable[lold] assign[=] call[name[length]][name[y]]
variable[lic] assign[=] <ast.IfExp object at 0x7da18f00c4c0>
variable[ldc] assign[=] <ast.IfExp object at 0x7da18f00db10>
variable[lrc] assign[=] <ast.IfExp object at 0x7da18f00ee60>
call[name[length]][name[y]] assign[=] call[name[max], parameter[name[ldc], name[lic], name[lrc]]]
variable[llast] assign[=] name[lold]
return[binary_operation[call[name[column]][name[y]] / call[name[float], parameter[call[name[length]][name[y]]]]]] | keyword[def] identifier[nlevenshtein] ( identifier[seq1] , identifier[seq2] , identifier[method] = literal[int] ):
literal[string]
keyword[if] identifier[seq1] == identifier[seq2] :
keyword[return] literal[int]
identifier[len1] , identifier[len2] = identifier[len] ( identifier[seq1] ), identifier[len] ( identifier[seq2] )
keyword[if] identifier[len1] == literal[int] keyword[or] identifier[len2] == literal[int] :
keyword[return] literal[int]
keyword[if] identifier[len1] < identifier[len2] :
identifier[len1] , identifier[len2] = identifier[len2] , identifier[len1]
identifier[seq1] , identifier[seq2] = identifier[seq2] , identifier[seq1]
keyword[if] identifier[method] == literal[int] :
keyword[return] identifier[levenshtein] ( identifier[seq1] , identifier[seq2] )/ identifier[float] ( identifier[len1] )
keyword[if] identifier[method] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[column] = identifier[array] ( literal[string] , identifier[range] ( identifier[len2] + literal[int] ))
identifier[length] = identifier[array] ( literal[string] , identifier[range] ( identifier[len2] + literal[int] ))
keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , identifier[len1] + literal[int] ):
identifier[column] [ literal[int] ]= identifier[length] [ literal[int] ]= identifier[x]
identifier[last] = identifier[llast] = identifier[x] - literal[int]
keyword[for] identifier[y] keyword[in] identifier[range] ( literal[int] , identifier[len2] + literal[int] ):
identifier[old] = identifier[column] [ identifier[y] ]
identifier[ic] = identifier[column] [ identifier[y] - literal[int] ]+ literal[int]
identifier[dc] = identifier[column] [ identifier[y] ]+ literal[int]
identifier[rc] = identifier[last] +( identifier[seq1] [ identifier[x] - literal[int] ]!= identifier[seq2] [ identifier[y] - literal[int] ])
identifier[column] [ identifier[y] ]= identifier[min] ( identifier[ic] , identifier[dc] , identifier[rc] )
identifier[last] = identifier[old]
identifier[lold] = identifier[length] [ identifier[y] ]
identifier[lic] = identifier[length] [ identifier[y] - literal[int] ]+ literal[int] keyword[if] identifier[ic] == identifier[column] [ identifier[y] ] keyword[else] literal[int]
identifier[ldc] = identifier[length] [ identifier[y] ]+ literal[int] keyword[if] identifier[dc] == identifier[column] [ identifier[y] ] keyword[else] literal[int]
identifier[lrc] = identifier[llast] + literal[int] keyword[if] identifier[rc] == identifier[column] [ identifier[y] ] keyword[else] literal[int]
identifier[length] [ identifier[y] ]= identifier[max] ( identifier[ldc] , identifier[lic] , identifier[lrc] )
identifier[llast] = identifier[lold]
keyword[return] identifier[column] [ identifier[y] ]/ identifier[float] ( identifier[length] [ identifier[y] ]) | def nlevenshtein(seq1, seq2, method=1):
"""Compute the normalized Levenshtein distance between `seq1` and `seq2`.
Two normalization methods are provided. For both of them, the normalized
distance will be a float between 0 and 1, where 0 means equal and 1
completely different. The computation obeys the following patterns:
0.0 if seq1 == seq2
1.0 if len(seq1) == 0 or len(seq2) == 0
edit distance / factor otherwise
The `method` parameter specifies which normalization factor should be used.
It can have the value 1 or 2, which correspond to the following:
1: the length of the shortest alignment between the sequences
(that is, the length of the longest sequence)
2: the length of the longest alignment between the sequences
Which normalization factor should be chosen is a matter of taste. The first
one is cheap to compute. The second one is more costly, but it accounts
better than the first one for parallelisms of symbols between the sequences.
For the rationale behind the use of the second method, see:
Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein
Distance", 2004, p. 130 sq, which is available online at:
http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
"""
if seq1 == seq2:
return 0.0 # depends on [control=['if'], data=[]]
(len1, len2) = (len(seq1), len(seq2))
if len1 == 0 or len2 == 0:
return 1.0 # depends on [control=['if'], data=[]]
if len1 < len2: # minimize the arrays size
(len1, len2) = (len2, len1)
(seq1, seq2) = (seq2, seq1) # depends on [control=['if'], data=['len1', 'len2']]
if method == 1:
return levenshtein(seq1, seq2) / float(len1) # depends on [control=['if'], data=[]]
if method != 2:
raise ValueError('expected either 1 or 2 for `method` parameter') # depends on [control=['if'], data=[]]
column = array('L', range(len2 + 1))
length = array('L', range(len2 + 1))
for x in range(1, len1 + 1):
column[0] = length[0] = x
last = llast = x - 1
for y in range(1, len2 + 1): # dist
old = column[y]
ic = column[y - 1] + 1
dc = column[y] + 1
rc = last + (seq1[x - 1] != seq2[y - 1])
column[y] = min(ic, dc, rc)
last = old # length
lold = length[y]
lic = length[y - 1] + 1 if ic == column[y] else 0
ldc = length[y] + 1 if dc == column[y] else 0
lrc = llast + 1 if rc == column[y] else 0
length[y] = max(ldc, lic, lrc)
llast = lold # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['x']]
return column[y] / float(length[y]) |
def run(data):
"""HLA typing with bwakit, parsing output from called genotype files.
"""
bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
hla_fqs = tz.get_in(["hla", "fastq"], data, [])
if len(hla_fqs) > 0:
hla_base = os.path.commonprefix(hla_fqs)
while hla_base.endswith("."):
hla_base = hla_base[:-1]
out_file = hla_base + ".top"
if not utils.file_exists(out_file):
cmd = "{bwakit_dir}/run-HLA {hla_base}"
do.run(cmd.format(**locals()), "HLA typing with bwakit")
out_file = _organize_calls(out_file, hla_base, data)
data["hla"].update({"call_file": out_file,
"hlacaller": "bwakit"})
return data | def function[run, parameter[data]]:
constant[HLA typing with bwakit, parsing output from called genotype files.
]
variable[bwakit_dir] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[call[name[utils].which, parameter[constant[run-bwamem]]]]]]]
variable[hla_fqs] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da20c76f070>, <ast.Constant object at 0x7da20c76f3d0>]], name[data], list[[]]]]
if compare[call[name[len], parameter[name[hla_fqs]]] greater[>] constant[0]] begin[:]
variable[hla_base] assign[=] call[name[os].path.commonprefix, parameter[name[hla_fqs]]]
while call[name[hla_base].endswith, parameter[constant[.]]] begin[:]
variable[hla_base] assign[=] call[name[hla_base]][<ast.Slice object at 0x7da20c76f670>]
variable[out_file] assign[=] binary_operation[name[hla_base] + constant[.top]]
if <ast.UnaryOp object at 0x7da20c76f2b0> begin[:]
variable[cmd] assign[=] constant[{bwakit_dir}/run-HLA {hla_base}]
call[name[do].run, parameter[call[name[cmd].format, parameter[]], constant[HLA typing with bwakit]]]
variable[out_file] assign[=] call[name[_organize_calls], parameter[name[out_file], name[hla_base], name[data]]]
call[call[name[data]][constant[hla]].update, parameter[dictionary[[<ast.Constant object at 0x7da20c76e200>, <ast.Constant object at 0x7da20c76c2e0>], [<ast.Name object at 0x7da20c76c760>, <ast.Constant object at 0x7da20c76fc10>]]]]
return[name[data]] | keyword[def] identifier[run] ( identifier[data] ):
literal[string]
identifier[bwakit_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[utils] . identifier[which] ( literal[string] )))
identifier[hla_fqs] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] ], identifier[data] ,[])
keyword[if] identifier[len] ( identifier[hla_fqs] )> literal[int] :
identifier[hla_base] = identifier[os] . identifier[path] . identifier[commonprefix] ( identifier[hla_fqs] )
keyword[while] identifier[hla_base] . identifier[endswith] ( literal[string] ):
identifier[hla_base] = identifier[hla_base] [:- literal[int] ]
identifier[out_file] = identifier[hla_base] + literal[string]
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ):
identifier[cmd] = literal[string]
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] )
identifier[out_file] = identifier[_organize_calls] ( identifier[out_file] , identifier[hla_base] , identifier[data] )
identifier[data] [ literal[string] ]. identifier[update] ({ literal[string] : identifier[out_file] ,
literal[string] : literal[string] })
keyword[return] identifier[data] | def run(data):
"""HLA typing with bwakit, parsing output from called genotype files.
"""
bwakit_dir = os.path.dirname(os.path.realpath(utils.which('run-bwamem')))
hla_fqs = tz.get_in(['hla', 'fastq'], data, [])
if len(hla_fqs) > 0:
hla_base = os.path.commonprefix(hla_fqs)
while hla_base.endswith('.'):
hla_base = hla_base[:-1] # depends on [control=['while'], data=[]]
out_file = hla_base + '.top'
if not utils.file_exists(out_file):
cmd = '{bwakit_dir}/run-HLA {hla_base}'
do.run(cmd.format(**locals()), 'HLA typing with bwakit')
out_file = _organize_calls(out_file, hla_base, data) # depends on [control=['if'], data=[]]
data['hla'].update({'call_file': out_file, 'hlacaller': 'bwakit'}) # depends on [control=['if'], data=[]]
return data |
def login(self, came_from=lurl('/')):
"""Start the user login."""
login_counter = request.environ.get('repoze.who.logins', 0)
if login_counter > 0:
flash(_('Wrong credentials'), 'warning')
return dict(page='login', login_counter=str(login_counter),
came_from=came_from) | def function[login, parameter[self, came_from]]:
constant[Start the user login.]
variable[login_counter] assign[=] call[name[request].environ.get, parameter[constant[repoze.who.logins], constant[0]]]
if compare[name[login_counter] greater[>] constant[0]] begin[:]
call[name[flash], parameter[call[name[_], parameter[constant[Wrong credentials]]], constant[warning]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[login] ( identifier[self] , identifier[came_from] = identifier[lurl] ( literal[string] )):
literal[string]
identifier[login_counter] = identifier[request] . identifier[environ] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[login_counter] > literal[int] :
identifier[flash] ( identifier[_] ( literal[string] ), literal[string] )
keyword[return] identifier[dict] ( identifier[page] = literal[string] , identifier[login_counter] = identifier[str] ( identifier[login_counter] ),
identifier[came_from] = identifier[came_from] ) | def login(self, came_from=lurl('/')):
"""Start the user login."""
login_counter = request.environ.get('repoze.who.logins', 0)
if login_counter > 0:
flash(_('Wrong credentials'), 'warning') # depends on [control=['if'], data=[]]
return dict(page='login', login_counter=str(login_counter), came_from=came_from) |
def remove(self, path, recursive=True):
"""
Remove file or directory at location `path`.
"""
if recursive:
cmd = ["rm", "-r", path]
else:
cmd = ["rm", path]
self.remote_context.check_output(cmd) | def function[remove, parameter[self, path, recursive]]:
constant[
Remove file or directory at location `path`.
]
if name[recursive] begin[:]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18c4ce830>, <ast.Constant object at 0x7da18c4ce110>, <ast.Name object at 0x7da18c4cec20>]]
call[name[self].remote_context.check_output, parameter[name[cmd]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[path] , identifier[recursive] = keyword[True] ):
literal[string]
keyword[if] identifier[recursive] :
identifier[cmd] =[ literal[string] , literal[string] , identifier[path] ]
keyword[else] :
identifier[cmd] =[ literal[string] , identifier[path] ]
identifier[self] . identifier[remote_context] . identifier[check_output] ( identifier[cmd] ) | def remove(self, path, recursive=True):
"""
Remove file or directory at location `path`.
"""
if recursive:
cmd = ['rm', '-r', path] # depends on [control=['if'], data=[]]
else:
cmd = ['rm', path]
self.remote_context.check_output(cmd) |
def load_file(self, afile=None, verbose=False):
"""
Load a new network from a network file type (e.g. SIF, XGMML, etc.).
Use network import file to load networks from Excel or csv files. This
command will create a new network collection if no current network collection
is selected, otherwise it will add the network to the current collection.
The SUIDs of the new networks and views are returned.
:param afile (string): Select a network format file. This command does
not support csv or Excel files. Use network import file for that.
:param verbose: print more
:returns: { SUIDs of the new networks and views }
"""
PARAMS=set_param(["file"],[afile])
response=api(url=self.__url+"/load file", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | def function[load_file, parameter[self, afile, verbose]]:
constant[
Load a new network from a network file type (e.g. SIF, XGMML, etc.).
Use network import file to load networks from Excel or csv files. This
command will create a new network collection if no current network collection
is selected, otherwise it will add the network to the current collection.
The SUIDs of the new networks and views are returned.
:param afile (string): Select a network format file. This command does
not support csv or Excel files. Use network import file for that.
:param verbose: print more
:returns: { SUIDs of the new networks and views }
]
variable[PARAMS] assign[=] call[name[set_param], parameter[list[[<ast.Constant object at 0x7da18f00eef0>]], list[[<ast.Name object at 0x7da18f00fcd0>]]]]
variable[response] assign[=] call[name[api], parameter[]]
return[name[response]] | keyword[def] identifier[load_file] ( identifier[self] , identifier[afile] = keyword[None] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[PARAMS] = identifier[set_param] ([ literal[string] ],[ identifier[afile] ])
identifier[response] = identifier[api] ( identifier[url] = identifier[self] . identifier[__url] + literal[string] , identifier[PARAMS] = identifier[PARAMS] , identifier[method] = literal[string] , identifier[verbose] = identifier[verbose] )
keyword[return] identifier[response] | def load_file(self, afile=None, verbose=False):
"""
Load a new network from a network file type (e.g. SIF, XGMML, etc.).
Use network import file to load networks from Excel or csv files. This
command will create a new network collection if no current network collection
is selected, otherwise it will add the network to the current collection.
The SUIDs of the new networks and views are returned.
:param afile (string): Select a network format file. This command does
not support csv or Excel files. Use network import file for that.
:param verbose: print more
:returns: { SUIDs of the new networks and views }
"""
PARAMS = set_param(['file'], [afile])
response = api(url=self.__url + '/load file', PARAMS=PARAMS, method='POST', verbose=verbose)
return response |
def to_(self, data_pts):
"""Reverse of :meth:`from_`."""
data_pts = np.asarray(data_pts, dtype=np.float)
has_z = (data_pts.shape[-1] > 2)
if self.use_center:
data_pts = data_pts - self.viewer.data_off
# subtract data indexes at center reference pixel
ref_pt = [self.viewer._org_x, self.viewer._org_y]
if has_z:
ref_pt.append(self.viewer._org_z)
off_pts = np.subtract(data_pts, ref_pt)
return off_pts | def function[to_, parameter[self, data_pts]]:
constant[Reverse of :meth:`from_`.]
variable[data_pts] assign[=] call[name[np].asarray, parameter[name[data_pts]]]
variable[has_z] assign[=] compare[call[name[data_pts].shape][<ast.UnaryOp object at 0x7da2041d9840>] greater[>] constant[2]]
if name[self].use_center begin[:]
variable[data_pts] assign[=] binary_operation[name[data_pts] - name[self].viewer.data_off]
variable[ref_pt] assign[=] list[[<ast.Attribute object at 0x7da2041db520>, <ast.Attribute object at 0x7da2041da3e0>]]
if name[has_z] begin[:]
call[name[ref_pt].append, parameter[name[self].viewer._org_z]]
variable[off_pts] assign[=] call[name[np].subtract, parameter[name[data_pts], name[ref_pt]]]
return[name[off_pts]] | keyword[def] identifier[to_] ( identifier[self] , identifier[data_pts] ):
literal[string]
identifier[data_pts] = identifier[np] . identifier[asarray] ( identifier[data_pts] , identifier[dtype] = identifier[np] . identifier[float] )
identifier[has_z] =( identifier[data_pts] . identifier[shape] [- literal[int] ]> literal[int] )
keyword[if] identifier[self] . identifier[use_center] :
identifier[data_pts] = identifier[data_pts] - identifier[self] . identifier[viewer] . identifier[data_off]
identifier[ref_pt] =[ identifier[self] . identifier[viewer] . identifier[_org_x] , identifier[self] . identifier[viewer] . identifier[_org_y] ]
keyword[if] identifier[has_z] :
identifier[ref_pt] . identifier[append] ( identifier[self] . identifier[viewer] . identifier[_org_z] )
identifier[off_pts] = identifier[np] . identifier[subtract] ( identifier[data_pts] , identifier[ref_pt] )
keyword[return] identifier[off_pts] | def to_(self, data_pts):
"""Reverse of :meth:`from_`."""
data_pts = np.asarray(data_pts, dtype=np.float)
has_z = data_pts.shape[-1] > 2
if self.use_center:
data_pts = data_pts - self.viewer.data_off # depends on [control=['if'], data=[]]
# subtract data indexes at center reference pixel
ref_pt = [self.viewer._org_x, self.viewer._org_y]
if has_z:
ref_pt.append(self.viewer._org_z) # depends on [control=['if'], data=[]]
off_pts = np.subtract(data_pts, ref_pt)
return off_pts |
def add_moving_element(self, element):
"""Add elements to the board"""
element.initialize(self.canvas)
self.elements.append(element) | def function[add_moving_element, parameter[self, element]]:
constant[Add elements to the board]
call[name[element].initialize, parameter[name[self].canvas]]
call[name[self].elements.append, parameter[name[element]]] | keyword[def] identifier[add_moving_element] ( identifier[self] , identifier[element] ):
literal[string]
identifier[element] . identifier[initialize] ( identifier[self] . identifier[canvas] )
identifier[self] . identifier[elements] . identifier[append] ( identifier[element] ) | def add_moving_element(self, element):
"""Add elements to the board"""
element.initialize(self.canvas)
self.elements.append(element) |
def as_sound(self, filename, speed=60, cutoff=50):
"""
Convert AstonFrame into a WAV file.
Parameters
----------
filename : str
Name of wavfile to create.
speed : float, optional
How much to speed up for sound recording, e.g. a value of 60
will turn an hour-long AstonFrame into a minute-long sound clip.
cutoff : float, optional
m/z's under this value will be clipped out.
"""
# make a 1d array for the sound
def to_t(t):
return (t - self.index[0]) / speed
wav_len = int(to_t(self.index[-1]) * 60 * 44100)
wav = np.zeros(wav_len)
# create an artificial array to interpolate times out of
tmask = np.linspace(0, 1, self.shape[0])
# come up with a mapping from mz to tone
min_hz, max_hz = 50, 1000
min_mz, max_mz = min(self.columns), max(self.columns)
def mz_to_wv(mz):
"""
Maps a wavelength/mz to a tone.
"""
try:
mz = float(mz)
except:
return 100
wv = (mz * (max_hz - min_hz) -
max_hz * min_mz + min_hz * max_mz) / (max_mz - min_mz)
return int(44100 / wv)
# go through each trace and map it into the sound array
for i, mz in enumerate(self.columns):
if float(mz) < cutoff:
# clip out mz/wv below a certain threshold
# handy if data has low level noise
continue
print(str(i) + '/' + str(self.shape[1]))
inter_x = np.linspace(0, 1, wav[::mz_to_wv(mz)].shape[0])
wav[::mz_to_wv(mz)] += np.interp(inter_x, tmask, self.values[:, i])
# scale the new array and write it out
scaled = wav / np.max(np.abs(wav))
scaled = scipy.signal.fftconvolve(scaled, np.ones(5) / 5, mode='same')
scaled = np.int16(scaled * 32767)
scipy.io.wavfile.write(filename, 44100, scaled) | def function[as_sound, parameter[self, filename, speed, cutoff]]:
constant[
Convert AstonFrame into a WAV file.
Parameters
----------
filename : str
Name of wavfile to create.
speed : float, optional
How much to speed up for sound recording, e.g. a value of 60
will turn an hour-long AstonFrame into a minute-long sound clip.
cutoff : float, optional
m/z's under this value will be clipped out.
]
def function[to_t, parameter[t]]:
return[binary_operation[binary_operation[name[t] - call[name[self].index][constant[0]]] / name[speed]]]
variable[wav_len] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[to_t], parameter[call[name[self].index][<ast.UnaryOp object at 0x7da18f58fb20>]]] * constant[60]] * constant[44100]]]]
variable[wav] assign[=] call[name[np].zeros, parameter[name[wav_len]]]
variable[tmask] assign[=] call[name[np].linspace, parameter[constant[0], constant[1], call[name[self].shape][constant[0]]]]
<ast.Tuple object at 0x7da18f58cf40> assign[=] tuple[[<ast.Constant object at 0x7da18f58fd30>, <ast.Constant object at 0x7da18f58d2a0>]]
<ast.Tuple object at 0x7da18f58e680> assign[=] tuple[[<ast.Call object at 0x7da18f58d0c0>, <ast.Call object at 0x7da18f58d630>]]
def function[mz_to_wv, parameter[mz]]:
constant[
Maps a wavelength/mz to a tone.
]
<ast.Try object at 0x7da18f58ebc0>
variable[wv] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[mz] * binary_operation[name[max_hz] - name[min_hz]]] - binary_operation[name[max_hz] * name[min_mz]]] + binary_operation[name[min_hz] * name[max_mz]]] / binary_operation[name[max_mz] - name[min_mz]]]
return[call[name[int], parameter[binary_operation[constant[44100] / name[wv]]]]]
for taget[tuple[[<ast.Name object at 0x7da20eb29d50>, <ast.Name object at 0x7da20eb29e70>]]] in starred[call[name[enumerate], parameter[name[self].columns]]] begin[:]
if compare[call[name[float], parameter[name[mz]]] less[<] name[cutoff]] begin[:]
continue
call[name[print], parameter[binary_operation[binary_operation[call[name[str], parameter[name[i]]] + constant[/]] + call[name[str], parameter[call[name[self].shape][constant[1]]]]]]]
variable[inter_x] assign[=] call[name[np].linspace, parameter[constant[0], constant[1], call[call[name[wav]][<ast.Slice object at 0x7da1b26ae5f0>].shape][constant[0]]]]
<ast.AugAssign object at 0x7da1b26af0a0>
variable[scaled] assign[=] binary_operation[name[wav] / call[name[np].max, parameter[call[name[np].abs, parameter[name[wav]]]]]]
variable[scaled] assign[=] call[name[scipy].signal.fftconvolve, parameter[name[scaled], binary_operation[call[name[np].ones, parameter[constant[5]]] / constant[5]]]]
variable[scaled] assign[=] call[name[np].int16, parameter[binary_operation[name[scaled] * constant[32767]]]]
call[name[scipy].io.wavfile.write, parameter[name[filename], constant[44100], name[scaled]]] | keyword[def] identifier[as_sound] ( identifier[self] , identifier[filename] , identifier[speed] = literal[int] , identifier[cutoff] = literal[int] ):
literal[string]
keyword[def] identifier[to_t] ( identifier[t] ):
keyword[return] ( identifier[t] - identifier[self] . identifier[index] [ literal[int] ])/ identifier[speed]
identifier[wav_len] = identifier[int] ( identifier[to_t] ( identifier[self] . identifier[index] [- literal[int] ])* literal[int] * literal[int] )
identifier[wav] = identifier[np] . identifier[zeros] ( identifier[wav_len] )
identifier[tmask] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[self] . identifier[shape] [ literal[int] ])
identifier[min_hz] , identifier[max_hz] = literal[int] , literal[int]
identifier[min_mz] , identifier[max_mz] = identifier[min] ( identifier[self] . identifier[columns] ), identifier[max] ( identifier[self] . identifier[columns] )
keyword[def] identifier[mz_to_wv] ( identifier[mz] ):
literal[string]
keyword[try] :
identifier[mz] = identifier[float] ( identifier[mz] )
keyword[except] :
keyword[return] literal[int]
identifier[wv] =( identifier[mz] *( identifier[max_hz] - identifier[min_hz] )-
identifier[max_hz] * identifier[min_mz] + identifier[min_hz] * identifier[max_mz] )/( identifier[max_mz] - identifier[min_mz] )
keyword[return] identifier[int] ( literal[int] / identifier[wv] )
keyword[for] identifier[i] , identifier[mz] keyword[in] identifier[enumerate] ( identifier[self] . identifier[columns] ):
keyword[if] identifier[float] ( identifier[mz] )< identifier[cutoff] :
keyword[continue]
identifier[print] ( identifier[str] ( identifier[i] )+ literal[string] + identifier[str] ( identifier[self] . identifier[shape] [ literal[int] ]))
identifier[inter_x] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[wav] [:: identifier[mz_to_wv] ( identifier[mz] )]. identifier[shape] [ literal[int] ])
identifier[wav] [:: identifier[mz_to_wv] ( identifier[mz] )]+= identifier[np] . identifier[interp] ( identifier[inter_x] , identifier[tmask] , identifier[self] . identifier[values] [:, identifier[i] ])
identifier[scaled] = identifier[wav] / identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[wav] ))
identifier[scaled] = identifier[scipy] . identifier[signal] . identifier[fftconvolve] ( identifier[scaled] , identifier[np] . identifier[ones] ( literal[int] )/ literal[int] , identifier[mode] = literal[string] )
identifier[scaled] = identifier[np] . identifier[int16] ( identifier[scaled] * literal[int] )
identifier[scipy] . identifier[io] . identifier[wavfile] . identifier[write] ( identifier[filename] , literal[int] , identifier[scaled] ) | def as_sound(self, filename, speed=60, cutoff=50):
"""
Convert AstonFrame into a WAV file.
Parameters
----------
filename : str
Name of wavfile to create.
speed : float, optional
How much to speed up for sound recording, e.g. a value of 60
will turn an hour-long AstonFrame into a minute-long sound clip.
cutoff : float, optional
m/z's under this value will be clipped out.
"""
# make a 1d array for the sound
def to_t(t):
return (t - self.index[0]) / speed
wav_len = int(to_t(self.index[-1]) * 60 * 44100)
wav = np.zeros(wav_len)
# create an artificial array to interpolate times out of
tmask = np.linspace(0, 1, self.shape[0])
# come up with a mapping from mz to tone
(min_hz, max_hz) = (50, 1000)
(min_mz, max_mz) = (min(self.columns), max(self.columns))
def mz_to_wv(mz):
"""
Maps a wavelength/mz to a tone.
"""
try:
mz = float(mz) # depends on [control=['try'], data=[]]
except:
return 100 # depends on [control=['except'], data=[]]
wv = (mz * (max_hz - min_hz) - max_hz * min_mz + min_hz * max_mz) / (max_mz - min_mz)
return int(44100 / wv)
# go through each trace and map it into the sound array
for (i, mz) in enumerate(self.columns):
if float(mz) < cutoff:
# clip out mz/wv below a certain threshold
# handy if data has low level noise
continue # depends on [control=['if'], data=[]]
print(str(i) + '/' + str(self.shape[1]))
inter_x = np.linspace(0, 1, wav[::mz_to_wv(mz)].shape[0])
wav[::mz_to_wv(mz)] += np.interp(inter_x, tmask, self.values[:, i]) # depends on [control=['for'], data=[]]
# scale the new array and write it out
scaled = wav / np.max(np.abs(wav))
scaled = scipy.signal.fftconvolve(scaled, np.ones(5) / 5, mode='same')
scaled = np.int16(scaled * 32767)
scipy.io.wavfile.write(filename, 44100, scaled) |
def RegisterEventHandler(self, Event, Target):
"""Registers any callable as an event handler.
:Parameters:
Event : str
Name of the event. For event names, see the respective ``...Events`` class.
Target : callable
Callable to register as the event handler.
:return: True is callable was successfully registered, False if it was already registered.
:rtype: bool
:see: `UnregisterEventHandler`
"""
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
if Target in self._EventHandlers[Event]:
return False
self._EventHandlers[Event].append(Target)
self.__Logger.info('registered %s: %s', Event, repr(Target))
return True | def function[RegisterEventHandler, parameter[self, Event, Target]]:
constant[Registers any callable as an event handler.
:Parameters:
Event : str
Name of the event. For event names, see the respective ``...Events`` class.
Target : callable
Callable to register as the event handler.
:return: True is callable was successfully registered, False if it was already registered.
:rtype: bool
:see: `UnregisterEventHandler`
]
if <ast.UnaryOp object at 0x7da1b0667040> begin[:]
<ast.Raise object at 0x7da1b06671f0>
if compare[name[Event] <ast.NotIn object at 0x7da2590d7190> name[self]._EventHandlers] begin[:]
<ast.Raise object at 0x7da20ed4a8f0>
if compare[name[Target] in call[name[self]._EventHandlers][name[Event]]] begin[:]
return[constant[False]]
call[call[name[self]._EventHandlers][name[Event]].append, parameter[name[Target]]]
call[name[self].__Logger.info, parameter[constant[registered %s: %s], name[Event], call[name[repr], parameter[name[Target]]]]]
return[constant[True]] | keyword[def] identifier[RegisterEventHandler] ( identifier[self] , identifier[Event] , identifier[Target] ):
literal[string]
keyword[if] keyword[not] identifier[callable] ( identifier[Target] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[repr] ( identifier[Target] ))
keyword[if] identifier[Event] keyword[not] keyword[in] identifier[self] . identifier[_EventHandlers] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[Event] , identifier[self] . identifier[__class__] . identifier[__name__] ))
keyword[if] identifier[Target] keyword[in] identifier[self] . identifier[_EventHandlers] [ identifier[Event] ]:
keyword[return] keyword[False]
identifier[self] . identifier[_EventHandlers] [ identifier[Event] ]. identifier[append] ( identifier[Target] )
identifier[self] . identifier[__Logger] . identifier[info] ( literal[string] , identifier[Event] , identifier[repr] ( identifier[Target] ))
keyword[return] keyword[True] | def RegisterEventHandler(self, Event, Target):
"""Registers any callable as an event handler.
:Parameters:
Event : str
Name of the event. For event names, see the respective ``...Events`` class.
Target : callable
Callable to register as the event handler.
:return: True is callable was successfully registered, False if it was already registered.
:rtype: bool
:see: `UnregisterEventHandler`
"""
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target)) # depends on [control=['if'], data=[]]
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__)) # depends on [control=['if'], data=['Event']]
if Target in self._EventHandlers[Event]:
return False # depends on [control=['if'], data=[]]
self._EventHandlers[Event].append(Target)
self.__Logger.info('registered %s: %s', Event, repr(Target))
return True |
def get_rt_data(self, code):
"""
获取指定股票的分时数据
:param code: 股票代码,例如,HK.00700,US.APPL
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== ==========================================================================
参数 类型 说明
===================== =========== ==========================================================================
code str 股票代码
time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)
is_blank bool 数据状态;正常数据为False,伪造数据为True
opened_mins int 零点到当前多少分钟
cur_price float 当前价格
last_close float 昨天收盘的价格
avg_price float 平均价格
volume float 成交量
turnover float 成交金额
===================== =========== ==========================================================================
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x['code'] = code
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
'last_close', 'avg_price', 'volume', 'turnover'
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table | def function[get_rt_data, parameter[self, code]]:
constant[
获取指定股票的分时数据
:param code: 股票代码,例如,HK.00700,US.APPL
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== ==========================================================================
参数 类型 说明
===================== =========== ==========================================================================
code str 股票代码
time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)
is_blank bool 数据状态;正常数据为False,伪造数据为True
opened_mins int 零点到当前多少分钟
cur_price float 当前价格
last_close float 昨天收盘的价格
avg_price float 平均价格
volume float 成交量
turnover float 成交金额
===================== =========== ==========================================================================
]
if <ast.BoolOp object at 0x7da1b07bd4b0> begin[:]
variable[error_str] assign[=] binary_operation[name[ERROR_STR_PREFIX] + constant[the type of param in code is wrong]]
return[tuple[[<ast.Name object at 0x7da1b07bd2d0>, <ast.Name object at 0x7da1b07bc3a0>]]]
variable[query_processor] assign[=] call[name[self]._get_sync_query_processor, parameter[name[RtDataQuery].pack_req, name[RtDataQuery].unpack_rsp]]
variable[kargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b07bf9a0>, <ast.Constant object at 0x7da1b07bc6d0>], [<ast.Name object at 0x7da1b07bf580>, <ast.Call object at 0x7da1b07bdea0>]]
<ast.Tuple object at 0x7da1b07bdde0> assign[=] call[name[query_processor], parameter[]]
if compare[name[ret_code] equal[==] name[RET_ERROR]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b07bccd0>, <ast.Name object at 0x7da1b07bd660>]]]
for taget[name[x]] in starred[name[rt_data_list]] begin[:]
call[name[x]][constant[code]] assign[=] name[code]
variable[col_list] assign[=] list[[<ast.Constant object at 0x7da1b07bcc40>, <ast.Constant object at 0x7da1b07bf850>, <ast.Constant object at 0x7da1b07bc580>, <ast.Constant object at 0x7da1b07bc850>, <ast.Constant object at 0x7da1b07bcb80>, <ast.Constant object at 0x7da1b07bd150>, <ast.Constant object at 0x7da1b07bc130>, <ast.Constant object at 0x7da1b07bd8a0>, <ast.Constant object at 0x7da1b07bc910>]]
variable[rt_data_table] assign[=] call[name[pd].DataFrame, parameter[name[rt_data_list]]]
return[tuple[[<ast.Name object at 0x7da1b07bdd80>, <ast.Name object at 0x7da1b07bcdc0>]]] | keyword[def] identifier[get_rt_data] ( identifier[self] , identifier[code] ):
literal[string]
keyword[if] identifier[code] keyword[is] keyword[None] keyword[or] identifier[is_str] ( identifier[code] ) keyword[is] keyword[False] :
identifier[error_str] = identifier[ERROR_STR_PREFIX] + literal[string]
keyword[return] identifier[RET_ERROR] , identifier[error_str]
identifier[query_processor] = identifier[self] . identifier[_get_sync_query_processor] (
identifier[RtDataQuery] . identifier[pack_req] , identifier[RtDataQuery] . identifier[unpack_rsp] )
identifier[kargs] ={
literal[string] : identifier[code] ,
literal[string] : identifier[self] . identifier[get_sync_conn_id] ()
}
identifier[ret_code] , identifier[msg] , identifier[rt_data_list] = identifier[query_processor] (** identifier[kargs] )
keyword[if] identifier[ret_code] == identifier[RET_ERROR] :
keyword[return] identifier[ret_code] , identifier[msg]
keyword[for] identifier[x] keyword[in] identifier[rt_data_list] :
identifier[x] [ literal[string] ]= identifier[code]
identifier[col_list] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string]
]
identifier[rt_data_table] = identifier[pd] . identifier[DataFrame] ( identifier[rt_data_list] , identifier[columns] = identifier[col_list] )
keyword[return] identifier[RET_OK] , identifier[rt_data_table] | def get_rt_data(self, code):
"""
获取指定股票的分时数据
:param code: 股票代码,例如,HK.00700,US.APPL
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== ==========================================================================
参数 类型 说明
===================== =========== ==========================================================================
code str 股票代码
time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)
is_blank bool 数据状态;正常数据为False,伪造数据为True
opened_mins int 零点到当前多少分钟
cur_price float 当前价格
last_close float 昨天收盘的价格
avg_price float 平均价格
volume float 成交量
turnover float 成交金额
===================== =========== ==========================================================================
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + 'the type of param in code is wrong'
return (RET_ERROR, error_str) # depends on [control=['if'], data=[]]
query_processor = self._get_sync_query_processor(RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {'code': code, 'conn_id': self.get_sync_conn_id()}
(ret_code, msg, rt_data_list) = query_processor(**kargs)
if ret_code == RET_ERROR:
return (ret_code, msg) # depends on [control=['if'], data=['ret_code']]
for x in rt_data_list:
x['code'] = code # depends on [control=['for'], data=['x']]
col_list = ['code', 'time', 'is_blank', 'opened_mins', 'cur_price', 'last_close', 'avg_price', 'volume', 'turnover']
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return (RET_OK, rt_data_table) |
def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(
['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name] | def function[instance, parameter[cls, name]]:
constant[Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
]
if <ast.UnaryOp object at 0x7da1b054a2f0> begin[:]
variable[model_name] assign[=] call[name[name].split, parameter[constant[.]]]
variable[ins_name] assign[=] call[constant[.].join, parameter[list[[<ast.Constant object at 0x7da1b05482b0>, <ast.Subscript object at 0x7da1b0548ee0>, <ast.Constant object at 0x7da1b054b8b0>, <ast.Subscript object at 0x7da1b05494b0>]]]]
call[name[cls]._instance][name[name]] assign[=] call[call[name[cls].import_model, parameter[name[ins_name]]], parameter[]]
return[call[name[cls]._instance][name[name]]] | keyword[def] identifier[instance] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[cls] . identifier[_instance] . identifier[get] ( identifier[name] ):
identifier[model_name] = identifier[name] . identifier[split] ( literal[string] )
identifier[ins_name] = literal[string] . identifier[join] (
[ literal[string] , identifier[model_name] [ literal[int] ], literal[string] , identifier[model_name] [ literal[int] ]])
identifier[cls] . identifier[_instance] [ identifier[name] ]= identifier[cls] . identifier[import_model] ( identifier[ins_name] )()
keyword[return] identifier[cls] . identifier[_instance] [ identifier[name] ] | def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)() # depends on [control=['if'], data=[]]
return cls._instance[name] |
def finalize(self, initial=True):
"""Call this after you've created all the PawnSpot you need and are ready to add them to the board."""
if getattr(self, '_finalized', False):
return
if (
self.proxy is None or
not hasattr(self.proxy, 'name')
):
Clock.schedule_once(self.finalize, 0)
return
if initial:
self.name = self.proxy.name
self.paths = self.proxy.setdefault(
'_image_paths', self.default_image_paths
)
zeroes = [0] * len(self.paths)
self.offxs = self.proxy.setdefault('_offxs', zeroes)
self.offys = self.proxy.setdefault('_offys', zeroes)
self.proxy.connect(self._trigger_pull_from_proxy)
self.bind(
paths=self._trigger_push_image_paths,
offxs=self._trigger_push_offxs,
offys=self._trigger_push_offys
)
self._finalized = True
self.finalize_children() | def function[finalize, parameter[self, initial]]:
constant[Call this after you've created all the PawnSpot you need and are ready to add them to the board.]
if call[name[getattr], parameter[name[self], constant[_finalized], constant[False]]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b0b5de40> begin[:]
call[name[Clock].schedule_once, parameter[name[self].finalize, constant[0]]]
return[None]
if name[initial] begin[:]
name[self].name assign[=] name[self].proxy.name
name[self].paths assign[=] call[name[self].proxy.setdefault, parameter[constant[_image_paths], name[self].default_image_paths]]
variable[zeroes] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0b77550>]] * call[name[len], parameter[name[self].paths]]]
name[self].offxs assign[=] call[name[self].proxy.setdefault, parameter[constant[_offxs], name[zeroes]]]
name[self].offys assign[=] call[name[self].proxy.setdefault, parameter[constant[_offys], name[zeroes]]]
call[name[self].proxy.connect, parameter[name[self]._trigger_pull_from_proxy]]
call[name[self].bind, parameter[]]
name[self]._finalized assign[=] constant[True]
call[name[self].finalize_children, parameter[]] | keyword[def] identifier[finalize] ( identifier[self] , identifier[initial] = keyword[True] ):
literal[string]
keyword[if] identifier[getattr] ( identifier[self] , literal[string] , keyword[False] ):
keyword[return]
keyword[if] (
identifier[self] . identifier[proxy] keyword[is] keyword[None] keyword[or]
keyword[not] identifier[hasattr] ( identifier[self] . identifier[proxy] , literal[string] )
):
identifier[Clock] . identifier[schedule_once] ( identifier[self] . identifier[finalize] , literal[int] )
keyword[return]
keyword[if] identifier[initial] :
identifier[self] . identifier[name] = identifier[self] . identifier[proxy] . identifier[name]
identifier[self] . identifier[paths] = identifier[self] . identifier[proxy] . identifier[setdefault] (
literal[string] , identifier[self] . identifier[default_image_paths]
)
identifier[zeroes] =[ literal[int] ]* identifier[len] ( identifier[self] . identifier[paths] )
identifier[self] . identifier[offxs] = identifier[self] . identifier[proxy] . identifier[setdefault] ( literal[string] , identifier[zeroes] )
identifier[self] . identifier[offys] = identifier[self] . identifier[proxy] . identifier[setdefault] ( literal[string] , identifier[zeroes] )
identifier[self] . identifier[proxy] . identifier[connect] ( identifier[self] . identifier[_trigger_pull_from_proxy] )
identifier[self] . identifier[bind] (
identifier[paths] = identifier[self] . identifier[_trigger_push_image_paths] ,
identifier[offxs] = identifier[self] . identifier[_trigger_push_offxs] ,
identifier[offys] = identifier[self] . identifier[_trigger_push_offys]
)
identifier[self] . identifier[_finalized] = keyword[True]
identifier[self] . identifier[finalize_children] () | def finalize(self, initial=True):
"""Call this after you've created all the PawnSpot you need and are ready to add them to the board."""
if getattr(self, '_finalized', False):
return # depends on [control=['if'], data=[]]
if self.proxy is None or not hasattr(self.proxy, 'name'):
Clock.schedule_once(self.finalize, 0)
return # depends on [control=['if'], data=[]]
if initial:
self.name = self.proxy.name
self.paths = self.proxy.setdefault('_image_paths', self.default_image_paths)
zeroes = [0] * len(self.paths)
self.offxs = self.proxy.setdefault('_offxs', zeroes)
self.offys = self.proxy.setdefault('_offys', zeroes)
self.proxy.connect(self._trigger_pull_from_proxy) # depends on [control=['if'], data=[]]
self.bind(paths=self._trigger_push_image_paths, offxs=self._trigger_push_offxs, offys=self._trigger_push_offys)
self._finalized = True
self.finalize_children() |
def isVisible(self, instance, mode='view', default=None, field=None):
"""
This function returns the visibility of the widget depending on whether
the rejection workflow is enabled or not.
"""
vis = super(RejectionWidget, self).isVisible(
instance=instance, mode=mode, default=default, field=field)
if instance.bika_setup.isRejectionWorkflowEnabled():
return vis
else:
return 'invisible' | def function[isVisible, parameter[self, instance, mode, default, field]]:
constant[
This function returns the visibility of the widget depending on whether
the rejection workflow is enabled or not.
]
variable[vis] assign[=] call[call[name[super], parameter[name[RejectionWidget], name[self]]].isVisible, parameter[]]
if call[name[instance].bika_setup.isRejectionWorkflowEnabled, parameter[]] begin[:]
return[name[vis]] | keyword[def] identifier[isVisible] ( identifier[self] , identifier[instance] , identifier[mode] = literal[string] , identifier[default] = keyword[None] , identifier[field] = keyword[None] ):
literal[string]
identifier[vis] = identifier[super] ( identifier[RejectionWidget] , identifier[self] ). identifier[isVisible] (
identifier[instance] = identifier[instance] , identifier[mode] = identifier[mode] , identifier[default] = identifier[default] , identifier[field] = identifier[field] )
keyword[if] identifier[instance] . identifier[bika_setup] . identifier[isRejectionWorkflowEnabled] ():
keyword[return] identifier[vis]
keyword[else] :
keyword[return] literal[string] | def isVisible(self, instance, mode='view', default=None, field=None):
"""
This function returns the visibility of the widget depending on whether
the rejection workflow is enabled or not.
"""
vis = super(RejectionWidget, self).isVisible(instance=instance, mode=mode, default=default, field=field)
if instance.bika_setup.isRejectionWorkflowEnabled():
return vis # depends on [control=['if'], data=[]]
else:
return 'invisible' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.