code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):
    """
    Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.

    This is a thin wrapper: it forwards all arguments unchanged to the parent
    solver's ``tf_solve`` implementation.

    Args:
        fn_x: A callable returning the value $f(x)$ at $x$.
        x_init: Initial solution guess $x_0$.
        base_value: Value $f(x')$ at $x = x'$.
        target_value: Value $f(x_0)$ at $x = x_0$.
        estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.

    Returns:
        A solution $x$ to the problem as given by the solver.
    """
    parent = super(LineSearch, self)
    return parent.tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)
constant[
Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.
Args:
fn_x: A callable returning the value $f(x)$ at $x$.
x_init: Initial solution guess $x_0$.
base_value: Value $f(x')$ at $x = x'$.
target_value: Value $f(x_0)$ at $x = x_0$.
estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.
Returns:
A solution $x$ to the problem as given by the solver.
]
return[call[call[name[super], parameter[name[LineSearch], name[self]]].tf_solve, parameter[name[fn_x], name[x_init], name[base_value], name[target_value], name[estimated_improvement]]]] | keyword[def] identifier[tf_solve] ( identifier[self] , identifier[fn_x] , identifier[x_init] , identifier[base_value] , identifier[target_value] , identifier[estimated_improvement] = keyword[None] ):
literal[string]
keyword[return] identifier[super] ( identifier[LineSearch] , identifier[self] ). identifier[tf_solve] ( identifier[fn_x] , identifier[x_init] , identifier[base_value] , identifier[target_value] , identifier[estimated_improvement] ) | def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):
"""
Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.
Args:
fn_x: A callable returning the value $f(x)$ at $x$.
x_init: Initial solution guess $x_0$.
base_value: Value $f(x')$ at $x = x'$.
target_value: Value $f(x_0)$ at $x = x_0$.
estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.
Returns:
A solution $x$ to the problem as given by the solver.
"""
return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement) |
def _handle_successor(self, job, successor, all_successors):
    """
    Process each successor generated by the job, and return a new list of succeeding jobs.

    The flow is: resolve the successor's concrete target address (return jumps
    are redirected through the job's call stack), build a new call stack and
    block ID according to the jumpkind, perform bookkeeping for calls/returns,
    run a fix-point check against any previously recorded state for the same
    block ID, and finally spawn new VFG jobs if the fix-point has not been
    reached.

    :param VFGJob job:          The VFGJob instance.
    :param SimState successor:  The succeeding state.
    :param list all_successors: A list of all successors.
    :return:                    A list of newly created jobs from the successor.
    :rtype:                     list
    """
    # Initialize parameters
    addr = job.addr
    jumpkind = successor.history.jumpkind

    #
    # Get instruction pointer
    #
    if job.is_return_jump:
        ret_target = job.call_stack.current_return_target
        if ret_target is None:
            # We have no where to go according to our call stack. However, the callstack might be corrupted
            l.debug("According to the call stack, we have nowhere to return to.")
            return [ ]

        # Force the instruction pointer to the return target recorded on the call stack.
        successor.ip = ret_target

    # this try-except block is to handle cases where the instruction pointer is symbolic
    try:
        # Ask for up to 2 solutions: one means a unique concrete target,
        # two means the target is not uniquely determined.
        successor_addrs = successor.solver.eval_upto(successor.ip, 2)
    except SimValueError:
        # TODO: Should fall back to reading targets from CFG
        # It cannot be concretized currently. Maybe we could handle
        # it later, maybe it just cannot be concretized
        return [ ]

    if len(successor_addrs) > 1:
        # multiple concrete targets
        if job.is_return_jump:
            # It might be caused by state merging
            # We may retrieve the correct ip from call stack
            successor.ip = job.call_stack.current_return_target
        else:
            # Delegate the multi-target case to the dedicated handler.
            return self._handle_successor_multitargets(job, successor, all_successors)

    # Now there should be one single target for the successor
    successor_addr = successor.solver.eval_one(successor.ip)

    # Get the fake ret successor
    # NOTE(review): assumes the fake-return successor is always the last entry of
    # all_successors for call jumpkinds -- confirm against the successor generator.
    fakeret_successor = None
    if self._is_call_jumpkind(jumpkind):
        fakeret_successor = all_successors[-1]

        # If the function we're calling into doesn't return, we should discard it
        if self._cfg is not None:
            func = self.kb.functions.function(addr=job.call_target)
            if func is not None and func.returning is False and len(all_successors) == 2:
                # Drop the fake-return successor in place; callers see the mutation.
                del all_successors[-1]
                fakeret_successor = None

    if self._is_call_jumpkind(jumpkind):
        # Create a new call stack for the successor
        new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
        if new_call_stack is None:
            l.debug("Cannot create a new callstack for address %#x", successor_addr)
            job.dbg_exit_status[successor] = ""
            return [ ]
        new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)

        new_function_key = FunctionKey.new(successor_addr, new_call_stack_suffix)
        # Save the initial state for the function
        self._save_function_initial_state(new_function_key, successor_addr, successor.copy())

        # bail out if we hit the interfunction_level cap
        if len(job.call_stack) >= self._interfunction_level:
            l.debug('We are not tracing into a new function %#08x as we hit interfunction_level limit', successor_addr)
            # mark it as skipped
            job.dbg_exit_status[successor] = "Skipped"
            job.call_skipped = True
            job.call_function_key = new_function_key
            job.call_task.skipped = True
            return [ ]

    elif jumpkind == 'Ijk_Ret':
        # Pop the current function out from the call stack
        new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
        if new_call_stack is None:
            l.debug("Cannot create a new callstack for address %#x", successor_addr)
            job.dbg_exit_status[successor] = ""
            return [ ]
        new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)

    else:
        # Neither a call nor a return: the call stack is unchanged.
        new_call_stack = job.call_stack
        new_call_stack_suffix = job.call_stack_suffix

    # Generate the new block ID
    new_block_id = BlockID.new(successor_addr, new_call_stack_suffix, jumpkind)

    #
    # Generate new VFG jobs
    #

    if jumpkind == "Ijk_Ret":
        assert not job.is_call_jump

        # Record this return
        self._return_target_sources[successor_addr].append(job.call_stack_suffix + (addr,))

        # Check if this return is inside our pending returns list
        if new_block_id in self._pending_returns:
            del self._pending_returns[new_block_id]

    # Check if we have reached a fix-point
    if jumpkind != 'Ijk_FakeRet' and \
            new_block_id in self._nodes:
        last_state = self._nodes[new_block_id].state
        # merge() returns a 3-tuple; the third element signals whether the
        # merge produced a state different from last_state.
        _, _, merged = last_state.merge(successor, plugin_whitelist=self._mergeable_plugins)

        if merged:
            l.debug("%s didn't reach a fix-point", new_block_id)
        else:
            # No change after merging: abstract interpretation has converged
            # for this block, so no further jobs are generated from it.
            l.debug("%s reaches a fix-point.", new_block_id)
            job.dbg_exit_status[successor] = "Merged due to reaching a fix-point"
            return [ ]

    new_jobs = self._create_new_jobs(job, successor, new_block_id, new_call_stack)

    return new_jobs
constant[
Process each successor generated by the job, and return a new list of succeeding jobs.
:param VFGJob job: The VFGJob instance.
:param SimState successor: The succeeding state.
:param list all_successors: A list of all successors.
:return: A list of newly created jobs from the successor.
:rtype: list
]
variable[addr] assign[=] name[job].addr
variable[jumpkind] assign[=] name[successor].history.jumpkind
if name[job].is_return_jump begin[:]
variable[ret_target] assign[=] name[job].call_stack.current_return_target
if compare[name[ret_target] is constant[None]] begin[:]
call[name[l].debug, parameter[constant[According to the call stack, we have nowhere to return to.]]]
return[list[[]]]
name[successor].ip assign[=] name[ret_target]
<ast.Try object at 0x7da20c6c6f50>
if compare[call[name[len], parameter[name[successor_addrs]]] greater[>] constant[1]] begin[:]
if name[job].is_return_jump begin[:]
name[successor].ip assign[=] name[job].call_stack.current_return_target
variable[successor_addr] assign[=] call[name[successor].solver.eval_one, parameter[name[successor].ip]]
variable[fakeret_successor] assign[=] constant[None]
if call[name[self]._is_call_jumpkind, parameter[name[jumpkind]]] begin[:]
variable[fakeret_successor] assign[=] call[name[all_successors]][<ast.UnaryOp object at 0x7da20c6c4fd0>]
if compare[name[self]._cfg is_not constant[None]] begin[:]
variable[func] assign[=] call[name[self].kb.functions.function, parameter[]]
if <ast.BoolOp object at 0x7da20c6c4130> begin[:]
<ast.Delete object at 0x7da20c6c45e0>
variable[fakeret_successor] assign[=] constant[None]
if call[name[self]._is_call_jumpkind, parameter[name[jumpkind]]] begin[:]
variable[new_call_stack] assign[=] call[name[self]._create_callstack, parameter[name[job], name[successor_addr], name[jumpkind], name[fakeret_successor]]]
if compare[name[new_call_stack] is constant[None]] begin[:]
call[name[l].debug, parameter[constant[Cannot create a new callstack for address %#x], name[successor_addr]]]
call[name[job].dbg_exit_status][name[successor]] assign[=] constant[]
return[list[[]]]
variable[new_call_stack_suffix] assign[=] call[name[new_call_stack].stack_suffix, parameter[name[self]._context_sensitivity_level]]
variable[new_function_key] assign[=] call[name[FunctionKey].new, parameter[name[successor_addr], name[new_call_stack_suffix]]]
call[name[self]._save_function_initial_state, parameter[name[new_function_key], name[successor_addr], call[name[successor].copy, parameter[]]]]
if compare[call[name[len], parameter[name[job].call_stack]] greater_or_equal[>=] name[self]._interfunction_level] begin[:]
call[name[l].debug, parameter[constant[We are not tracing into a new function %#08x as we hit interfunction_level limit], name[successor_addr]]]
call[name[job].dbg_exit_status][name[successor]] assign[=] constant[Skipped]
name[job].call_skipped assign[=] constant[True]
name[job].call_function_key assign[=] name[new_function_key]
name[job].call_task.skipped assign[=] constant[True]
return[list[[]]]
variable[new_block_id] assign[=] call[name[BlockID].new, parameter[name[successor_addr], name[new_call_stack_suffix], name[jumpkind]]]
if compare[name[jumpkind] equal[==] constant[Ijk_Ret]] begin[:]
assert[<ast.UnaryOp object at 0x7da20c7c8910>]
call[call[name[self]._return_target_sources][name[successor_addr]].append, parameter[binary_operation[name[job].call_stack_suffix + tuple[[<ast.Name object at 0x7da20c7caec0>]]]]]
if compare[name[new_block_id] in name[self]._pending_returns] begin[:]
<ast.Delete object at 0x7da20c7ca410>
if <ast.BoolOp object at 0x7da20c7c9810> begin[:]
variable[last_state] assign[=] call[name[self]._nodes][name[new_block_id]].state
<ast.Tuple object at 0x7da20c7ca5f0> assign[=] call[name[last_state].merge, parameter[name[successor]]]
if name[merged] begin[:]
call[name[l].debug, parameter[constant[%s didn't reach a fix-point], name[new_block_id]]]
variable[new_jobs] assign[=] call[name[self]._create_new_jobs, parameter[name[job], name[successor], name[new_block_id], name[new_call_stack]]]
return[name[new_jobs]] | keyword[def] identifier[_handle_successor] ( identifier[self] , identifier[job] , identifier[successor] , identifier[all_successors] ):
literal[string]
identifier[addr] = identifier[job] . identifier[addr]
identifier[jumpkind] = identifier[successor] . identifier[history] . identifier[jumpkind]
keyword[if] identifier[job] . identifier[is_return_jump] :
identifier[ret_target] = identifier[job] . identifier[call_stack] . identifier[current_return_target]
keyword[if] identifier[ret_target] keyword[is] keyword[None] :
identifier[l] . identifier[debug] ( literal[string] )
keyword[return] []
identifier[successor] . identifier[ip] = identifier[ret_target]
keyword[try] :
identifier[successor_addrs] = identifier[successor] . identifier[solver] . identifier[eval_upto] ( identifier[successor] . identifier[ip] , literal[int] )
keyword[except] identifier[SimValueError] :
keyword[return] []
keyword[if] identifier[len] ( identifier[successor_addrs] )> literal[int] :
keyword[if] identifier[job] . identifier[is_return_jump] :
identifier[successor] . identifier[ip] = identifier[job] . identifier[call_stack] . identifier[current_return_target]
keyword[else] :
keyword[return] identifier[self] . identifier[_handle_successor_multitargets] ( identifier[job] , identifier[successor] , identifier[all_successors] )
identifier[successor_addr] = identifier[successor] . identifier[solver] . identifier[eval_one] ( identifier[successor] . identifier[ip] )
identifier[fakeret_successor] = keyword[None]
keyword[if] identifier[self] . identifier[_is_call_jumpkind] ( identifier[jumpkind] ):
identifier[fakeret_successor] = identifier[all_successors] [- literal[int] ]
keyword[if] identifier[self] . identifier[_cfg] keyword[is] keyword[not] keyword[None] :
identifier[func] = identifier[self] . identifier[kb] . identifier[functions] . identifier[function] ( identifier[addr] = identifier[job] . identifier[call_target] )
keyword[if] identifier[func] keyword[is] keyword[not] keyword[None] keyword[and] identifier[func] . identifier[returning] keyword[is] keyword[False] keyword[and] identifier[len] ( identifier[all_successors] )== literal[int] :
keyword[del] identifier[all_successors] [- literal[int] ]
identifier[fakeret_successor] = keyword[None]
keyword[if] identifier[self] . identifier[_is_call_jumpkind] ( identifier[jumpkind] ):
identifier[new_call_stack] = identifier[self] . identifier[_create_callstack] ( identifier[job] , identifier[successor_addr] , identifier[jumpkind] , identifier[fakeret_successor] )
keyword[if] identifier[new_call_stack] keyword[is] keyword[None] :
identifier[l] . identifier[debug] ( literal[string] , identifier[successor_addr] )
identifier[job] . identifier[dbg_exit_status] [ identifier[successor] ]= literal[string]
keyword[return] []
identifier[new_call_stack_suffix] = identifier[new_call_stack] . identifier[stack_suffix] ( identifier[self] . identifier[_context_sensitivity_level] )
identifier[new_function_key] = identifier[FunctionKey] . identifier[new] ( identifier[successor_addr] , identifier[new_call_stack_suffix] )
identifier[self] . identifier[_save_function_initial_state] ( identifier[new_function_key] , identifier[successor_addr] , identifier[successor] . identifier[copy] ())
keyword[if] identifier[len] ( identifier[job] . identifier[call_stack] )>= identifier[self] . identifier[_interfunction_level] :
identifier[l] . identifier[debug] ( literal[string] , identifier[successor_addr] )
identifier[job] . identifier[dbg_exit_status] [ identifier[successor] ]= literal[string]
identifier[job] . identifier[call_skipped] = keyword[True]
identifier[job] . identifier[call_function_key] = identifier[new_function_key]
identifier[job] . identifier[call_task] . identifier[skipped] = keyword[True]
keyword[return] []
keyword[elif] identifier[jumpkind] == literal[string] :
identifier[new_call_stack] = identifier[self] . identifier[_create_callstack] ( identifier[job] , identifier[successor_addr] , identifier[jumpkind] , identifier[fakeret_successor] )
keyword[if] identifier[new_call_stack] keyword[is] keyword[None] :
identifier[l] . identifier[debug] ( literal[string] , identifier[successor_addr] )
identifier[job] . identifier[dbg_exit_status] [ identifier[successor] ]= literal[string]
keyword[return] []
identifier[new_call_stack_suffix] = identifier[new_call_stack] . identifier[stack_suffix] ( identifier[self] . identifier[_context_sensitivity_level] )
keyword[else] :
identifier[new_call_stack] = identifier[job] . identifier[call_stack]
identifier[new_call_stack_suffix] = identifier[job] . identifier[call_stack_suffix]
identifier[new_block_id] = identifier[BlockID] . identifier[new] ( identifier[successor_addr] , identifier[new_call_stack_suffix] , identifier[jumpkind] )
keyword[if] identifier[jumpkind] == literal[string] :
keyword[assert] keyword[not] identifier[job] . identifier[is_call_jump]
identifier[self] . identifier[_return_target_sources] [ identifier[successor_addr] ]. identifier[append] ( identifier[job] . identifier[call_stack_suffix] +( identifier[addr] ,))
keyword[if] identifier[new_block_id] keyword[in] identifier[self] . identifier[_pending_returns] :
keyword[del] identifier[self] . identifier[_pending_returns] [ identifier[new_block_id] ]
keyword[if] identifier[jumpkind] != literal[string] keyword[and] identifier[new_block_id] keyword[in] identifier[self] . identifier[_nodes] :
identifier[last_state] = identifier[self] . identifier[_nodes] [ identifier[new_block_id] ]. identifier[state]
identifier[_] , identifier[_] , identifier[merged] = identifier[last_state] . identifier[merge] ( identifier[successor] , identifier[plugin_whitelist] = identifier[self] . identifier[_mergeable_plugins] )
keyword[if] identifier[merged] :
identifier[l] . identifier[debug] ( literal[string] , identifier[new_block_id] )
keyword[else] :
identifier[l] . identifier[debug] ( literal[string] , identifier[new_block_id] )
identifier[job] . identifier[dbg_exit_status] [ identifier[successor] ]= literal[string]
keyword[return] []
identifier[new_jobs] = identifier[self] . identifier[_create_new_jobs] ( identifier[job] , identifier[successor] , identifier[new_block_id] , identifier[new_call_stack] )
keyword[return] identifier[new_jobs] | def _handle_successor(self, job, successor, all_successors):
"""
Process each successor generated by the job, and return a new list of succeeding jobs.
:param VFGJob job: The VFGJob instance.
:param SimState successor: The succeeding state.
:param list all_successors: A list of all successors.
:return: A list of newly created jobs from the successor.
:rtype: list
"""
# Initialize parameters
addr = job.addr
jumpkind = successor.history.jumpkind
#
# Get instruction pointer
#
if job.is_return_jump:
ret_target = job.call_stack.current_return_target
if ret_target is None:
# We have no where to go according to our call stack. However, the callstack might be corrupted
l.debug('According to the call stack, we have nowhere to return to.')
return [] # depends on [control=['if'], data=[]]
successor.ip = ret_target # depends on [control=['if'], data=[]]
# this try-except block is to handle cases where the instruction pointer is symbolic
try:
successor_addrs = successor.solver.eval_upto(successor.ip, 2) # depends on [control=['try'], data=[]]
except SimValueError:
# TODO: Should fall back to reading targets from CFG
# It cannot be concretized currently. Maybe we could handle
# it later, maybe it just cannot be concretized
return [] # depends on [control=['except'], data=[]]
if len(successor_addrs) > 1:
# multiple concrete targets
if job.is_return_jump:
# It might be caused by state merging
# We may retrieve the correct ip from call stack
successor.ip = job.call_stack.current_return_target # depends on [control=['if'], data=[]]
else:
return self._handle_successor_multitargets(job, successor, all_successors) # depends on [control=['if'], data=[]]
# Now there should be one single target for the successor
successor_addr = successor.solver.eval_one(successor.ip)
# Get the fake ret successor
fakeret_successor = None
if self._is_call_jumpkind(jumpkind):
fakeret_successor = all_successors[-1]
# If the function we're calling into doesn't return, we should discard it
if self._cfg is not None:
func = self.kb.functions.function(addr=job.call_target)
if func is not None and func.returning is False and (len(all_successors) == 2):
del all_successors[-1]
fakeret_successor = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self._is_call_jumpkind(jumpkind):
# Create a new call stack for the successor
new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
if new_call_stack is None:
l.debug('Cannot create a new callstack for address %#x', successor_addr)
job.dbg_exit_status[successor] = ''
return [] # depends on [control=['if'], data=[]]
new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
new_function_key = FunctionKey.new(successor_addr, new_call_stack_suffix)
# Save the initial state for the function
self._save_function_initial_state(new_function_key, successor_addr, successor.copy())
# bail out if we hit the interfunction_level cap
if len(job.call_stack) >= self._interfunction_level:
l.debug('We are not tracing into a new function %#08x as we hit interfunction_level limit', successor_addr)
# mark it as skipped
job.dbg_exit_status[successor] = 'Skipped'
job.call_skipped = True
job.call_function_key = new_function_key
job.call_task.skipped = True
return [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif jumpkind == 'Ijk_Ret':
# Pop the current function out from the call stack
new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
if new_call_stack is None:
l.debug('Cannot create a new callstack for address %#x', successor_addr)
job.dbg_exit_status[successor] = ''
return [] # depends on [control=['if'], data=[]]
new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level) # depends on [control=['if'], data=['jumpkind']]
else:
new_call_stack = job.call_stack
new_call_stack_suffix = job.call_stack_suffix
# Generate the new block ID
new_block_id = BlockID.new(successor_addr, new_call_stack_suffix, jumpkind)
#
# Generate new VFG jobs
#
if jumpkind == 'Ijk_Ret':
assert not job.is_call_jump
# Record this return
self._return_target_sources[successor_addr].append(job.call_stack_suffix + (addr,))
# Check if this return is inside our pending returns list
if new_block_id in self._pending_returns:
del self._pending_returns[new_block_id] # depends on [control=['if'], data=['new_block_id']] # depends on [control=['if'], data=[]]
# Check if we have reached a fix-point
if jumpkind != 'Ijk_FakeRet' and new_block_id in self._nodes:
last_state = self._nodes[new_block_id].state
(_, _, merged) = last_state.merge(successor, plugin_whitelist=self._mergeable_plugins)
if merged:
l.debug("%s didn't reach a fix-point", new_block_id) # depends on [control=['if'], data=[]]
else:
l.debug('%s reaches a fix-point.', new_block_id)
job.dbg_exit_status[successor] = 'Merged due to reaching a fix-point'
return [] # depends on [control=['if'], data=[]]
new_jobs = self._create_new_jobs(job, successor, new_block_id, new_call_stack)
return new_jobs |
def depth_january_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_january_average_ground_temperature`

    Args:
        value (float): value for IDD Field `depth_january_average_ground_temperature`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    converted = value
    # A missing value (None) is stored as-is; anything else must coerce to float.
    if converted is not None:
        try:
            converted = float(converted)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_january_average_ground_temperature`'.format(value))
    self._depth_january_average_ground_temperature = converted
constant[Corresponds to IDD Field `depth_january_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_january_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da20c6c7b80>
name[self]._depth_january_average_ground_temperature assign[=] name[value] | keyword[def] identifier[depth_january_average_ground_temperature] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[self] . identifier[_depth_january_average_ground_temperature] = identifier[value] | def depth_january_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_january_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_january_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_january_average_ground_temperature`'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']]
self._depth_january_average_ground_temperature = value |
def unhook_symbol(self, symbol_name):
    """
    Remove the hook on a symbol.

    This function will fail if the symbol is provided by the extern object, as that would result in a state where
    analysis would be unable to cope with a call to this symbol.
    """
    symbol = self.loader.find_symbol(symbol_name)
    if symbol is None:
        l.warning("Could not find symbol %s", symbol_name)
        return False
    # Hooks on externally-provided symbols must stay in place; refuse to remove them.
    if symbol.owner is self.loader._extern_object:
        l.warning("Refusing to unhook external symbol %s, replace it with another hook if you want to change it",
                  symbol_name)
        return False

    addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=symbol.rebased_addr)
    self.unhook(addr)
    return True
constant[
Remove the hook on a symbol.
This function will fail if the symbol is provided by the extern object, as that would result in a state where
analysis would be unable to cope with a call to this symbol.
]
variable[sym] assign[=] call[name[self].loader.find_symbol, parameter[name[symbol_name]]]
if compare[name[sym] is constant[None]] begin[:]
call[name[l].warning, parameter[constant[Could not find symbol %s], name[symbol_name]]]
return[constant[False]]
if compare[name[sym].owner is name[self].loader._extern_object] begin[:]
call[name[l].warning, parameter[constant[Refusing to unhook external symbol %s, replace it with another hook if you want to change it], name[symbol_name]]]
return[constant[False]]
<ast.Tuple object at 0x7da18ede7b50> assign[=] call[name[self].simos.prepare_function_symbol, parameter[name[symbol_name]]]
call[name[self].unhook, parameter[name[hook_addr]]]
return[constant[True]] | keyword[def] identifier[unhook_symbol] ( identifier[self] , identifier[symbol_name] ):
literal[string]
identifier[sym] = identifier[self] . identifier[loader] . identifier[find_symbol] ( identifier[symbol_name] )
keyword[if] identifier[sym] keyword[is] keyword[None] :
identifier[l] . identifier[warning] ( literal[string] , identifier[symbol_name] )
keyword[return] keyword[False]
keyword[if] identifier[sym] . identifier[owner] keyword[is] identifier[self] . identifier[loader] . identifier[_extern_object] :
identifier[l] . identifier[warning] ( literal[string] ,
identifier[symbol_name] )
keyword[return] keyword[False]
identifier[hook_addr] , identifier[_] = identifier[self] . identifier[simos] . identifier[prepare_function_symbol] ( identifier[symbol_name] , identifier[basic_addr] = identifier[sym] . identifier[rebased_addr] )
identifier[self] . identifier[unhook] ( identifier[hook_addr] )
keyword[return] keyword[True] | def unhook_symbol(self, symbol_name):
"""
Remove the hook on a symbol.
This function will fail if the symbol is provided by the extern object, as that would result in a state where
analysis would be unable to cope with a call to this symbol.
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning('Could not find symbol %s', symbol_name)
return False # depends on [control=['if'], data=[]]
if sym.owner is self.loader._extern_object:
l.warning('Refusing to unhook external symbol %s, replace it with another hook if you want to change it', symbol_name)
return False # depends on [control=['if'], data=[]]
(hook_addr, _) = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
self.unhook(hook_addr)
return True |
def create_job(cpu_width, time_height):
    """
    Build a JobBlock that runs the stress command when executed.

    :param cpu_width: number of cpus
    :param time_height: amount of time
    :return: the instantiated JobBlock object
    """
    # NOTE(review): shell=True with a formatted command string is shell-injection
    # prone if cpu_width/time_height ever come from untrusted input -- verify callers.
    command = stress_string.format(cpu_width, time_height)
    new_job = JobBlock(cpu_width, time_height)
    new_job.set_job(subprocess.call, command, shell=True)
    return new_job
return job | def function[create_job, parameter[cpu_width, time_height]]:
constant[
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
]
variable[shell_command] assign[=] call[name[stress_string].format, parameter[name[cpu_width], name[time_height]]]
variable[job] assign[=] call[name[JobBlock], parameter[name[cpu_width], name[time_height]]]
call[name[job].set_job, parameter[name[subprocess].call, name[shell_command]]]
return[name[job]] | keyword[def] identifier[create_job] ( identifier[cpu_width] , identifier[time_height] ):
literal[string]
identifier[shell_command] = identifier[stress_string] . identifier[format] ( identifier[cpu_width] , identifier[time_height] )
identifier[job] = identifier[JobBlock] ( identifier[cpu_width] , identifier[time_height] )
identifier[job] . identifier[set_job] ( identifier[subprocess] . identifier[call] , identifier[shell_command] , identifier[shell] = keyword[True] )
keyword[return] identifier[job] | def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, shell_command, shell=True)
return job |
def download_file(url, filename=None, show_progress=draw_pbar):
    '''
    Download a file and show progress

    url: the URL of the file to download
    filename: the filename to download it to (if not given, uses the url's filename part)
    show_progress: callback function to update a progress bar

    the show_progress function shall take two parameters: `seen` and `size`, and
    return nothing.

    This function returns the filename it has written the result to.
    '''
    if filename is None:
        # Fall back to the last path component of the URL.
        filename = url.split('/')[-1]
    r = requests.get(url, stream=True)
    # NOTE(review): raises KeyError if the server omits Content-Length -- confirm
    # whether callers rely on that, or whether a fallback size of 0 is preferable.
    size = int(r.headers['Content-Length'].strip())

    seen = 0
    show_progress(seen, size)
    with open(filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            # Keep-alive chunks arrive as empty bytes; skip them entirely so
            # they neither get written nor counted toward progress.
            if chunk:
                f.write(chunk)
                # Count the actual bytes received (the final chunk is usually
                # shorter than 1024); the old fixed +1024 step overshot `size`.
                seen += len(chunk)
                show_progress(seen, size)
    return filename
constant[
Download a file and show progress
url: the URL of the file to download
filename: the filename to download it to (if not given, uses the url's filename part)
show_progress: callback function to update a progress bar
the show_progress function shall take two parameters: `seen` and `size`, and
return nothing.
This function returns the filename it has written the result to.
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[call[name[url].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b0a37490>]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
variable[size] assign[=] call[name[int], parameter[call[call[name[r].headers][constant[Content-Length]].strip, parameter[]]]]
variable[seen] assign[=] constant[0]
call[name[show_progress], parameter[constant[0], name[size]]]
variable[seen] assign[=] constant[1024]
with call[name[open], parameter[name[filename], constant[wb]]] begin[:]
for taget[name[chunk]] in starred[call[name[r].iter_content, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b0a36a70>
call[name[show_progress], parameter[name[seen], name[size]]]
if name[chunk] begin[:]
call[name[f].write, parameter[name[chunk]]]
call[name[f].flush, parameter[]]
return[name[filename]] | keyword[def] identifier[download_file] ( identifier[url] , identifier[filename] = keyword[None] , identifier[show_progress] = identifier[draw_pbar] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[url] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[stream] = keyword[True] )
identifier[size] = identifier[int] ( identifier[r] . identifier[headers] [ literal[string] ]. identifier[strip] ())
identifier[seen] = literal[int]
identifier[show_progress] ( literal[int] , identifier[size] )
identifier[seen] = literal[int]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[chunk] keyword[in] identifier[r] . identifier[iter_content] ( identifier[chunk_size] = literal[int] ):
identifier[seen] += literal[int]
identifier[show_progress] ( identifier[seen] , identifier[size] )
keyword[if] identifier[chunk] :
identifier[f] . identifier[write] ( identifier[chunk] )
identifier[f] . identifier[flush] ()
keyword[return] identifier[filename] | def download_file(url, filename=None, show_progress=draw_pbar):
"""
Download a file and show progress
url: the URL of the file to download
filename: the filename to download it to (if not given, uses the url's filename part)
show_progress: callback function to update a progress bar
the show_progress function shall take two parameters: `seen` and `size`, and
return nothing.
This function returns the filename it has written the result to.
"""
if filename is None:
filename = url.split('/')[-1] # depends on [control=['if'], data=['filename']]
r = requests.get(url, stream=True)
size = int(r.headers['Content-Length'].strip())
seen = 0
show_progress(0, size)
seen = 1024
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
seen += 1024
show_progress(seen, size)
if chunk:
f.write(chunk)
f.flush() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['f']]
return filename |
def get_parameters(self):
"""gets from all wrapped processors"""
d = {}
for p in self.processors:
parameter_names = list(p.PARAMETERS.keys())
parameter_values = [getattr(p, n) for n in parameter_names]
d.update(dict(zip(parameter_names, parameter_values)))
return d | def function[get_parameters, parameter[self]]:
constant[gets from all wrapped processors]
variable[d] assign[=] dictionary[[], []]
for taget[name[p]] in starred[name[self].processors] begin[:]
variable[parameter_names] assign[=] call[name[list], parameter[call[name[p].PARAMETERS.keys, parameter[]]]]
variable[parameter_values] assign[=] <ast.ListComp object at 0x7da18bcc9f30>
call[name[d].update, parameter[call[name[dict], parameter[call[name[zip], parameter[name[parameter_names], name[parameter_values]]]]]]]
return[name[d]] | keyword[def] identifier[get_parameters] ( identifier[self] ):
literal[string]
identifier[d] ={}
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[processors] :
identifier[parameter_names] = identifier[list] ( identifier[p] . identifier[PARAMETERS] . identifier[keys] ())
identifier[parameter_values] =[ identifier[getattr] ( identifier[p] , identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[parameter_names] ]
identifier[d] . identifier[update] ( identifier[dict] ( identifier[zip] ( identifier[parameter_names] , identifier[parameter_values] )))
keyword[return] identifier[d] | def get_parameters(self):
"""gets from all wrapped processors"""
d = {}
for p in self.processors:
parameter_names = list(p.PARAMETERS.keys())
parameter_values = [getattr(p, n) for n in parameter_names]
d.update(dict(zip(parameter_names, parameter_values))) # depends on [control=['for'], data=['p']]
return d |
def to_mllp(self, encoding_chars=None, trailing_children=False):
"""
Returns the er7 representation of the message wrapped with mllp encoding characters
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is ``None``
:return: the ER7-encoded string wrapped with the mllp encoding characters
"""
if encoding_chars is None:
encoding_chars = self.encoding_chars
return "{0}{1}{2}{3}{2}".format(MLLP_ENCODING_CHARS.SB,
self.to_er7(encoding_chars, trailing_children),
MLLP_ENCODING_CHARS.CR,
MLLP_ENCODING_CHARS.EB) | def function[to_mllp, parameter[self, encoding_chars, trailing_children]]:
constant[
Returns the er7 representation of the message wrapped with mllp encoding characters
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is ``None``
:return: the ER7-encoded string wrapped with the mllp encoding characters
]
if compare[name[encoding_chars] is constant[None]] begin[:]
variable[encoding_chars] assign[=] name[self].encoding_chars
return[call[constant[{0}{1}{2}{3}{2}].format, parameter[name[MLLP_ENCODING_CHARS].SB, call[name[self].to_er7, parameter[name[encoding_chars], name[trailing_children]]], name[MLLP_ENCODING_CHARS].CR, name[MLLP_ENCODING_CHARS].EB]]] | keyword[def] identifier[to_mllp] ( identifier[self] , identifier[encoding_chars] = keyword[None] , identifier[trailing_children] = keyword[False] ):
literal[string]
keyword[if] identifier[encoding_chars] keyword[is] keyword[None] :
identifier[encoding_chars] = identifier[self] . identifier[encoding_chars]
keyword[return] literal[string] . identifier[format] ( identifier[MLLP_ENCODING_CHARS] . identifier[SB] ,
identifier[self] . identifier[to_er7] ( identifier[encoding_chars] , identifier[trailing_children] ),
identifier[MLLP_ENCODING_CHARS] . identifier[CR] ,
identifier[MLLP_ENCODING_CHARS] . identifier[EB] ) | def to_mllp(self, encoding_chars=None, trailing_children=False):
"""
Returns the er7 representation of the message wrapped with mllp encoding characters
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is ``None``
:return: the ER7-encoded string wrapped with the mllp encoding characters
"""
if encoding_chars is None:
encoding_chars = self.encoding_chars # depends on [control=['if'], data=['encoding_chars']]
return '{0}{1}{2}{3}{2}'.format(MLLP_ENCODING_CHARS.SB, self.to_er7(encoding_chars, trailing_children), MLLP_ENCODING_CHARS.CR, MLLP_ENCODING_CHARS.EB) |
def submit_jobs(root_dir, jobs, sgeargs=None):
""" Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
"""
waiting = list(jobs) # List of jobs still to be done
# Loop over the list of pending jobs, while there still are any
while len(waiting) > 0:
# extract submittable jobs
submittable = extract_submittable_jobs(waiting)
# run those jobs
submit_safe_jobs(root_dir, submittable, sgeargs)
# remove those from the waiting list
for job in submittable:
waiting.remove(job) | def function[submit_jobs, parameter[root_dir, jobs, sgeargs]]:
constant[ Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
]
variable[waiting] assign[=] call[name[list], parameter[name[jobs]]]
while compare[call[name[len], parameter[name[waiting]]] greater[>] constant[0]] begin[:]
variable[submittable] assign[=] call[name[extract_submittable_jobs], parameter[name[waiting]]]
call[name[submit_safe_jobs], parameter[name[root_dir], name[submittable], name[sgeargs]]]
for taget[name[job]] in starred[name[submittable]] begin[:]
call[name[waiting].remove, parameter[name[job]]] | keyword[def] identifier[submit_jobs] ( identifier[root_dir] , identifier[jobs] , identifier[sgeargs] = keyword[None] ):
literal[string]
identifier[waiting] = identifier[list] ( identifier[jobs] )
keyword[while] identifier[len] ( identifier[waiting] )> literal[int] :
identifier[submittable] = identifier[extract_submittable_jobs] ( identifier[waiting] )
identifier[submit_safe_jobs] ( identifier[root_dir] , identifier[submittable] , identifier[sgeargs] )
keyword[for] identifier[job] keyword[in] identifier[submittable] :
identifier[waiting] . identifier[remove] ( identifier[job] ) | def submit_jobs(root_dir, jobs, sgeargs=None):
""" Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
"""
waiting = list(jobs) # List of jobs still to be done
# Loop over the list of pending jobs, while there still are any
while len(waiting) > 0:
# extract submittable jobs
submittable = extract_submittable_jobs(waiting)
# run those jobs
submit_safe_jobs(root_dir, submittable, sgeargs)
# remove those from the waiting list
for job in submittable:
waiting.remove(job) # depends on [control=['for'], data=['job']] # depends on [control=['while'], data=[]] |
def are_done(self, keys):
'''
Return a list of boolean values corresponding to whether or not each
key in keys is marked done. This method can be faster than
individually checking each key, depending on how many keys you
want to check.
:param keys: a list of json-serializable keys
'''
# No keys are done b/c the file does not even exist yet.
if not os.path.exists(self.path):
return [False] * len(keys)
done_lines = set([self._done_line(key) for key in keys])
undone_lines = set([self._undone_line(key) for key in keys])
status = {}
with open(self.path) as fh:
for line in fh:
if line in done_lines:
# extract serialized key
status[line[5:-1]] = True
elif line in undone_lines:
status[line[5:-1]] = False
serialized_keys = [self._serialize(key) for key in keys]
return [status.get(sk, False) for sk in serialized_keys] | def function[are_done, parameter[self, keys]]:
constant[
Return a list of boolean values corresponding to whether or not each
key in keys is marked done. This method can be faster than
individually checking each key, depending on how many keys you
want to check.
:param keys: a list of json-serializable keys
]
if <ast.UnaryOp object at 0x7da20c7c8040> begin[:]
return[binary_operation[list[[<ast.Constant object at 0x7da20c7c9960>]] * call[name[len], parameter[name[keys]]]]]
variable[done_lines] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20c7cb8b0>]]
variable[undone_lines] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20c7c89a0>]]
variable[status] assign[=] dictionary[[], []]
with call[name[open], parameter[name[self].path]] begin[:]
for taget[name[line]] in starred[name[fh]] begin[:]
if compare[name[line] in name[done_lines]] begin[:]
call[name[status]][call[name[line]][<ast.Slice object at 0x7da20e956bf0>]] assign[=] constant[True]
variable[serialized_keys] assign[=] <ast.ListComp object at 0x7da20e957580>
return[<ast.ListComp object at 0x7da20e955780>] | keyword[def] identifier[are_done] ( identifier[self] , identifier[keys] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[path] ):
keyword[return] [ keyword[False] ]* identifier[len] ( identifier[keys] )
identifier[done_lines] = identifier[set] ([ identifier[self] . identifier[_done_line] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[keys] ])
identifier[undone_lines] = identifier[set] ([ identifier[self] . identifier[_undone_line] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[keys] ])
identifier[status] ={}
keyword[with] identifier[open] ( identifier[self] . identifier[path] ) keyword[as] identifier[fh] :
keyword[for] identifier[line] keyword[in] identifier[fh] :
keyword[if] identifier[line] keyword[in] identifier[done_lines] :
identifier[status] [ identifier[line] [ literal[int] :- literal[int] ]]= keyword[True]
keyword[elif] identifier[line] keyword[in] identifier[undone_lines] :
identifier[status] [ identifier[line] [ literal[int] :- literal[int] ]]= keyword[False]
identifier[serialized_keys] =[ identifier[self] . identifier[_serialize] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[keys] ]
keyword[return] [ identifier[status] . identifier[get] ( identifier[sk] , keyword[False] ) keyword[for] identifier[sk] keyword[in] identifier[serialized_keys] ] | def are_done(self, keys):
"""
Return a list of boolean values corresponding to whether or not each
key in keys is marked done. This method can be faster than
individually checking each key, depending on how many keys you
want to check.
:param keys: a list of json-serializable keys
"""
# No keys are done b/c the file does not even exist yet.
if not os.path.exists(self.path):
return [False] * len(keys) # depends on [control=['if'], data=[]]
done_lines = set([self._done_line(key) for key in keys])
undone_lines = set([self._undone_line(key) for key in keys])
status = {}
with open(self.path) as fh:
for line in fh:
if line in done_lines:
# extract serialized key
status[line[5:-1]] = True # depends on [control=['if'], data=['line']]
elif line in undone_lines:
status[line[5:-1]] = False # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fh']]
serialized_keys = [self._serialize(key) for key in keys]
return [status.get(sk, False) for sk in serialized_keys] |
def createLrrBafPlot(raw_dir, problematic_samples, format, dpi, out_prefix):
"""Creates the LRR and BAF plot.
:param raw_dir: the directory containing the intensities.
:param problematic_samples: the file containing the problematic samples.
:param format: the format of the plot.
:param dpi: the DPI of the resulting images.
:param out_prefix: the prefix of the output file.
:type raw_dir: str
:type problematic_samples: str
:type format: str
:type out_prefix: str
Creates the LRR (Log R Ratio) and BAF (B Allele Frequency) of the
problematic samples using the :py:mod:`pyGenClean.SexCheck.baf_lrr_plot`
module.
"""
# First, we create an output directory
dir_name = out_prefix + ".LRR_BAF"
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
# The options
baf_lrr_plot_options = ["--problematic-samples", problematic_samples,
"--raw-dir", raw_dir, "--format", format,
"--dpi", str(dpi),
"--out", os.path.join(dir_name, "baf_lrr")]
try:
baf_lrr_plot.main(baf_lrr_plot_options)
except baf_lrr_plot.ProgramError as e:
msg = "BAF LRR plot: {}".format(e)
raise ProgramError(msg) | def function[createLrrBafPlot, parameter[raw_dir, problematic_samples, format, dpi, out_prefix]]:
constant[Creates the LRR and BAF plot.
:param raw_dir: the directory containing the intensities.
:param problematic_samples: the file containing the problematic samples.
:param format: the format of the plot.
:param dpi: the DPI of the resulting images.
:param out_prefix: the prefix of the output file.
:type raw_dir: str
:type problematic_samples: str
:type format: str
:type out_prefix: str
Creates the LRR (Log R Ratio) and BAF (B Allele Frequency) of the
problematic samples using the :py:mod:`pyGenClean.SexCheck.baf_lrr_plot`
module.
]
variable[dir_name] assign[=] binary_operation[name[out_prefix] + constant[.LRR_BAF]]
if <ast.UnaryOp object at 0x7da1b0a42d70> begin[:]
call[name[os].mkdir, parameter[name[dir_name]]]
variable[baf_lrr_plot_options] assign[=] list[[<ast.Constant object at 0x7da1b0a436d0>, <ast.Name object at 0x7da1b0a439a0>, <ast.Constant object at 0x7da1b0a43940>, <ast.Name object at 0x7da1b0a420e0>, <ast.Constant object at 0x7da1b0a422c0>, <ast.Name object at 0x7da1b0a40340>, <ast.Constant object at 0x7da1b0a43c70>, <ast.Call object at 0x7da1b0a42290>, <ast.Constant object at 0x7da1b0a42f50>, <ast.Call object at 0x7da1b0a42260>]]
<ast.Try object at 0x7da1b0a41720> | keyword[def] identifier[createLrrBafPlot] ( identifier[raw_dir] , identifier[problematic_samples] , identifier[format] , identifier[dpi] , identifier[out_prefix] ):
literal[string]
identifier[dir_name] = identifier[out_prefix] + literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[dir_name] ):
identifier[os] . identifier[mkdir] ( identifier[dir_name] )
identifier[baf_lrr_plot_options] =[ literal[string] , identifier[problematic_samples] ,
literal[string] , identifier[raw_dir] , literal[string] , identifier[format] ,
literal[string] , identifier[str] ( identifier[dpi] ),
literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] , literal[string] )]
keyword[try] :
identifier[baf_lrr_plot] . identifier[main] ( identifier[baf_lrr_plot_options] )
keyword[except] identifier[baf_lrr_plot] . identifier[ProgramError] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[e] )
keyword[raise] identifier[ProgramError] ( identifier[msg] ) | def createLrrBafPlot(raw_dir, problematic_samples, format, dpi, out_prefix):
"""Creates the LRR and BAF plot.
:param raw_dir: the directory containing the intensities.
:param problematic_samples: the file containing the problematic samples.
:param format: the format of the plot.
:param dpi: the DPI of the resulting images.
:param out_prefix: the prefix of the output file.
:type raw_dir: str
:type problematic_samples: str
:type format: str
:type out_prefix: str
Creates the LRR (Log R Ratio) and BAF (B Allele Frequency) of the
problematic samples using the :py:mod:`pyGenClean.SexCheck.baf_lrr_plot`
module.
"""
# First, we create an output directory
dir_name = out_prefix + '.LRR_BAF'
if not os.path.isdir(dir_name):
os.mkdir(dir_name) # depends on [control=['if'], data=[]]
# The options
baf_lrr_plot_options = ['--problematic-samples', problematic_samples, '--raw-dir', raw_dir, '--format', format, '--dpi', str(dpi), '--out', os.path.join(dir_name, 'baf_lrr')]
try:
baf_lrr_plot.main(baf_lrr_plot_options) # depends on [control=['try'], data=[]]
except baf_lrr_plot.ProgramError as e:
msg = 'BAF LRR plot: {}'.format(e)
raise ProgramError(msg) # depends on [control=['except'], data=['e']] |
def delete_pool(hostname, username, password, name):
'''
Delete an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'delete', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
}
)
#is this pool currently configured?
existing = __salt__['bigip.list_pool'](hostname, username, password, name)
# if it exists by name
if existing['code'] == 200:
deleted = __salt__['bigip.delete_pool'](hostname, username, password, name)
# did we get rid of it?
if deleted['code'] == 200:
ret['result'] = True
ret['comment'] = 'Pool was successfully deleted.'
ret['changes']['old'] = existing['content']
ret['changes']['new'] = {}
# something bad happened
else:
ret = _load_result(deleted, ret)
# not found
elif existing['code'] == 404:
ret['result'] = True
ret['comment'] = 'This pool already does not exist. No changes made.'
ret['changes']['old'] = {}
ret['changes']['new'] = {}
else:
ret = _load_result(existing, ret)
return ret | def function[delete_pool, parameter[hostname, username, password, name]]:
constant[
Delete an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93550>, <ast.Constant object at 0x7da18fe93280>, <ast.Constant object at 0x7da18fe93160>, <ast.Constant object at 0x7da18fe913c0>], [<ast.Name object at 0x7da18fe910f0>, <ast.Dict object at 0x7da18fe909d0>, <ast.Constant object at 0x7da18fe91090>, <ast.Constant object at 0x7da18fe92b30>]]
if call[name[__opts__]][constant[test]] begin[:]
return[call[name[_test_output], parameter[name[ret], constant[delete]]]]
variable[existing] assign[=] call[call[name[__salt__]][constant[bigip.list_pool]], parameter[name[hostname], name[username], name[password], name[name]]]
if compare[call[name[existing]][constant[code]] equal[==] constant[200]] begin[:]
variable[deleted] assign[=] call[call[name[__salt__]][constant[bigip.delete_pool]], parameter[name[hostname], name[username], name[password], name[name]]]
if compare[call[name[deleted]][constant[code]] equal[==] constant[200]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[Pool was successfully deleted.]
call[call[name[ret]][constant[changes]]][constant[old]] assign[=] call[name[existing]][constant[content]]
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] dictionary[[], []]
return[name[ret]] | keyword[def] identifier[delete_pool] ( identifier[hostname] , identifier[username] , identifier[password] , identifier[name] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[False] , literal[string] : literal[string] }
keyword[if] identifier[__opts__] [ literal[string] ]:
keyword[return] identifier[_test_output] ( identifier[ret] , literal[string] , identifier[params] ={
literal[string] : identifier[hostname] ,
literal[string] : identifier[username] ,
literal[string] : identifier[password] ,
literal[string] : identifier[name] ,
}
)
identifier[existing] = identifier[__salt__] [ literal[string] ]( identifier[hostname] , identifier[username] , identifier[password] , identifier[name] )
keyword[if] identifier[existing] [ literal[string] ]== literal[int] :
identifier[deleted] = identifier[__salt__] [ literal[string] ]( identifier[hostname] , identifier[username] , identifier[password] , identifier[name] )
keyword[if] identifier[deleted] [ literal[string] ]== literal[int] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[existing] [ literal[string] ]
identifier[ret] [ literal[string] ][ literal[string] ]={}
keyword[else] :
identifier[ret] = identifier[_load_result] ( identifier[deleted] , identifier[ret] )
keyword[elif] identifier[existing] [ literal[string] ]== literal[int] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]={}
identifier[ret] [ literal[string] ][ literal[string] ]={}
keyword[else] :
identifier[ret] = identifier[_load_result] ( identifier[existing] , identifier[ret] )
keyword[return] identifier[ret] | def delete_pool(hostname, username, password, name):
"""
Delete an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'delete', params={'hostname': hostname, 'username': username, 'password': password, 'name': name}) # depends on [control=['if'], data=[]]
#is this pool currently configured?
existing = __salt__['bigip.list_pool'](hostname, username, password, name)
# if it exists by name
if existing['code'] == 200:
deleted = __salt__['bigip.delete_pool'](hostname, username, password, name)
# did we get rid of it?
if deleted['code'] == 200:
ret['result'] = True
ret['comment'] = 'Pool was successfully deleted.'
ret['changes']['old'] = existing['content']
ret['changes']['new'] = {} # depends on [control=['if'], data=[]]
else:
# something bad happened
ret = _load_result(deleted, ret) # depends on [control=['if'], data=[]]
# not found
elif existing['code'] == 404:
ret['result'] = True
ret['comment'] = 'This pool already does not exist. No changes made.'
ret['changes']['old'] = {}
ret['changes']['new'] = {} # depends on [control=['if'], data=[]]
else:
ret = _load_result(existing, ret)
return ret |
def disagg_outputs(value):
"""
Validate disaggregation outputs. For instance
>>> disagg_outputs('TRT Mag_Dist')
['TRT', 'Mag_Dist']
>>> disagg_outputs('TRT, Mag_Dist')
['TRT', 'Mag_Dist']
"""
values = value.replace(',', ' ').split()
for val in values:
if val not in disagg.pmf_map:
raise ValueError('Invalid disagg output: %s' % val)
return values | def function[disagg_outputs, parameter[value]]:
constant[
Validate disaggregation outputs. For instance
>>> disagg_outputs('TRT Mag_Dist')
['TRT', 'Mag_Dist']
>>> disagg_outputs('TRT, Mag_Dist')
['TRT', 'Mag_Dist']
]
variable[values] assign[=] call[call[name[value].replace, parameter[constant[,], constant[ ]]].split, parameter[]]
for taget[name[val]] in starred[name[values]] begin[:]
if compare[name[val] <ast.NotIn object at 0x7da2590d7190> name[disagg].pmf_map] begin[:]
<ast.Raise object at 0x7da18ede4820>
return[name[values]] | keyword[def] identifier[disagg_outputs] ( identifier[value] ):
literal[string]
identifier[values] = identifier[value] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ()
keyword[for] identifier[val] keyword[in] identifier[values] :
keyword[if] identifier[val] keyword[not] keyword[in] identifier[disagg] . identifier[pmf_map] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[val] )
keyword[return] identifier[values] | def disagg_outputs(value):
"""
Validate disaggregation outputs. For instance
>>> disagg_outputs('TRT Mag_Dist')
['TRT', 'Mag_Dist']
>>> disagg_outputs('TRT, Mag_Dist')
['TRT', 'Mag_Dist']
"""
values = value.replace(',', ' ').split()
for val in values:
if val not in disagg.pmf_map:
raise ValueError('Invalid disagg output: %s' % val) # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=['val']]
return values |
def validate_analysis_period(self):
"""Get a collection where the header analysis_period aligns with datetimes.
This means that checks for five criteria will be performed:
1) All datetimes in the data collection are in chronological orderstarting
from the analysis_period start hour to the end hour.
2) No duplicate datetimes exist in the data collection.
3) There are no datetimes that lie outside of the analysis_period time range.
4) There are no datetimes that do not align with the analysis_period timestep.
5) Datetimes for February 29th are excluded if is_leap_year is False on
the analysis_period.
Note that there is no need to run this check any time that a discontinous
data collection has been derived from a continuous one or when the
validated_a_period attribute of the collection is True. Furthermore, most
methods on this data collection will still run without a validated
analysis_period.
"""
a_per = self.header.analysis_period
n_ap = [a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month,
a_per.end_day, a_per.end_hour, a_per.timestep, a_per.is_leap_year]
# make sure that datetimes are all in chronological order.
sort_datetimes, sort_values = zip(*sorted(zip(self.datetimes, self.values)))
if not a_per.is_reversed and not a_per.is_annual:
if sort_datetimes[0].doy < a_per.st_time.doy:
n_ap[0] = sort_datetimes[0].month
n_ap[1] = sort_datetimes[0].day
if sort_datetimes[-1].doy > a_per.end_time.doy:
n_ap[3] = sort_datetimes[-1].month
n_ap[4] = sort_datetimes[-1].day
elif a_per.is_reversed:
last_ind = None
for i, date_t in enumerate(sort_datetimes):
last_ind = i if date_t.moy <= a_per.end_time.moy else last_ind
if last_ind is not None:
last_ind = last_ind + 1
sort_datetimes = sort_datetimes[last_ind:] + sort_datetimes[:last_ind]
sort_values = sort_values[last_ind:] + sort_values[:last_ind]
# If datetimes are outside the a_period range, just make it annual.
# There's no way to know what side of the analysis_period should be etended.
if sort_datetimes[0].doy > a_per.end_time.doy and \
sort_datetimes[0].doy < a_per.st_time.doy:
n_ap[0], n_ap[1], n_ap[3], n_ap[4] = 1, 1, 12, 31
sort_datetimes, sort_values = zip(*sorted(zip(
self.datetimes, self.values)))
# check that no hours lie outside of the analysis_period
if not a_per.is_annual:
if a_per.st_hour != 0:
for date_t in sort_datetimes:
n_ap[2] = date_t.hour if date_t.hour < n_ap[2] else n_ap[2]
if a_per.end_hour != 23:
for date_t in sort_datetimes:
n_ap[5] = date_t.hour if date_t.hour > n_ap[5] else n_ap[5]
# check that there are no duplicate datetimes.
for i in xrange(len(sort_datetimes)):
assert sort_datetimes[i] != sort_datetimes[i - 1], 'Duplicate datetime ' \
'was found in the collection: {}'.format(sort_datetimes[i])
# check that the analysis_period timestep is correct.
mins_per_step = int(60 / n_ap[6])
for date_t in sort_datetimes:
if date_t.moy % mins_per_step != 0:
i = 0
valid_steps = sorted(a_per.VALIDTIMESTEPS.keys())
while date_t.moy % mins_per_step != 0 and i < len(valid_steps):
mins_per_step = int(60 / valid_steps[i])
i += 1
n_ap[6] = int(60 / mins_per_step)
# check that the analysis_period leap_year is correct.
if a_per.is_leap_year is False:
for date_t in sort_datetimes:
if date_t.month == 2 and date_t.day == 29:
n_ap[7] = True
# build a validated collection.
new_ap = AnalysisPeriod(*n_ap)
new_header = self.header.duplicate()
new_header._analysis_period = new_ap
new_coll = HourlyDiscontinuousCollection(new_header, sort_values, sort_datetimes)
new_coll._validated_a_period = True
return new_coll | def function[validate_analysis_period, parameter[self]]:
constant[Get a collection where the header analysis_period aligns with datetimes.
This means that checks for five criteria will be performed:
1) All datetimes in the data collection are in chronological orderstarting
from the analysis_period start hour to the end hour.
2) No duplicate datetimes exist in the data collection.
3) There are no datetimes that lie outside of the analysis_period time range.
4) There are no datetimes that do not align with the analysis_period timestep.
5) Datetimes for February 29th are excluded if is_leap_year is False on
the analysis_period.
Note that there is no need to run this check any time that a discontinous
data collection has been derived from a continuous one or when the
validated_a_period attribute of the collection is True. Furthermore, most
methods on this data collection will still run without a validated
analysis_period.
]
variable[a_per] assign[=] name[self].header.analysis_period
variable[n_ap] assign[=] list[[<ast.Attribute object at 0x7da1b12a86a0>, <ast.Attribute object at 0x7da1b12a8eb0>, <ast.Attribute object at 0x7da1b12aa830>, <ast.Attribute object at 0x7da1b12a8520>, <ast.Attribute object at 0x7da1b12aa650>, <ast.Attribute object at 0x7da1b12a8850>, <ast.Attribute object at 0x7da1b12a91e0>, <ast.Attribute object at 0x7da1b12a8d30>]]
<ast.Tuple object at 0x7da1b12abdc0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b12a8640>]]
if <ast.BoolOp object at 0x7da1b12ab010> begin[:]
if compare[call[name[sort_datetimes]][constant[0]].doy less[<] name[a_per].st_time.doy] begin[:]
call[name[n_ap]][constant[0]] assign[=] call[name[sort_datetimes]][constant[0]].month
call[name[n_ap]][constant[1]] assign[=] call[name[sort_datetimes]][constant[0]].day
if compare[call[name[sort_datetimes]][<ast.UnaryOp object at 0x7da1b12ab7f0>].doy greater[>] name[a_per].end_time.doy] begin[:]
call[name[n_ap]][constant[3]] assign[=] call[name[sort_datetimes]][<ast.UnaryOp object at 0x7da1b12aba60>].month
call[name[n_ap]][constant[4]] assign[=] call[name[sort_datetimes]][<ast.UnaryOp object at 0x7da1b12ab700>].day
if <ast.UnaryOp object at 0x7da1b1266230> begin[:]
if compare[name[a_per].st_hour not_equal[!=] constant[0]] begin[:]
for taget[name[date_t]] in starred[name[sort_datetimes]] begin[:]
call[name[n_ap]][constant[2]] assign[=] <ast.IfExp object at 0x7da1b12655a0>
if compare[name[a_per].end_hour not_equal[!=] constant[23]] begin[:]
for taget[name[date_t]] in starred[name[sort_datetimes]] begin[:]
call[name[n_ap]][constant[5]] assign[=] <ast.IfExp object at 0x7da1b1267070>
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[sort_datetimes]]]]]] begin[:]
assert[compare[call[name[sort_datetimes]][name[i]] not_equal[!=] call[name[sort_datetimes]][binary_operation[name[i] - constant[1]]]]]
variable[mins_per_step] assign[=] call[name[int], parameter[binary_operation[constant[60] / call[name[n_ap]][constant[6]]]]]
for taget[name[date_t]] in starred[name[sort_datetimes]] begin[:]
if compare[binary_operation[name[date_t].moy <ast.Mod object at 0x7da2590d6920> name[mins_per_step]] not_equal[!=] constant[0]] begin[:]
variable[i] assign[=] constant[0]
variable[valid_steps] assign[=] call[name[sorted], parameter[call[name[a_per].VALIDTIMESTEPS.keys, parameter[]]]]
while <ast.BoolOp object at 0x7da1b12654e0> begin[:]
variable[mins_per_step] assign[=] call[name[int], parameter[binary_operation[constant[60] / call[name[valid_steps]][name[i]]]]]
<ast.AugAssign object at 0x7da1b12772b0>
call[name[n_ap]][constant[6]] assign[=] call[name[int], parameter[binary_operation[constant[60] / name[mins_per_step]]]]
if compare[name[a_per].is_leap_year is constant[False]] begin[:]
for taget[name[date_t]] in starred[name[sort_datetimes]] begin[:]
if <ast.BoolOp object at 0x7da1b12750c0> begin[:]
call[name[n_ap]][constant[7]] assign[=] constant[True]
variable[new_ap] assign[=] call[name[AnalysisPeriod], parameter[<ast.Starred object at 0x7da1b1275e70>]]
variable[new_header] assign[=] call[name[self].header.duplicate, parameter[]]
name[new_header]._analysis_period assign[=] name[new_ap]
variable[new_coll] assign[=] call[name[HourlyDiscontinuousCollection], parameter[name[new_header], name[sort_values], name[sort_datetimes]]]
name[new_coll]._validated_a_period assign[=] constant[True]
return[name[new_coll]] | keyword[def] identifier[validate_analysis_period] ( identifier[self] ):
literal[string]
identifier[a_per] = identifier[self] . identifier[header] . identifier[analysis_period]
identifier[n_ap] =[ identifier[a_per] . identifier[st_month] , identifier[a_per] . identifier[st_day] , identifier[a_per] . identifier[st_hour] , identifier[a_per] . identifier[end_month] ,
identifier[a_per] . identifier[end_day] , identifier[a_per] . identifier[end_hour] , identifier[a_per] . identifier[timestep] , identifier[a_per] . identifier[is_leap_year] ]
identifier[sort_datetimes] , identifier[sort_values] = identifier[zip] (* identifier[sorted] ( identifier[zip] ( identifier[self] . identifier[datetimes] , identifier[self] . identifier[values] )))
keyword[if] keyword[not] identifier[a_per] . identifier[is_reversed] keyword[and] keyword[not] identifier[a_per] . identifier[is_annual] :
keyword[if] identifier[sort_datetimes] [ literal[int] ]. identifier[doy] < identifier[a_per] . identifier[st_time] . identifier[doy] :
identifier[n_ap] [ literal[int] ]= identifier[sort_datetimes] [ literal[int] ]. identifier[month]
identifier[n_ap] [ literal[int] ]= identifier[sort_datetimes] [ literal[int] ]. identifier[day]
keyword[if] identifier[sort_datetimes] [- literal[int] ]. identifier[doy] > identifier[a_per] . identifier[end_time] . identifier[doy] :
identifier[n_ap] [ literal[int] ]= identifier[sort_datetimes] [- literal[int] ]. identifier[month]
identifier[n_ap] [ literal[int] ]= identifier[sort_datetimes] [- literal[int] ]. identifier[day]
keyword[elif] identifier[a_per] . identifier[is_reversed] :
identifier[last_ind] = keyword[None]
keyword[for] identifier[i] , identifier[date_t] keyword[in] identifier[enumerate] ( identifier[sort_datetimes] ):
identifier[last_ind] = identifier[i] keyword[if] identifier[date_t] . identifier[moy] <= identifier[a_per] . identifier[end_time] . identifier[moy] keyword[else] identifier[last_ind]
keyword[if] identifier[last_ind] keyword[is] keyword[not] keyword[None] :
identifier[last_ind] = identifier[last_ind] + literal[int]
identifier[sort_datetimes] = identifier[sort_datetimes] [ identifier[last_ind] :]+ identifier[sort_datetimes] [: identifier[last_ind] ]
identifier[sort_values] = identifier[sort_values] [ identifier[last_ind] :]+ identifier[sort_values] [: identifier[last_ind] ]
keyword[if] identifier[sort_datetimes] [ literal[int] ]. identifier[doy] > identifier[a_per] . identifier[end_time] . identifier[doy] keyword[and] identifier[sort_datetimes] [ literal[int] ]. identifier[doy] < identifier[a_per] . identifier[st_time] . identifier[doy] :
identifier[n_ap] [ literal[int] ], identifier[n_ap] [ literal[int] ], identifier[n_ap] [ literal[int] ], identifier[n_ap] [ literal[int] ]= literal[int] , literal[int] , literal[int] , literal[int]
identifier[sort_datetimes] , identifier[sort_values] = identifier[zip] (* identifier[sorted] ( identifier[zip] (
identifier[self] . identifier[datetimes] , identifier[self] . identifier[values] )))
keyword[if] keyword[not] identifier[a_per] . identifier[is_annual] :
keyword[if] identifier[a_per] . identifier[st_hour] != literal[int] :
keyword[for] identifier[date_t] keyword[in] identifier[sort_datetimes] :
identifier[n_ap] [ literal[int] ]= identifier[date_t] . identifier[hour] keyword[if] identifier[date_t] . identifier[hour] < identifier[n_ap] [ literal[int] ] keyword[else] identifier[n_ap] [ literal[int] ]
keyword[if] identifier[a_per] . identifier[end_hour] != literal[int] :
keyword[for] identifier[date_t] keyword[in] identifier[sort_datetimes] :
identifier[n_ap] [ literal[int] ]= identifier[date_t] . identifier[hour] keyword[if] identifier[date_t] . identifier[hour] > identifier[n_ap] [ literal[int] ] keyword[else] identifier[n_ap] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[sort_datetimes] )):
keyword[assert] identifier[sort_datetimes] [ identifier[i] ]!= identifier[sort_datetimes] [ identifier[i] - literal[int] ], literal[string] literal[string] . identifier[format] ( identifier[sort_datetimes] [ identifier[i] ])
identifier[mins_per_step] = identifier[int] ( literal[int] / identifier[n_ap] [ literal[int] ])
keyword[for] identifier[date_t] keyword[in] identifier[sort_datetimes] :
keyword[if] identifier[date_t] . identifier[moy] % identifier[mins_per_step] != literal[int] :
identifier[i] = literal[int]
identifier[valid_steps] = identifier[sorted] ( identifier[a_per] . identifier[VALIDTIMESTEPS] . identifier[keys] ())
keyword[while] identifier[date_t] . identifier[moy] % identifier[mins_per_step] != literal[int] keyword[and] identifier[i] < identifier[len] ( identifier[valid_steps] ):
identifier[mins_per_step] = identifier[int] ( literal[int] / identifier[valid_steps] [ identifier[i] ])
identifier[i] += literal[int]
identifier[n_ap] [ literal[int] ]= identifier[int] ( literal[int] / identifier[mins_per_step] )
keyword[if] identifier[a_per] . identifier[is_leap_year] keyword[is] keyword[False] :
keyword[for] identifier[date_t] keyword[in] identifier[sort_datetimes] :
keyword[if] identifier[date_t] . identifier[month] == literal[int] keyword[and] identifier[date_t] . identifier[day] == literal[int] :
identifier[n_ap] [ literal[int] ]= keyword[True]
identifier[new_ap] = identifier[AnalysisPeriod] (* identifier[n_ap] )
identifier[new_header] = identifier[self] . identifier[header] . identifier[duplicate] ()
identifier[new_header] . identifier[_analysis_period] = identifier[new_ap]
identifier[new_coll] = identifier[HourlyDiscontinuousCollection] ( identifier[new_header] , identifier[sort_values] , identifier[sort_datetimes] )
identifier[new_coll] . identifier[_validated_a_period] = keyword[True]
keyword[return] identifier[new_coll] | def validate_analysis_period(self):
"""Get a collection where the header analysis_period aligns with datetimes.
This means that checks for five criteria will be performed:
1) All datetimes in the data collection are in chronological orderstarting
from the analysis_period start hour to the end hour.
2) No duplicate datetimes exist in the data collection.
3) There are no datetimes that lie outside of the analysis_period time range.
4) There are no datetimes that do not align with the analysis_period timestep.
5) Datetimes for February 29th are excluded if is_leap_year is False on
the analysis_period.
Note that there is no need to run this check any time that a discontinous
data collection has been derived from a continuous one or when the
validated_a_period attribute of the collection is True. Furthermore, most
methods on this data collection will still run without a validated
analysis_period.
"""
a_per = self.header.analysis_period
n_ap = [a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, a_per.timestep, a_per.is_leap_year]
# make sure that datetimes are all in chronological order.
(sort_datetimes, sort_values) = zip(*sorted(zip(self.datetimes, self.values)))
if not a_per.is_reversed and (not a_per.is_annual):
if sort_datetimes[0].doy < a_per.st_time.doy:
n_ap[0] = sort_datetimes[0].month
n_ap[1] = sort_datetimes[0].day # depends on [control=['if'], data=[]]
if sort_datetimes[-1].doy > a_per.end_time.doy:
n_ap[3] = sort_datetimes[-1].month
n_ap[4] = sort_datetimes[-1].day # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif a_per.is_reversed:
last_ind = None
for (i, date_t) in enumerate(sort_datetimes):
last_ind = i if date_t.moy <= a_per.end_time.moy else last_ind # depends on [control=['for'], data=[]]
if last_ind is not None:
last_ind = last_ind + 1
sort_datetimes = sort_datetimes[last_ind:] + sort_datetimes[:last_ind]
sort_values = sort_values[last_ind:] + sort_values[:last_ind] # depends on [control=['if'], data=['last_ind']]
# If datetimes are outside the a_period range, just make it annual.
# There's no way to know what side of the analysis_period should be etended.
if sort_datetimes[0].doy > a_per.end_time.doy and sort_datetimes[0].doy < a_per.st_time.doy:
(n_ap[0], n_ap[1], n_ap[3], n_ap[4]) = (1, 1, 12, 31)
(sort_datetimes, sort_values) = zip(*sorted(zip(self.datetimes, self.values))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# check that no hours lie outside of the analysis_period
if not a_per.is_annual:
if a_per.st_hour != 0:
for date_t in sort_datetimes:
n_ap[2] = date_t.hour if date_t.hour < n_ap[2] else n_ap[2] # depends on [control=['for'], data=['date_t']] # depends on [control=['if'], data=[]]
if a_per.end_hour != 23:
for date_t in sort_datetimes:
n_ap[5] = date_t.hour if date_t.hour > n_ap[5] else n_ap[5] # depends on [control=['for'], data=['date_t']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# check that there are no duplicate datetimes.
for i in xrange(len(sort_datetimes)):
assert sort_datetimes[i] != sort_datetimes[i - 1], 'Duplicate datetime was found in the collection: {}'.format(sort_datetimes[i]) # depends on [control=['for'], data=['i']]
# check that the analysis_period timestep is correct.
mins_per_step = int(60 / n_ap[6])
for date_t in sort_datetimes:
if date_t.moy % mins_per_step != 0:
i = 0
valid_steps = sorted(a_per.VALIDTIMESTEPS.keys())
while date_t.moy % mins_per_step != 0 and i < len(valid_steps):
mins_per_step = int(60 / valid_steps[i])
i += 1 # depends on [control=['while'], data=[]]
n_ap[6] = int(60 / mins_per_step) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['date_t']]
# check that the analysis_period leap_year is correct.
if a_per.is_leap_year is False:
for date_t in sort_datetimes:
if date_t.month == 2 and date_t.day == 29:
n_ap[7] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['date_t']] # depends on [control=['if'], data=[]]
# build a validated collection.
new_ap = AnalysisPeriod(*n_ap)
new_header = self.header.duplicate()
new_header._analysis_period = new_ap
new_coll = HourlyDiscontinuousCollection(new_header, sort_values, sort_datetimes)
new_coll._validated_a_period = True
return new_coll |
def output(self, value):
    """SPL output port assignment expression.
    Arguments:
        value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
    Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
    """
    # Delegate to the parent implementation against this operator's stream.
    target_stream = self.stream
    return super(Map, self).output(target_stream, value)
constant[SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator.
]
return[call[call[name[super], parameter[name[Map], name[self]]].output, parameter[name[self].stream, name[value]]]] | keyword[def] identifier[output] ( identifier[self] , identifier[value] ):
literal[string]
keyword[return] identifier[super] ( identifier[Map] , identifier[self] ). identifier[output] ( identifier[self] . identifier[stream] , identifier[value] ) | def output(self, value):
"""SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator.
"""
return super(Map, self).output(self.stream, value) |
def change_token(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Change a user's token.
    :param id: User ID as an int.
    :return: :class:`users.User <users.User>` object
    :rtype: users.User
    """
    # Password fields are never returned by the token endpoint.
    user_schema = UserSchema(exclude=('password', 'password_confirm'))
    token_url = '{0}{1}/token/'.format(self.base, id)
    response = self.service.post(token_url)
    return self.service.decode(user_schema, response)
constant[Change a user's token.
:param id: User ID as an int.
:return: :class:`users.User <users.User>` object
:rtype: users.User
]
variable[schema] assign[=] call[name[UserSchema], parameter[]]
variable[resp] assign[=] call[name[self].service.post, parameter[binary_operation[binary_operation[name[self].base + call[name[str], parameter[name[id]]]] + constant[/token/]]]]
return[call[name[self].service.decode, parameter[name[schema], name[resp]]]] | keyword[def] identifier[change_token] ( identifier[self] , identifier[id] ):
literal[string]
identifier[schema] = identifier[UserSchema] ( identifier[exclude] =( literal[string] , literal[string] ))
identifier[resp] = identifier[self] . identifier[service] . identifier[post] ( identifier[self] . identifier[base] + identifier[str] ( identifier[id] )+ literal[string] )
keyword[return] identifier[self] . identifier[service] . identifier[decode] ( identifier[schema] , identifier[resp] ) | def change_token(self, id): # pylint: disable=invalid-name,redefined-builtin
"Change a user's token.\n\n :param id: User ID as an int.\n :return: :class:`users.User <users.User>` object\n :rtype: users.User\n "
schema = UserSchema(exclude=('password', 'password_confirm'))
resp = self.service.post(self.base + str(id) + '/token/')
return self.service.decode(schema, resp) |
def customCompute(self, recordNum, patternNZ, classification):
    """
    Return the inference value for a single input sample.
    The actual learning happens in compute() -- if, and only if, learning
    is enabled -- which is called when you run the network.
    .. warning:: This method is deprecated and exists only to maintain
       backward compatibility; it will be removed. Use
       :meth:`nupic.engine.Network.run` instead, which will call
       :meth:`~nupic.regions.sdr_classifier_region.compute`.
    :param recordNum: (int) Record number of the input sample.
    :param patternNZ: (list) of the active indices from the output below
    :param classification: (dict) of the classification information:
       * ``bucketIdx``: index of the encoder bucket
       * ``actValue``: actual value going into the encoder
    :returns: (dict) containing inference results, one entry for each step
       in ``self.steps``. The key is the number of steps, the value an
       array with the relative likelihood for each ``bucketIdx`` starting
       from 0, plus an ``actualValues`` entry. For example::
          {'actualValues': [0.0, 1.0, 2.0, 3.0]
           1 : [0.1, 0.3, 0.2, 0.7]
           4 : [0.2, 0.4, 0.3, 0.5]}
    """
    # Models restored from old checkpoints may lack the flag; default it
    # to False lazily (EAFP instead of hasattr).
    try:
        self._computeFlag
    except AttributeError:
        self._computeFlag = False
    if self._computeFlag:
        # compute() is already being driven by network.run(); using the
        # deprecated entry point at the same time is escalated to an error.
        warnings.simplefilter('error', DeprecationWarning)
        warnings.warn("The customCompute() method should not be "
                      "called at the same time as the compute() "
                      "method. The compute() method is called "
                      "whenever network.run() is called.",
                      DeprecationWarning)
    return self._sdrClassifier.compute(recordNum, patternNZ, classification,
                                       self.learningMode, self.inferenceMode)
constant[
Just return the inference value from one input sample. The actual
learning happens in compute() -- if, and only if learning is enabled --
which is called when you run the network.
.. warning:: This method is deprecated and exists only to maintain backward
compatibility. This method is deprecated, and will be removed. Use
:meth:`nupic.engine.Network.run` instead, which will call
:meth:`~nupic.regions.sdr_classifier_region.compute`.
:param recordNum: (int) Record number of the input sample.
:param patternNZ: (list) of the active indices from the output below
:param classification: (dict) of the classification information:
* ``bucketIdx``: index of the encoder bucket
* ``actValue``: actual value going into the encoder
:returns: (dict) containing inference results, one entry for each step in
``self.steps``. The key is the number of steps, the value is an
array containing the relative likelihood for each ``bucketIdx``
starting from 0.
For example:
::
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
]
if <ast.UnaryOp object at 0x7da1b01472e0> begin[:]
name[self]._computeFlag assign[=] constant[False]
if name[self]._computeFlag begin[:]
call[name[warnings].simplefilter, parameter[constant[error], name[DeprecationWarning]]]
call[name[warnings].warn, parameter[constant[The customCompute() method should not be called at the same time as the compute() method. The compute() method is called whenever network.run() is called.], name[DeprecationWarning]]]
return[call[name[self]._sdrClassifier.compute, parameter[name[recordNum], name[patternNZ], name[classification], name[self].learningMode, name[self].inferenceMode]]] | keyword[def] identifier[customCompute] ( identifier[self] , identifier[recordNum] , identifier[patternNZ] , identifier[classification] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_computeFlag] = keyword[False]
keyword[if] identifier[self] . identifier[_computeFlag] :
identifier[warnings] . identifier[simplefilter] ( literal[string] , identifier[DeprecationWarning] )
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[DeprecationWarning] )
keyword[return] identifier[self] . identifier[_sdrClassifier] . identifier[compute] ( identifier[recordNum] ,
identifier[patternNZ] ,
identifier[classification] ,
identifier[self] . identifier[learningMode] ,
identifier[self] . identifier[inferenceMode] ) | def customCompute(self, recordNum, patternNZ, classification):
"""
Just return the inference value from one input sample. The actual
learning happens in compute() -- if, and only if learning is enabled --
which is called when you run the network.
.. warning:: This method is deprecated and exists only to maintain backward
compatibility. This method is deprecated, and will be removed. Use
:meth:`nupic.engine.Network.run` instead, which will call
:meth:`~nupic.regions.sdr_classifier_region.compute`.
:param recordNum: (int) Record number of the input sample.
:param patternNZ: (list) of the active indices from the output below
:param classification: (dict) of the classification information:
* ``bucketIdx``: index of the encoder bucket
* ``actValue``: actual value going into the encoder
:returns: (dict) containing inference results, one entry for each step in
``self.steps``. The key is the number of steps, the value is an
array containing the relative likelihood for each ``bucketIdx``
starting from 0.
For example:
::
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
# If the compute flag has not been initialized (for example if we
# restored a model from an old checkpoint) initialize it to False.
if not hasattr(self, '_computeFlag'):
self._computeFlag = False # depends on [control=['if'], data=[]]
if self._computeFlag:
# Will raise an exception if the deprecated method customCompute() is
# being used at the same time as the compute function.
warnings.simplefilter('error', DeprecationWarning)
warnings.warn('The customCompute() method should not be called at the same time as the compute() method. The compute() method is called whenever network.run() is called.', DeprecationWarning) # depends on [control=['if'], data=[]]
return self._sdrClassifier.compute(recordNum, patternNZ, classification, self.learningMode, self.inferenceMode) |
def _leave_event_hide(self):
""" Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip).
"""
if (self.hide_timer_on and not self._hide_timer.isActive() and
# If Enter events always came after Leave events, we wouldn't need
# this check. But on Mac OS, it sometimes happens the other way
# around when the tooltip is created.
self.app.topLevelAt(QCursor.pos()) != self):
self._hide_timer.start(800, self) | def function[_leave_event_hide, parameter[self]]:
constant[ Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip).
]
if <ast.BoolOp object at 0x7da1b1ea0220> begin[:]
call[name[self]._hide_timer.start, parameter[constant[800], name[self]]] | keyword[def] identifier[_leave_event_hide] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[hide_timer_on] keyword[and] keyword[not] identifier[self] . identifier[_hide_timer] . identifier[isActive] () keyword[and]
identifier[self] . identifier[app] . identifier[topLevelAt] ( identifier[QCursor] . identifier[pos] ())!= identifier[self] ):
identifier[self] . identifier[_hide_timer] . identifier[start] ( literal[int] , identifier[self] ) | def _leave_event_hide(self):
""" Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip).
"""
if self.hide_timer_on and (not self._hide_timer.isActive()) and (self.app.topLevelAt(QCursor.pos()) != self):
# If Enter events always came after Leave events, we wouldn't need
# this check. But on Mac OS, it sometimes happens the other way
# around when the tooltip is created.
self._hide_timer.start(800, self) # depends on [control=['if'], data=[]] |
def as_artist(self, origin=(0, 0), **kwargs):
    """
    Matplotlib Line2D object for this region (`matplotlib.lines.Line2D`).
    Parameters
    ----------
    origin : array_like, optional
        The ``(x, y)`` pixel position of the origin of the displayed image.
        Default is (0, 0).
    kwargs : `dict`
        All keywords that a `~matplotlib.lines.Line2D` object accepts
    Returns
    -------
    point : `~matplotlib.lines.Line2D`
        Matplotlib Line2D object.
    """
    from matplotlib.lines import Line2D
    # Start from this region's default Line2D properties, then let the
    # caller's keyword arguments override them.
    line_props = self.mpl_properties_default('LINE2D')
    line_props.update(kwargs)
    xdata = [self.center.x - origin[0]]
    ydata = [self.center.y - origin[1]]
    return Line2D(xdata, ydata, **line_props)
constant[
Matplotlib Line2D object for this region (`matplotlib.lines.Line2D`).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.lines.Line2D` object accepts
Returns
-------
point : `~matplotlib.lines.Line2D`
Matplotlib Line2D object.
]
from relative_module[matplotlib.lines] import module[Line2D]
variable[mpl_params] assign[=] call[name[self].mpl_properties_default, parameter[constant[LINE2D]]]
call[name[mpl_params].update, parameter[name[kwargs]]]
variable[point] assign[=] call[name[Line2D], parameter[list[[<ast.BinOp object at 0x7da1b15db4f0>]], list[[<ast.BinOp object at 0x7da1b15db8e0>]]]]
return[name[point]] | keyword[def] identifier[as_artist] ( identifier[self] , identifier[origin] =( literal[int] , literal[int] ),** identifier[kwargs] ):
literal[string]
keyword[from] identifier[matplotlib] . identifier[lines] keyword[import] identifier[Line2D]
identifier[mpl_params] = identifier[self] . identifier[mpl_properties_default] ( literal[string] )
identifier[mpl_params] . identifier[update] ( identifier[kwargs] )
identifier[point] = identifier[Line2D] ([ identifier[self] . identifier[center] . identifier[x] - identifier[origin] [ literal[int] ]],[ identifier[self] . identifier[center] . identifier[y] - identifier[origin] [ literal[int] ]],
** identifier[mpl_params] )
keyword[return] identifier[point] | def as_artist(self, origin=(0, 0), **kwargs):
"""
Matplotlib Line2D object for this region (`matplotlib.lines.Line2D`).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.lines.Line2D` object accepts
Returns
-------
point : `~matplotlib.lines.Line2D`
Matplotlib Line2D object.
"""
from matplotlib.lines import Line2D
mpl_params = self.mpl_properties_default('LINE2D')
mpl_params.update(kwargs)
point = Line2D([self.center.x - origin[0]], [self.center.y - origin[1]], **mpl_params)
return point |
def vote_best(candidates, votes, n_winners):
    """Select the artifact with the single best evaluation as the winner of
    the vote.
    Only each agent's first-choice entry is considered; on a tie the
    earliest vote encountered is kept. ``candidates`` and ``n_winners``
    are accepted for interface compatibility but unused — exactly one
    winner is returned.
    :param candidates: All candidates in the vote (unused)
    :param votes: Votes from the agents; each vote is a sequence whose
        first element is an ``(artifact, evaluation)`` pair
    :param int n_winners: The number of vote winners (unused)
    """
    winner = votes[0][0]
    for vote in votes[1:]:
        contender = vote[0]
        if contender[1] > winner[1]:
            winner = contender
    return [winner]
constant[Select the artifact with the single best evaluation as the winner of
the vote.
Ties are resolved randomly.
:param candidates: All candidates in the vote
:param votes: Votes from the agents
:param int n_winners: The number of vote winners
]
variable[best] assign[=] list[[<ast.Subscript object at 0x7da18f00c2e0>]]
for taget[name[v]] in starred[call[name[votes]][<ast.Slice object at 0x7da18f58f760>]] begin[:]
if compare[call[call[name[v]][constant[0]]][constant[1]] greater[>] call[call[name[best]][constant[0]]][constant[1]]] begin[:]
variable[best] assign[=] list[[<ast.Subscript object at 0x7da18f58da50>]]
return[name[best]] | keyword[def] identifier[vote_best] ( identifier[candidates] , identifier[votes] , identifier[n_winners] ):
literal[string]
identifier[best] =[ identifier[votes] [ literal[int] ][ literal[int] ]]
keyword[for] identifier[v] keyword[in] identifier[votes] [ literal[int] :]:
keyword[if] identifier[v] [ literal[int] ][ literal[int] ]> identifier[best] [ literal[int] ][ literal[int] ]:
identifier[best] =[ identifier[v] [ literal[int] ]]
keyword[return] identifier[best] | def vote_best(candidates, votes, n_winners):
"""Select the artifact with the single best evaluation as the winner of
the vote.
Ties are resolved randomly.
:param candidates: All candidates in the vote
:param votes: Votes from the agents
:param int n_winners: The number of vote winners
"""
best = [votes[0][0]]
for v in votes[1:]:
if v[0][1] > best[0][1]:
best = [v[0]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
return best |
def parse_lines(self, lines: Iterable[str]) -> List[ParseResults]:
    """Parse multiple lines in succession.
    Each line is parsed with its zero-based position passed as the line
    number; results are returned in input order.
    """
    parsed = []
    for line_number, line in enumerate(lines):
        parsed.append(self.parseString(line, line_number))
    return parsed
constant[Parse multiple lines in succession.]
return[<ast.ListComp object at 0x7da1b0e44160>] | keyword[def] identifier[parse_lines] ( identifier[self] , identifier[lines] : identifier[Iterable] [ identifier[str] ])-> identifier[List] [ identifier[ParseResults] ]:
literal[string]
keyword[return] [
identifier[self] . identifier[parseString] ( identifier[line] , identifier[line_number] )
keyword[for] identifier[line_number] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] )
] | def parse_lines(self, lines: Iterable[str]) -> List[ParseResults]:
"""Parse multiple lines in succession."""
return [self.parseString(line, line_number) for (line_number, line) in enumerate(lines)] |
def convert_table(shell_output, delimiter=r'\t|\s{2,}', output='dict'):
    '''
    a method to convert a STDOUT shell table into a python data structure
    Columns are located by finding each header in the first line; every
    subsequent line is sliced at those positions and right-stripped.
    :param shell_output: string from STDOUT with headers on the first line
    :param delimiter: string with regex pattern delimiting headers
    :param output: string with type of structure to output (dict, list or csv)
    :return: list of dictionaries or list of lists or string with csv format
    :raises ValueError: if output is not one of dict, list or csv
    '''
    import re

    # validate the requested output format before doing any work
    if output not in ('dict', 'list', 'csv'):
        raise ValueError('output argument must be one of dict, list or csv values.')

    # handle empty input gracefully instead of raising IndexError
    output_lines = shell_output.splitlines()
    if not output_lines:
        return '' if output == 'csv' else []

    # retrieve header columns
    gap_pattern = re.compile(delimiter)
    column_headers = gap_pattern.split(output_lines[0])
    # a leading delimiter produces an empty header entry; drop the first one.
    # (str.index raises ValueError when '' is absent, so test membership
    # instead of comparing an index against -1)
    if '' in column_headers:
        column_headers.remove('')

    # generate (start, end) slice boundaries for each column; the final
    # column runs to the end of the line (end=None) -- an end index of -1
    # would chop the last character off every row
    indices = []
    for i in range(len(column_headers)):
        start = output_lines[0].find(column_headers[i])
        if i + 1 < len(column_headers):
            end = output_lines[0].find(column_headers[i + 1])
        else:
            end = None
        indices.append((start, end))

    # add headers to output
    python_list = []
    csv_string = ''
    if output == 'list':
        python_list.append(column_headers)
    elif output == 'csv':
        csv_string = ','.join(column_headers)

    # add rows to output
    for i in range(1, len(output_lines)):
        cells = [output_lines[i][start:end].rstrip() for start, end in indices]
        if output == 'dict':
            python_list.append(dict(zip(column_headers, cells)))
        elif output == 'list':
            python_list.append(cells)
        else:
            csv_string += '\n' + ','.join(cells)

    # return output
    if output == 'csv':
        return csv_string
    return python_list
constant[
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
]
import module[re]
variable[gap_pattern] assign[=] call[name[re].compile, parameter[name[delimiter]]]
variable[output_lines] assign[=] call[name[shell_output].splitlines, parameter[]]
variable[column_headers] assign[=] call[name[gap_pattern].split, parameter[call[name[output_lines]][constant[0]]]]
variable[blank_index] assign[=] call[name[column_headers].index, parameter[constant[]]]
if compare[name[blank_index] greater[>] <ast.UnaryOp object at 0x7da18c4cf7f0>] begin[:]
call[name[column_headers].pop, parameter[name[blank_index]]]
variable[indices] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[column_headers]]]]]] begin[:]
if compare[binary_operation[name[i] + constant[1]] less[<] call[name[len], parameter[name[column_headers]]]] begin[:]
call[name[indices].append, parameter[tuple[[<ast.Call object at 0x7da1b15a1210>, <ast.Call object at 0x7da1b15a1b70>]]]]
variable[python_list] assign[=] list[[]]
variable[csv_string] assign[=] constant[]
if compare[name[output] equal[==] constant[dict]] begin[:]
pass
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[output_lines]]]]]] begin[:]
if compare[name[output] equal[==] constant[dict]] begin[:]
variable[row_details] assign[=] dictionary[[], []]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[column_headers]]]]]] begin[:]
call[name[row_details]][call[name[column_headers]][name[j]]] assign[=] call[call[call[name[output_lines]][name[i]]][<ast.Slice object at 0x7da204344af0>].rstrip, parameter[]]
call[name[python_list].append, parameter[name[row_details]]]
if name[csv_string] begin[:]
return[name[csv_string]]
return[name[python_list]] | keyword[def] identifier[convert_table] ( identifier[shell_output] , identifier[delimiter] = literal[string] , identifier[output] = literal[string] ):
literal[string]
keyword[import] identifier[re]
identifier[gap_pattern] = identifier[re] . identifier[compile] ( identifier[delimiter] )
identifier[output_lines] = identifier[shell_output] . identifier[splitlines] ()
identifier[column_headers] = identifier[gap_pattern] . identifier[split] ( identifier[output_lines] [ literal[int] ])
identifier[blank_index] = identifier[column_headers] . identifier[index] ( literal[string] )
keyword[if] identifier[blank_index] >- literal[int] :
identifier[column_headers] . identifier[pop] ( identifier[blank_index] )
identifier[indices] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[column_headers] )):
keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[column_headers] ):
identifier[indices] . identifier[append] ((
identifier[output_lines] [ literal[int] ]. identifier[find] ( identifier[column_headers] [ identifier[i] ]),
identifier[output_lines] [ literal[int] ]. identifier[find] ( identifier[column_headers] [ identifier[i] + literal[int] ])
))
keyword[else] :
identifier[indices] . identifier[append] ((
identifier[output_lines] [ literal[int] ]. identifier[find] ( identifier[column_headers] [ identifier[i] ]),
- literal[int]
))
identifier[python_list] =[]
identifier[csv_string] = literal[string]
keyword[if] identifier[output] == literal[string] :
keyword[pass]
keyword[elif] identifier[output] == literal[string] :
identifier[python_list] . identifier[append] ( identifier[column_headers] )
keyword[elif] identifier[output] == literal[string] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[column_headers] )):
keyword[if] identifier[i] :
identifier[csv_string] += literal[string]
identifier[csv_string] += identifier[column_headers] [ identifier[i] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[output_lines] )):
keyword[if] identifier[output] == literal[string] :
identifier[row_details] ={}
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[column_headers] )):
identifier[row_details] [ identifier[column_headers] [ identifier[j] ]]= identifier[output_lines] [ identifier[i] ][ identifier[indices] [ identifier[j] ][ literal[int] ]: identifier[indices] [ identifier[j] ][ literal[int] ]]. identifier[rstrip] ()
identifier[python_list] . identifier[append] ( identifier[row_details] )
keyword[elif] identifier[output] == literal[string] :
identifier[row_list] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[column_headers] )):
identifier[row_list] . identifier[append] ( identifier[output_lines] [ identifier[i] ][ identifier[indices] [ identifier[j] ][ literal[int] ]: identifier[indices] [ identifier[j] ][ literal[int] ]]). identifier[rstrip] ()
identifier[python_list] . identifier[append] ( identifier[row_list] )
keyword[elif] identifier[output] == literal[string] :
identifier[csv_string] += literal[string]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[column_headers] )):
keyword[if] identifier[j] :
identifier[csv_string] += literal[string]
identifier[csv_string] += identifier[output_lines] [ identifier[i] ][ identifier[indices] [ identifier[j] ][ literal[int] ]: identifier[indices] [ identifier[j] ][ literal[int] ]]. identifier[rstrip] ()
keyword[if] identifier[csv_string] :
keyword[return] identifier[csv_string]
keyword[return] identifier[python_list] | def convert_table(shell_output, delimiter='\t|\\s{2,}', output='dict'):
"""
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
""" # retrieve header columns
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
blank_index = column_headers.index('')
if blank_index > -1:
column_headers.pop(blank_index) # depends on [control=['if'], data=['blank_index']]
# generate indices tuples
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((output_lines[0].find(column_headers[i]), output_lines[0].find(column_headers[i + 1]))) # depends on [control=['if'], data=[]]
else:
indices.append((output_lines[0].find(column_headers[i]), -1)) # depends on [control=['for'], data=['i']]
# add headers to output
python_list = []
csv_string = ''
if output == 'dict':
pass # depends on [control=['if'], data=[]]
elif output == 'list':
python_list.append(column_headers) # depends on [control=['if'], data=[]]
elif output == 'csv':
for i in range(len(column_headers)):
if i:
csv_string += ',' # depends on [control=['if'], data=[]]
csv_string += column_headers[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
raise ValueError('output argument must be one of dict, list or csv values.')
# add rows to output
for i in range(1, len(output_lines)):
if output == 'dict':
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip() # depends on [control=['for'], data=['j']]
python_list.append(row_details) # depends on [control=['if'], data=[]]
elif output == 'list':
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]]).rstrip() # depends on [control=['for'], data=['j']]
python_list.append(row_list) # depends on [control=['if'], data=[]]
elif output == 'csv':
csv_string += '\n'
for j in range(len(column_headers)):
if j:
csv_string += ',' # depends on [control=['if'], data=[]]
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip() # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# return output
if csv_string:
return csv_string # depends on [control=['if'], data=[]]
return python_list |
def _scan_line(self, line):
    """Classify one line of an email message and route it into a fragment.

    A line either extends the current fragment (same header/quote kind, or a
    blank/quote-header line inside a quoted fragment) or closes the current
    fragment and opens a new one.
    line - a row of text from an email message
    """
    is_quote_header = self.QUOTE_HDR_REGEX.match(line) is not None
    is_quoted = self.QUOTED_REGEX.match(line) is not None
    is_header = is_quote_header or self.HEADER_REGEX.match(line) is not None
    is_blank = len(line.strip()) == 0

    # a blank line after a signature marker closes out the current fragment
    if self.fragment and is_blank:
        if self.SIG_REGEX.match(self.fragment.lines[-1].strip()):
            self.fragment.signature = True
            self._finish_fragment()

    keep_in_current = False
    if self.fragment:
        same_kind = (self.fragment.headers == is_header and
                     self.fragment.quoted == is_quoted)
        quoted_continuation = self.fragment.quoted and (is_quote_header or is_blank)
        keep_in_current = same_kind or quoted_continuation

    if keep_in_current:
        self.fragment.lines.append(line)
    else:
        self._finish_fragment()
        self.fragment = Fragment(is_quoted, line, headers=is_header)
constant[ Reviews each line in email message and determines fragment type
line - a row of text from an email message
]
variable[is_quote_header] assign[=] compare[call[name[self].QUOTE_HDR_REGEX.match, parameter[name[line]]] is_not constant[None]]
variable[is_quoted] assign[=] compare[call[name[self].QUOTED_REGEX.match, parameter[name[line]]] is_not constant[None]]
variable[is_header] assign[=] <ast.BoolOp object at 0x7da20c7969e0>
if <ast.BoolOp object at 0x7da20c7956c0> begin[:]
if call[name[self].SIG_REGEX.match, parameter[call[call[name[self].fragment.lines][<ast.UnaryOp object at 0x7da20c7954b0>].strip, parameter[]]]] begin[:]
name[self].fragment.signature assign[=] constant[True]
call[name[self]._finish_fragment, parameter[]]
if <ast.BoolOp object at 0x7da20c7962f0> begin[:]
call[name[self].fragment.lines.append, parameter[name[line]]] | keyword[def] identifier[_scan_line] ( identifier[self] , identifier[line] ):
literal[string]
identifier[is_quote_header] = identifier[self] . identifier[QUOTE_HDR_REGEX] . identifier[match] ( identifier[line] ) keyword[is] keyword[not] keyword[None]
identifier[is_quoted] = identifier[self] . identifier[QUOTED_REGEX] . identifier[match] ( identifier[line] ) keyword[is] keyword[not] keyword[None]
identifier[is_header] = identifier[is_quote_header] keyword[or] identifier[self] . identifier[HEADER_REGEX] . identifier[match] ( identifier[line] ) keyword[is] keyword[not] keyword[None]
keyword[if] identifier[self] . identifier[fragment] keyword[and] identifier[len] ( identifier[line] . identifier[strip] ())== literal[int] :
keyword[if] identifier[self] . identifier[SIG_REGEX] . identifier[match] ( identifier[self] . identifier[fragment] . identifier[lines] [- literal[int] ]. identifier[strip] ()):
identifier[self] . identifier[fragment] . identifier[signature] = keyword[True]
identifier[self] . identifier[_finish_fragment] ()
keyword[if] identifier[self] . identifier[fragment] keyword[and] (( identifier[self] . identifier[fragment] . identifier[headers] == identifier[is_header] keyword[and] identifier[self] . identifier[fragment] . identifier[quoted] == identifier[is_quoted] ) keyword[or]
( identifier[self] . identifier[fragment] . identifier[quoted] keyword[and] ( identifier[is_quote_header] keyword[or] identifier[len] ( identifier[line] . identifier[strip] ())== literal[int] ))):
identifier[self] . identifier[fragment] . identifier[lines] . identifier[append] ( identifier[line] )
keyword[else] :
identifier[self] . identifier[_finish_fragment] ()
identifier[self] . identifier[fragment] = identifier[Fragment] ( identifier[is_quoted] , identifier[line] , identifier[headers] = identifier[is_header] ) | def _scan_line(self, line):
""" Reviews each line in email message and determines fragment type
line - a row of text from an email message
"""
is_quote_header = self.QUOTE_HDR_REGEX.match(line) is not None
is_quoted = self.QUOTED_REGEX.match(line) is not None
is_header = is_quote_header or self.HEADER_REGEX.match(line) is not None
if self.fragment and len(line.strip()) == 0:
if self.SIG_REGEX.match(self.fragment.lines[-1].strip()):
self.fragment.signature = True
self._finish_fragment() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.fragment and (self.fragment.headers == is_header and self.fragment.quoted == is_quoted or (self.fragment.quoted and (is_quote_header or len(line.strip()) == 0))):
self.fragment.lines.append(line) # depends on [control=['if'], data=[]]
else:
self._finish_fragment()
self.fragment = Fragment(is_quoted, line, headers=is_header) |
def getMaxAge(self):
    """Return the max-age value (in seconds) parsed from the saved
    cache-control header data, or 0 when the header or directive is absent."""
    cache_control = self.headers.get('cache-control')
    if not cache_control:
        return 0
    directives = dict(urlparse.parse_qsl(cache_control))
    return int(directives.get('max-age', '0'))
constant[get the max-age in seconds from the saved headers data]
variable[max_age] assign[=] constant[0]
variable[cache_control] assign[=] call[name[self].headers.get, parameter[constant[cache-control]]]
if name[cache_control] begin[:]
variable[params] assign[=] call[name[dict], parameter[call[name[urlparse].parse_qsl, parameter[name[cache_control]]]]]
variable[max_age] assign[=] call[name[int], parameter[call[name[params].get, parameter[constant[max-age], constant[0]]]]]
return[name[max_age]] | keyword[def] identifier[getMaxAge] ( identifier[self] ):
literal[string]
identifier[max_age] = literal[int]
identifier[cache_control] = identifier[self] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[cache_control] :
identifier[params] = identifier[dict] ( identifier[urlparse] . identifier[parse_qsl] ( identifier[cache_control] ))
identifier[max_age] = identifier[int] ( identifier[params] . identifier[get] ( literal[string] , literal[string] ))
keyword[return] identifier[max_age] | def getMaxAge(self):
"""get the max-age in seconds from the saved headers data"""
max_age = 0
cache_control = self.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
max_age = int(params.get('max-age', '0')) # depends on [control=['if'], data=[]]
return max_age |
def config():
    """
    Reading config file in $HOME directory
    /home/user/.alarm/config

    Returns the list [day, time, attempts, song] parsed from the file.
    The special day value "today" is replaced with the current day of the
    month. Prints an error and exits the program when any of the four
    settings is missing.
    """
    alarm_day, alarm_time, alarm_attempts, song = [], [], [], []
    # FIX: use a context manager so the config file handle is always closed
    # (the original iterated over a bare open() and leaked the handle)
    with open(alarm_config, "r") as conf_file:
        for line in conf_file:
            line = line.lstrip()
            if line.startswith("DAY"):
                alarm_day = line[4:].split()
            if line.startswith("ALARM_TIME"):
                alarm_time = line[11:].split()
            if line.startswith("ATTEMPTS"):
                alarm_attempts = line[9:].split()
            if line.startswith("SONG"):
                song = line[5:].split()
    if alarm_day == ["today"]:
        alarm_day = time.strftime("%d").split()
    alarm_args = alarm_day + alarm_time + alarm_attempts + song
    # exactly one value per setting is required
    if len(alarm_args) == 4:
        return alarm_args
    print("Error: config file: missing argument")
    sys.exit()
constant[
Reading config file in $HOME directory
/home/user/.alarm/config
]
variable[alarm_day] assign[=] list[[]]
for taget[name[line]] in starred[call[name[open], parameter[name[alarm_config], constant[r]]]] begin[:]
variable[line] assign[=] call[name[line].lstrip, parameter[]]
if call[name[line].startswith, parameter[constant[DAY]]] begin[:]
variable[alarm_day] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b265b4f0>].split, parameter[]]
if call[name[line].startswith, parameter[constant[ALARM_TIME]]] begin[:]
variable[alarm_time] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b2659cc0>].split, parameter[]]
if call[name[line].startswith, parameter[constant[ATTEMPTS]]] begin[:]
variable[alarm_attempts] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b265b820>].split, parameter[]]
if call[name[line].startswith, parameter[constant[SONG]]] begin[:]
variable[song] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b265bf10>].split, parameter[]]
if compare[name[alarm_day] equal[==] list[[<ast.Constant object at 0x7da1b265aec0>]]] begin[:]
variable[alarm_day] assign[=] call[call[name[time].strftime, parameter[constant[%d]]].split, parameter[]]
variable[alarm_args] assign[=] binary_operation[binary_operation[binary_operation[name[alarm_day] + name[alarm_time]] + name[alarm_attempts]] + name[song]]
if name[alarm_args] begin[:]
if compare[call[name[len], parameter[name[alarm_args]]] equal[==] constant[4]] begin[:]
return[name[alarm_args]] | keyword[def] identifier[config] ():
literal[string]
identifier[alarm_day] = identifier[alarm_time] = identifier[alarm_attempts] = identifier[song] =[]
keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[alarm_config] , literal[string] ):
identifier[line] = identifier[line] . identifier[lstrip] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[alarm_day] = identifier[line] [ literal[int] :]. identifier[split] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[alarm_time] = identifier[line] [ literal[int] :]. identifier[split] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[alarm_attempts] = identifier[line] [ literal[int] :]. identifier[split] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[song] = identifier[line] [ literal[int] :]. identifier[split] ()
keyword[if] identifier[alarm_day] ==[ literal[string] ]:
identifier[alarm_day] = identifier[time] . identifier[strftime] ( literal[string] ). identifier[split] ()
identifier[alarm_args] = identifier[alarm_day] + identifier[alarm_time] + identifier[alarm_attempts] + identifier[song]
keyword[if] identifier[alarm_args] :
keyword[if] identifier[len] ( identifier[alarm_args] )== literal[int] :
keyword[return] identifier[alarm_args]
keyword[else] :
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ()
keyword[else] :
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] () | def config():
"""
Reading config file in $HOME directory
/home/user/.alarm/config
"""
alarm_day = alarm_time = alarm_attempts = song = []
for line in open(alarm_config, 'r'):
line = line.lstrip()
if line.startswith('DAY'):
alarm_day = line[4:].split() # depends on [control=['if'], data=[]]
if line.startswith('ALARM_TIME'):
alarm_time = line[11:].split() # depends on [control=['if'], data=[]]
if line.startswith('ATTEMPTS'):
alarm_attempts = line[9:].split() # depends on [control=['if'], data=[]]
if line.startswith('SONG'):
song = line[5:].split() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if alarm_day == ['today']:
alarm_day = time.strftime('%d').split() # depends on [control=['if'], data=['alarm_day']]
alarm_args = alarm_day + alarm_time + alarm_attempts + song
if alarm_args:
if len(alarm_args) == 4:
return alarm_args # depends on [control=['if'], data=[]]
else:
print('Error: config file: missing argument')
sys.exit() # depends on [control=['if'], data=[]]
else:
print('Error: config file: missing argument')
sys.exit() |
def format_string(target, kwargs):
    """Format *target* with *kwargs*, choosing the mechanism automatically.

    Falsey targets are returned untouched. A callable target is invoked as
    ``target(**kwargs)``. A string target is formatted with {}-style
    (partial_format) when it contains '{', with %-style when it contains '%',
    and returned as-is otherwise. Any other value passes through unchanged.

    Args:
        target: The value to format (or call, or return unchanged).
        kwargs: The arguments used for formatting / the call.
    """
    if not target:
        return target
    if callable(target):
        return target(**kwargs)
    if isinstance(target, six.string_types):
        if '{' in target:
            return partial_format(target, **kwargs)
        if '%' in target:
            return target % kwargs
    return target
constant[Formats a string in any of three ways (or not at all).
Args:
target: The target string to format. This can be a function that takes a
dict as its only argument, a string with {}- or %-based formatting, or
a basic string with none of those. In the latter case, the string is
returned as-is, but in all other cases the string is formatted (or the
callback called) with the given kwargs.
If this is None (or otherwise falsey), it is returned immediately.
kwargs: The arguments to use for formatting.
Passed to safe_format, %, or target if it's
callable.
]
if <ast.UnaryOp object at 0x7da1b18db9a0> begin[:]
return[name[target]]
if call[name[callable], parameter[name[target]]] begin[:]
return[call[name[target], parameter[]]]
if <ast.UnaryOp object at 0x7da1b1602c80> begin[:]
return[name[target]]
if compare[constant[{] in name[target]] begin[:]
return[call[name[partial_format], parameter[name[target]]]]
if compare[constant[%] in name[target]] begin[:]
return[binary_operation[name[target] <ast.Mod object at 0x7da2590d6920> name[kwargs]]]
return[name[target]] | keyword[def] identifier[format_string] ( identifier[target] , identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[target] :
keyword[return] identifier[target]
keyword[if] identifier[callable] ( identifier[target] ):
keyword[return] identifier[target] (** identifier[kwargs] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[target] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[target]
keyword[if] literal[string] keyword[in] identifier[target] :
keyword[return] identifier[partial_format] ( identifier[target] ,** identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[target] :
keyword[return] identifier[target] % identifier[kwargs]
keyword[return] identifier[target] | def format_string(target, kwargs):
"""Formats a string in any of three ways (or not at all).
Args:
target: The target string to format. This can be a function that takes a
dict as its only argument, a string with {}- or %-based formatting, or
a basic string with none of those. In the latter case, the string is
returned as-is, but in all other cases the string is formatted (or the
callback called) with the given kwargs.
If this is None (or otherwise falsey), it is returned immediately.
kwargs: The arguments to use for formatting.
Passed to safe_format, %, or target if it's
callable.
"""
if not target:
return target # depends on [control=['if'], data=[]]
if callable(target):
return target(**kwargs) # depends on [control=['if'], data=[]]
if not isinstance(target, six.string_types):
return target # depends on [control=['if'], data=[]]
if '{' in target:
return partial_format(target, **kwargs) # depends on [control=['if'], data=['target']]
if '%' in target:
return target % kwargs # depends on [control=['if'], data=['target']]
return target |
def init_static_field(state, field_class_name, field_name, field_type):
    """
    Store an allocated, but not initialized, object of the given type into
    a static field.
    :param state: State associated to the field.
    :param field_class_name: Class containing the field.
    :param field_name: Name of the field.
    :param field_type: Type of the field and the new object.
    """
    ref = SimSootValue_StaticFieldRef.get_ref(
        state, field_class_name, field_name, field_type)
    new_obj = SimSootValue_ThisRef.new_object(state, field_type)
    state.memory.store(ref, new_obj)
constant[
Initialize the static field with an allocated, but not initialized,
object of the given type.
:param state: State associated to the field.
:param field_class_name: Class containing the field.
:param field_name: Name of the field.
:param field_type: Type of the field and the new object.
]
variable[field_ref] assign[=] call[name[SimSootValue_StaticFieldRef].get_ref, parameter[name[state], name[field_class_name], name[field_name], name[field_type]]]
variable[field_val] assign[=] call[name[SimSootValue_ThisRef].new_object, parameter[name[state], name[field_type]]]
call[name[state].memory.store, parameter[name[field_ref], name[field_val]]] | keyword[def] identifier[init_static_field] ( identifier[state] , identifier[field_class_name] , identifier[field_name] , identifier[field_type] ):
literal[string]
identifier[field_ref] = identifier[SimSootValue_StaticFieldRef] . identifier[get_ref] ( identifier[state] , identifier[field_class_name] ,
identifier[field_name] , identifier[field_type] )
identifier[field_val] = identifier[SimSootValue_ThisRef] . identifier[new_object] ( identifier[state] , identifier[field_type] )
identifier[state] . identifier[memory] . identifier[store] ( identifier[field_ref] , identifier[field_val] ) | def init_static_field(state, field_class_name, field_name, field_type):
"""
Initialize the static field with an allocated, but not initialized,
object of the given type.
:param state: State associated to the field.
:param field_class_name: Class containing the field.
:param field_name: Name of the field.
:param field_type: Type of the field and the new object.
"""
field_ref = SimSootValue_StaticFieldRef.get_ref(state, field_class_name, field_name, field_type)
field_val = SimSootValue_ThisRef.new_object(state, field_type)
state.memory.store(field_ref, field_val) |
def _hook_id(self, info):
""" Extract id from info. Override for custom behaviour. """
if isinstance(info, dict) and 'id' in info.keys():
self.id = info['id'] | def function[_hook_id, parameter[self, info]]:
constant[ Extract id from info. Override for custom behaviour. ]
if <ast.BoolOp object at 0x7da207f98040> begin[:]
name[self].id assign[=] call[name[info]][constant[id]] | keyword[def] identifier[_hook_id] ( identifier[self] , identifier[info] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[info] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[info] . identifier[keys] ():
identifier[self] . identifier[id] = identifier[info] [ literal[string] ] | def _hook_id(self, info):
""" Extract id from info. Override for custom behaviour. """
if isinstance(info, dict) and 'id' in info.keys():
self.id = info['id'] # depends on [control=['if'], data=[]] |
def start_service(addr, n):
    """Run a Service bound to *addr*, handle *n* requests, then print
    timing statistics for the run."""
    service = Service(addr)
    service.register('add', lambda x, y: x + y)
    t0 = time.time()
    for _ in range(n):
        service.process()
    elapsed = time.time() - t0
    time.sleep(0.1)
    print('Service stats:')
    util.print_stats(n, elapsed)
constant[ Start a service ]
variable[s] assign[=] call[name[Service], parameter[name[addr]]]
call[name[s].register, parameter[constant[add], <ast.Lambda object at 0x7da1b1045870>]]
variable[started] assign[=] call[name[time].time, parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[name[n]]]] begin[:]
call[name[s].process, parameter[]]
variable[duration] assign[=] binary_operation[call[name[time].time, parameter[]] - name[started]]
call[name[time].sleep, parameter[constant[0.1]]]
call[name[print], parameter[constant[Service stats:]]]
call[name[util].print_stats, parameter[name[n], name[duration]]]
return[None] | keyword[def] identifier[start_service] ( identifier[addr] , identifier[n] ):
literal[string]
identifier[s] = identifier[Service] ( identifier[addr] )
identifier[s] . identifier[register] ( literal[string] , keyword[lambda] identifier[x] , identifier[y] : identifier[x] + identifier[y] )
identifier[started] = identifier[time] . identifier[time] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[n] ):
identifier[s] . identifier[process] ()
identifier[duration] = identifier[time] . identifier[time] ()- identifier[started]
identifier[time] . identifier[sleep] ( literal[int] )
identifier[print] ( literal[string] )
identifier[util] . identifier[print_stats] ( identifier[n] , identifier[duration] )
keyword[return] | def start_service(addr, n):
""" Start a service """
s = Service(addr)
s.register('add', lambda x, y: x + y)
started = time.time()
for _ in range(n):
s.process() # depends on [control=['for'], data=[]]
duration = time.time() - started
time.sleep(0.1)
print('Service stats:')
util.print_stats(n, duration)
return |
def use_general_sv_bins(data):
    """Check if we should use a general binning approach for a sample.

    True when one of the CNV callers (cnvkit, titancna, purecn, gatk-cnv)
    is configured and ``_get_original_coverage`` reports nothing for the
    sample.
    """
    bin_callers = ("cnvkit", "titancna", "purecn", "gatk-cnv")
    wants_bins = any(c in dd.get_svcaller(data) for c in bin_callers)
    return wants_bins and not _get_original_coverage(data)
constant[Check if we should use a general binning approach for a sample.
Checks if CNVkit is enabled and we haven't already run CNVkit.
]
if call[name[any], parameter[<ast.ListComp object at 0x7da20c6e68c0>]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e67a0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[use_general_sv_bins] ( identifier[data] ):
literal[string]
keyword[if] identifier[any] ([ identifier[c] keyword[in] identifier[dd] . identifier[get_svcaller] ( identifier[data] ) keyword[for] identifier[c] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]]):
keyword[if] keyword[not] identifier[_get_original_coverage] ( identifier[data] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def use_general_sv_bins(data):
"""Check if we should use a general binning approach for a sample.
Checks if CNVkit is enabled and we haven't already run CNVkit.
"""
if any([c in dd.get_svcaller(data) for c in ['cnvkit', 'titancna', 'purecn', 'gatk-cnv']]):
if not _get_original_coverage(data):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False |
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
"""Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
"""
mutated_files = set()
dst_record_file = None
for src, dst in mutated_file_tuples:
if src == src_record_file:
dst_record_file = dst
else:
mutated_files.add(dst)
if not dst_record_file:
raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file))
output_records = []
file_name = os.path.join(workspace, dst_record_file)
for line in read_file(file_name).splitlines():
filename, fingerprint_str, size_str = line.rsplit(',', 3)
if filename in mutated_files:
fingerprint_str, size_str = fingerprint_file(workspace, filename)
output_line = ','.join((filename, fingerprint_str, size_str))
else:
output_line = line
output_records.append(output_line)
safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n') | def function[rewrite_record_file, parameter[workspace, src_record_file, mutated_file_tuples]]:
constant[Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
]
variable[mutated_files] assign[=] call[name[set], parameter[]]
variable[dst_record_file] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b227b7c0>, <ast.Name object at 0x7da1b2279b40>]]] in starred[name[mutated_file_tuples]] begin[:]
if compare[name[src] equal[==] name[src_record_file]] begin[:]
variable[dst_record_file] assign[=] name[dst]
if <ast.UnaryOp object at 0x7da1b227b130> begin[:]
<ast.Raise object at 0x7da1b227a740>
variable[output_records] assign[=] list[[]]
variable[file_name] assign[=] call[name[os].path.join, parameter[name[workspace], name[dst_record_file]]]
for taget[name[line]] in starred[call[call[name[read_file], parameter[name[file_name]]].splitlines, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b227bbb0> assign[=] call[name[line].rsplit, parameter[constant[,], constant[3]]]
if compare[name[filename] in name[mutated_files]] begin[:]
<ast.Tuple object at 0x7da1b22a5b40> assign[=] call[name[fingerprint_file], parameter[name[workspace], name[filename]]]
variable[output_line] assign[=] call[constant[,].join, parameter[tuple[[<ast.Name object at 0x7da1b2278f70>, <ast.Name object at 0x7da1b227acb0>, <ast.Name object at 0x7da1b227aef0>]]]]
call[name[output_records].append, parameter[name[output_line]]]
call[name[safe_file_dump], parameter[name[file_name], binary_operation[call[constant[
].join, parameter[name[output_records]]] + constant[
]]]] | keyword[def] identifier[rewrite_record_file] ( identifier[workspace] , identifier[src_record_file] , identifier[mutated_file_tuples] ):
literal[string]
identifier[mutated_files] = identifier[set] ()
identifier[dst_record_file] = keyword[None]
keyword[for] identifier[src] , identifier[dst] keyword[in] identifier[mutated_file_tuples] :
keyword[if] identifier[src] == identifier[src_record_file] :
identifier[dst_record_file] = identifier[dst]
keyword[else] :
identifier[mutated_files] . identifier[add] ( identifier[dst] )
keyword[if] keyword[not] identifier[dst_record_file] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[src_record_file] ))
identifier[output_records] =[]
identifier[file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[workspace] , identifier[dst_record_file] )
keyword[for] identifier[line] keyword[in] identifier[read_file] ( identifier[file_name] ). identifier[splitlines] ():
identifier[filename] , identifier[fingerprint_str] , identifier[size_str] = identifier[line] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[if] identifier[filename] keyword[in] identifier[mutated_files] :
identifier[fingerprint_str] , identifier[size_str] = identifier[fingerprint_file] ( identifier[workspace] , identifier[filename] )
identifier[output_line] = literal[string] . identifier[join] (( identifier[filename] , identifier[fingerprint_str] , identifier[size_str] ))
keyword[else] :
identifier[output_line] = identifier[line]
identifier[output_records] . identifier[append] ( identifier[output_line] )
identifier[safe_file_dump] ( identifier[file_name] , literal[string] . identifier[join] ( identifier[output_records] )+ literal[string] ) | def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
"""Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
"""
mutated_files = set()
dst_record_file = None
for (src, dst) in mutated_file_tuples:
if src == src_record_file:
dst_record_file = dst # depends on [control=['if'], data=[]]
else:
mutated_files.add(dst) # depends on [control=['for'], data=[]]
if not dst_record_file:
raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file)) # depends on [control=['if'], data=[]]
output_records = []
file_name = os.path.join(workspace, dst_record_file)
for line in read_file(file_name).splitlines():
(filename, fingerprint_str, size_str) = line.rsplit(',', 3)
if filename in mutated_files:
(fingerprint_str, size_str) = fingerprint_file(workspace, filename)
output_line = ','.join((filename, fingerprint_str, size_str)) # depends on [control=['if'], data=['filename']]
else:
output_line = line
output_records.append(output_line) # depends on [control=['for'], data=['line']]
safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n') |
def app_name_from_ini_file(ini_file_path):
"""
Returns the name of the main application from the given ini file. See
:function:`app_name_from_ini_parser` for details.
:param ini_file_path: Path to the .ini file to parse.
"""
parser = configparser.SafeConfigParser()
parser.read(ini_file_path)
return app_name_from_ini_parser(parser) | def function[app_name_from_ini_file, parameter[ini_file_path]]:
constant[
Returns the name of the main application from the given ini file. See
:function:`app_name_from_ini_parser` for details.
:param ini_file_path: Path to the .ini file to parse.
]
variable[parser] assign[=] call[name[configparser].SafeConfigParser, parameter[]]
call[name[parser].read, parameter[name[ini_file_path]]]
return[call[name[app_name_from_ini_parser], parameter[name[parser]]]] | keyword[def] identifier[app_name_from_ini_file] ( identifier[ini_file_path] ):
literal[string]
identifier[parser] = identifier[configparser] . identifier[SafeConfigParser] ()
identifier[parser] . identifier[read] ( identifier[ini_file_path] )
keyword[return] identifier[app_name_from_ini_parser] ( identifier[parser] ) | def app_name_from_ini_file(ini_file_path):
"""
Returns the name of the main application from the given ini file. See
:function:`app_name_from_ini_parser` for details.
:param ini_file_path: Path to the .ini file to parse.
"""
parser = configparser.SafeConfigParser()
parser.read(ini_file_path)
return app_name_from_ini_parser(parser) |
def invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None:
"""
Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = dataset = method = output_dir = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
assert 'dataset' in config, '`dataset` section not present in the config'
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
try:
dataset = create_dataset(config)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
method = getattr(dataset, method_name)
except AttributeError as ex:
fallback('Method `{}` not found in the dataset'.format(method_name), ex)
try:
method()
except Exception as ex: # pylint: disable=broad-except
fallback('Exception occurred during method `{}` invocation'.format(method_name), ex) | def function[invoke_dataset_method, parameter[config_path, method_name, output_root, cl_arguments]]:
constant[
Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
]
variable[config] assign[=] constant[None]
<ast.Try object at 0x7da18fe93ca0>
<ast.Try object at 0x7da18fe90850>
<ast.Try object at 0x7da20c7ca800>
<ast.Try object at 0x7da20c7c8d00> | keyword[def] identifier[invoke_dataset_method] ( identifier[config_path] : identifier[str] , identifier[method_name] : identifier[str] , identifier[output_root] : identifier[str] , identifier[cl_arguments] : identifier[Iterable] [ identifier[str] ])-> keyword[None] :
literal[string]
identifier[config] = identifier[dataset] = identifier[method] = identifier[output_dir] = keyword[None]
keyword[try] :
identifier[config_path] = identifier[find_config] ( identifier[config_path] )
identifier[config] = identifier[load_config] ( identifier[config_file] = identifier[config_path] , identifier[additional_args] = identifier[cl_arguments] )
keyword[assert] literal[string] keyword[in] identifier[config] , literal[string]
identifier[logging] . identifier[debug] ( literal[string] , identifier[config] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[fallback] ( literal[string] , identifier[ex] )
keyword[try] :
identifier[dataset] = identifier[create_dataset] ( identifier[config] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[fallback] ( literal[string] , identifier[ex] )
keyword[try] :
identifier[method] = identifier[getattr] ( identifier[dataset] , identifier[method_name] )
keyword[except] identifier[AttributeError] keyword[as] identifier[ex] :
identifier[fallback] ( literal[string] . identifier[format] ( identifier[method_name] ), identifier[ex] )
keyword[try] :
identifier[method] ()
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[fallback] ( literal[string] . identifier[format] ( identifier[method_name] ), identifier[ex] ) | def invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None:
"""
Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = dataset = method = output_dir = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
assert 'dataset' in config, '`dataset` section not present in the config'
logging.debug('\tLoaded config: %s', config) # depends on [control=['try'], data=[]]
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex) # depends on [control=['except'], data=['ex']]
try:
dataset = create_dataset(config) # depends on [control=['try'], data=[]]
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex) # depends on [control=['except'], data=['ex']]
try:
method = getattr(dataset, method_name) # depends on [control=['try'], data=[]]
except AttributeError as ex:
fallback('Method `{}` not found in the dataset'.format(method_name), ex) # depends on [control=['except'], data=['ex']]
try:
method() # depends on [control=['try'], data=[]]
except Exception as ex: # pylint: disable=broad-except
fallback('Exception occurred during method `{}` invocation'.format(method_name), ex) # depends on [control=['except'], data=['ex']] |
def send_request(self, request, correlation_id=None):
"""Encode and queue a kafka api request for sending.
Arguments:
request (object): An un-encoded kafka request.
correlation_id (int, optional): Optionally specify an ID to
correlate requests with responses. If not provided, an ID will
be generated automatically.
Returns:
correlation_id
"""
log.debug('Sending request %s', request)
if correlation_id is None:
correlation_id = self._next_correlation_id()
header = RequestHeader(request,
correlation_id=correlation_id,
client_id=self._client_id)
message = b''.join([header.encode(), request.encode()])
size = Int32.encode(len(message))
data = size + message
self.bytes_to_send.append(data)
if request.expect_response():
ifr = (correlation_id, request)
self.in_flight_requests.append(ifr)
return correlation_id | def function[send_request, parameter[self, request, correlation_id]]:
constant[Encode and queue a kafka api request for sending.
Arguments:
request (object): An un-encoded kafka request.
correlation_id (int, optional): Optionally specify an ID to
correlate requests with responses. If not provided, an ID will
be generated automatically.
Returns:
correlation_id
]
call[name[log].debug, parameter[constant[Sending request %s], name[request]]]
if compare[name[correlation_id] is constant[None]] begin[:]
variable[correlation_id] assign[=] call[name[self]._next_correlation_id, parameter[]]
variable[header] assign[=] call[name[RequestHeader], parameter[name[request]]]
variable[message] assign[=] call[constant[b''].join, parameter[list[[<ast.Call object at 0x7da1b2179480>, <ast.Call object at 0x7da1b2178340>]]]]
variable[size] assign[=] call[name[Int32].encode, parameter[call[name[len], parameter[name[message]]]]]
variable[data] assign[=] binary_operation[name[size] + name[message]]
call[name[self].bytes_to_send.append, parameter[name[data]]]
if call[name[request].expect_response, parameter[]] begin[:]
variable[ifr] assign[=] tuple[[<ast.Name object at 0x7da1b2178e50>, <ast.Name object at 0x7da1b2179330>]]
call[name[self].in_flight_requests.append, parameter[name[ifr]]]
return[name[correlation_id]] | keyword[def] identifier[send_request] ( identifier[self] , identifier[request] , identifier[correlation_id] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[request] )
keyword[if] identifier[correlation_id] keyword[is] keyword[None] :
identifier[correlation_id] = identifier[self] . identifier[_next_correlation_id] ()
identifier[header] = identifier[RequestHeader] ( identifier[request] ,
identifier[correlation_id] = identifier[correlation_id] ,
identifier[client_id] = identifier[self] . identifier[_client_id] )
identifier[message] = literal[string] . identifier[join] ([ identifier[header] . identifier[encode] (), identifier[request] . identifier[encode] ()])
identifier[size] = identifier[Int32] . identifier[encode] ( identifier[len] ( identifier[message] ))
identifier[data] = identifier[size] + identifier[message]
identifier[self] . identifier[bytes_to_send] . identifier[append] ( identifier[data] )
keyword[if] identifier[request] . identifier[expect_response] ():
identifier[ifr] =( identifier[correlation_id] , identifier[request] )
identifier[self] . identifier[in_flight_requests] . identifier[append] ( identifier[ifr] )
keyword[return] identifier[correlation_id] | def send_request(self, request, correlation_id=None):
"""Encode and queue a kafka api request for sending.
Arguments:
request (object): An un-encoded kafka request.
correlation_id (int, optional): Optionally specify an ID to
correlate requests with responses. If not provided, an ID will
be generated automatically.
Returns:
correlation_id
"""
log.debug('Sending request %s', request)
if correlation_id is None:
correlation_id = self._next_correlation_id() # depends on [control=['if'], data=['correlation_id']]
header = RequestHeader(request, correlation_id=correlation_id, client_id=self._client_id)
message = b''.join([header.encode(), request.encode()])
size = Int32.encode(len(message))
data = size + message
self.bytes_to_send.append(data)
if request.expect_response():
ifr = (correlation_id, request)
self.in_flight_requests.append(ifr) # depends on [control=['if'], data=[]]
return correlation_id |
def segment_volume(seg):
'''Compute the volume of a segment.
Approximated as a conical frustum.
'''
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h = point_dist(seg[0], seg[1])
return math.pi * h * ((r0 * r0) + (r0 * r1) + (r1 * r1)) / 3.0 | def function[segment_volume, parameter[seg]]:
constant[Compute the volume of a segment.
Approximated as a conical frustum.
]
variable[r0] assign[=] call[call[name[seg]][constant[0]]][name[COLS].R]
variable[r1] assign[=] call[call[name[seg]][constant[1]]][name[COLS].R]
variable[h] assign[=] call[name[point_dist], parameter[call[name[seg]][constant[0]], call[name[seg]][constant[1]]]]
return[binary_operation[binary_operation[binary_operation[name[math].pi * name[h]] * binary_operation[binary_operation[binary_operation[name[r0] * name[r0]] + binary_operation[name[r0] * name[r1]]] + binary_operation[name[r1] * name[r1]]]] / constant[3.0]]] | keyword[def] identifier[segment_volume] ( identifier[seg] ):
literal[string]
identifier[r0] = identifier[seg] [ literal[int] ][ identifier[COLS] . identifier[R] ]
identifier[r1] = identifier[seg] [ literal[int] ][ identifier[COLS] . identifier[R] ]
identifier[h] = identifier[point_dist] ( identifier[seg] [ literal[int] ], identifier[seg] [ literal[int] ])
keyword[return] identifier[math] . identifier[pi] * identifier[h] *(( identifier[r0] * identifier[r0] )+( identifier[r0] * identifier[r1] )+( identifier[r1] * identifier[r1] ))/ literal[int] | def segment_volume(seg):
"""Compute the volume of a segment.
Approximated as a conical frustum.
"""
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
h = point_dist(seg[0], seg[1])
return math.pi * h * (r0 * r0 + r0 * r1 + r1 * r1) / 3.0 |
def _index_to_row_col(lines, index):
r"""
>>> lines = ['hello\n', 'world\n']
>>> _index_to_row_col(lines, 0)
(0, 0)
>>> _index_to_row_col(lines, 7)
(1, 1)
"""
if index < 0:
raise IndexError('negative index')
current_index = 0
for line_number, line in enumerate(lines):
line_length = len(line)
if current_index + line_length > index:
return line_number, index - current_index
current_index += line_length
raise IndexError('index %d out of range' % index) | def function[_index_to_row_col, parameter[lines, index]]:
constant[
>>> lines = ['hello\n', 'world\n']
>>> _index_to_row_col(lines, 0)
(0, 0)
>>> _index_to_row_col(lines, 7)
(1, 1)
]
if compare[name[index] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1dd9780>
variable[current_index] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b1dd9990>, <ast.Name object at 0x7da1b1dd9ab0>]]] in starred[call[name[enumerate], parameter[name[lines]]]] begin[:]
variable[line_length] assign[=] call[name[len], parameter[name[line]]]
if compare[binary_operation[name[current_index] + name[line_length]] greater[>] name[index]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1ddb6a0>, <ast.BinOp object at 0x7da1b1dd9c60>]]]
<ast.AugAssign object at 0x7da1b1ddad40>
<ast.Raise object at 0x7da1b1ddafb0> | keyword[def] identifier[_index_to_row_col] ( identifier[lines] , identifier[index] ):
literal[string]
keyword[if] identifier[index] < literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] )
identifier[current_index] = literal[int]
keyword[for] identifier[line_number] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] ):
identifier[line_length] = identifier[len] ( identifier[line] )
keyword[if] identifier[current_index] + identifier[line_length] > identifier[index] :
keyword[return] identifier[line_number] , identifier[index] - identifier[current_index]
identifier[current_index] += identifier[line_length]
keyword[raise] identifier[IndexError] ( literal[string] % identifier[index] ) | def _index_to_row_col(lines, index):
"""
>>> lines = ['hello\\n', 'world\\n']
>>> _index_to_row_col(lines, 0)
(0, 0)
>>> _index_to_row_col(lines, 7)
(1, 1)
"""
if index < 0:
raise IndexError('negative index') # depends on [control=['if'], data=[]]
current_index = 0
for (line_number, line) in enumerate(lines):
line_length = len(line)
if current_index + line_length > index:
return (line_number, index - current_index) # depends on [control=['if'], data=['index']]
current_index += line_length # depends on [control=['for'], data=[]]
raise IndexError('index %d out of range' % index) |
def new_table(self, name, add_id=True, **kwargs):
'''Add a table to the schema, or update it it already exists.
If updating, will only update data.
'''
from . import Table
from .exc import NotFoundError
try:
table = self.table(name)
extant = True
except NotFoundError:
extant = False
if 'sequence_id' not in kwargs:
kwargs['sequence_id'] = self._database.next_sequence_id(Dataset, self.vid, Table)
table = Table(name=name, d_vid=self.vid, **kwargs)
table.update_id()
# Update possibly extant data
table.data = dict(
(list(table.data.items()) if table.data else []) + list(kwargs.get('data', {}).items()))
for key, value in list(kwargs.items()):
if not key:
continue
if key[0] != '_' and key not in ['vid', 'id', 'id_', 'd_id', 'name', 'sequence_id', 'table', 'column', 'data']:
setattr(table, key, value)
if add_id:
table.add_id_column()
if not extant:
self.tables.append(table)
return table | def function[new_table, parameter[self, name, add_id]]:
constant[Add a table to the schema, or update it it already exists.
If updating, will only update data.
]
from relative_module[None] import module[Table]
from relative_module[exc] import module[NotFoundError]
<ast.Try object at 0x7da20c7948e0>
name[table].data assign[=] call[name[dict], parameter[binary_operation[<ast.IfExp object at 0x7da18f00e1d0> + call[name[list], parameter[call[call[name[kwargs].get, parameter[constant[data], dictionary[[], []]]].items, parameter[]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da18f00e590>, <ast.Name object at 0x7da18f00e140>]]] in starred[call[name[list], parameter[call[name[kwargs].items, parameter[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f00e050> begin[:]
continue
if <ast.BoolOp object at 0x7da18f00db10> begin[:]
call[name[setattr], parameter[name[table], name[key], name[value]]]
if name[add_id] begin[:]
call[name[table].add_id_column, parameter[]]
if <ast.UnaryOp object at 0x7da18f00f070> begin[:]
call[name[self].tables.append, parameter[name[table]]]
return[name[table]] | keyword[def] identifier[new_table] ( identifier[self] , identifier[name] , identifier[add_id] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[from] . keyword[import] identifier[Table]
keyword[from] . identifier[exc] keyword[import] identifier[NotFoundError]
keyword[try] :
identifier[table] = identifier[self] . identifier[table] ( identifier[name] )
identifier[extant] = keyword[True]
keyword[except] identifier[NotFoundError] :
identifier[extant] = keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_database] . identifier[next_sequence_id] ( identifier[Dataset] , identifier[self] . identifier[vid] , identifier[Table] )
identifier[table] = identifier[Table] ( identifier[name] = identifier[name] , identifier[d_vid] = identifier[self] . identifier[vid] ,** identifier[kwargs] )
identifier[table] . identifier[update_id] ()
identifier[table] . identifier[data] = identifier[dict] (
( identifier[list] ( identifier[table] . identifier[data] . identifier[items] ()) keyword[if] identifier[table] . identifier[data] keyword[else] [])+ identifier[list] ( identifier[kwargs] . identifier[get] ( literal[string] ,{}). identifier[items] ()))
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[list] ( identifier[kwargs] . identifier[items] ()):
keyword[if] keyword[not] identifier[key] :
keyword[continue]
keyword[if] identifier[key] [ literal[int] ]!= literal[string] keyword[and] identifier[key] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[setattr] ( identifier[table] , identifier[key] , identifier[value] )
keyword[if] identifier[add_id] :
identifier[table] . identifier[add_id_column] ()
keyword[if] keyword[not] identifier[extant] :
identifier[self] . identifier[tables] . identifier[append] ( identifier[table] )
keyword[return] identifier[table] | def new_table(self, name, add_id=True, **kwargs):
"""Add a table to the schema, or update it it already exists.
If updating, will only update data.
"""
from . import Table
from .exc import NotFoundError
try:
table = self.table(name)
extant = True # depends on [control=['try'], data=[]]
except NotFoundError:
extant = False
if 'sequence_id' not in kwargs:
kwargs['sequence_id'] = self._database.next_sequence_id(Dataset, self.vid, Table) # depends on [control=['if'], data=['kwargs']]
table = Table(name=name, d_vid=self.vid, **kwargs)
table.update_id() # depends on [control=['except'], data=[]]
# Update possibly extant data
table.data = dict((list(table.data.items()) if table.data else []) + list(kwargs.get('data', {}).items()))
for (key, value) in list(kwargs.items()):
if not key:
continue # depends on [control=['if'], data=[]]
if key[0] != '_' and key not in ['vid', 'id', 'id_', 'd_id', 'name', 'sequence_id', 'table', 'column', 'data']:
setattr(table, key, value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if add_id:
table.add_id_column() # depends on [control=['if'], data=[]]
if not extant:
self.tables.append(table) # depends on [control=['if'], data=[]]
return table |
def getSKOSDirectSubs(self, aURI):
"""
2015-08-19: currenlty not used, inferred from above
"""
aURI = aURI
qres = self.rdflib_graph.query("""SELECT DISTINCT ?x
WHERE {
{
{ ?x skos:broader <%s> }
UNION
{ <%s> skos:narrower ?s }
}
FILTER (!isBlank(?x))
}
""" % (aURI, aURI))
return list(qres) | def function[getSKOSDirectSubs, parameter[self, aURI]]:
constant[
2015-08-19: currenlty not used, inferred from above
]
variable[aURI] assign[=] name[aURI]
variable[qres] assign[=] call[name[self].rdflib_graph.query, parameter[binary_operation[constant[SELECT DISTINCT ?x
WHERE {
{
{ ?x skos:broader <%s> }
UNION
{ <%s> skos:narrower ?s }
}
FILTER (!isBlank(?x))
}
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b112a9b0>, <ast.Name object at 0x7da1b112b0a0>]]]]]
return[call[name[list], parameter[name[qres]]]] | keyword[def] identifier[getSKOSDirectSubs] ( identifier[self] , identifier[aURI] ):
literal[string]
identifier[aURI] = identifier[aURI]
identifier[qres] = identifier[self] . identifier[rdflib_graph] . identifier[query] ( literal[string] %( identifier[aURI] , identifier[aURI] ))
keyword[return] identifier[list] ( identifier[qres] ) | def getSKOSDirectSubs(self, aURI):
"""
2015-08-19: currenlty not used, inferred from above
"""
aURI = aURI
qres = self.rdflib_graph.query('SELECT DISTINCT ?x\n WHERE {\n {\n { ?x skos:broader <%s> }\n UNION\n { <%s> skos:narrower ?s }\n }\n FILTER (!isBlank(?x))\n }\n ' % (aURI, aURI))
return list(qres) |
async def dist(self, mesg):
'''
Distribute an existing event tuple.
Args:
mesg ((str,dict)): An event tuple.
Example:
await base.dist( ('foo',{'bar':'baz'}) )
'''
if self.isfini:
return ()
ret = []
for func in self._syn_funcs.get(mesg[0], ()):
try:
ret.append(await s_coro.ornot(func, mesg))
except asyncio.CancelledError:
raise
except Exception:
logger.exception('base %s error with mesg %s', self, mesg)
for func in self._syn_links:
try:
ret.append(await func(mesg))
except asyncio.CancelledError:
raise
except Exception:
logger.exception('base %s error with mesg %s', self, mesg)
return ret | <ast.AsyncFunctionDef object at 0x7da1b230bc10> | keyword[async] keyword[def] identifier[dist] ( identifier[self] , identifier[mesg] ):
literal[string]
keyword[if] identifier[self] . identifier[isfini] :
keyword[return] ()
identifier[ret] =[]
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[_syn_funcs] . identifier[get] ( identifier[mesg] [ literal[int] ],()):
keyword[try] :
identifier[ret] . identifier[append] ( keyword[await] identifier[s_coro] . identifier[ornot] ( identifier[func] , identifier[mesg] ))
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[self] , identifier[mesg] )
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[_syn_links] :
keyword[try] :
identifier[ret] . identifier[append] ( keyword[await] identifier[func] ( identifier[mesg] ))
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[self] , identifier[mesg] )
keyword[return] identifier[ret] | async def dist(self, mesg):
"""
Distribute an existing event tuple.
Args:
mesg ((str,dict)): An event tuple.
Example:
await base.dist( ('foo',{'bar':'baz'}) )
"""
if self.isfini:
return () # depends on [control=['if'], data=[]]
ret = []
for func in self._syn_funcs.get(mesg[0], ()):
try:
ret.append(await s_coro.ornot(func, mesg)) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
raise # depends on [control=['except'], data=[]]
except Exception:
logger.exception('base %s error with mesg %s', self, mesg) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['func']]
for func in self._syn_links:
try:
ret.append(await func(mesg)) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
raise # depends on [control=['except'], data=[]]
except Exception:
logger.exception('base %s error with mesg %s', self, mesg) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['func']]
return ret |
def identifier_director(**kwargs):
    """Direct how to handle the identifier element.

    Keyword Args:
        ark: ARK identifier of the record (optional).
        domain_name: Host used to build a permalink URL (optional).
        scheme: URL scheme for the permalink; defaults to 'http' when
            missing or falsy.
        qualifier: Identifier qualifier, e.g. 'ark' or 'permalink'.
        content: Fallback identifier content (defaults to '').

    Returns:
        A DCIdentifier element wrapping the resolved content string.
    """
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    # Set default scheme if it is None or is not supplied.
    scheme = kwargs.get('scheme') or 'http'
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', '')
    # See if the ark and domain name were given.
    if ark and qualifier == 'ark':
        content = 'ark: %s' % ark
    if domain_name and ark and qualifier == 'permalink':
        # Create the permalink URL.
        if not domain_name.endswith('/'):
            domain_name += '/'
        permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
        # Make sure it has a trailing slash.
        if not permalink_url.endswith('/'):
            permalink_url += '/'
        content = permalink_url
    else:
        if qualifier:
            # str.lower() replaces the Python 2-only string.lower()
            # module function (removed in Python 3); behavior is
            # identical for str input.
            # NOTE(review): when qualifier == 'ark' this re-prefixes the
            # content set above, yielding 'ark: ark: <ark>' -- confirm
            # that double prefix is intended.
            content = '%s: %s' % (qualifier.lower(), content)
    return DCIdentifier(content=content)
constant[Direct how to handle the identifier element.]
variable[ark] assign[=] call[name[kwargs].get, parameter[constant[ark], constant[None]]]
variable[domain_name] assign[=] call[name[kwargs].get, parameter[constant[domain_name], constant[None]]]
variable[scheme] assign[=] <ast.BoolOp object at 0x7da18c4cdd20>
variable[qualifier] assign[=] call[name[kwargs].get, parameter[constant[qualifier], constant[None]]]
variable[content] assign[=] call[name[kwargs].get, parameter[constant[content], constant[]]]
if <ast.BoolOp object at 0x7da2044c2230> begin[:]
variable[content] assign[=] binary_operation[constant[ark: %s] <ast.Mod object at 0x7da2590d6920> name[ark]]
if <ast.BoolOp object at 0x7da2044c0ca0> begin[:]
if <ast.UnaryOp object at 0x7da18c4cc2e0> begin[:]
<ast.AugAssign object at 0x7da18c4cc400>
variable[permalink_url] assign[=] binary_operation[constant[%s://%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18c4ce770>, <ast.Name object at 0x7da18c4ce530>, <ast.Name object at 0x7da18c4cfc70>]]]
if <ast.UnaryOp object at 0x7da18c4ce050> begin[:]
<ast.AugAssign object at 0x7da18c4cd600>
variable[content] assign[=] name[permalink_url]
return[call[name[DCIdentifier], parameter[]]] | keyword[def] identifier[identifier_director] (** identifier[kwargs] ):
literal[string]
identifier[ark] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[domain_name] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[scheme] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[qualifier] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[content] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[ark] keyword[and] identifier[qualifier] == literal[string] :
identifier[content] = literal[string] % identifier[ark]
keyword[if] identifier[domain_name] keyword[and] identifier[ark] keyword[and] identifier[qualifier] == literal[string] :
keyword[if] keyword[not] identifier[domain_name] . identifier[endswith] ( literal[string] ):
identifier[domain_name] += literal[string]
identifier[permalink_url] = literal[string] %( identifier[scheme] , identifier[domain_name] , identifier[ark] )
keyword[if] keyword[not] identifier[permalink_url] . identifier[endswith] ( literal[string] ):
identifier[permalink_url] += literal[string]
identifier[content] = identifier[permalink_url]
keyword[else] :
keyword[if] identifier[qualifier] :
identifier[content] = literal[string] %( identifier[string] . identifier[lower] ( identifier[qualifier] ), identifier[content] )
keyword[return] identifier[DCIdentifier] ( identifier[content] = identifier[content] ) | def identifier_director(**kwargs):
"""Direct how to handle the identifier element."""
ark = kwargs.get('ark', None)
domain_name = kwargs.get('domain_name', None)
# Set default scheme if it is None or is not supplied.
scheme = kwargs.get('scheme') or 'http'
qualifier = kwargs.get('qualifier', None)
content = kwargs.get('content', '')
# See if the ark and domain name were given.
if ark and qualifier == 'ark':
content = 'ark: %s' % ark # depends on [control=['if'], data=[]]
if domain_name and ark and (qualifier == 'permalink'):
# Create the permalink URL.
if not domain_name.endswith('/'):
domain_name += '/' # depends on [control=['if'], data=[]]
permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
# Make sure it has a trailing slash.
if not permalink_url.endswith('/'):
permalink_url += '/' # depends on [control=['if'], data=[]]
content = permalink_url # depends on [control=['if'], data=[]]
elif qualifier:
content = '%s: %s' % (string.lower(qualifier), content) # depends on [control=['if'], data=[]]
return DCIdentifier(content=content) |
def MoVScoring(profile, scoringVector):
    """
    Returns an integer that is the margin of victory (MoV) of a positional
    scoring rule election: the minimum number of votes that must be changed
    so that some candidate other than the current winner wins.
    Returns 1 immediately when the original profile already has more than
    one winner, since a tie can be broken by altering a single vote.
    The winner has the largest score.
    Tie-breaking rule: numerically increasing order
    :ivar Profile profile: A Profile object that represents an election profile.
    :ivar list<int> scoringVector: A list of integers (or floats) that give the scores assigned to
        each position in a ranking from first to last.
    """
    # Currently, we expect the profile to contain complete ordering over candidates.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "csv" and elecType != "toc":
        print("ERROR: unsupported profile type")
        exit()
    winners = MechanismPosScoring(scoringVector).getWinners(profile)
    if len(winners) > 1:
        # Tied election: one changed vote suffices to decide it.
        return 1
    n = profile.numVoters
    m = profile.numCands
    if len(scoringVector) != m:
        print("ERROR: the length of the scoring vector is not correct!")
        exit()
    # Construct the score matrix--values.
    # values[i][j] is the score candidate j (0-based) earns under the i-th
    # distinct ranking pattern of the profile.
    prefcounts = array(profile.getPreferenceCounts())
    rankmaps = profile.getRankMaps()
    len_prefcounts = len(prefcounts)
    values = zeros([len_prefcounts, m], dtype=int)
    # delta normalizes candidate numbering: 0 when candidates are 0-indexed
    # in the rank maps, 1 when they are 1-indexed.
    if min(list(rankmaps[0].keys())) == 0:
        delta = 0
    else:
        delta = 1
    for i in range(len_prefcounts):
        for j in range(delta, m + delta):
            values[i][j - delta] = scoringVector[rankmaps[i][j] - 1]
    # Compute the scores of all the candidates.
    score = dot(array(prefcounts), values)
    # Compute the winner d of the original profile (argmax breaks ties by
    # the lowest index, i.e. numerically increasing order).
    d = argmax(score, axis=0) + delta
    # All challengers: every candidate except the winner d.
    alter = delete(range(delta, m + delta), d - delta)
    # Initialize every candidate's MoV to n (change all votes = worst case).
    MoV = n * ones(m, dtype=int)
    for c in alter:
        # Per-pattern score difference of challenger c minus winner d.
        difference = values[:, c - delta] - values[:, d - delta]
        # Sort patterns so the ones where changing a vote to the pattern
        # [c > others > d] gains the most come first (ascending difference).
        index = argsort(difference, axis=0, kind='mergesort')
        # Gain per changed vote of a given pattern: a changed vote awards c
        # the top score and d the bottom, so the swing is top - difference.
        change = scoringVector[0] - difference
        # The total deficit of c against d that must be overcome.
        total_difference = score[d - delta] - score[c - delta]
        for i in range(len_prefcounts):
            # Number of votes in the i most profitable patterns.
            temp_sum = sum(prefcounts[index][0:i])
            # Aggregate gain from changing all votes of the first i patterns.
            lower_bound = dot(prefcounts[index][0:i], change[index][0:i])
            # Aggregate gain from changing the first i+1 patterns.
            upper_bound = dot(prefcounts[index][0:i + 1], change[index][0:i + 1])
            if lower_bound <= total_difference < upper_bound:
                # The deficit is closed partway through pattern i: change all
                # votes of the first i patterns plus just enough of pattern i.
                MoV[c - delta] = temp_sum + math.ceil(float(total_difference - lower_bound) / change[index][i])
                break
    # The margin of victory is the cheapest way to promote any challenger.
    return min(MoV)
constant[
Returns an integer that represents the winning candidate given an election profile.
The winner has the largest score.
Tie-breaking rule: numerically increasing order
:ivar Profile profile: A Profile object that represents an election profile.
:ivar list<int> scoringVector: A list of integers (or floats) that give the scores assigned to
each position in a ranking from first to last.
]
variable[elecType] assign[=] call[name[profile].getElecType, parameter[]]
if <ast.BoolOp object at 0x7da1b230b460> begin[:]
call[name[print], parameter[constant[ERROR: unsupported profile type]]]
call[name[exit], parameter[]]
variable[winners] assign[=] call[call[name[MechanismPosScoring], parameter[name[scoringVector]]].getWinners, parameter[name[profile]]]
if compare[call[name[len], parameter[name[winners]]] greater[>] constant[1]] begin[:]
return[constant[1]]
variable[n] assign[=] name[profile].numVoters
variable[m] assign[=] name[profile].numCands
if compare[call[name[len], parameter[name[scoringVector]]] not_equal[!=] name[m]] begin[:]
call[name[print], parameter[constant[ERROR: the length of the scoring vector is not correct!]]]
call[name[exit], parameter[]]
variable[prefcounts] assign[=] call[name[array], parameter[call[name[profile].getPreferenceCounts, parameter[]]]]
variable[rankmaps] assign[=] call[name[profile].getRankMaps, parameter[]]
variable[len_prefcounts] assign[=] call[name[len], parameter[name[prefcounts]]]
variable[values] assign[=] call[name[zeros], parameter[list[[<ast.Name object at 0x7da1b230a530>, <ast.Name object at 0x7da1b230a500>]]]]
if compare[call[name[min], parameter[call[name[list], parameter[call[call[name[rankmaps]][constant[0]].keys, parameter[]]]]]] equal[==] constant[0]] begin[:]
variable[delta] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[delta], binary_operation[name[m] + name[delta]]]]] begin[:]
call[call[name[values]][name[i]]][binary_operation[name[j] - name[delta]]] assign[=] call[name[scoringVector]][binary_operation[call[call[name[rankmaps]][name[i]]][name[j]] - constant[1]]]
variable[score] assign[=] call[name[dot], parameter[call[name[array], parameter[name[prefcounts]]], name[values]]]
variable[d] assign[=] binary_operation[call[name[argmax], parameter[name[score]]] + name[delta]]
variable[alter] assign[=] call[name[delete], parameter[call[name[range], parameter[name[delta], binary_operation[name[m] + name[delta]]]], binary_operation[name[d] - name[delta]]]]
variable[MoV] assign[=] binary_operation[name[n] * call[name[ones], parameter[name[m]]]]
for taget[name[c]] in starred[name[alter]] begin[:]
variable[difference] assign[=] binary_operation[call[name[values]][tuple[[<ast.Slice object at 0x7da1b2309240>, <ast.BinOp object at 0x7da1b2309210>]]] - call[name[values]][tuple[[<ast.Slice object at 0x7da1b23090f0>, <ast.BinOp object at 0x7da1b23090c0>]]]]
variable[index] assign[=] call[name[argsort], parameter[name[difference]]]
variable[change] assign[=] binary_operation[call[name[scoringVector]][constant[0]] - name[difference]]
variable[total_difference] assign[=] binary_operation[call[name[score]][binary_operation[name[d] - name[delta]]] - call[name[score]][binary_operation[name[c] - name[delta]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
variable[temp_sum] assign[=] call[name[sum], parameter[call[call[name[prefcounts]][name[index]]][<ast.Slice object at 0x7da1b2308850>]]]
variable[lower_bound] assign[=] call[name[dot], parameter[call[call[name[prefcounts]][name[index]]][<ast.Slice object at 0x7da1b2350fd0>], call[call[name[change]][name[index]]][<ast.Slice object at 0x7da1b2351660>]]]
variable[upper_bound] assign[=] call[name[dot], parameter[call[call[name[prefcounts]][name[index]]][<ast.Slice object at 0x7da1b2351f00>], call[call[name[change]][name[index]]][<ast.Slice object at 0x7da1b2350d00>]]]
if compare[name[lower_bound] less_or_equal[<=] name[total_difference]] begin[:]
call[name[MoV]][binary_operation[name[c] - name[delta]]] assign[=] binary_operation[name[temp_sum] + call[name[math].ceil, parameter[binary_operation[call[name[float], parameter[binary_operation[name[total_difference] - name[lower_bound]]]] / call[call[name[change]][name[index]]][name[i]]]]]]
break
return[call[name[min], parameter[name[MoV]]]] | keyword[def] identifier[MoVScoring] ( identifier[profile] , identifier[scoringVector] ):
literal[string]
identifier[elecType] = identifier[profile] . identifier[getElecType] ()
keyword[if] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] :
identifier[print] ( literal[string] )
identifier[exit] ()
identifier[winners] = identifier[MechanismPosScoring] ( identifier[scoringVector] ). identifier[getWinners] ( identifier[profile] )
keyword[if] identifier[len] ( identifier[winners] )> literal[int] :
keyword[return] literal[int]
identifier[n] = identifier[profile] . identifier[numVoters]
identifier[m] = identifier[profile] . identifier[numCands]
keyword[if] identifier[len] ( identifier[scoringVector] )!= identifier[m] :
identifier[print] ( literal[string] )
identifier[exit] ()
identifier[prefcounts] = identifier[array] ( identifier[profile] . identifier[getPreferenceCounts] ())
identifier[rankmaps] = identifier[profile] . identifier[getRankMaps] ()
identifier[len_prefcounts] = identifier[len] ( identifier[prefcounts] )
identifier[values] = identifier[zeros] ([ identifier[len_prefcounts] , identifier[m] ], identifier[dtype] = identifier[int] )
keyword[if] identifier[min] ( identifier[list] ( identifier[rankmaps] [ literal[int] ]. identifier[keys] ()))== literal[int] :
identifier[delta] = literal[int]
keyword[else] :
identifier[delta] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[delta] , identifier[m] + identifier[delta] ):
identifier[values] [ identifier[i] ][ identifier[j] - identifier[delta] ]= identifier[scoringVector] [ identifier[rankmaps] [ identifier[i] ][ identifier[j] ]- literal[int] ]
identifier[score] = identifier[dot] ( identifier[array] ( identifier[prefcounts] ), identifier[values] )
identifier[d] = identifier[argmax] ( identifier[score] , identifier[axis] = literal[int] )+ identifier[delta]
identifier[alter] = identifier[delete] ( identifier[range] ( identifier[delta] , identifier[m] + identifier[delta] ), identifier[d] - identifier[delta] )
identifier[MoV] = identifier[n] * identifier[ones] ( identifier[m] , identifier[dtype] = identifier[int] )
keyword[for] identifier[c] keyword[in] identifier[alter] :
identifier[difference] = identifier[values] [:, identifier[c] - identifier[delta] ]- identifier[values] [:, identifier[d] - identifier[delta] ]
identifier[index] = identifier[argsort] ( identifier[difference] , identifier[axis] = literal[int] , identifier[kind] = literal[string] )
identifier[change] = identifier[scoringVector] [ literal[int] ]- identifier[difference]
identifier[total_difference] = identifier[score] [ identifier[d] - identifier[delta] ]- identifier[score] [ identifier[c] - identifier[delta] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
identifier[temp_sum] = identifier[sum] ( identifier[prefcounts] [ identifier[index] ][ literal[int] : identifier[i] ])
identifier[lower_bound] = identifier[dot] ( identifier[prefcounts] [ identifier[index] ][ literal[int] : identifier[i] ], identifier[change] [ identifier[index] ][ literal[int] : identifier[i] ])
identifier[upper_bound] = identifier[dot] ( identifier[prefcounts] [ identifier[index] ][ literal[int] : identifier[i] + literal[int] ], identifier[change] [ identifier[index] ][ literal[int] : identifier[i] + literal[int] ])
keyword[if] identifier[lower_bound] <= identifier[total_difference] < identifier[upper_bound] :
identifier[MoV] [ identifier[c] - identifier[delta] ]= identifier[temp_sum] + identifier[math] . identifier[ceil] ( identifier[float] ( identifier[total_difference] - identifier[lower_bound] )/ identifier[change] [ identifier[index] ][ identifier[i] ])
keyword[break]
keyword[return] identifier[min] ( identifier[MoV] ) | def MoVScoring(profile, scoringVector):
"""
Returns an integer that represents the winning candidate given an election profile.
The winner has the largest score.
Tie-breaking rule: numerically increasing order
:ivar Profile profile: A Profile object that represents an election profile.
:ivar list<int> scoringVector: A list of integers (or floats) that give the scores assigned to
each position in a ranking from first to last.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != 'soc' and elecType != 'csv' and (elecType != 'toc'):
print('ERROR: unsupported profile type')
exit() # depends on [control=['if'], data=[]]
winners = MechanismPosScoring(scoringVector).getWinners(profile)
if len(winners) > 1:
return 1 # depends on [control=['if'], data=[]]
n = profile.numVoters
m = profile.numCands
if len(scoringVector) != m:
print('ERROR: the length of the scoring vector is not correct!')
exit() # depends on [control=['if'], data=[]]
# Construct the score matrix--values
prefcounts = array(profile.getPreferenceCounts())
rankmaps = profile.getRankMaps()
len_prefcounts = len(prefcounts)
values = zeros([len_prefcounts, m], dtype=int)
if min(list(rankmaps[0].keys())) == 0:
delta = 0 # depends on [control=['if'], data=[]]
else:
delta = 1
for i in range(len_prefcounts):
for j in range(delta, m + delta):
values[i][j - delta] = scoringVector[rankmaps[i][j] - 1] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
# Compute the scores of all the candidates
score = dot(array(prefcounts), values)
# Compute the winner of the original profile
d = argmax(score, axis=0) + delta
# print("d=",d)
alter = delete(range(delta, m + delta), d - delta)
# Initialize
MoV = n * ones(m, dtype=int)
# for c in [3]:
for c in alter:
# The difference vector of d and c
difference = values[:, c - delta] - values[:, d - delta]
# print("dif=", difference)
index = argsort(difference, axis=0, kind='mergesort')
# The vector that each element is the gain in the difference
# between d and c if the pattern of the vote changed to [c > others > d]
change = scoringVector[0] - difference
# The total_difference between score(d) and score(c)
total_difference = score[d - delta] - score[c - delta]
# print("total-dif=", total_difference)
for i in range(len_prefcounts):
# The number of votes of the first i kinds of patterns
temp_sum = sum(prefcounts[index][0:i])
# print("temp_sum=", temp_sum)
# The aggregate gain (of the first i kinds of patterns)
# in the difference between d and c if changed to [c > others > d]
lower_bound = dot(prefcounts[index][0:i], change[index][0:i])
# print("lower_bound=", lower_bound)
# The aggregate gain (of the first i+1 kinds of patterns)
# in the difference between d and c if changed to [c > others > d]
upper_bound = dot(prefcounts[index][0:i + 1], change[index][0:i + 1])
# print("upper_bound=", upper_bound)
# if lower_bound < total_difference <= upper_bound:
if lower_bound <= total_difference < upper_bound:
# MoV[c - delta] = temp_sum + math.floor(float(total_difference - lower_bound)/change[index][i]) + 1
# Update on Apr 13 2019
MoV[c - delta] = temp_sum + math.ceil(float(total_difference - lower_bound) / change[index][i])
break # depends on [control=['if'], data=['lower_bound', 'total_difference']] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['c']]
# print("MoV=", MoV)
return min(MoV) |
def set_default(self):
    """
    Make this key the default key on the service.
    :returns: An updated list of added keys
    """
    url = "%s/keys/%s" % (self.mist_client.uri, self.id)
    self.request(url).post()
    self.is_default = True
    self.mist_client.update_keys()
constant[
Set this key as the default key
:returns: An updated list of added keys
]
variable[req] assign[=] call[name[self].request, parameter[binary_operation[binary_operation[name[self].mist_client.uri + constant[/keys/]] + name[self].id]]]
call[name[req].post, parameter[]]
name[self].is_default assign[=] constant[True]
call[name[self].mist_client.update_keys, parameter[]] | keyword[def] identifier[set_default] ( identifier[self] ):
literal[string]
identifier[req] = identifier[self] . identifier[request] ( identifier[self] . identifier[mist_client] . identifier[uri] + literal[string] + identifier[self] . identifier[id] )
identifier[req] . identifier[post] ()
identifier[self] . identifier[is_default] = keyword[True]
identifier[self] . identifier[mist_client] . identifier[update_keys] () | def set_default(self):
"""
Set this key as the default key
:returns: An updated list of added keys
"""
req = self.request(self.mist_client.uri + '/keys/' + self.id)
req.post()
self.is_default = True
self.mist_client.update_keys() |
def verify_permitted_to_read(gs_path):
  """Check if the user has permissions to read from the given path.
  Args:
    gs_path: the GCS path to check if user is permitted to read.
  Raises:
    Exception if user has no permissions to read.
  """
  # TODO(qimingj): Storage APIs need to be modified to allow absence of project
  # or credential on Items. When that happens we can move the function
  # to Items class.
  from . import _bucket
  bucket, prefix = _bucket.parse_name(gs_path)
  # Only attach credentials when a user is actually signed in.
  credentials = (datalab.context._utils.get_credentials()
                 if datalab.context.Context.is_signed_in() else None)
  args = {'maxResults': Api._MAX_RESULTS, 'projection': 'noAcl'}
  if prefix is not None:
    args['prefix'] = prefix
  url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
  try:
    datalab.utils.Http.request(url, args=args, credentials=credentials)
  except datalab.utils.RequestException as e:
    # 401 means the listing was rejected for lack of authorization.
    if e.status == 401:
      raise Exception('Not permitted to read from specified path. '
                      'Please sign in and make sure you have read access.')
    raise e
constant[Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read.
]
from relative_module[None] import module[_bucket]
<ast.Tuple object at 0x7da18f00d480> assign[=] call[name[_bucket].parse_name, parameter[name[gs_path]]]
variable[credentials] assign[=] constant[None]
if call[name[datalab].context.Context.is_signed_in, parameter[]] begin[:]
variable[credentials] assign[=] call[name[datalab].context._utils.get_credentials, parameter[]]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da18f00db10>, <ast.Constant object at 0x7da18f00d900>], [<ast.Attribute object at 0x7da18f00e830>, <ast.Constant object at 0x7da18f00f550>]]
if compare[name[prefix] is_not constant[None]] begin[:]
call[name[args]][constant[prefix]] assign[=] name[prefix]
variable[url] assign[=] binary_operation[name[Api]._ENDPOINT + binary_operation[name[Api]._OBJECT_PATH <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00d8d0>, <ast.Constant object at 0x7da18f00dd20>]]]]
<ast.Try object at 0x7da18f00f820> | keyword[def] identifier[verify_permitted_to_read] ( identifier[gs_path] ):
literal[string]
keyword[from] . keyword[import] identifier[_bucket]
identifier[bucket] , identifier[prefix] = identifier[_bucket] . identifier[parse_name] ( identifier[gs_path] )
identifier[credentials] = keyword[None]
keyword[if] identifier[datalab] . identifier[context] . identifier[Context] . identifier[is_signed_in] ():
identifier[credentials] = identifier[datalab] . identifier[context] . identifier[_utils] . identifier[get_credentials] ()
identifier[args] ={
literal[string] : identifier[Api] . identifier[_MAX_RESULTS] ,
literal[string] : literal[string]
}
keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] :
identifier[args] [ literal[string] ]= identifier[prefix]
identifier[url] = identifier[Api] . identifier[_ENDPOINT] +( identifier[Api] . identifier[_OBJECT_PATH] %( identifier[bucket] , literal[string] ))
keyword[try] :
identifier[datalab] . identifier[utils] . identifier[Http] . identifier[request] ( identifier[url] , identifier[args] = identifier[args] , identifier[credentials] = identifier[credentials] )
keyword[except] identifier[datalab] . identifier[utils] . identifier[RequestException] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[status] == literal[int] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
keyword[raise] identifier[e] | def verify_permitted_to_read(gs_path):
"""Check if the user has permissions to read from the given path.
Args:
gs_path: the GCS path to check if user is permitted to read.
Raises:
Exception if user has no permissions to read.
"""
# TODO(qimingj): Storage APIs need to be modified to allow absence of project
# or credential on Items. When that happens we can move the function
# to Items class.
from . import _bucket
(bucket, prefix) = _bucket.parse_name(gs_path)
credentials = None
if datalab.context.Context.is_signed_in():
credentials = datalab.context._utils.get_credentials() # depends on [control=['if'], data=[]]
args = {'maxResults': Api._MAX_RESULTS, 'projection': 'noAcl'}
if prefix is not None:
args['prefix'] = prefix # depends on [control=['if'], data=['prefix']]
url = Api._ENDPOINT + Api._OBJECT_PATH % (bucket, '')
try:
datalab.utils.Http.request(url, args=args, credentials=credentials) # depends on [control=['try'], data=[]]
except datalab.utils.RequestException as e:
if e.status == 401:
raise Exception('Not permitted to read from specified path. Please sign in and make sure you have read access.') # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']] |
def model(self, *args, **kwargs):
    """Retrieve a single model belonging to this scope.
    See :class:`pykechain.Client.model` for available parameters.
    """
    # Scope the lookup to this scope's bucket, then delegate to the client.
    bucket_id = self.bucket.get('id')
    return self._client.model(*args, bucket=bucket_id, **kwargs)
constant[Retrieve a single model belonging to this scope.
See :class:`pykechain.Client.model` for available parameters.
]
return[call[name[self]._client.model, parameter[<ast.Starred object at 0x7da2046224a0>]]] | keyword[def] identifier[model] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_client] . identifier[model] (* identifier[args] , identifier[bucket] = identifier[self] . identifier[bucket] . identifier[get] ( literal[string] ),** identifier[kwargs] ) | def model(self, *args, **kwargs):
"""Retrieve a single model belonging to this scope.
See :class:`pykechain.Client.model` for available parameters.
"""
return self._client.model(*args, bucket=self.bucket.get('id'), **kwargs) |
def radius_server_host_use_vrf(self, **kwargs):
    """Auto Generated Code
    """
    # Build <config>/<radius-server>/<host> with hostname and use-vrf leaves.
    config = ET.Element("config")
    radius_server = ET.SubElement(config, "radius-server",
                                  xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(radius_server, "host")
    ET.SubElement(host, "hostname").text = kwargs.pop('hostname')
    ET.SubElement(host, "use-vrf").text = kwargs.pop('use_vrf')
    # Dispatch through the caller-supplied callback, defaulting to ours.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[radius_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[radius-server]]]
variable[host] assign[=] call[name[ET].SubElement, parameter[name[radius_server], constant[host]]]
variable[hostname_key] assign[=] call[name[ET].SubElement, parameter[name[host], constant[hostname]]]
name[hostname_key].text assign[=] call[name[kwargs].pop, parameter[constant[hostname]]]
variable[use_vrf] assign[=] call[name[ET].SubElement, parameter[name[host], constant[use-vrf]]]
name[use_vrf].text assign[=] call[name[kwargs].pop, parameter[constant[use_vrf]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[radius_server_host_use_vrf] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[radius_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[host] = identifier[ET] . identifier[SubElement] ( identifier[radius_server] , literal[string] )
identifier[hostname_key] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[hostname_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[use_vrf] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[use_vrf] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def radius_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
radius_server = ET.SubElement(config, 'radius-server', xmlns='urn:brocade.com:mgmt:brocade-aaa')
host = ET.SubElement(radius_server, 'host')
hostname_key = ET.SubElement(host, 'hostname')
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, 'use-vrf')
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def knuth_sum(a, b):
    """Error-free transformation (TwoSum) of the sum of two floats.

    The rounded sum ``fl(a + b)`` generally differs from the exact sum,
    but Knuth (TAOCP vol. 2) showed the rounding error is itself exactly
    representable as a float. This returns both parts, so that
    ``total + err == a + b`` holds exactly in real arithmetic.

    Algorithm 3.1 in <https://doi.org/10.1137/030601818>.

    Returns:
        (total, err): the floating point sum and its exact rounding error.
    """
    total = a + b
    b_virtual = total - a
    a_virtual = total - b_virtual
    a_roundoff = a - a_virtual
    b_roundoff = b - b_virtual
    return total, a_roundoff + b_roundoff
return x, y | def function[knuth_sum, parameter[a, b]]:
constant[Error-free transformation of the sum of two floating point numbers
according to
D.E. Knuth.
The Art of Computer Programming: Seminumerical Algorithms, volume 2.
Addison Wesley, Reading, Massachusetts, second edition, 1981.
The underlying problem is that the exact sum a+b of two floating point
number a and b is not necessarily a floating point number; for example if
you add a very large and a very small number. It is however known that the
difference between the best floating point approximation of a+b and the
exact a+b is again a floating point number. This routine returns the sum
and the error.
Algorithm 3.1 in <https://doi.org/10.1137/030601818>.
]
variable[x] assign[=] binary_operation[name[a] + name[b]]
variable[z] assign[=] binary_operation[name[x] - name[a]]
variable[y] assign[=] binary_operation[binary_operation[name[a] - binary_operation[name[x] - name[z]]] + binary_operation[name[b] - name[z]]]
return[tuple[[<ast.Name object at 0x7da20e9b3880>, <ast.Name object at 0x7da20e9b3c70>]]] | keyword[def] identifier[knuth_sum] ( identifier[a] , identifier[b] ):
literal[string]
identifier[x] = identifier[a] + identifier[b]
identifier[z] = identifier[x] - identifier[a]
identifier[y] =( identifier[a] -( identifier[x] - identifier[z] ))+( identifier[b] - identifier[z] )
keyword[return] identifier[x] , identifier[y] | def knuth_sum(a, b):
"""Error-free transformation of the sum of two floating point numbers
according to
D.E. Knuth.
The Art of Computer Programming: Seminumerical Algorithms, volume 2.
Addison Wesley, Reading, Massachusetts, second edition, 1981.
The underlying problem is that the exact sum a+b of two floating point
number a and b is not necessarily a floating point number; for example if
you add a very large and a very small number. It is however known that the
difference between the best floating point approximation of a+b and the
exact a+b is again a floating point number. This routine returns the sum
and the error.
Algorithm 3.1 in <https://doi.org/10.1137/030601818>.
"""
x = a + b
z = x - a
y = a - (x - z) + (b - z)
return (x, y) |
def vel_critical(self):
    """The average vertical velocity of the water inside the LFOM pipe
    at the very bottom of the bottom row of orifices.

    The speed of falling water is 0.841 m/s for all linear flow orifice
    meters of height 20 cm, independent of total plant flow rate.
    """
    # Free-fall velocity over the full head loss, scaled by the
    # 4/(3*pi) shape factor for the orifice meter.
    shape_factor = 4 / (3 * math.pi)
    fall_velocity = (2 * pc.gravity * self.hl) ** 0.5
    return (shape_factor * fall_velocity).to(u.m / u.s)
constant[The average vertical velocity of the water inside the LFOM pipe
at the very bottom of the bottom row of orifices The speed of
falling water is 0.841 m/s for all linear flow orifice meters of
height 20 cm, independent of total plant flow rate. ]
return[call[binary_operation[binary_operation[constant[4] / binary_operation[constant[3] * name[math].pi]] * binary_operation[binary_operation[binary_operation[constant[2] * name[pc].gravity] * name[self].hl] ** binary_operation[constant[1] / constant[2]]]].to, parameter[binary_operation[name[u].m / name[u].s]]]] | keyword[def] identifier[vel_critical] ( identifier[self] ):
literal[string]
keyword[return] ( literal[int] /( literal[int] * identifier[math] . identifier[pi] )*( literal[int] * identifier[pc] . identifier[gravity] * identifier[self] . identifier[hl] )**( literal[int] / literal[int] )). identifier[to] ( identifier[u] . identifier[m] / identifier[u] . identifier[s] ) | def vel_critical(self):
"""The average vertical velocity of the water inside the LFOM pipe
at the very bottom of the bottom row of orifices The speed of
falling water is 0.841 m/s for all linear flow orifice meters of
height 20 cm, independent of total plant flow rate. """
return (4 / (3 * math.pi) * (2 * pc.gravity * self.hl) ** (1 / 2)).to(u.m / u.s) |
def GetHTTPHeaders(self):
    """Returns the HTTP headers required for request authorization.

    Returns:
      A dictionary containing the required headers.
    """
    # Start from the OAuth2 authorization header supplied by the client.
    headers = self._adwords_client.oauth2_client.CreateHttpHeader()
    # Advertise gzip support only when compression has been enabled.
    if self.enable_compression:
        headers['accept-encoding'] = 'gzip'
    # Caller-supplied custom headers take precedence over generated ones.
    headers.update(self.custom_http_headers)
    return headers
constant[Returns the HTTP headers required for request authorization.
Returns:
A dictionary containing the required headers.
]
variable[http_headers] assign[=] call[name[self]._adwords_client.oauth2_client.CreateHttpHeader, parameter[]]
if name[self].enable_compression begin[:]
call[name[http_headers]][constant[accept-encoding]] assign[=] constant[gzip]
call[name[http_headers].update, parameter[name[self].custom_http_headers]]
return[name[http_headers]] | keyword[def] identifier[GetHTTPHeaders] ( identifier[self] ):
literal[string]
identifier[http_headers] = identifier[self] . identifier[_adwords_client] . identifier[oauth2_client] . identifier[CreateHttpHeader] ()
keyword[if] identifier[self] . identifier[enable_compression] :
identifier[http_headers] [ literal[string] ]= literal[string]
identifier[http_headers] . identifier[update] ( identifier[self] . identifier[custom_http_headers] )
keyword[return] identifier[http_headers] | def GetHTTPHeaders(self):
"""Returns the HTTP headers required for request authorization.
Returns:
A dictionary containing the required headers.
"""
http_headers = self._adwords_client.oauth2_client.CreateHttpHeader()
if self.enable_compression:
http_headers['accept-encoding'] = 'gzip' # depends on [control=['if'], data=[]]
http_headers.update(self.custom_http_headers)
return http_headers |
def _bits_to_geohash(value):
    """Convert a list of GeoHash bits to a GeoHash string.

    Args:
        value: sequence of 0/1 bits; every group of five bits encodes one
            base32 character (most significant bit first).

    Returns:
        The GeoHash string encoded by the bits.
    """
    ret = []
    # Consume the bits five at a time; each 5-bit group maps to one
    # base32 character.
    # Bug fix: ``xrange`` is Python-2-only and raises NameError on
    # Python 3; ``range`` behaves identically here on both versions.
    for chunk in (value[i:i + 5] for i in range(0, len(value), 5)):
        # Interpret the chunk as a big-endian integer (MSB first),
        # equivalent to summing bit * 2**position over the reversed chunk.
        total = 0
        for bit in chunk:
            total = (total << 1) | bit
        ret.append(BASE32MAPR[total])
    # Join the string and return
    return "".join(ret)
constant[Convert a list of GeoHash bits to a GeoHash.]
variable[ret] assign[=] list[[]]
for taget[name[i]] in starred[<ast.GeneratorExp object at 0x7da1b101bd90>] begin[:]
variable[total] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b1019cc0>]]
call[name[ret].append, parameter[call[name[BASE32MAPR]][name[total]]]]
return[call[constant[].join, parameter[name[ret]]]] | keyword[def] identifier[_bits_to_geohash] ( identifier[value] ):
literal[string]
identifier[ret] =[]
keyword[for] identifier[i] keyword[in] ( identifier[value] [ identifier[i] : identifier[i] + literal[int] ] keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[value] ), literal[int] )):
identifier[total] = identifier[sum] ([( identifier[bit] * literal[int] ** identifier[count] ) keyword[for] identifier[count] , identifier[bit] keyword[in] identifier[enumerate] ( identifier[i] [::- literal[int] ])])
identifier[ret] . identifier[append] ( identifier[BASE32MAPR] [ identifier[total] ])
keyword[return] literal[string] . identifier[join] ( identifier[ret] ) | def _bits_to_geohash(value):
"""Convert a list of GeoHash bits to a GeoHash."""
ret = []
# Get 5 bits at a time
for i in (value[i:i + 5] for i in xrange(0, len(value), 5)):
# Convert binary to integer
# Note: reverse here, the slice above doesn't work quite right in reverse.
total = sum([bit * 2 ** count for (count, bit) in enumerate(i[::-1])])
ret.append(BASE32MAPR[total]) # depends on [control=['for'], data=['i']]
# Join the string and return
return ''.join(ret) |
def get(self, name):
    """
    Get a resource from the remote site.

    Parameters
    -------------
    name : str
      Asset name, i.e. 'quadknot.obj.mtl'
    """
    # Imported lazily so that `requests` stays a soft dependency.
    import requests

    # Resolve the asset name against the configured base URL, then
    # download it and hand back the raw bytes of the payload.
    target = urljoin(self.base_url, name)
    remote = requests.get(target)
    return remote.content
constant[
Get a resource from the remote site.
Parameters
-------------
name : str
Asset name, i.e. 'quadknot.obj.mtl'
]
import module[requests]
variable[url] assign[=] call[name[urljoin], parameter[name[self].base_url, name[name]]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
return[name[response].content] | keyword[def] identifier[get] ( identifier[self] , identifier[name] ):
literal[string]
keyword[import] identifier[requests]
identifier[url] = identifier[urljoin] ( identifier[self] . identifier[base_url] , identifier[name] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[return] identifier[response] . identifier[content] | def get(self, name):
"""
Get a resource from the remote site.
Parameters
-------------
name : str
Asset name, i.e. 'quadknot.obj.mtl'
"""
# do import here to keep soft dependency
import requests
# append base url to requested name
url = urljoin(self.base_url, name)
# fetch the data from the remote url
response = requests.get(url)
# return the bytes of the response
return response.content |
def users(self):
    """Get current users and add in any search results.

    Existing workspace members come first; search results that are not
    already members are appended, and the combined list is sorted by
    title.

    :returns: a list of dicts with keys

             - id
             - title

    :rtype: list
    """
    existing_users = self.existing_users()
    # Use a set for O(1) membership tests while filtering search results
    # (was a list, giving O(n*m) behavior for large workspaces).
    existing_user_ids = {x['id'] for x in existing_users}
    # Only add search results that are not already members
    sharing = getMultiAdapter((self.my_workspace(), self.request),
                              name='sharing')
    search_results = sharing.user_search_results()
    users = existing_users + [x for x in search_results
                              if x['id'] not in existing_user_ids]
    users.sort(key=lambda x: safe_unicode(x["title"]))
    return users
constant[Get current users and add in any search results.
:returns: a list of dicts with keys
- id
- title
:rtype: list
]
variable[existing_users] assign[=] call[name[self].existing_users, parameter[]]
variable[existing_user_ids] assign[=] <ast.ListComp object at 0x7da204622950>
variable[sharing] assign[=] call[name[getMultiAdapter], parameter[tuple[[<ast.Call object at 0x7da1b15f0100>, <ast.Attribute object at 0x7da1b15f2b60>]]]]
variable[search_results] assign[=] call[name[sharing].user_search_results, parameter[]]
variable[users] assign[=] binary_operation[name[existing_users] + <ast.ListComp object at 0x7da1b15f1570>]
call[name[users].sort, parameter[]]
return[name[users]] | keyword[def] identifier[users] ( identifier[self] ):
literal[string]
identifier[existing_users] = identifier[self] . identifier[existing_users] ()
identifier[existing_user_ids] =[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[existing_users] ]
identifier[sharing] = identifier[getMultiAdapter] (( identifier[self] . identifier[my_workspace] (), identifier[self] . identifier[request] ),
identifier[name] = literal[string] )
identifier[search_results] = identifier[sharing] . identifier[user_search_results] ()
identifier[users] = identifier[existing_users] +[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[search_results]
keyword[if] identifier[x] [ literal[string] ] keyword[not] keyword[in] identifier[existing_user_ids] ]
identifier[users] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[safe_unicode] ( identifier[x] [ literal[string] ]))
keyword[return] identifier[users] | def users(self):
"""Get current users and add in any search results.
:returns: a list of dicts with keys
- id
- title
:rtype: list
"""
existing_users = self.existing_users()
existing_user_ids = [x['id'] for x in existing_users]
# Only add search results that are not already members
sharing = getMultiAdapter((self.my_workspace(), self.request), name='sharing')
search_results = sharing.user_search_results()
users = existing_users + [x for x in search_results if x['id'] not in existing_user_ids]
users.sort(key=lambda x: safe_unicode(x['title']))
return users |
def commit_times_for(self, git, use_files):
    """
    Yield (relpath, commit_time) pairs for the use_files specified.

    We will use a cache of commit times if self.with_cache is Truthy.

    relpath is relative to self.parent_dir and commit_time is the commit
    time in UTC for that path. Files for which no commit time can be
    found are logged with a warning and skipped.

    Args:
        git: repository wrapper exposing ``first_commit`` and
            ``file_commit_times(paths, debug=...)``.
        use_files: iterable of path objects with ``path`` and ``relpath``
            attributes; symlinked paths may also carry ``real_relpath``.
            NOTE(review): this is iterated more than once, so it must not
            be a one-shot generator -- confirm callers pass a collection.
    """
    # Use real_relpath if it exists (SymlinkdPath) and default to just the path
    # This is because we _want_ to compare the commits to the _real paths_
    # As git only cares about the symlink itself, rather than files under it
    # We also want to make sure that the symlink targets are included in use_files
    # If they've been excluded by the filters
    use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
    # Find us the first commit to consider
    first_commit = str(git.first_commit)
    # Try and get our cached commit times
    # If we get a commit then it means we have a match for this parent/sorted_relpaths
    commit_times = {}
    cached_commit, cached_commit_times = None, {}
    if self.with_cache:
        sorted_relpaths = sorted([p.relpath for p in use_files])
        cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
        # The cache is only valid if it was built from the same first commit.
        if cached_commit == first_commit:
            commit_times = cached_commit_times
    # If we couldn't find cached commit times, we have to do some work
    if not commit_times:
        for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
            for path in different_paths:
                commit_times[path] = commit_time
        # Refresh the cache with the freshly computed times.
        if self.with_cache:
            set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
    # Finally, yield the (relpath, commit_time) for all the files we care about.
    for key in use_files:
        if key.relpath:
            path = getattr(key, "real_relpath", key.path)
            relpath = getattr(key, "real_relpath", key.relpath)
            if path in commit_times:
                yield key.relpath, commit_times[path]
            else:
                log.warning("Couldn't find commit time for {0}".format(relpath))
constant[
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
]
variable[use_files_paths] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b15f5570>]]
variable[first_commit] assign[=] call[name[str], parameter[name[git].first_commit]]
variable[commit_times] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b15f6d10> assign[=] tuple[[<ast.Constant object at 0x7da1b15f6da0>, <ast.Dict object at 0x7da1b15f5990>]]
if name[self].with_cache begin[:]
variable[sorted_relpaths] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b15f5d80>]]
<ast.Tuple object at 0x7da1b15f4460> assign[=] call[name[get_cached_commit_times], parameter[name[self].root_folder, name[self].parent_dir, name[sorted_relpaths]]]
if compare[name[cached_commit] equal[==] name[first_commit]] begin[:]
variable[commit_times] assign[=] name[cached_commit_times]
if <ast.UnaryOp object at 0x7da1b15f7700> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b15f65c0>, <ast.Name object at 0x7da1b15f51e0>, <ast.Name object at 0x7da1b15f44c0>]]] in starred[call[name[git].file_commit_times, parameter[name[use_files_paths]]]] begin[:]
for taget[name[path]] in starred[name[different_paths]] begin[:]
call[name[commit_times]][name[path]] assign[=] name[commit_time]
if name[self].with_cache begin[:]
call[name[set_cached_commit_times], parameter[name[self].root_folder, name[self].parent_dir, name[first_commit], name[commit_times], name[sorted_relpaths]]]
for taget[name[key]] in starred[name[use_files]] begin[:]
if name[key].relpath begin[:]
variable[path] assign[=] call[name[getattr], parameter[name[key], constant[real_relpath], name[key].path]]
variable[relpath] assign[=] call[name[getattr], parameter[name[key], constant[real_relpath], name[key].relpath]]
if compare[name[path] in name[commit_times]] begin[:]
<ast.Yield object at 0x7da1b15f4160> | keyword[def] identifier[commit_times_for] ( identifier[self] , identifier[git] , identifier[use_files] ):
literal[string]
identifier[use_files_paths] = identifier[set] ([ identifier[getattr] ( identifier[p] , literal[string] , identifier[p] . identifier[path] ) keyword[for] identifier[p] keyword[in] identifier[use_files] keyword[if] identifier[p] . identifier[relpath] ])
identifier[first_commit] = identifier[str] ( identifier[git] . identifier[first_commit] )
identifier[commit_times] ={}
identifier[cached_commit] , identifier[cached_commit_times] = keyword[None] ,{}
keyword[if] identifier[self] . identifier[with_cache] :
identifier[sorted_relpaths] = identifier[sorted] ([ identifier[p] . identifier[relpath] keyword[for] identifier[p] keyword[in] identifier[use_files] ])
identifier[cached_commit] , identifier[cached_commit_times] = identifier[get_cached_commit_times] ( identifier[self] . identifier[root_folder] , identifier[self] . identifier[parent_dir] , identifier[sorted_relpaths] )
keyword[if] identifier[cached_commit] == identifier[first_commit] :
identifier[commit_times] = identifier[cached_commit_times]
keyword[if] keyword[not] identifier[commit_times] :
keyword[for] identifier[commit_id] , identifier[commit_time] , identifier[different_paths] keyword[in] identifier[git] . identifier[file_commit_times] ( identifier[use_files_paths] , identifier[debug] = identifier[self] . identifier[debug] ):
keyword[for] identifier[path] keyword[in] identifier[different_paths] :
identifier[commit_times] [ identifier[path] ]= identifier[commit_time]
keyword[if] identifier[self] . identifier[with_cache] :
identifier[set_cached_commit_times] ( identifier[self] . identifier[root_folder] , identifier[self] . identifier[parent_dir] , identifier[first_commit] , identifier[commit_times] , identifier[sorted_relpaths] )
keyword[for] identifier[key] keyword[in] identifier[use_files] :
keyword[if] identifier[key] . identifier[relpath] :
identifier[path] = identifier[getattr] ( identifier[key] , literal[string] , identifier[key] . identifier[path] )
identifier[relpath] = identifier[getattr] ( identifier[key] , literal[string] , identifier[key] . identifier[relpath] )
keyword[if] identifier[path] keyword[in] identifier[commit_times] :
keyword[yield] identifier[key] . identifier[relpath] , identifier[commit_times] [ identifier[path] ]
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[relpath] )) | def commit_times_for(self, git, use_files):
"""
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
"""
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, 'real_relpath', p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
(cached_commit, cached_commit_times) = (None, {})
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
(cached_commit, cached_commit_times) = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for (commit_id, commit_time, different_paths) in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time # depends on [control=['for'], data=['path']] # depends on [control=['for'], data=[]]
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, 'real_relpath', key.path)
relpath = getattr(key, 'real_relpath', key.relpath)
if path in commit_times:
yield (key.relpath, commit_times[path]) # depends on [control=['if'], data=['path', 'commit_times']]
else:
log.warning("Couldn't find commit time for {0}".format(relpath)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
def get_code(results):
    """Determines the exit status code to be returned from a script by
    inspecting the results returned from validating file(s).

    Status codes are binary OR'd together, so exit codes can communicate
    multiple error conditions.
    """
    status = EXIT_SUCCESS
    for file_result in results:
        # A schema error in any contained object marks the file invalid.
        has_error = False
        for object_result in file_result.object_results:
            if object_result.errors:
                has_error = True
                break
        if has_error:
            status |= EXIT_SCHEMA_INVALID
        # A fatal condition is flagged independently of schema errors.
        if file_result.fatal:
            status |= EXIT_VALIDATION_ERROR
    return status
constant[Determines the exit status code to be returned from a script by
inspecting the results returned from validating file(s).
Status codes are binary OR'd together, so exit codes can communicate
multiple error conditions.
]
variable[status] assign[=] name[EXIT_SUCCESS]
for taget[name[file_result]] in starred[name[results]] begin[:]
variable[error] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b0faffa0>]]
variable[fatal] assign[=] name[file_result].fatal
if name[error] begin[:]
<ast.AugAssign object at 0x7da1b0fac100>
if name[fatal] begin[:]
<ast.AugAssign object at 0x7da1b0fad240>
return[name[status]] | keyword[def] identifier[get_code] ( identifier[results] ):
literal[string]
identifier[status] = identifier[EXIT_SUCCESS]
keyword[for] identifier[file_result] keyword[in] identifier[results] :
identifier[error] = identifier[any] ( identifier[object_result] . identifier[errors] keyword[for] identifier[object_result] keyword[in] identifier[file_result] . identifier[object_results] )
identifier[fatal] = identifier[file_result] . identifier[fatal]
keyword[if] identifier[error] :
identifier[status] |= identifier[EXIT_SCHEMA_INVALID]
keyword[if] identifier[fatal] :
identifier[status] |= identifier[EXIT_VALIDATION_ERROR]
keyword[return] identifier[status] | def get_code(results):
"""Determines the exit status code to be returned from a script by
inspecting the results returned from validating file(s).
Status codes are binary OR'd together, so exit codes can communicate
multiple error conditions.
"""
status = EXIT_SUCCESS
for file_result in results:
error = any((object_result.errors for object_result in file_result.object_results))
fatal = file_result.fatal
if error:
status |= EXIT_SCHEMA_INVALID # depends on [control=['if'], data=[]]
if fatal:
status |= EXIT_VALIDATION_ERROR # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_result']]
return status |
def mcast_sender(mcgroup=MC_GROUP):
    """Non-object interface for sending multicast messages.
    """
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    if _is_broadcast_group(mcgroup):
        # Broadcast rather than multicast: enable SO_BROADCAST and use
        # the special broadcast address.
        sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        group = '<broadcast>'
    else:
        # Valid IPv4 multicast groups live in 224.0.0.0 - 239.255.255.255.
        first_octet = int(mcgroup.split(".")[0])
        if first_octet > 239 or first_octet < 224:
            raise IOError("Invalid multicast address.")
        group = mcgroup
    # Restrict datagrams to the local network (time-to-live).
    sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, struct.pack('b', TTL_LOCALNET))
    return sock, group
constant[Non-object interface for sending multicast messages.
]
variable[sock] assign[=] call[name[socket], parameter[name[AF_INET], name[SOCK_DGRAM]]]
call[name[sock].setsockopt, parameter[name[SOL_SOCKET], name[SO_REUSEADDR], constant[1]]]
if call[name[_is_broadcast_group], parameter[name[mcgroup]]] begin[:]
variable[group] assign[=] constant[<broadcast>]
call[name[sock].setsockopt, parameter[name[SOL_SOCKET], name[SO_BROADCAST], constant[1]]]
return[tuple[[<ast.Name object at 0x7da1b18015a0>, <ast.Name object at 0x7da1b18014e0>]]] | keyword[def] identifier[mcast_sender] ( identifier[mcgroup] = identifier[MC_GROUP] ):
literal[string]
identifier[sock] = identifier[socket] ( identifier[AF_INET] , identifier[SOCK_DGRAM] )
identifier[sock] . identifier[setsockopt] ( identifier[SOL_SOCKET] , identifier[SO_REUSEADDR] , literal[int] )
keyword[if] identifier[_is_broadcast_group] ( identifier[mcgroup] ):
identifier[group] = literal[string]
identifier[sock] . identifier[setsockopt] ( identifier[SOL_SOCKET] , identifier[SO_BROADCAST] , literal[int] )
keyword[elif] (( identifier[int] ( identifier[mcgroup] . identifier[split] ( literal[string] )[ literal[int] ])> literal[int] ) keyword[or]
( identifier[int] ( identifier[mcgroup] . identifier[split] ( literal[string] )[ literal[int] ])< literal[int] )):
keyword[raise] identifier[IOError] ( literal[string] )
keyword[else] :
identifier[group] = identifier[mcgroup]
identifier[ttl] = identifier[struct] . identifier[pack] ( literal[string] , identifier[TTL_LOCALNET] )
identifier[sock] . identifier[setsockopt] ( identifier[IPPROTO_IP] , identifier[IP_MULTICAST_TTL] , identifier[ttl] )
keyword[return] identifier[sock] , identifier[group] | def mcast_sender(mcgroup=MC_GROUP):
"""Non-object interface for sending multicast messages.
"""
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if _is_broadcast_group(mcgroup):
group = '<broadcast>'
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1) # depends on [control=['if'], data=[]]
elif int(mcgroup.split('.')[0]) > 239 or int(mcgroup.split('.')[0]) < 224:
raise IOError('Invalid multicast address.') # depends on [control=['if'], data=[]]
else:
group = mcgroup
ttl = struct.pack('b', TTL_LOCALNET) # Time-to-live
sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, ttl)
return (sock, group) |
def stop_process(process):
    """Does its best to stop the process.

    Sends SIGTERM via ``terminate()`` and waits up to three seconds; if
    the process is still alive on a POSIX system, escalates to SIGKILL.

    Args:
        process: a ``multiprocessing.Process``-like object.

    Raises:
        RuntimeError: if the process could not be terminated.
    """
    process.terminate()
    process.join(3)
    if process.is_alive() and os.name != 'nt':
        try:
            # Escalate: SIGKILL cannot be caught or ignored by the child.
            os.kill(process.pid, signal.SIGKILL)
            process.join()
        except OSError:
            return
    if process.is_alive():
        # Bug fix: report the PID of the process we failed to stop,
        # not os.getpid() (which is the *current* process).
        raise RuntimeError("Unable to terminate PID %d" % process.pid)
constant[Does its best to stop the process.]
call[name[process].terminate, parameter[]]
call[name[process].join, parameter[constant[3]]]
if <ast.BoolOp object at 0x7da1b11b5330> begin[:]
<ast.Try object at 0x7da1b11b54b0>
if call[name[process].is_alive, parameter[]] begin[:]
<ast.Raise object at 0x7da1b11b5900> | keyword[def] identifier[stop_process] ( identifier[process] ):
literal[string]
identifier[process] . identifier[terminate] ()
identifier[process] . identifier[join] ( literal[int] )
keyword[if] identifier[process] . identifier[is_alive] () keyword[and] identifier[os] . identifier[name] != literal[string] :
keyword[try] :
identifier[os] . identifier[kill] ( identifier[process] . identifier[pid] , identifier[signal] . identifier[SIGKILL] )
identifier[process] . identifier[join] ()
keyword[except] identifier[OSError] :
keyword[return]
keyword[if] identifier[process] . identifier[is_alive] ():
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[os] . identifier[getpid] ()) | def stop_process(process):
"""Does its best to stop the process."""
process.terminate()
process.join(3)
if process.is_alive() and os.name != 'nt':
try:
os.kill(process.pid, signal.SIGKILL)
process.join() # depends on [control=['try'], data=[]]
except OSError:
return # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if process.is_alive():
raise RuntimeError('Unable to terminate PID %d' % os.getpid()) # depends on [control=['if'], data=[]] |
def _GetVSSStoreIdentifiers(self, scan_node):
  """Determines the VSS store identifiers.

  Args:
    scan_node (dfvfs.SourceScanNode): scan node.

  Returns:
    list[str]: VSS store identifiers.

  Raises:
    SourceScannerError: if the format of or within the source is not
        supported or the scan node is invalid.
    UserAbort: if the user requested to abort.
  """
  if not scan_node or not scan_node.path_spec:
    raise errors.SourceScannerError('Invalid scan node.')

  volume_system = vshadow_volume_system.VShadowVolumeSystem()
  volume_system.Open(scan_node.path_spec)

  volume_identifiers = self._source_scanner.GetVolumeIdentifiers(volume_system)
  if not volume_identifiers:
    # No shadow copies present on this volume.
    return []

  # TODO: refactor to use scan options.
  if self._vss_stores:
    # 'all' selects every store; otherwise use the preconfigured list.
    if self._vss_stores == 'all':
      requested_stores = range(1, volume_system.number_of_volumes + 1)
    else:
      requested_stores = self._vss_stores

    selected_volume_identifiers = self._NormalizedVolumeIdentifiers(
        volume_system, requested_stores, prefix='vss')

    # Honor the preselection only when every requested store exists.
    if not set(selected_volume_identifiers).difference(volume_identifiers):
      return selected_volume_identifiers

  try:
    volume_identifiers = self._PromptUserForVSSStoreIdentifiers(
        volume_system, volume_identifiers)
  except KeyboardInterrupt:
    raise errors.UserAbort('File system scan aborted.')

  return self._NormalizedVolumeIdentifiers(
      volume_system, volume_identifiers, prefix='vss')
constant[Determines the VSS store identifiers.
Args:
scan_node (dfvfs.SourceScanNode): scan node.
Returns:
list[str]: VSS store identifiers.
Raises:
SourceScannerError: if the format of or within the source is not
supported or the scan node is invalid.
UserAbort: if the user requested to abort.
]
if <ast.BoolOp object at 0x7da20e9b1870> begin[:]
<ast.Raise object at 0x7da20e9b0370>
variable[volume_system] assign[=] call[name[vshadow_volume_system].VShadowVolumeSystem, parameter[]]
call[name[volume_system].Open, parameter[name[scan_node].path_spec]]
variable[volume_identifiers] assign[=] call[name[self]._source_scanner.GetVolumeIdentifiers, parameter[name[volume_system]]]
if <ast.UnaryOp object at 0x7da204567400> begin[:]
return[list[[]]]
if name[self]._vss_stores begin[:]
if compare[name[self]._vss_stores equal[==] constant[all]] begin[:]
variable[vss_stores] assign[=] call[name[range], parameter[constant[1], binary_operation[name[volume_system].number_of_volumes + constant[1]]]]
variable[selected_volume_identifiers] assign[=] call[name[self]._NormalizedVolumeIdentifiers, parameter[name[volume_system], name[vss_stores]]]
if <ast.UnaryOp object at 0x7da204564460> begin[:]
return[name[selected_volume_identifiers]]
<ast.Try object at 0x7da204566740>
return[call[name[self]._NormalizedVolumeIdentifiers, parameter[name[volume_system], name[volume_identifiers]]]] | keyword[def] identifier[_GetVSSStoreIdentifiers] ( identifier[self] , identifier[scan_node] ):
literal[string]
keyword[if] keyword[not] identifier[scan_node] keyword[or] keyword[not] identifier[scan_node] . identifier[path_spec] :
keyword[raise] identifier[errors] . identifier[SourceScannerError] ( literal[string] )
identifier[volume_system] = identifier[vshadow_volume_system] . identifier[VShadowVolumeSystem] ()
identifier[volume_system] . identifier[Open] ( identifier[scan_node] . identifier[path_spec] )
identifier[volume_identifiers] = identifier[self] . identifier[_source_scanner] . identifier[GetVolumeIdentifiers] (
identifier[volume_system] )
keyword[if] keyword[not] identifier[volume_identifiers] :
keyword[return] []
keyword[if] identifier[self] . identifier[_vss_stores] :
keyword[if] identifier[self] . identifier[_vss_stores] == literal[string] :
identifier[vss_stores] = identifier[range] ( literal[int] , identifier[volume_system] . identifier[number_of_volumes] + literal[int] )
keyword[else] :
identifier[vss_stores] = identifier[self] . identifier[_vss_stores]
identifier[selected_volume_identifiers] = identifier[self] . identifier[_NormalizedVolumeIdentifiers] (
identifier[volume_system] , identifier[vss_stores] , identifier[prefix] = literal[string] )
keyword[if] keyword[not] identifier[set] ( identifier[selected_volume_identifiers] ). identifier[difference] ( identifier[volume_identifiers] ):
keyword[return] identifier[selected_volume_identifiers]
keyword[try] :
identifier[volume_identifiers] = identifier[self] . identifier[_PromptUserForVSSStoreIdentifiers] (
identifier[volume_system] , identifier[volume_identifiers] )
keyword[except] identifier[KeyboardInterrupt] :
keyword[raise] identifier[errors] . identifier[UserAbort] ( literal[string] )
keyword[return] identifier[self] . identifier[_NormalizedVolumeIdentifiers] (
identifier[volume_system] , identifier[volume_identifiers] , identifier[prefix] = literal[string] ) | def _GetVSSStoreIdentifiers(self, scan_node):
"""Determines the VSS store identifiers.
Args:
scan_node (dfvfs.SourceScanNode): scan node.
Returns:
list[str]: VSS store identifiers.
Raises:
SourceScannerError: if the format of or within the source is not
supported or the scan node is invalid.
UserAbort: if the user requested to abort.
"""
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError('Invalid scan node.') # depends on [control=['if'], data=[]]
volume_system = vshadow_volume_system.VShadowVolumeSystem()
volume_system.Open(scan_node.path_spec)
volume_identifiers = self._source_scanner.GetVolumeIdentifiers(volume_system)
if not volume_identifiers:
return [] # depends on [control=['if'], data=[]]
# TODO: refactor to use scan options.
if self._vss_stores:
if self._vss_stores == 'all':
vss_stores = range(1, volume_system.number_of_volumes + 1) # depends on [control=['if'], data=[]]
else:
vss_stores = self._vss_stores
selected_volume_identifiers = self._NormalizedVolumeIdentifiers(volume_system, vss_stores, prefix='vss')
if not set(selected_volume_identifiers).difference(volume_identifiers):
return selected_volume_identifiers # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
volume_identifiers = self._PromptUserForVSSStoreIdentifiers(volume_system, volume_identifiers) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
raise errors.UserAbort('File system scan aborted.') # depends on [control=['except'], data=[]]
return self._NormalizedVolumeIdentifiers(volume_system, volume_identifiers, prefix='vss') |
def delete_resource(self, resource, filename, allow_deletion=False):
"""Delete copy of resource in filename on local system.
Will only actually do the deletion if allow_deletion is True. Regardless
of whether the deletion occurs, self.last_timestamp will be updated
if the resource.timestamp is later than the current value.
Returns the number of files actually deleted (0 or 1).
"""
num_deleted = 0
uri = resource.uri
if (resource.timestamp is not None and
resource.timestamp > self.last_timestamp):
self.last_timestamp = resource.timestamp
if (allow_deletion):
if (self.dryrun):
self.logger.info(
"dryrun: would delete %s -> %s" %
(uri, filename))
else:
try:
os.unlink(filename)
num_deleted += 1
self.logger.info("deleted: %s -> %s" % (uri, filename))
self.log_event(
Resource(
resource=resource,
change="deleted"))
except OSError as e:
msg = "Failed to DELETE %s -> %s : %s" % (
uri, filename, str(e))
# if (self.ignore_failures):
self.logger.warning(msg)
# return
# else:
# raise ClientFatalError(msg)
else:
self.logger.info(
"nodelete: would delete %s (--delete to enable)" %
uri)
return(num_deleted) | def function[delete_resource, parameter[self, resource, filename, allow_deletion]]:
constant[Delete copy of resource in filename on local system.
Will only actually do the deletion if allow_deletion is True. Regardless
of whether the deletion occurs, self.last_timestamp will be updated
if the resource.timestamp is later than the current value.
Returns the number of files actually deleted (0 or 1).
]
variable[num_deleted] assign[=] constant[0]
variable[uri] assign[=] name[resource].uri
if <ast.BoolOp object at 0x7da1b2527400> begin[:]
name[self].last_timestamp assign[=] name[resource].timestamp
if name[allow_deletion] begin[:]
if name[self].dryrun begin[:]
call[name[self].logger.info, parameter[binary_operation[constant[dryrun: would delete %s -> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2583160>, <ast.Name object at 0x7da1b2582080>]]]]]
return[name[num_deleted]] | keyword[def] identifier[delete_resource] ( identifier[self] , identifier[resource] , identifier[filename] , identifier[allow_deletion] = keyword[False] ):
literal[string]
identifier[num_deleted] = literal[int]
identifier[uri] = identifier[resource] . identifier[uri]
keyword[if] ( identifier[resource] . identifier[timestamp] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[resource] . identifier[timestamp] > identifier[self] . identifier[last_timestamp] ):
identifier[self] . identifier[last_timestamp] = identifier[resource] . identifier[timestamp]
keyword[if] ( identifier[allow_deletion] ):
keyword[if] ( identifier[self] . identifier[dryrun] ):
identifier[self] . identifier[logger] . identifier[info] (
literal[string] %
( identifier[uri] , identifier[filename] ))
keyword[else] :
keyword[try] :
identifier[os] . identifier[unlink] ( identifier[filename] )
identifier[num_deleted] += literal[int]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[uri] , identifier[filename] ))
identifier[self] . identifier[log_event] (
identifier[Resource] (
identifier[resource] = identifier[resource] ,
identifier[change] = literal[string] ))
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[msg] = literal[string] %(
identifier[uri] , identifier[filename] , identifier[str] ( identifier[e] ))
identifier[self] . identifier[logger] . identifier[warning] ( identifier[msg] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[info] (
literal[string] %
identifier[uri] )
keyword[return] ( identifier[num_deleted] ) | def delete_resource(self, resource, filename, allow_deletion=False):
"""Delete copy of resource in filename on local system.
Will only actually do the deletion if allow_deletion is True. Regardless
of whether the deletion occurs, self.last_timestamp will be updated
if the resource.timestamp is later than the current value.
Returns the number of files actually deleted (0 or 1).
"""
num_deleted = 0
uri = resource.uri
if resource.timestamp is not None and resource.timestamp > self.last_timestamp:
self.last_timestamp = resource.timestamp # depends on [control=['if'], data=[]]
if allow_deletion:
if self.dryrun:
self.logger.info('dryrun: would delete %s -> %s' % (uri, filename)) # depends on [control=['if'], data=[]]
else:
try:
os.unlink(filename)
num_deleted += 1
self.logger.info('deleted: %s -> %s' % (uri, filename))
self.log_event(Resource(resource=resource, change='deleted')) # depends on [control=['try'], data=[]]
except OSError as e:
msg = 'Failed to DELETE %s -> %s : %s' % (uri, filename, str(e))
# if (self.ignore_failures):
self.logger.warning(msg) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
# return
# else:
# raise ClientFatalError(msg)
self.logger.info('nodelete: would delete %s (--delete to enable)' % uri)
return num_deleted |
def new_temp_file(directory=None, hint=''):
'''Return a new temporary file.'''
return tempfile.NamedTemporaryFile(
prefix='tmp-wpull-{0}-'.format(hint), suffix='.tmp', dir=directory) | def function[new_temp_file, parameter[directory, hint]]:
constant[Return a new temporary file.]
return[call[name[tempfile].NamedTemporaryFile, parameter[]]] | keyword[def] identifier[new_temp_file] ( identifier[directory] = keyword[None] , identifier[hint] = literal[string] ):
literal[string]
keyword[return] identifier[tempfile] . identifier[NamedTemporaryFile] (
identifier[prefix] = literal[string] . identifier[format] ( identifier[hint] ), identifier[suffix] = literal[string] , identifier[dir] = identifier[directory] ) | def new_temp_file(directory=None, hint=''):
"""Return a new temporary file."""
return tempfile.NamedTemporaryFile(prefix='tmp-wpull-{0}-'.format(hint), suffix='.tmp', dir=directory) |
def command_schema(self, name=None):
'''
Prints current database schema (according sqlalchemy database model)::
./manage.py sqla:schema [name]
'''
meta_name = table_name = None
if name:
if isinstance(self.metadata, MetaData):
table_name = name
elif '.' in name:
meta_name, table_name = name.split('.', 1)
else:
meta_name = name
def _print_metadata_schema(metadata):
if table_name is None:
for table in metadata.sorted_tables:
print(self._schema(table))
else:
try:
table = metadata.tables[table_name]
except KeyError:
sys.exit('Table {} is not found'.format(name))
print(self._schema(table))
if isinstance(self.metadata, MetaData):
_print_metadata_schema(self.metadata)
else:
for current_meta_name, metadata in self.metadata.items():
if meta_name not in (current_meta_name, None):
continue
_print_metadata_schema(metadata) | def function[command_schema, parameter[self, name]]:
constant[
Prints current database schema (according sqlalchemy database model)::
./manage.py sqla:schema [name]
]
variable[meta_name] assign[=] constant[None]
if name[name] begin[:]
if call[name[isinstance], parameter[name[self].metadata, name[MetaData]]] begin[:]
variable[table_name] assign[=] name[name]
def function[_print_metadata_schema, parameter[metadata]]:
if compare[name[table_name] is constant[None]] begin[:]
for taget[name[table]] in starred[name[metadata].sorted_tables] begin[:]
call[name[print], parameter[call[name[self]._schema, parameter[name[table]]]]]
if call[name[isinstance], parameter[name[self].metadata, name[MetaData]]] begin[:]
call[name[_print_metadata_schema], parameter[name[self].metadata]] | keyword[def] identifier[command_schema] ( identifier[self] , identifier[name] = keyword[None] ):
literal[string]
identifier[meta_name] = identifier[table_name] = keyword[None]
keyword[if] identifier[name] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[metadata] , identifier[MetaData] ):
identifier[table_name] = identifier[name]
keyword[elif] literal[string] keyword[in] identifier[name] :
identifier[meta_name] , identifier[table_name] = identifier[name] . identifier[split] ( literal[string] , literal[int] )
keyword[else] :
identifier[meta_name] = identifier[name]
keyword[def] identifier[_print_metadata_schema] ( identifier[metadata] ):
keyword[if] identifier[table_name] keyword[is] keyword[None] :
keyword[for] identifier[table] keyword[in] identifier[metadata] . identifier[sorted_tables] :
identifier[print] ( identifier[self] . identifier[_schema] ( identifier[table] ))
keyword[else] :
keyword[try] :
identifier[table] = identifier[metadata] . identifier[tables] [ identifier[table_name] ]
keyword[except] identifier[KeyError] :
identifier[sys] . identifier[exit] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[print] ( identifier[self] . identifier[_schema] ( identifier[table] ))
keyword[if] identifier[isinstance] ( identifier[self] . identifier[metadata] , identifier[MetaData] ):
identifier[_print_metadata_schema] ( identifier[self] . identifier[metadata] )
keyword[else] :
keyword[for] identifier[current_meta_name] , identifier[metadata] keyword[in] identifier[self] . identifier[metadata] . identifier[items] ():
keyword[if] identifier[meta_name] keyword[not] keyword[in] ( identifier[current_meta_name] , keyword[None] ):
keyword[continue]
identifier[_print_metadata_schema] ( identifier[metadata] ) | def command_schema(self, name=None):
"""
Prints current database schema (according sqlalchemy database model)::
./manage.py sqla:schema [name]
"""
meta_name = table_name = None
if name:
if isinstance(self.metadata, MetaData):
table_name = name # depends on [control=['if'], data=[]]
elif '.' in name:
(meta_name, table_name) = name.split('.', 1) # depends on [control=['if'], data=['name']]
else:
meta_name = name # depends on [control=['if'], data=[]]
def _print_metadata_schema(metadata):
if table_name is None:
for table in metadata.sorted_tables:
print(self._schema(table)) # depends on [control=['for'], data=['table']] # depends on [control=['if'], data=[]]
else:
try:
table = metadata.tables[table_name] # depends on [control=['try'], data=[]]
except KeyError:
sys.exit('Table {} is not found'.format(name)) # depends on [control=['except'], data=[]]
print(self._schema(table))
if isinstance(self.metadata, MetaData):
_print_metadata_schema(self.metadata) # depends on [control=['if'], data=[]]
else:
for (current_meta_name, metadata) in self.metadata.items():
if meta_name not in (current_meta_name, None):
continue # depends on [control=['if'], data=[]]
_print_metadata_schema(metadata) # depends on [control=['for'], data=[]] |
def updateEditorGeometry(self, editor, option, index):
"""Make sure the editor is the same size as the widget
By default it can get smaller because does not expand over viewport size.
This will make sure it will resize to the same size as the widget.
:param editor: the editor to update
:type editor: :class:`QtGui.QWidget`
:param option: the options for painting
:type option: QtGui.QStyleOptionViewItem
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
super(WidgetDelegate, self).updateEditorGeometry(editor, option, index)
editor.setGeometry(option.rect)
if self.keep_editor_size:
esh = editor.sizeHint()
osh = option.rect.size()
w = osh.width() if osh.width() > esh.width() else esh.width()
h = osh.height() if osh.height() > esh.height() else esh.height()
editor.resize(w, h) | def function[updateEditorGeometry, parameter[self, editor, option, index]]:
constant[Make sure the editor is the same size as the widget
By default it can get smaller because does not expand over viewport size.
This will make sure it will resize to the same size as the widget.
:param editor: the editor to update
:type editor: :class:`QtGui.QWidget`
:param option: the options for painting
:type option: QtGui.QStyleOptionViewItem
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
]
call[call[name[super], parameter[name[WidgetDelegate], name[self]]].updateEditorGeometry, parameter[name[editor], name[option], name[index]]]
call[name[editor].setGeometry, parameter[name[option].rect]]
if name[self].keep_editor_size begin[:]
variable[esh] assign[=] call[name[editor].sizeHint, parameter[]]
variable[osh] assign[=] call[name[option].rect.size, parameter[]]
variable[w] assign[=] <ast.IfExp object at 0x7da1b1668310>
variable[h] assign[=] <ast.IfExp object at 0x7da1b1643220>
call[name[editor].resize, parameter[name[w], name[h]]] | keyword[def] identifier[updateEditorGeometry] ( identifier[self] , identifier[editor] , identifier[option] , identifier[index] ):
literal[string]
identifier[super] ( identifier[WidgetDelegate] , identifier[self] ). identifier[updateEditorGeometry] ( identifier[editor] , identifier[option] , identifier[index] )
identifier[editor] . identifier[setGeometry] ( identifier[option] . identifier[rect] )
keyword[if] identifier[self] . identifier[keep_editor_size] :
identifier[esh] = identifier[editor] . identifier[sizeHint] ()
identifier[osh] = identifier[option] . identifier[rect] . identifier[size] ()
identifier[w] = identifier[osh] . identifier[width] () keyword[if] identifier[osh] . identifier[width] ()> identifier[esh] . identifier[width] () keyword[else] identifier[esh] . identifier[width] ()
identifier[h] = identifier[osh] . identifier[height] () keyword[if] identifier[osh] . identifier[height] ()> identifier[esh] . identifier[height] () keyword[else] identifier[esh] . identifier[height] ()
identifier[editor] . identifier[resize] ( identifier[w] , identifier[h] ) | def updateEditorGeometry(self, editor, option, index):
"""Make sure the editor is the same size as the widget
By default it can get smaller because does not expand over viewport size.
This will make sure it will resize to the same size as the widget.
:param editor: the editor to update
:type editor: :class:`QtGui.QWidget`
:param option: the options for painting
:type option: QtGui.QStyleOptionViewItem
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
super(WidgetDelegate, self).updateEditorGeometry(editor, option, index)
editor.setGeometry(option.rect)
if self.keep_editor_size:
esh = editor.sizeHint()
osh = option.rect.size()
w = osh.width() if osh.width() > esh.width() else esh.width()
h = osh.height() if osh.height() > esh.height() else esh.height()
editor.resize(w, h) # depends on [control=['if'], data=[]] |
async def addRelation(self, endpoint1, endpoint2):
"""
:param endpoint1 string:
:param endpoint2 string:
Endpoint1 and Endpoint2 hold relation endpoints in the
"application:interface" form, where the application is always a
placeholder pointing to an application change, and the interface is
optional. Examples are "$deploy-42:web" or just "$deploy-42".
"""
endpoints = [endpoint1, endpoint2]
# resolve indirect references
for i in range(len(endpoints)):
parts = endpoints[i].split(':')
parts[0] = self.resolve(parts[0])
endpoints[i] = ':'.join(parts)
log.info('Relating %s <-> %s', *endpoints)
return await self.model.add_relation(*endpoints) | <ast.AsyncFunctionDef object at 0x7da1b0efac20> | keyword[async] keyword[def] identifier[addRelation] ( identifier[self] , identifier[endpoint1] , identifier[endpoint2] ):
literal[string]
identifier[endpoints] =[ identifier[endpoint1] , identifier[endpoint2] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[endpoints] )):
identifier[parts] = identifier[endpoints] [ identifier[i] ]. identifier[split] ( literal[string] )
identifier[parts] [ literal[int] ]= identifier[self] . identifier[resolve] ( identifier[parts] [ literal[int] ])
identifier[endpoints] [ identifier[i] ]= literal[string] . identifier[join] ( identifier[parts] )
identifier[log] . identifier[info] ( literal[string] ,* identifier[endpoints] )
keyword[return] keyword[await] identifier[self] . identifier[model] . identifier[add_relation] (* identifier[endpoints] ) | async def addRelation(self, endpoint1, endpoint2):
"""
:param endpoint1 string:
:param endpoint2 string:
Endpoint1 and Endpoint2 hold relation endpoints in the
"application:interface" form, where the application is always a
placeholder pointing to an application change, and the interface is
optional. Examples are "$deploy-42:web" or just "$deploy-42".
"""
endpoints = [endpoint1, endpoint2]
# resolve indirect references
for i in range(len(endpoints)):
parts = endpoints[i].split(':')
parts[0] = self.resolve(parts[0])
endpoints[i] = ':'.join(parts) # depends on [control=['for'], data=['i']]
log.info('Relating %s <-> %s', *endpoints)
return await self.model.add_relation(*endpoints) |
def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of list that new li tags should be added too.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join(t for t in list_contents if t is not None)
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
new_el, visited_nodes = build_table(el, meta_data)
return etree.tostring(new_el), visited_nodes
elif el.tag == '%sp' % w_namespace:
return get_element_content(el, meta_data), [el]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag)
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
# Any list that is more indented that ilvl needs to
# be merged to the list before it.
if i not in ol_dict:
continue
if ol_dict[i] is not current_ol:
if ol_dict[i] is current_ol:
continue
ol_dict[i][-1].append(current_ol)
current_ol = ol_dict[i]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
new_el, el_visited_nodes = _build_non_li_content(
li_node,
meta_data,
)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Get the data needed to build the current list item
list_contents.append(get_element_content(
li_node,
meta_data,
))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if (ilvl > current_ilvl) or (numId != current_numId):
# Only create a new list
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId
# Both cases above are not True then we need to close all lists greater
# than ilvl and then remove them from the ol_dict
else:
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(
ilvl=ilvl,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl]
else:
# In some instances the ilvl is not in the ol_dict, if that is the
# case, create it here (not sure how this happens but it has
# before.) Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
if current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type)
# Create the li element.
visited_nodes.extend(list(li_node.iter()))
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(
ilvl=0,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
return root_ol, visited_nodes | def function[build_list, parameter[li_nodes, meta_data]]:
constant[
Build the list structure and return the root list
]
variable[ol_dict] assign[=] dictionary[[], []]
variable[current_ilvl] assign[=] <ast.UnaryOp object at 0x7da1b02c3d60>
variable[current_numId] assign[=] <ast.UnaryOp object at 0x7da1b02c3ca0>
variable[current_ol] assign[=] constant[None]
variable[root_ol] assign[=] constant[None]
variable[visited_nodes] assign[=] list[[]]
variable[list_contents] assign[=] list[[]]
def function[_build_li, parameter[list_contents]]:
variable[data] assign[=] call[constant[<br />].join, parameter[<ast.GeneratorExp object at 0x7da1b02c3880>]]
return[call[name[etree].XML, parameter[binary_operation[constant[<li>%s</li>] <ast.Mod object at 0x7da2590d6920> name[data]]]]]
def function[_build_non_li_content, parameter[el, meta_data]]:
variable[w_namespace] assign[=] call[name[get_namespace], parameter[name[el], constant[w]]]
if compare[name[el].tag equal[==] binary_operation[constant[%stbl] <ast.Mod object at 0x7da2590d6920> name[w_namespace]]] begin[:]
<ast.Tuple object at 0x7da1b02c31f0> assign[=] call[name[build_table], parameter[name[el], name[meta_data]]]
return[tuple[[<ast.Call object at 0x7da1b02c3040>, <ast.Name object at 0x7da1b02c2f80>]]]
if call[name[has_text], parameter[name[el]]] begin[:]
<ast.Raise object at 0x7da1b02c2b60>
def function[_merge_lists, parameter[ilvl, current_ilvl, ol_dict, current_ol]]:
for taget[name[i]] in starred[call[name[reversed], parameter[call[name[range], parameter[name[ilvl], name[current_ilvl]]]]]] begin[:]
if compare[name[i] <ast.NotIn object at 0x7da2590d7190> name[ol_dict]] begin[:]
continue
if compare[call[name[ol_dict]][name[i]] is_not name[current_ol]] begin[:]
if compare[call[name[ol_dict]][name[i]] is name[current_ol]] begin[:]
continue
call[call[call[name[ol_dict]][name[i]]][<ast.UnaryOp object at 0x7da1b02c0ee0>].append, parameter[name[current_ol]]]
variable[current_ol] assign[=] call[name[ol_dict]][name[i]]
for taget[name[key]] in starred[call[name[list], parameter[name[ol_dict]]]] begin[:]
if compare[name[key] greater[>] name[ilvl]] begin[:]
<ast.Delete object at 0x7da1b02c0b80>
return[name[current_ol]]
for taget[name[li_node]] in starred[name[li_nodes]] begin[:]
variable[w_namespace] assign[=] call[name[get_namespace], parameter[name[li_node], constant[w]]]
if <ast.UnaryOp object at 0x7da1b02c0820> begin[:]
<ast.Tuple object at 0x7da1b02c0700> assign[=] call[name[_build_non_li_content], parameter[name[li_node], name[meta_data]]]
call[name[list_contents].append, parameter[name[new_el]]]
call[name[visited_nodes].extend, parameter[name[el_visited_nodes]]]
continue
if name[list_contents] begin[:]
variable[li_el] assign[=] call[name[_build_li], parameter[name[list_contents]]]
variable[list_contents] assign[=] list[[]]
call[name[current_ol].append, parameter[name[li_el]]]
call[name[list_contents].append, parameter[call[name[get_element_content], parameter[name[li_node], name[meta_data]]]]]
variable[ilvl] assign[=] call[name[get_ilvl], parameter[name[li_node], name[w_namespace]]]
variable[numId] assign[=] call[name[get_numId], parameter[name[li_node], name[w_namespace]]]
variable[list_type] assign[=] call[name[get_ordered_list_type], parameter[name[meta_data], name[numId], name[ilvl]]]
if <ast.BoolOp object at 0x7da1b03548b0> begin[:]
call[name[ol_dict]][name[ilvl]] assign[=] call[name[create_list], parameter[name[list_type]]]
variable[current_ol] assign[=] call[name[ol_dict]][name[ilvl]]
variable[current_ilvl] assign[=] name[ilvl]
variable[current_numId] assign[=] name[numId]
if compare[name[root_ol] is constant[None]] begin[:]
variable[root_ol] assign[=] name[current_ol]
if compare[name[ilvl] in name[ol_dict]] begin[:]
variable[current_ol] assign[=] call[name[ol_dict]][name[ilvl]]
call[name[visited_nodes].extend, parameter[call[name[list], parameter[call[name[li_node].iter, parameter[]]]]]]
if name[list_contents] begin[:]
variable[li_el] assign[=] call[name[_build_li], parameter[name[list_contents]]]
variable[list_contents] assign[=] list[[]]
call[name[current_ol].append, parameter[name[li_el]]]
variable[current_ol] assign[=] call[name[_merge_lists], parameter[]]
return[tuple[[<ast.Name object at 0x7da1b0214c70>, <ast.Name object at 0x7da1b0215030>]]] | keyword[def] identifier[build_list] ( identifier[li_nodes] , identifier[meta_data] ):
literal[string]
identifier[ol_dict] ={}
identifier[current_ilvl] =- literal[int]
identifier[current_numId] =- literal[int]
identifier[current_ol] = keyword[None]
identifier[root_ol] = keyword[None]
identifier[visited_nodes] =[]
identifier[list_contents] =[]
keyword[def] identifier[_build_li] ( identifier[list_contents] ):
identifier[data] = literal[string] . identifier[join] ( identifier[t] keyword[for] identifier[t] keyword[in] identifier[list_contents] keyword[if] identifier[t] keyword[is] keyword[not] keyword[None] )
keyword[return] identifier[etree] . identifier[XML] ( literal[string] % identifier[data] )
keyword[def] identifier[_build_non_li_content] ( identifier[el] , identifier[meta_data] ):
identifier[w_namespace] = identifier[get_namespace] ( identifier[el] , literal[string] )
keyword[if] identifier[el] . identifier[tag] == literal[string] % identifier[w_namespace] :
identifier[new_el] , identifier[visited_nodes] = identifier[build_table] ( identifier[el] , identifier[meta_data] )
keyword[return] identifier[etree] . identifier[tostring] ( identifier[new_el] ), identifier[visited_nodes]
keyword[elif] identifier[el] . identifier[tag] == literal[string] % identifier[w_namespace] :
keyword[return] identifier[get_element_content] ( identifier[el] , identifier[meta_data] ),[ identifier[el] ]
keyword[if] identifier[has_text] ( identifier[el] ):
keyword[raise] identifier[UnintendedTag] ( literal[string] % identifier[el] . identifier[tag] )
keyword[def] identifier[_merge_lists] ( identifier[ilvl] , identifier[current_ilvl] , identifier[ol_dict] , identifier[current_ol] ):
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[range] ( identifier[ilvl] , identifier[current_ilvl] )):
keyword[if] identifier[i] keyword[not] keyword[in] identifier[ol_dict] :
keyword[continue]
keyword[if] identifier[ol_dict] [ identifier[i] ] keyword[is] keyword[not] identifier[current_ol] :
keyword[if] identifier[ol_dict] [ identifier[i] ] keyword[is] identifier[current_ol] :
keyword[continue]
identifier[ol_dict] [ identifier[i] ][- literal[int] ]. identifier[append] ( identifier[current_ol] )
identifier[current_ol] = identifier[ol_dict] [ identifier[i] ]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[ol_dict] ):
keyword[if] identifier[key] > identifier[ilvl] :
keyword[del] identifier[ol_dict] [ identifier[key] ]
keyword[return] identifier[current_ol]
keyword[for] identifier[li_node] keyword[in] identifier[li_nodes] :
identifier[w_namespace] = identifier[get_namespace] ( identifier[li_node] , literal[string] )
keyword[if] keyword[not] identifier[is_li] ( identifier[li_node] , identifier[meta_data] ):
identifier[new_el] , identifier[el_visited_nodes] = identifier[_build_non_li_content] (
identifier[li_node] ,
identifier[meta_data] ,
)
identifier[list_contents] . identifier[append] ( identifier[new_el] )
identifier[visited_nodes] . identifier[extend] ( identifier[el_visited_nodes] )
keyword[continue]
keyword[if] identifier[list_contents] :
identifier[li_el] = identifier[_build_li] ( identifier[list_contents] )
identifier[list_contents] =[]
identifier[current_ol] . identifier[append] ( identifier[li_el] )
identifier[list_contents] . identifier[append] ( identifier[get_element_content] (
identifier[li_node] ,
identifier[meta_data] ,
))
identifier[ilvl] = identifier[get_ilvl] ( identifier[li_node] , identifier[w_namespace] )
identifier[numId] = identifier[get_numId] ( identifier[li_node] , identifier[w_namespace] )
identifier[list_type] = identifier[get_ordered_list_type] ( identifier[meta_data] , identifier[numId] , identifier[ilvl] )
keyword[if] ( identifier[ilvl] > identifier[current_ilvl] ) keyword[or] ( identifier[numId] != identifier[current_numId] ):
identifier[ol_dict] [ identifier[ilvl] ]= identifier[create_list] ( identifier[list_type] )
identifier[current_ol] = identifier[ol_dict] [ identifier[ilvl] ]
identifier[current_ilvl] = identifier[ilvl]
identifier[current_numId] = identifier[numId]
keyword[else] :
identifier[current_ol] = identifier[_merge_lists] (
identifier[ilvl] = identifier[ilvl] ,
identifier[current_ilvl] = identifier[current_ilvl] ,
identifier[ol_dict] = identifier[ol_dict] ,
identifier[current_ol] = identifier[current_ol] ,
)
keyword[if] identifier[root_ol] keyword[is] keyword[None] :
identifier[root_ol] = identifier[current_ol]
keyword[if] identifier[ilvl] keyword[in] identifier[ol_dict] :
identifier[current_ol] = identifier[ol_dict] [ identifier[ilvl] ]
keyword[else] :
keyword[if] identifier[current_ol] keyword[is] keyword[not] identifier[root_ol] :
identifier[root_ol] [- literal[int] ]. identifier[append] ( identifier[current_ol] )
identifier[current_ol] = identifier[create_list] ( identifier[list_type] )
identifier[visited_nodes] . identifier[extend] ( identifier[list] ( identifier[li_node] . identifier[iter] ()))
keyword[if] identifier[list_contents] :
identifier[li_el] = identifier[_build_li] ( identifier[list_contents] )
identifier[list_contents] =[]
identifier[current_ol] . identifier[append] ( identifier[li_el] )
identifier[current_ol] = identifier[_merge_lists] (
identifier[ilvl] = literal[int] ,
identifier[current_ilvl] = identifier[current_ilvl] ,
identifier[ol_dict] = identifier[ol_dict] ,
identifier[current_ol] = identifier[current_ol] ,
)
keyword[return] identifier[root_ol] , identifier[visited_nodes] | def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of list that new li tags should be added too.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join((t for t in list_contents if t is not None))
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
(new_el, visited_nodes) = build_table(el, meta_data)
return (etree.tostring(new_el), visited_nodes) # depends on [control=['if'], data=[]]
elif el.tag == '%sp' % w_namespace:
return (get_element_content(el, meta_data), [el]) # depends on [control=['if'], data=[]]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag) # depends on [control=['if'], data=[]]
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
# Any list that is more indented that ilvl needs to
# be merged to the list before it.
if i not in ol_dict:
continue # depends on [control=['if'], data=[]]
if ol_dict[i] is not current_ol:
if ol_dict[i] is current_ol:
continue # depends on [control=['if'], data=[]]
ol_dict[i][-1].append(current_ol)
current_ol = ol_dict[i] # depends on [control=['if'], data=['current_ol']] # depends on [control=['for'], data=['i']]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
(new_el, el_visited_nodes) = _build_non_li_content(li_node, meta_data)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue # depends on [control=['if'], data=[]]
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el) # depends on [control=['if'], data=[]]
# Get the data needed to build the current list item
list_contents.append(get_element_content(li_node, meta_data))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if ilvl > current_ilvl or numId != current_numId:
# Only create a new list
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId # depends on [control=['if'], data=[]]
else:
# Both cases above are not True then we need to close all lists greater
# than ilvl and then remove them from the ol_dict
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(ilvl=ilvl, current_ilvl=current_ilvl, ol_dict=ol_dict, current_ol=current_ol)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol # depends on [control=['if'], data=['root_ol']]
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl] # depends on [control=['if'], data=['ilvl', 'ol_dict']]
# In some instances the ilvl is not in the ol_dict, if that is the
# case, create it here (not sure how this happens but it has
# before.) Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
elif current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type) # depends on [control=['if'], data=['current_ol', 'root_ol']]
# Create the li element.
visited_nodes.extend(list(li_node.iter())) # depends on [control=['for'], data=['li_node']]
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el) # depends on [control=['if'], data=[]]
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(ilvl=0, current_ilvl=current_ilvl, ol_dict=ol_dict, current_ol=current_ol)
return (root_ol, visited_nodes) |
def is_cdl(filename):
'''
Quick check for .cdl ascii file
Example:
netcdf sample_file {
dimensions:
name_strlen = 7 ;
time = 96 ;
variables:
float lat ;
lat:units = "degrees_north" ;
lat:standard_name = "latitude" ;
lat:long_name = "station latitude" ;
etc...
:param str filename: Absolute path of file to check
:param str data: First chuck of data from file to check
'''
if os.path.splitext(filename)[-1] != '.cdl':
return False
with open(filename, 'rb') as f:
data = f.read(32)
if data.startswith(b'netcdf') or b'dimensions' in data:
return True
return False | def function[is_cdl, parameter[filename]]:
constant[
Quick check for .cdl ascii file
Example:
netcdf sample_file {
dimensions:
name_strlen = 7 ;
time = 96 ;
variables:
float lat ;
lat:units = "degrees_north" ;
lat:standard_name = "latitude" ;
lat:long_name = "station latitude" ;
etc...
:param str filename: Absolute path of file to check
:param str data: First chuck of data from file to check
]
if compare[call[call[name[os].path.splitext, parameter[name[filename]]]][<ast.UnaryOp object at 0x7da1b2345d20>] not_equal[!=] constant[.cdl]] begin[:]
return[constant[False]]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
variable[data] assign[=] call[name[f].read, parameter[constant[32]]]
if <ast.BoolOp object at 0x7da1b2347be0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_cdl] ( identifier[filename] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[- literal[int] ]!= literal[string] :
keyword[return] keyword[False]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[f] . identifier[read] ( literal[int] )
keyword[if] identifier[data] . identifier[startswith] ( literal[string] ) keyword[or] literal[string] keyword[in] identifier[data] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_cdl(filename):
"""
Quick check for .cdl ascii file
Example:
netcdf sample_file {
dimensions:
name_strlen = 7 ;
time = 96 ;
variables:
float lat ;
lat:units = "degrees_north" ;
lat:standard_name = "latitude" ;
lat:long_name = "station latitude" ;
etc...
:param str filename: Absolute path of file to check
:param str data: First chuck of data from file to check
"""
if os.path.splitext(filename)[-1] != '.cdl':
return False # depends on [control=['if'], data=[]]
with open(filename, 'rb') as f:
data = f.read(32) # depends on [control=['with'], data=['f']]
if data.startswith(b'netcdf') or b'dimensions' in data:
return True # depends on [control=['if'], data=[]]
return False |
def imshow(img, win_name='', wait_time=0):
"""Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
"""
cv2.imshow(win_name, imread(img))
cv2.waitKey(wait_time) | def function[imshow, parameter[img, win_name, wait_time]]:
constant[Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
]
call[name[cv2].imshow, parameter[name[win_name], call[name[imread], parameter[name[img]]]]]
call[name[cv2].waitKey, parameter[name[wait_time]]] | keyword[def] identifier[imshow] ( identifier[img] , identifier[win_name] = literal[string] , identifier[wait_time] = literal[int] ):
literal[string]
identifier[cv2] . identifier[imshow] ( identifier[win_name] , identifier[imread] ( identifier[img] ))
identifier[cv2] . identifier[waitKey] ( identifier[wait_time] ) | def imshow(img, win_name='', wait_time=0):
"""Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
"""
cv2.imshow(win_name, imread(img))
cv2.waitKey(wait_time) |
def log_histogram(self, step, tag, val):
'''
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
'''
hist = Histogram()
hist.add(val)
summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
self._add_event(step, summary) | def function[log_histogram, parameter[self, step, tag, val]]:
constant[
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
]
variable[hist] assign[=] call[name[Histogram], parameter[]]
call[name[hist].add, parameter[name[val]]]
variable[summary] assign[=] call[name[Summary], parameter[]]
call[name[self]._add_event, parameter[name[step], name[summary]]] | keyword[def] identifier[log_histogram] ( identifier[self] , identifier[step] , identifier[tag] , identifier[val] ):
literal[string]
identifier[hist] = identifier[Histogram] ()
identifier[hist] . identifier[add] ( identifier[val] )
identifier[summary] = identifier[Summary] ( identifier[value] =[ identifier[Summary] . identifier[Value] ( identifier[tag] = identifier[tag] , identifier[histo] = identifier[hist] . identifier[encode_to_proto] ())])
identifier[self] . identifier[_add_event] ( identifier[step] , identifier[summary] ) | def log_histogram(self, step, tag, val):
"""
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
"""
hist = Histogram()
hist.add(val)
summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
self._add_event(step, summary) |
def parse_request(cls, sock):
"""Parse the request (the pre-execution) section of the nailgun protocol from the given socket.
Handles reading of the Argument, Environment, Working Directory and Command chunks from the
client which represents the "request" phase of the exchange. Working Directory and Command are
required and must be sent as the last two chunks in this phase. Argument and Environment chunks
are optional and can be sent more than once (thus we aggregate them).
"""
command = None
working_dir = None
arguments = []
environment = {}
while not all((working_dir, command)):
chunk_type, payload = cls.read_chunk(sock)
if chunk_type == ChunkType.ARGUMENT:
arguments.append(payload)
elif chunk_type == ChunkType.ENVIRONMENT:
key, val = payload.split(cls.ENVIRON_SEP, 1)
environment[key] = val
elif chunk_type == ChunkType.WORKING_DIR:
working_dir = payload
elif chunk_type == ChunkType.COMMAND:
command = payload
else:
raise cls.ProtocolError('received non-request chunk before header was fully received!')
return working_dir, command, arguments, environment | def function[parse_request, parameter[cls, sock]]:
constant[Parse the request (the pre-execution) section of the nailgun protocol from the given socket.
Handles reading of the Argument, Environment, Working Directory and Command chunks from the
client which represents the "request" phase of the exchange. Working Directory and Command are
required and must be sent as the last two chunks in this phase. Argument and Environment chunks
are optional and can be sent more than once (thus we aggregate them).
]
variable[command] assign[=] constant[None]
variable[working_dir] assign[=] constant[None]
variable[arguments] assign[=] list[[]]
variable[environment] assign[=] dictionary[[], []]
while <ast.UnaryOp object at 0x7da1b2290880> begin[:]
<ast.Tuple object at 0x7da1b2293c70> assign[=] call[name[cls].read_chunk, parameter[name[sock]]]
if compare[name[chunk_type] equal[==] name[ChunkType].ARGUMENT] begin[:]
call[name[arguments].append, parameter[name[payload]]]
return[tuple[[<ast.Name object at 0x7da1b2293820>, <ast.Name object at 0x7da1b22937c0>, <ast.Name object at 0x7da1b22909d0>, <ast.Name object at 0x7da1b2290280>]]] | keyword[def] identifier[parse_request] ( identifier[cls] , identifier[sock] ):
literal[string]
identifier[command] = keyword[None]
identifier[working_dir] = keyword[None]
identifier[arguments] =[]
identifier[environment] ={}
keyword[while] keyword[not] identifier[all] (( identifier[working_dir] , identifier[command] )):
identifier[chunk_type] , identifier[payload] = identifier[cls] . identifier[read_chunk] ( identifier[sock] )
keyword[if] identifier[chunk_type] == identifier[ChunkType] . identifier[ARGUMENT] :
identifier[arguments] . identifier[append] ( identifier[payload] )
keyword[elif] identifier[chunk_type] == identifier[ChunkType] . identifier[ENVIRONMENT] :
identifier[key] , identifier[val] = identifier[payload] . identifier[split] ( identifier[cls] . identifier[ENVIRON_SEP] , literal[int] )
identifier[environment] [ identifier[key] ]= identifier[val]
keyword[elif] identifier[chunk_type] == identifier[ChunkType] . identifier[WORKING_DIR] :
identifier[working_dir] = identifier[payload]
keyword[elif] identifier[chunk_type] == identifier[ChunkType] . identifier[COMMAND] :
identifier[command] = identifier[payload]
keyword[else] :
keyword[raise] identifier[cls] . identifier[ProtocolError] ( literal[string] )
keyword[return] identifier[working_dir] , identifier[command] , identifier[arguments] , identifier[environment] | def parse_request(cls, sock):
"""Parse the request (the pre-execution) section of the nailgun protocol from the given socket.
Handles reading of the Argument, Environment, Working Directory and Command chunks from the
client which represents the "request" phase of the exchange. Working Directory and Command are
required and must be sent as the last two chunks in this phase. Argument and Environment chunks
are optional and can be sent more than once (thus we aggregate them).
"""
command = None
working_dir = None
arguments = []
environment = {}
while not all((working_dir, command)):
(chunk_type, payload) = cls.read_chunk(sock)
if chunk_type == ChunkType.ARGUMENT:
arguments.append(payload) # depends on [control=['if'], data=[]]
elif chunk_type == ChunkType.ENVIRONMENT:
(key, val) = payload.split(cls.ENVIRON_SEP, 1)
environment[key] = val # depends on [control=['if'], data=[]]
elif chunk_type == ChunkType.WORKING_DIR:
working_dir = payload # depends on [control=['if'], data=[]]
elif chunk_type == ChunkType.COMMAND:
command = payload # depends on [control=['if'], data=[]]
else:
raise cls.ProtocolError('received non-request chunk before header was fully received!') # depends on [control=['while'], data=[]]
return (working_dir, command, arguments, environment) |
def _put_subject(self, subject_id, body):
"""
Update a subject for the given subject id. The body is not
a list but a dictionary of a single resource.
"""
assert isinstance(body, (dict)), "PUT requires body to be dict."
# subject_id could be a path such as '/asset/123' so quote
uri = self._get_subject_uri(guid=subject_id)
return self.service._put(uri, body) | def function[_put_subject, parameter[self, subject_id, body]]:
constant[
Update a subject for the given subject id. The body is not
a list but a dictionary of a single resource.
]
assert[call[name[isinstance], parameter[name[body], name[dict]]]]
variable[uri] assign[=] call[name[self]._get_subject_uri, parameter[]]
return[call[name[self].service._put, parameter[name[uri], name[body]]]] | keyword[def] identifier[_put_subject] ( identifier[self] , identifier[subject_id] , identifier[body] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[body] ,( identifier[dict] )), literal[string]
identifier[uri] = identifier[self] . identifier[_get_subject_uri] ( identifier[guid] = identifier[subject_id] )
keyword[return] identifier[self] . identifier[service] . identifier[_put] ( identifier[uri] , identifier[body] ) | def _put_subject(self, subject_id, body):
"""
Update a subject for the given subject id. The body is not
a list but a dictionary of a single resource.
"""
assert isinstance(body, dict), 'PUT requires body to be dict.'
# subject_id could be a path such as '/asset/123' so quote
uri = self._get_subject_uri(guid=subject_id)
return self.service._put(uri, body) |
def get_host_address(host=None, default_address=DEFAULT_HOST_IP):
"""
Returns the given host address.
:param host: Host to retrieve the address.
:type host: unicode
:param default_address: Default address if the host is unreachable.
:type default_address: unicode
:return: Host address.
:rtype: unicode
"""
try:
return unicode(socket.gethostbyname(host or socket.gethostname()),
Constants.default_codec,
Constants.codec_error)
except Exception as error:
return default_address | def function[get_host_address, parameter[host, default_address]]:
constant[
Returns the given host address.
:param host: Host to retrieve the address.
:type host: unicode
:param default_address: Default address if the host is unreachable.
:type default_address: unicode
:return: Host address.
:rtype: unicode
]
<ast.Try object at 0x7da18dc9b160> | keyword[def] identifier[get_host_address] ( identifier[host] = keyword[None] , identifier[default_address] = identifier[DEFAULT_HOST_IP] ):
literal[string]
keyword[try] :
keyword[return] identifier[unicode] ( identifier[socket] . identifier[gethostbyname] ( identifier[host] keyword[or] identifier[socket] . identifier[gethostname] ()),
identifier[Constants] . identifier[default_codec] ,
identifier[Constants] . identifier[codec_error] )
keyword[except] identifier[Exception] keyword[as] identifier[error] :
keyword[return] identifier[default_address] | def get_host_address(host=None, default_address=DEFAULT_HOST_IP):
"""
Returns the given host address.
:param host: Host to retrieve the address.
:type host: unicode
:param default_address: Default address if the host is unreachable.
:type default_address: unicode
:return: Host address.
:rtype: unicode
"""
try:
return unicode(socket.gethostbyname(host or socket.gethostname()), Constants.default_codec, Constants.codec_error) # depends on [control=['try'], data=[]]
except Exception as error:
return default_address # depends on [control=['except'], data=[]] |
def from_datetime(dt, machine_id=0):
"""
Convert a datetime to an SPL `Timestamp`.
Args:
dt(datetime.datetime): Datetime to be converted.
machine_id(int): Machine identifier.
Returns:
Timestamp: Datetime converted to Timestamp.
"""
td = dt - Timestamp._EPOCH
seconds = td.days * 3600 * 24
seconds += td.seconds
return Timestamp(seconds, td.microseconds*1000, machine_id) | def function[from_datetime, parameter[dt, machine_id]]:
constant[
Convert a datetime to an SPL `Timestamp`.
Args:
dt(datetime.datetime): Datetime to be converted.
machine_id(int): Machine identifier.
Returns:
Timestamp: Datetime converted to Timestamp.
]
variable[td] assign[=] binary_operation[name[dt] - name[Timestamp]._EPOCH]
variable[seconds] assign[=] binary_operation[binary_operation[name[td].days * constant[3600]] * constant[24]]
<ast.AugAssign object at 0x7da18c4cc4f0>
return[call[name[Timestamp], parameter[name[seconds], binary_operation[name[td].microseconds * constant[1000]], name[machine_id]]]] | keyword[def] identifier[from_datetime] ( identifier[dt] , identifier[machine_id] = literal[int] ):
literal[string]
identifier[td] = identifier[dt] - identifier[Timestamp] . identifier[_EPOCH]
identifier[seconds] = identifier[td] . identifier[days] * literal[int] * literal[int]
identifier[seconds] += identifier[td] . identifier[seconds]
keyword[return] identifier[Timestamp] ( identifier[seconds] , identifier[td] . identifier[microseconds] * literal[int] , identifier[machine_id] ) | def from_datetime(dt, machine_id=0):
"""
Convert a datetime to an SPL `Timestamp`.
Args:
dt(datetime.datetime): Datetime to be converted.
machine_id(int): Machine identifier.
Returns:
Timestamp: Datetime converted to Timestamp.
"""
td = dt - Timestamp._EPOCH
seconds = td.days * 3600 * 24
seconds += td.seconds
return Timestamp(seconds, td.microseconds * 1000, machine_id) |
def _init_fmtname2wbfmtobj(self, workbook, **kws):
"""Initialize fmtname2wbfmtobj."""
wbfmtdict = [
kws.get('format_txt0', self.dflt_wbfmtdict[0]),
kws.get('format_txt1', self.dflt_wbfmtdict[1]),
kws.get('format_txt2', self.dflt_wbfmtdict[2]),
kws.get('format_txt3', self.dflt_wbfmtdict[3])]
fmtname2wbfmtobj = {
'plain': workbook.add_format(wbfmtdict[0]),
'plain bold': workbook.add_format(wbfmtdict[3]),
'very light grey' : workbook.add_format(wbfmtdict[1]),
'light grey' :workbook.add_format(wbfmtdict[2])}
# Use a xlsx namedtuple field value to set row color
ntval2wbfmtdict = kws.get('ntval2wbfmtdict', None)
if ntval2wbfmtdict is not None:
for ntval, wbfmtdict in ntval2wbfmtdict.items():
fmtname2wbfmtobj[ntval] = workbook.add_format(wbfmtdict)
if 'ntfld_wbfmt' not in kws:
sys.stdout.write("**WARNING: 'ntfld_wbfmt' NOT PRESENT\n")
return fmtname2wbfmtobj | def function[_init_fmtname2wbfmtobj, parameter[self, workbook]]:
constant[Initialize fmtname2wbfmtobj.]
variable[wbfmtdict] assign[=] list[[<ast.Call object at 0x7da20c6a8c40>, <ast.Call object at 0x7da1b26aca90>, <ast.Call object at 0x7da1b26acd00>, <ast.Call object at 0x7da1b26afe80>]]
variable[fmtname2wbfmtobj] assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab430>, <ast.Constant object at 0x7da20c6a91e0>, <ast.Constant object at 0x7da20c6abeb0>, <ast.Constant object at 0x7da20c6ab490>], [<ast.Call object at 0x7da20c6a99c0>, <ast.Call object at 0x7da20c6abdc0>, <ast.Call object at 0x7da20c6a9480>, <ast.Call object at 0x7da20c6aace0>]]
variable[ntval2wbfmtdict] assign[=] call[name[kws].get, parameter[constant[ntval2wbfmtdict], constant[None]]]
if compare[name[ntval2wbfmtdict] is_not constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bccabf0>, <ast.Name object at 0x7da18bcca050>]]] in starred[call[name[ntval2wbfmtdict].items, parameter[]]] begin[:]
call[name[fmtname2wbfmtobj]][name[ntval]] assign[=] call[name[workbook].add_format, parameter[name[wbfmtdict]]]
if compare[constant[ntfld_wbfmt] <ast.NotIn object at 0x7da2590d7190> name[kws]] begin[:]
call[name[sys].stdout.write, parameter[constant[**WARNING: 'ntfld_wbfmt' NOT PRESENT
]]]
return[name[fmtname2wbfmtobj]] | keyword[def] identifier[_init_fmtname2wbfmtobj] ( identifier[self] , identifier[workbook] ,** identifier[kws] ):
literal[string]
identifier[wbfmtdict] =[
identifier[kws] . identifier[get] ( literal[string] , identifier[self] . identifier[dflt_wbfmtdict] [ literal[int] ]),
identifier[kws] . identifier[get] ( literal[string] , identifier[self] . identifier[dflt_wbfmtdict] [ literal[int] ]),
identifier[kws] . identifier[get] ( literal[string] , identifier[self] . identifier[dflt_wbfmtdict] [ literal[int] ]),
identifier[kws] . identifier[get] ( literal[string] , identifier[self] . identifier[dflt_wbfmtdict] [ literal[int] ])]
identifier[fmtname2wbfmtobj] ={
literal[string] : identifier[workbook] . identifier[add_format] ( identifier[wbfmtdict] [ literal[int] ]),
literal[string] : identifier[workbook] . identifier[add_format] ( identifier[wbfmtdict] [ literal[int] ]),
literal[string] : identifier[workbook] . identifier[add_format] ( identifier[wbfmtdict] [ literal[int] ]),
literal[string] : identifier[workbook] . identifier[add_format] ( identifier[wbfmtdict] [ literal[int] ])}
identifier[ntval2wbfmtdict] = identifier[kws] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[ntval2wbfmtdict] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ntval] , identifier[wbfmtdict] keyword[in] identifier[ntval2wbfmtdict] . identifier[items] ():
identifier[fmtname2wbfmtobj] [ identifier[ntval] ]= identifier[workbook] . identifier[add_format] ( identifier[wbfmtdict] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kws] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
keyword[return] identifier[fmtname2wbfmtobj] | def _init_fmtname2wbfmtobj(self, workbook, **kws):
"""Initialize fmtname2wbfmtobj."""
wbfmtdict = [kws.get('format_txt0', self.dflt_wbfmtdict[0]), kws.get('format_txt1', self.dflt_wbfmtdict[1]), kws.get('format_txt2', self.dflt_wbfmtdict[2]), kws.get('format_txt3', self.dflt_wbfmtdict[3])]
fmtname2wbfmtobj = {'plain': workbook.add_format(wbfmtdict[0]), 'plain bold': workbook.add_format(wbfmtdict[3]), 'very light grey': workbook.add_format(wbfmtdict[1]), 'light grey': workbook.add_format(wbfmtdict[2])}
# Use a xlsx namedtuple field value to set row color
ntval2wbfmtdict = kws.get('ntval2wbfmtdict', None)
if ntval2wbfmtdict is not None:
for (ntval, wbfmtdict) in ntval2wbfmtdict.items():
fmtname2wbfmtobj[ntval] = workbook.add_format(wbfmtdict) # depends on [control=['for'], data=[]]
if 'ntfld_wbfmt' not in kws:
sys.stdout.write("**WARNING: 'ntfld_wbfmt' NOT PRESENT\n") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ntval2wbfmtdict']]
return fmtname2wbfmtobj |
def check(self, request, user):
"""
check if the service is well configured
:return: Boolean
"""
redirect_uris = '%s://%s%s' % (request.scheme, request.get_host(), reverse('mastodon_callback'))
us = UserService.objects.get(user=user,
name='ServiceMastodon')
client_id, client_secret = MastodonAPI.create_app(
client_name="TriggerHappy", api_base_url=us.host,
redirect_uris=redirect_uris)
# get the token by logging in
mastodon = MastodonAPI(
client_id=client_id,
client_secret=client_secret,
api_base_url=us.host
)
try:
mastodon.log_in(username=us.username, password=us.password)
return True
except MastodonIllegalArgumentError as e:
return e | def function[check, parameter[self, request, user]]:
constant[
check if the service is well configured
:return: Boolean
]
variable[redirect_uris] assign[=] binary_operation[constant[%s://%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6e67a0>, <ast.Call object at 0x7da20c6e4190>, <ast.Call object at 0x7da20c6e6740>]]]
variable[us] assign[=] call[name[UserService].objects.get, parameter[]]
<ast.Tuple object at 0x7da20c6e4640> assign[=] call[name[MastodonAPI].create_app, parameter[]]
variable[mastodon] assign[=] call[name[MastodonAPI], parameter[]]
<ast.Try object at 0x7da1b26af550> | keyword[def] identifier[check] ( identifier[self] , identifier[request] , identifier[user] ):
literal[string]
identifier[redirect_uris] = literal[string] %( identifier[request] . identifier[scheme] , identifier[request] . identifier[get_host] (), identifier[reverse] ( literal[string] ))
identifier[us] = identifier[UserService] . identifier[objects] . identifier[get] ( identifier[user] = identifier[user] ,
identifier[name] = literal[string] )
identifier[client_id] , identifier[client_secret] = identifier[MastodonAPI] . identifier[create_app] (
identifier[client_name] = literal[string] , identifier[api_base_url] = identifier[us] . identifier[host] ,
identifier[redirect_uris] = identifier[redirect_uris] )
identifier[mastodon] = identifier[MastodonAPI] (
identifier[client_id] = identifier[client_id] ,
identifier[client_secret] = identifier[client_secret] ,
identifier[api_base_url] = identifier[us] . identifier[host]
)
keyword[try] :
identifier[mastodon] . identifier[log_in] ( identifier[username] = identifier[us] . identifier[username] , identifier[password] = identifier[us] . identifier[password] )
keyword[return] keyword[True]
keyword[except] identifier[MastodonIllegalArgumentError] keyword[as] identifier[e] :
keyword[return] identifier[e] | def check(self, request, user):
"""
check if the service is well configured
:return: Boolean
"""
redirect_uris = '%s://%s%s' % (request.scheme, request.get_host(), reverse('mastodon_callback'))
us = UserService.objects.get(user=user, name='ServiceMastodon')
(client_id, client_secret) = MastodonAPI.create_app(client_name='TriggerHappy', api_base_url=us.host, redirect_uris=redirect_uris)
# get the token by logging in
mastodon = MastodonAPI(client_id=client_id, client_secret=client_secret, api_base_url=us.host)
try:
mastodon.log_in(username=us.username, password=us.password)
return True # depends on [control=['try'], data=[]]
except MastodonIllegalArgumentError as e:
return e # depends on [control=['except'], data=['e']] |
def login(self, username, password, state=None, sync=True):
"""Authenticate to Google with the provided credentials & sync.
Args:
email (str): The account to use.
password (str): The account password.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
"""
auth = APIAuth(self.OAUTH_SCOPES)
ret = auth.login(username, password, get_mac())
if ret:
self.load(auth, state, sync)
return ret | def function[login, parameter[self, username, password, state, sync]]:
constant[Authenticate to Google with the provided credentials & sync.
Args:
email (str): The account to use.
password (str): The account password.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
]
variable[auth] assign[=] call[name[APIAuth], parameter[name[self].OAUTH_SCOPES]]
variable[ret] assign[=] call[name[auth].login, parameter[name[username], name[password], call[name[get_mac], parameter[]]]]
if name[ret] begin[:]
call[name[self].load, parameter[name[auth], name[state], name[sync]]]
return[name[ret]] | keyword[def] identifier[login] ( identifier[self] , identifier[username] , identifier[password] , identifier[state] = keyword[None] , identifier[sync] = keyword[True] ):
literal[string]
identifier[auth] = identifier[APIAuth] ( identifier[self] . identifier[OAUTH_SCOPES] )
identifier[ret] = identifier[auth] . identifier[login] ( identifier[username] , identifier[password] , identifier[get_mac] ())
keyword[if] identifier[ret] :
identifier[self] . identifier[load] ( identifier[auth] , identifier[state] , identifier[sync] )
keyword[return] identifier[ret] | def login(self, username, password, state=None, sync=True):
"""Authenticate to Google with the provided credentials & sync.
Args:
email (str): The account to use.
password (str): The account password.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
"""
auth = APIAuth(self.OAUTH_SCOPES)
ret = auth.login(username, password, get_mac())
if ret:
self.load(auth, state, sync) # depends on [control=['if'], data=[]]
return ret |
def remote(func):
"""
Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.mode == 'server':
# In server mode, call the function
return func(self, *args, **kwargs)
# Make sure we're connected
if not self.conn:
self.connect()
# Call the remote function
self.conn.send('CALL', func.__name__, args, kwargs)
# Receive the response
cmd, payload = self.conn.recv()
if cmd == 'ERR':
self.close()
raise Exception("Catastrophic error from server: %s" %
payload[0])
elif cmd == 'EXC':
exc_type = utils.find_entrypoint(None, payload[0])
raise exc_type(payload[1])
elif cmd != 'RES':
self.close()
raise Exception("Invalid command response from server: %s" % cmd)
return payload[0]
# Mark it a callable
wrapper._remote = True
# Return the wrapped function
return wrapper | def function[remote, parameter[func]]:
constant[
Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated.
]
def function[wrapper, parameter[self]]:
if compare[name[self].mode equal[==] constant[server]] begin[:]
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da18ede6410>]]]
if <ast.UnaryOp object at 0x7da18ede7460> begin[:]
call[name[self].connect, parameter[]]
call[name[self].conn.send, parameter[constant[CALL], name[func].__name__, name[args], name[kwargs]]]
<ast.Tuple object at 0x7da18ede5990> assign[=] call[name[self].conn.recv, parameter[]]
if compare[name[cmd] equal[==] constant[ERR]] begin[:]
call[name[self].close, parameter[]]
<ast.Raise object at 0x7da18ede6230>
return[call[name[payload]][constant[0]]]
name[wrapper]._remote assign[=] constant[True]
return[name[wrapper]] | keyword[def] identifier[remote] ( identifier[func] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[self] . identifier[mode] == literal[string] :
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[self] . identifier[conn] :
identifier[self] . identifier[connect] ()
identifier[self] . identifier[conn] . identifier[send] ( literal[string] , identifier[func] . identifier[__name__] , identifier[args] , identifier[kwargs] )
identifier[cmd] , identifier[payload] = identifier[self] . identifier[conn] . identifier[recv] ()
keyword[if] identifier[cmd] == literal[string] :
identifier[self] . identifier[close] ()
keyword[raise] identifier[Exception] ( literal[string] %
identifier[payload] [ literal[int] ])
keyword[elif] identifier[cmd] == literal[string] :
identifier[exc_type] = identifier[utils] . identifier[find_entrypoint] ( keyword[None] , identifier[payload] [ literal[int] ])
keyword[raise] identifier[exc_type] ( identifier[payload] [ literal[int] ])
keyword[elif] identifier[cmd] != literal[string] :
identifier[self] . identifier[close] ()
keyword[raise] identifier[Exception] ( literal[string] % identifier[cmd] )
keyword[return] identifier[payload] [ literal[int] ]
identifier[wrapper] . identifier[_remote] = keyword[True]
keyword[return] identifier[wrapper] | def remote(func):
"""
Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.mode == 'server':
# In server mode, call the function
return func(self, *args, **kwargs) # depends on [control=['if'], data=[]]
# Make sure we're connected
if not self.conn:
self.connect() # depends on [control=['if'], data=[]]
# Call the remote function
self.conn.send('CALL', func.__name__, args, kwargs)
# Receive the response
(cmd, payload) = self.conn.recv()
if cmd == 'ERR':
self.close()
raise Exception('Catastrophic error from server: %s' % payload[0]) # depends on [control=['if'], data=[]]
elif cmd == 'EXC':
exc_type = utils.find_entrypoint(None, payload[0])
raise exc_type(payload[1]) # depends on [control=['if'], data=[]]
elif cmd != 'RES':
self.close()
raise Exception('Invalid command response from server: %s' % cmd) # depends on [control=['if'], data=['cmd']]
return payload[0]
# Mark it a callable
wrapper._remote = True
# Return the wrapped function
return wrapper |
def normpath(path):
    """Normalize given path in various different forms.

    >>> normpath("/tmp/../etc/hosts")
    '/etc/hosts'
    >>> normpath("~root/t")
    '/root/t'
    """
    pipeline = [os.path.normpath, os.path.abspath]
    if "~" in path:
        # Expand '~user' first so the later steps see a concrete path.
        pipeline.insert(0, os.path.expanduser)
    return chaincalls(pipeline, path)
constant[Normalize given path in various different forms.
>>> normpath("/tmp/../etc/hosts")
'/etc/hosts'
>>> normpath("~root/t")
'/root/t'
]
variable[funcs] assign[=] list[[<ast.Attribute object at 0x7da20c6e6830>, <ast.Attribute object at 0x7da20c6e5d50>]]
if compare[constant[~] in name[path]] begin[:]
variable[funcs] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da20c6e7a30>]] + name[funcs]]
return[call[name[chaincalls], parameter[name[funcs], name[path]]]] | keyword[def] identifier[normpath] ( identifier[path] ):
literal[string]
identifier[funcs] =[ identifier[os] . identifier[path] . identifier[normpath] , identifier[os] . identifier[path] . identifier[abspath] ]
keyword[if] literal[string] keyword[in] identifier[path] :
identifier[funcs] =[ identifier[os] . identifier[path] . identifier[expanduser] ]+ identifier[funcs]
keyword[return] identifier[chaincalls] ( identifier[funcs] , identifier[path] ) | def normpath(path):
"""Normalize given path in various different forms.
>>> normpath("/tmp/../etc/hosts")
'/etc/hosts'
>>> normpath("~root/t")
'/root/t'
"""
funcs = [os.path.normpath, os.path.abspath]
if '~' in path:
funcs = [os.path.expanduser] + funcs # depends on [control=['if'], data=[]]
return chaincalls(funcs, path) |
def do_delete(self, args):
    '''delete the entire contents of the current namespace'''
    ns = self.config['namespace']
    # Unless --assume-yes was given, the user must type the namespace
    # back verbatim as an explicit confirmation before anything is wiped.
    confirmed = args.assume_yes
    if not confirmed:
        typed = raw_input('Delete everything in {0!r}? Enter namespace: '
                          .format(ns))
        confirmed = (typed == ns)
    if not confirmed:
        self.stdout.write('not deleting anything\n')
        return
    self.stdout.write('deleting namespace {0!r}\n'.format(ns))
    self.task_master.clear()
constant[delete the entire contents of the current namespace]
variable[namespace] assign[=] call[name[self].config][constant[namespace]]
if <ast.UnaryOp object at 0x7da18f00ce20> begin[:]
variable[response] assign[=] call[name[raw_input], parameter[call[constant[Delete everything in {0!r}? Enter namespace: ].format, parameter[name[namespace]]]]]
if compare[name[response] not_equal[!=] name[namespace]] begin[:]
call[name[self].stdout.write, parameter[constant[not deleting anything
]]]
return[None]
call[name[self].stdout.write, parameter[call[constant[deleting namespace {0!r}
].format, parameter[name[namespace]]]]]
call[name[self].task_master.clear, parameter[]] | keyword[def] identifier[do_delete] ( identifier[self] , identifier[args] ):
literal[string]
identifier[namespace] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] keyword[not] identifier[args] . identifier[assume_yes] :
identifier[response] = identifier[raw_input] ( literal[string]
. identifier[format] ( identifier[namespace] ))
keyword[if] identifier[response] != identifier[namespace] :
identifier[self] . identifier[stdout] . identifier[write] ( literal[string] )
keyword[return]
identifier[self] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[namespace] ))
identifier[self] . identifier[task_master] . identifier[clear] () | def do_delete(self, args):
"""delete the entire contents of the current namespace"""
namespace = self.config['namespace']
if not args.assume_yes:
response = raw_input('Delete everything in {0!r}? Enter namespace: '.format(namespace))
if response != namespace:
self.stdout.write('not deleting anything\n')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.stdout.write('deleting namespace {0!r}\n'.format(namespace))
self.task_master.clear() |
def visitAdditionOrSubtractionExpression(self, ctx):
    """
    expression: expression (PLUS | MINUS) expression

    Evaluates a binary + / - expression. Numeric arithmetic is tried
    first; if either operand can't be coerced to a decimal, the
    expression is retried as date arithmetic (a date/datetime plus or
    minus a time-of-day or a whole number of days).

    :param ctx: parse-tree context for the binary expression
    :return: the decimal result, or a date/datetime for date arithmetic
        (exact types come from the project's `conversions` helpers)
    :raises EvaluationError: if neither interpretation succeeds
    """
    is_add = ctx.PLUS() is not None  # PLUS() is None when the operator is MINUS
    arg1 = self.visit(ctx.expression(0))
    arg2 = self.visit(ctx.expression(1))
    # first try as decimals
    try:
        _arg1 = conversions.to_decimal(arg1, self._eval_context)
        _arg2 = conversions.to_decimal(arg2, self._eval_context)
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError:
        # At least one operand isn't numeric -- fall through to dates.
        pass
    # then as date + something
    try:
        _arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
        if isinstance(arg2, datetime.time):
            # upgrade our date to datetime
            _arg1 = conversions.to_datetime(_arg1, self._eval_context)
            # convert time value to a duration
            _arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
        else:
            # Any other operand is interpreted as a count of whole days.
            _arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError as ex:
        # Chain the underlying conversion failure so callers see the root cause.
        raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex)
constant[
expression: expression (PLUS | MINUS) expression
]
variable[is_add] assign[=] compare[call[name[ctx].PLUS, parameter[]] is_not constant[None]]
variable[arg1] assign[=] call[name[self].visit, parameter[call[name[ctx].expression, parameter[constant[0]]]]]
variable[arg2] assign[=] call[name[self].visit, parameter[call[name[ctx].expression, parameter[constant[1]]]]]
<ast.Try object at 0x7da1b23457b0>
<ast.Try object at 0x7da1b2347910> | keyword[def] identifier[visitAdditionOrSubtractionExpression] ( identifier[self] , identifier[ctx] ):
literal[string]
identifier[is_add] = identifier[ctx] . identifier[PLUS] () keyword[is] keyword[not] keyword[None]
identifier[arg1] = identifier[self] . identifier[visit] ( identifier[ctx] . identifier[expression] ( literal[int] ))
identifier[arg2] = identifier[self] . identifier[visit] ( identifier[ctx] . identifier[expression] ( literal[int] ))
keyword[try] :
identifier[_arg1] = identifier[conversions] . identifier[to_decimal] ( identifier[arg1] , identifier[self] . identifier[_eval_context] )
identifier[_arg2] = identifier[conversions] . identifier[to_decimal] ( identifier[arg2] , identifier[self] . identifier[_eval_context] )
keyword[return] identifier[_arg1] + identifier[_arg2] keyword[if] identifier[is_add] keyword[else] identifier[_arg1] - identifier[_arg2]
keyword[except] identifier[EvaluationError] :
keyword[pass]
keyword[try] :
identifier[_arg1] = identifier[conversions] . identifier[to_date_or_datetime] ( identifier[arg1] , identifier[self] . identifier[_eval_context] )
keyword[if] identifier[isinstance] ( identifier[arg2] , identifier[datetime] . identifier[time] ):
identifier[_arg1] = identifier[conversions] . identifier[to_datetime] ( identifier[_arg1] , identifier[self] . identifier[_eval_context] )
identifier[_arg2] = identifier[datetime] . identifier[timedelta] ( identifier[hours] = identifier[arg2] . identifier[hour] , identifier[minutes] = identifier[arg2] . identifier[minute] , identifier[seconds] = identifier[arg2] . identifier[second] , identifier[microseconds] = identifier[arg2] . identifier[microsecond] )
keyword[else] :
identifier[_arg2] = identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[conversions] . identifier[to_integer] ( identifier[arg2] , identifier[self] . identifier[_eval_context] ))
keyword[return] identifier[_arg1] + identifier[_arg2] keyword[if] identifier[is_add] keyword[else] identifier[_arg1] - identifier[_arg2]
keyword[except] identifier[EvaluationError] keyword[as] identifier[ex] :
keyword[raise] identifier[EvaluationError] ( literal[string] , identifier[ex] ) | def visitAdditionOrSubtractionExpression(self, ctx):
"""
expression: expression (PLUS | MINUS) expression
"""
is_add = ctx.PLUS() is not None
arg1 = self.visit(ctx.expression(0))
arg2 = self.visit(ctx.expression(1))
# first try as decimals
try:
_arg1 = conversions.to_decimal(arg1, self._eval_context)
_arg2 = conversions.to_decimal(arg2, self._eval_context)
return _arg1 + _arg2 if is_add else _arg1 - _arg2 # depends on [control=['try'], data=[]]
except EvaluationError:
pass # depends on [control=['except'], data=[]]
# then as date + something
try:
_arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
if isinstance(arg2, datetime.time):
# upgrade our date to datetime
_arg1 = conversions.to_datetime(_arg1, self._eval_context)
# convert time value to a duration
_arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond) # depends on [control=['if'], data=[]]
else:
_arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
return _arg1 + _arg2 if is_add else _arg1 - _arg2 # depends on [control=['try'], data=[]]
except EvaluationError as ex:
raise EvaluationError('Expression could not be evaluated as decimal or date arithmetic', ex) # depends on [control=['except'], data=['ex']] |
def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
    '''Source-separation visualization

    Parameters
    ----------
    sources : np.ndarray, shape=(nsrc, nsampl)
        A list of waveform buffers corresponding to each source
    fs : number > 0
        The sampling rate
    labels : list of strings
        An optional list of descriptors corresponding to each source
    alpha : float in [0, 1]
        Maximum alpha (opacity) of spectrogram values.
    ax : matplotlib.pyplot.axes
        An axis handle on which to draw the spectrograms.
        If none is provided, a new set of axes is created.
    kwargs
        Additional keyword arguments to ``scipy.signal.spectrogram``

    Returns
    -------
    ax
        The axis handle for this plot
    '''
    # Get the axes handle
    ax, new_axes = __get_axes(ax=ax)
    # Make sure we have at least two dimensions
    sources = np.atleast_2d(sources)
    if labels is None:
        # Default to generic "Source 0", "Source 1", ... labels.
        labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
    # Caller may override; 'spectrum' scaling is the default.
    kwargs.setdefault('scaling', 'spectrum')
    # The cumulative spectrogram across sources
    # is used to establish the reference power
    # for each individual source
    cumspec = None
    specs = []
    for i, src in enumerate(sources):
        freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
        specs.append(spec)
        if cumspec is None:
            # Copy so later += does not mutate the first source's spec.
            cumspec = spec.copy()
        else:
            cumspec += spec
    # Shared color scale: 1e-6 of the peak spans roughly 60 dB of
    # dynamic range (assuming power scaling -- TODO confirm for other
    # `scaling` values).
    ref_max = cumspec.max()
    ref_min = ref_max * 1e-6
    color_conv = ColorConverter()
    for i, spec in enumerate(specs):
        # For each source, grab a new color from the cycler
        # Then construct a colormap that interpolates from
        # [transparent white -> new color]
        # NOTE(review): ax._get_lines.prop_cycler is private matplotlib
        # API and may break on newer releases -- verify before upgrading.
        color = next(ax._get_lines.prop_cycler)['color']
        color = color_conv.to_rgba(color, alpha=alpha)
        cmap = LinearSegmentedColormap.from_list(labels[i],
                                                 [(1.0, 1.0, 1.0, 0.0),
                                                  color])
        # Overlay this source's spectrogram; transparency lets the
        # per-source colors blend where sources overlap.
        ax.pcolormesh(times, freqs, spec,
                      cmap=cmap,
                      norm=LogNorm(vmin=ref_min, vmax=ref_max),
                      shading='gouraud',
                      label=labels[i])
        # Attach a 0x0 rect to the axis with the corresponding label
        # This way, it will show up in the legend
        ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
    if new_axes:
        # Only adjust limits on axes we created ourselves.
        ax.axis('tight')
    return ax
constant[Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
]
<ast.Tuple object at 0x7da1b0f18430> assign[=] call[name[__get_axes], parameter[]]
variable[sources] assign[=] call[name[np].atleast_2d, parameter[name[sources]]]
if compare[name[labels] is constant[None]] begin[:]
variable[labels] assign[=] <ast.ListComp object at 0x7da1b0f18850>
call[name[kwargs].setdefault, parameter[constant[scaling], constant[spectrum]]]
variable[cumspec] assign[=] constant[None]
variable[specs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0f186d0>, <ast.Name object at 0x7da1b0f189a0>]]] in starred[call[name[enumerate], parameter[name[sources]]]] begin[:]
<ast.Tuple object at 0x7da1b0f1b670> assign[=] call[name[spectrogram], parameter[name[src]]]
call[name[specs].append, parameter[name[spec]]]
if compare[name[cumspec] is constant[None]] begin[:]
variable[cumspec] assign[=] call[name[spec].copy, parameter[]]
variable[ref_max] assign[=] call[name[cumspec].max, parameter[]]
variable[ref_min] assign[=] binary_operation[name[ref_max] * constant[1e-06]]
variable[color_conv] assign[=] call[name[ColorConverter], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0f18fd0>, <ast.Name object at 0x7da1b0f18f10>]]] in starred[call[name[enumerate], parameter[name[specs]]]] begin[:]
variable[color] assign[=] call[call[name[next], parameter[name[ax]._get_lines.prop_cycler]]][constant[color]]
variable[color] assign[=] call[name[color_conv].to_rgba, parameter[name[color]]]
variable[cmap] assign[=] call[name[LinearSegmentedColormap].from_list, parameter[call[name[labels]][name[i]], list[[<ast.Tuple object at 0x7da1b0f18a90>, <ast.Name object at 0x7da1b0f19c00>]]]]
call[name[ax].pcolormesh, parameter[name[times], name[freqs], name[spec]]]
call[name[ax].add_patch, parameter[call[name[Rectangle], parameter[tuple[[<ast.Constant object at 0x7da1b0f1b520>, <ast.Constant object at 0x7da1b0f1b610>]], constant[0], constant[0]]]]]
if name[new_axes] begin[:]
call[name[ax].axis, parameter[constant[tight]]]
return[name[ax]] | keyword[def] identifier[separation] ( identifier[sources] , identifier[fs] = literal[int] , identifier[labels] = keyword[None] , identifier[alpha] = literal[int] , identifier[ax] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ax] , identifier[new_axes] = identifier[__get_axes] ( identifier[ax] = identifier[ax] )
identifier[sources] = identifier[np] . identifier[atleast_2d] ( identifier[sources] )
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[labels] =[ literal[string] . identifier[format] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[sources] ))]
identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[cumspec] = keyword[None]
identifier[specs] =[]
keyword[for] identifier[i] , identifier[src] keyword[in] identifier[enumerate] ( identifier[sources] ):
identifier[freqs] , identifier[times] , identifier[spec] = identifier[spectrogram] ( identifier[src] , identifier[fs] = identifier[fs] ,** identifier[kwargs] )
identifier[specs] . identifier[append] ( identifier[spec] )
keyword[if] identifier[cumspec] keyword[is] keyword[None] :
identifier[cumspec] = identifier[spec] . identifier[copy] ()
keyword[else] :
identifier[cumspec] += identifier[spec]
identifier[ref_max] = identifier[cumspec] . identifier[max] ()
identifier[ref_min] = identifier[ref_max] * literal[int]
identifier[color_conv] = identifier[ColorConverter] ()
keyword[for] identifier[i] , identifier[spec] keyword[in] identifier[enumerate] ( identifier[specs] ):
identifier[color] = identifier[next] ( identifier[ax] . identifier[_get_lines] . identifier[prop_cycler] )[ literal[string] ]
identifier[color] = identifier[color_conv] . identifier[to_rgba] ( identifier[color] , identifier[alpha] = identifier[alpha] )
identifier[cmap] = identifier[LinearSegmentedColormap] . identifier[from_list] ( identifier[labels] [ identifier[i] ],
[( literal[int] , literal[int] , literal[int] , literal[int] ),
identifier[color] ])
identifier[ax] . identifier[pcolormesh] ( identifier[times] , identifier[freqs] , identifier[spec] ,
identifier[cmap] = identifier[cmap] ,
identifier[norm] = identifier[LogNorm] ( identifier[vmin] = identifier[ref_min] , identifier[vmax] = identifier[ref_max] ),
identifier[shading] = literal[string] ,
identifier[label] = identifier[labels] [ identifier[i] ])
identifier[ax] . identifier[add_patch] ( identifier[Rectangle] (( literal[int] , literal[int] ), literal[int] , literal[int] , identifier[color] = identifier[color] , identifier[label] = identifier[labels] [ identifier[i] ]))
keyword[if] identifier[new_axes] :
identifier[ax] . identifier[axis] ( literal[string] )
keyword[return] identifier[ax] | def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
"""Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
"""
# Get the axes handle
(ax, new_axes) = __get_axes(ax=ax)
# Make sure we have at least two dimensions
sources = np.atleast_2d(sources)
if labels is None:
labels = ['Source {:d}'.format(_) for _ in range(len(sources))] # depends on [control=['if'], data=['labels']]
kwargs.setdefault('scaling', 'spectrum')
# The cumulative spectrogram across sources
# is used to establish the reference power
# for each individual source
cumspec = None
specs = []
for (i, src) in enumerate(sources):
(freqs, times, spec) = spectrogram(src, fs=fs, **kwargs)
specs.append(spec)
if cumspec is None:
cumspec = spec.copy() # depends on [control=['if'], data=['cumspec']]
else:
cumspec += spec # depends on [control=['for'], data=[]]
ref_max = cumspec.max()
ref_min = ref_max * 1e-06
color_conv = ColorConverter()
for (i, spec) in enumerate(specs):
# For each source, grab a new color from the cycler
# Then construct a colormap that interpolates from
# [transparent white -> new color]
color = next(ax._get_lines.prop_cycler)['color']
color = color_conv.to_rgba(color, alpha=alpha)
cmap = LinearSegmentedColormap.from_list(labels[i], [(1.0, 1.0, 1.0, 0.0), color])
ax.pcolormesh(times, freqs, spec, cmap=cmap, norm=LogNorm(vmin=ref_min, vmax=ref_max), shading='gouraud', label=labels[i])
# Attach a 0x0 rect to the axis with the corresponding label
# This way, it will show up in the legend
ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i])) # depends on [control=['for'], data=[]]
if new_axes:
ax.axis('tight') # depends on [control=['if'], data=[]]
return ax |
def set_proxy(self, proxy):  # pragma: no cover, not with unit tests
    """Set HTTP proxy

    :param proxy: proxy url
    :type proxy: str
    :return: None
    """
    if not proxy:
        # Nothing to configure; leave the session untouched.
        return
    logger.debug('PROXY SETTING PROXY %s', proxy)
    # Route both plain and TLS traffic through the same proxy.
    self._requests_con.proxies = {
        'http': proxy,
        'https': proxy,
    }
constant[Set HTTP proxy
:param proxy: proxy url
:type proxy: str
:return: None
]
if name[proxy] begin[:]
call[name[logger].debug, parameter[constant[PROXY SETTING PROXY %s], name[proxy]]]
name[self]._requests_con.proxies assign[=] dictionary[[<ast.Constant object at 0x7da18bc71c90>, <ast.Constant object at 0x7da18bc70ee0>], [<ast.Name object at 0x7da18bc72dd0>, <ast.Name object at 0x7da18bc727d0>]] | keyword[def] identifier[set_proxy] ( identifier[self] , identifier[proxy] ):
literal[string]
keyword[if] identifier[proxy] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[proxy] )
identifier[self] . identifier[_requests_con] . identifier[proxies] ={
literal[string] : identifier[proxy] ,
literal[string] : identifier[proxy] ,
} | def set_proxy(self, proxy): # pragma: no cover, not with unit tests
'Set HTTP proxy\n\n :param proxy: proxy url\n :type proxy: str\n :return: None\n '
if proxy:
logger.debug('PROXY SETTING PROXY %s', proxy)
self._requests_con.proxies = {'http': proxy, 'https': proxy} # depends on [control=['if'], data=[]] |
def update(self, dt=-1):
    """Advance the wrapped native model by one timestep.

    :param dt: timestep to advance by; the sentinel -1 (the default)
        means "use the model's own default timestep" as reported by
        ``get_time_step()``.
    :return: the int status code returned by the native ``update`` call.
    """
    native_update = self.library.update
    # Declare the C signature before calling through ctypes.
    native_update.argtypes = [c_double]
    native_update.restype = c_int
    # -1 selects the library-provided default timestep.
    step = self.get_time_step() if dt == -1 else dt
    return wrap(native_update)(step)
constant[
Return type string, compatible with numpy.
]
name[self].library.update.argtypes assign[=] list[[<ast.Name object at 0x7da1b26a29e0>]]
name[self].library.update.restype assign[=] name[c_int]
if compare[name[dt] equal[==] <ast.UnaryOp object at 0x7da1b26a05b0>] begin[:]
variable[dt] assign[=] call[name[self].get_time_step, parameter[]]
variable[result] assign[=] call[call[name[wrap], parameter[name[self].library.update]], parameter[name[dt]]]
return[name[result]] | keyword[def] identifier[update] ( identifier[self] , identifier[dt] =- literal[int] ):
literal[string]
identifier[self] . identifier[library] . identifier[update] . identifier[argtypes] =[ identifier[c_double] ]
identifier[self] . identifier[library] . identifier[update] . identifier[restype] = identifier[c_int]
keyword[if] identifier[dt] ==- literal[int] :
identifier[dt] = identifier[self] . identifier[get_time_step] ()
identifier[result] = identifier[wrap] ( identifier[self] . identifier[library] . identifier[update] )( identifier[dt] )
keyword[return] identifier[result] | def update(self, dt=-1):
"""
Return type string, compatible with numpy.
"""
self.library.update.argtypes = [c_double]
self.library.update.restype = c_int
if dt == -1:
# use default timestep
dt = self.get_time_step() # depends on [control=['if'], data=['dt']]
result = wrap(self.library.update)(dt)
return result |
def trees_to_dict(trees_list):
    """Merge the dict form of every `TreeNode` in *trees_list* into a
    single expansion dictionary.

    :param trees_list: A list of `TreeNode` instances
    :type trees_list: list[TreeNode]
    :return: the combined expansion dictionary; when two trees expand
        the same key, the later tree in the list wins
    :rtype: dict[union[str, unicode]]
    """
    combined = {}
    for node in trees_list:
        combined.update(node.to_dict())
    return combined
constant[
Convert a list of `TreeNode`s to an expansion dictionary.
:param trees_list: A list of `TreeNode` instances
:type trees_list: list[TreeNode]
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
:rtype: dict[union[str, unicode]]
]
variable[result] assign[=] dictionary[[], []]
for taget[name[tree]] in starred[name[trees_list]] begin[:]
call[name[result].update, parameter[call[name[tree].to_dict, parameter[]]]]
return[name[result]] | keyword[def] identifier[trees_to_dict] ( identifier[trees_list] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[tree] keyword[in] identifier[trees_list] :
identifier[result] . identifier[update] ( identifier[tree] . identifier[to_dict] ())
keyword[return] identifier[result] | def trees_to_dict(trees_list):
"""
Convert a list of `TreeNode`s to an expansion dictionary.
:param trees_list: A list of `TreeNode` instances
:type trees_list: list[TreeNode]
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
:rtype: dict[union[str, unicode]]
"""
result = {}
for tree in trees_list:
result.update(tree.to_dict()) # depends on [control=['for'], data=['tree']]
return result |
def _normalize_utf8_keys(kwargs):
"""When kwargs are passed literally in a source file, their keys are ascii: normalize."""
if any(type(key) is binary_type for key in kwargs.keys()):
# This is to preserve the original dict type for kwargs.
dict_type = type(kwargs)
return dict_type([(text_type(k), v) for k, v in kwargs.items()])
return kwargs | def function[_normalize_utf8_keys, parameter[kwargs]]:
constant[When kwargs are passed literally in a source file, their keys are ascii: normalize.]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1ddf130>]] begin[:]
variable[dict_type] assign[=] call[name[type], parameter[name[kwargs]]]
return[call[name[dict_type], parameter[<ast.ListComp object at 0x7da18c4ce0b0>]]]
return[name[kwargs]] | keyword[def] identifier[_normalize_utf8_keys] ( identifier[kwargs] ):
literal[string]
keyword[if] identifier[any] ( identifier[type] ( identifier[key] ) keyword[is] identifier[binary_type] keyword[for] identifier[key] keyword[in] identifier[kwargs] . identifier[keys] ()):
identifier[dict_type] = identifier[type] ( identifier[kwargs] )
keyword[return] identifier[dict_type] ([( identifier[text_type] ( identifier[k] ), identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ()])
keyword[return] identifier[kwargs] | def _normalize_utf8_keys(kwargs):
"""When kwargs are passed literally in a source file, their keys are ascii: normalize."""
if any((type(key) is binary_type for key in kwargs.keys())):
# This is to preserve the original dict type for kwargs.
dict_type = type(kwargs)
return dict_type([(text_type(k), v) for (k, v) in kwargs.items()]) # depends on [control=['if'], data=[]]
return kwargs |
def iterfiles(self, pattern=None, abspath=False):
    """ Generator for all the files not excluded recursively.

    Return relative path.

    :type pattern: str
    :param pattern: Unix style (glob like/gitignore like) pattern

    :type abspath: bool
    :param abspath: when True, yield absolute paths instead of paths
        relative to this directory
    """
    # Compile the glob once, up front, instead of re-testing
    # `pattern is not None` for every file (the original condition
    # `pattern is None or (pattern is not None and ...)` contained a
    # redundant, always-true second test).
    globster = Globster([pattern]) if pattern is not None else None
    for root, dirs, files in self.walk():
        for f in files:
            # No pattern means "every file"; otherwise the file name
            # must match the compiled glob.
            if globster is not None and not globster.match(f):
                continue
            full = os.path.join(root, f)
            yield full if abspath else self.relpath(full)
constant[ Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
]
if compare[name[pattern] is_not constant[None]] begin[:]
variable[globster] assign[=] call[name[Globster], parameter[list[[<ast.Name object at 0x7da20cabdff0>]]]]
for taget[tuple[[<ast.Name object at 0x7da20cabd090>, <ast.Name object at 0x7da20cabcee0>, <ast.Name object at 0x7da20cabe170>]]] in starred[call[name[self].walk, parameter[]]] begin[:]
for taget[name[f]] in starred[name[files]] begin[:]
if <ast.BoolOp object at 0x7da20cabf370> begin[:]
if name[abspath] begin[:]
<ast.Yield object at 0x7da20cabc880> | keyword[def] identifier[iterfiles] ( identifier[self] , identifier[pattern] = keyword[None] , identifier[abspath] = keyword[False] ):
literal[string]
keyword[if] identifier[pattern] keyword[is] keyword[not] keyword[None] :
identifier[globster] = identifier[Globster] ([ identifier[pattern] ])
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[self] . identifier[walk] ():
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] identifier[pattern] keyword[is] keyword[None] keyword[or] ( identifier[pattern] keyword[is] keyword[not] keyword[None] keyword[and] identifier[globster] . identifier[match] ( identifier[f] )):
keyword[if] identifier[abspath] :
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] )
keyword[else] :
keyword[yield] identifier[self] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] )) | def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern]) # depends on [control=['if'], data=['pattern']]
for (root, dirs, files) in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f) # depends on [control=['if'], data=[]]
else:
yield self.relpath(os.path.join(root, f)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]] |
def update_scenario(scenario,update_data=True,update_groups=True,flush=True,**kwargs):
    """
    Update a single scenario
    as all resources already exist, there is no need to worry
    about negative IDS

    flush = True flushes to the DB at the end of the function.
    flush = False does not flush, assuming that it will happen as part
    of another process, like update_network.

    :param scenario: incoming scenario object whose attributes are copied
        onto the stored scenario identified by ``scenario.id``
    :param update_data: when True, bulk-insert the datasets and update
        each resource scenario
    :param update_groups: when True, re-apply the resource group items
    :param kwargs: expects ``user_id`` and optionally ``app_name``
    :return: the updated (ORM) scenario object
    :raises PermissionError: if the stored scenario is locked
    """
    user_id = kwargs.get('user_id')
    scen = _get_scenario(scenario.id, user_id)
    # A locked scenario must be explicitly unlocked before any edit.
    if scen.locked == 'Y':
        raise PermissionError('Scenario is locked. Unlock before editing.')
    # Times are stored as strings: a float is stringified directly,
    # anything else is converted via timestamp_to_ordinal first
    # (which presumably may return None -- hence the guard).
    start_time = None
    if isinstance(scenario.start_time, float):
        start_time = six.text_type(scenario.start_time)
    else:
        start_time = timestamp_to_ordinal(scenario.start_time)
        if start_time is not None:
            start_time = six.text_type(start_time)
    # Same conversion for the end time.
    end_time = None
    if isinstance(scenario.end_time, float):
        end_time = six.text_type(scenario.end_time)
    else:
        end_time = timestamp_to_ordinal(scenario.end_time)
        if end_time is not None:
            end_time = six.text_type(end_time)
    # Copy the scalar attributes onto the stored scenario.
    scen.name = scenario.name
    scen.description = scenario.description
    scen.layout = scenario.get_layout()
    scen.start_time = start_time
    scen.end_time = end_time
    scen.time_step = scenario.time_step
    # NOTE(review): `== None` (not `is None`) -- looks intentional for
    # these project objects; confirm before "fixing".
    if scenario.resourcescenarios == None:
        scenario.resourcescenarios = []
    if scenario.resourcegroupitems == None:
        scenario.resourcegroupitems = []
    #lazy load resourcescenarios from the DB (bare attribute access is
    #for its side effect only)
    scen.resourcescenarios
    if update_data is True:
        # Insert all datasets in one bulk call, then update each resource
        # scenario with its corresponding inserted dataset (same order).
        datasets = [rs.dataset for rs in scenario.resourcescenarios]
        updated_datasets = data._bulk_insert_data(datasets, user_id, kwargs.get('app_name'))
        for i, r_scen in enumerate(scenario.resourcescenarios):
            _update_resourcescenario(scen, r_scen, dataset=updated_datasets[i], user_id=user_id, source=kwargs.get('app_name'))
    #lazy load resource group items from the DB
    scen.resourcegroupitems
    if update_groups is True:
        #Get all the existing resource group items for this scenario.
        #Then process all the items sent to this handler.
        #Any in the DB that are not passed in here are removed.
        for group_item in scenario.resourcegroupitems:
            _add_resourcegroupitem(group_item, scenario.id)
    if flush is True:
        db.DBSession.flush()
    return scen
constant[
Update a single scenario
as all resources already exist, there is no need to worry
about negative IDS
flush = True flushes to the DB at the end of the function.
flush = False does not flush, assuming that it will happen as part
of another process, like update_network.
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[scen] assign[=] call[name[_get_scenario], parameter[name[scenario].id, name[user_id]]]
if compare[name[scen].locked equal[==] constant[Y]] begin[:]
<ast.Raise object at 0x7da204564700>
variable[start_time] assign[=] constant[None]
if call[name[isinstance], parameter[name[scenario].start_time, name[float]]] begin[:]
variable[start_time] assign[=] call[name[six].text_type, parameter[name[scenario].start_time]]
variable[end_time] assign[=] constant[None]
if call[name[isinstance], parameter[name[scenario].end_time, name[float]]] begin[:]
variable[end_time] assign[=] call[name[six].text_type, parameter[name[scenario].end_time]]
name[scen].name assign[=] name[scenario].name
name[scen].description assign[=] name[scenario].description
name[scen].layout assign[=] call[name[scenario].get_layout, parameter[]]
name[scen].start_time assign[=] name[start_time]
name[scen].end_time assign[=] name[end_time]
name[scen].time_step assign[=] name[scenario].time_step
if compare[name[scenario].resourcescenarios equal[==] constant[None]] begin[:]
name[scenario].resourcescenarios assign[=] list[[]]
if compare[name[scenario].resourcegroupitems equal[==] constant[None]] begin[:]
name[scenario].resourcegroupitems assign[=] list[[]]
name[scen].resourcescenarios
if compare[name[update_data] is constant[True]] begin[:]
variable[datasets] assign[=] <ast.ListComp object at 0x7da2045675e0>
variable[updated_datasets] assign[=] call[name[data]._bulk_insert_data, parameter[name[datasets], name[user_id], call[name[kwargs].get, parameter[constant[app_name]]]]]
for taget[tuple[[<ast.Name object at 0x7da2045662f0>, <ast.Name object at 0x7da204566c20>]]] in starred[call[name[enumerate], parameter[name[scenario].resourcescenarios]]] begin[:]
call[name[_update_resourcescenario], parameter[name[scen], name[r_scen]]]
name[scen].resourcegroupitems
if compare[name[update_groups] is constant[True]] begin[:]
for taget[name[group_item]] in starred[name[scenario].resourcegroupitems] begin[:]
call[name[_add_resourcegroupitem], parameter[name[group_item], name[scenario].id]]
if compare[name[flush] is constant[True]] begin[:]
call[name[db].DBSession.flush, parameter[]]
return[name[scen]] | keyword[def] identifier[update_scenario] ( identifier[scenario] , identifier[update_data] = keyword[True] , identifier[update_groups] = keyword[True] , identifier[flush] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[scen] = identifier[_get_scenario] ( identifier[scenario] . identifier[id] , identifier[user_id] )
keyword[if] identifier[scen] . identifier[locked] == literal[string] :
keyword[raise] identifier[PermissionError] ( literal[string] )
identifier[start_time] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[scenario] . identifier[start_time] , identifier[float] ):
identifier[start_time] = identifier[six] . identifier[text_type] ( identifier[scenario] . identifier[start_time] )
keyword[else] :
identifier[start_time] = identifier[timestamp_to_ordinal] ( identifier[scenario] . identifier[start_time] )
keyword[if] identifier[start_time] keyword[is] keyword[not] keyword[None] :
identifier[start_time] = identifier[six] . identifier[text_type] ( identifier[start_time] )
identifier[end_time] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[scenario] . identifier[end_time] , identifier[float] ):
identifier[end_time] = identifier[six] . identifier[text_type] ( identifier[scenario] . identifier[end_time] )
keyword[else] :
identifier[end_time] = identifier[timestamp_to_ordinal] ( identifier[scenario] . identifier[end_time] )
keyword[if] identifier[end_time] keyword[is] keyword[not] keyword[None] :
identifier[end_time] = identifier[six] . identifier[text_type] ( identifier[end_time] )
identifier[scen] . identifier[name] = identifier[scenario] . identifier[name]
identifier[scen] . identifier[description] = identifier[scenario] . identifier[description]
identifier[scen] . identifier[layout] = identifier[scenario] . identifier[get_layout] ()
identifier[scen] . identifier[start_time] = identifier[start_time]
identifier[scen] . identifier[end_time] = identifier[end_time]
identifier[scen] . identifier[time_step] = identifier[scenario] . identifier[time_step]
keyword[if] identifier[scenario] . identifier[resourcescenarios] == keyword[None] :
identifier[scenario] . identifier[resourcescenarios] =[]
keyword[if] identifier[scenario] . identifier[resourcegroupitems] == keyword[None] :
identifier[scenario] . identifier[resourcegroupitems] =[]
identifier[scen] . identifier[resourcescenarios]
keyword[if] identifier[update_data] keyword[is] keyword[True] :
identifier[datasets] =[ identifier[rs] . identifier[dataset] keyword[for] identifier[rs] keyword[in] identifier[scenario] . identifier[resourcescenarios] ]
identifier[updated_datasets] = identifier[data] . identifier[_bulk_insert_data] ( identifier[datasets] , identifier[user_id] , identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[for] identifier[i] , identifier[r_scen] keyword[in] identifier[enumerate] ( identifier[scenario] . identifier[resourcescenarios] ):
identifier[_update_resourcescenario] ( identifier[scen] , identifier[r_scen] , identifier[dataset] = identifier[updated_datasets] [ identifier[i] ], identifier[user_id] = identifier[user_id] , identifier[source] = identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[scen] . identifier[resourcegroupitems]
keyword[if] identifier[update_groups] keyword[is] keyword[True] :
keyword[for] identifier[group_item] keyword[in] identifier[scenario] . identifier[resourcegroupitems] :
identifier[_add_resourcegroupitem] ( identifier[group_item] , identifier[scenario] . identifier[id] )
keyword[if] identifier[flush] keyword[is] keyword[True] :
identifier[db] . identifier[DBSession] . identifier[flush] ()
keyword[return] identifier[scen] | def update_scenario(scenario, update_data=True, update_groups=True, flush=True, **kwargs):
"""
Update a single scenario
as all resources already exist, there is no need to worry
about negative IDS
flush = True flushes to the DB at the end of the function.
flush = False does not flush, assuming that it will happen as part
of another process, like update_network.
"""
user_id = kwargs.get('user_id')
scen = _get_scenario(scenario.id, user_id)
if scen.locked == 'Y':
raise PermissionError('Scenario is locked. Unlock before editing.') # depends on [control=['if'], data=[]]
start_time = None
if isinstance(scenario.start_time, float):
start_time = six.text_type(scenario.start_time) # depends on [control=['if'], data=[]]
else:
start_time = timestamp_to_ordinal(scenario.start_time)
if start_time is not None:
start_time = six.text_type(start_time) # depends on [control=['if'], data=['start_time']]
end_time = None
if isinstance(scenario.end_time, float):
end_time = six.text_type(scenario.end_time) # depends on [control=['if'], data=[]]
else:
end_time = timestamp_to_ordinal(scenario.end_time)
if end_time is not None:
end_time = six.text_type(end_time) # depends on [control=['if'], data=['end_time']]
scen.name = scenario.name
scen.description = scenario.description
scen.layout = scenario.get_layout()
scen.start_time = start_time
scen.end_time = end_time
scen.time_step = scenario.time_step
if scenario.resourcescenarios == None:
scenario.resourcescenarios = [] # depends on [control=['if'], data=[]]
if scenario.resourcegroupitems == None:
scenario.resourcegroupitems = [] # depends on [control=['if'], data=[]]
#lazy load resourcescenarios from the DB
scen.resourcescenarios
if update_data is True:
datasets = [rs.dataset for rs in scenario.resourcescenarios]
updated_datasets = data._bulk_insert_data(datasets, user_id, kwargs.get('app_name'))
for (i, r_scen) in enumerate(scenario.resourcescenarios):
_update_resourcescenario(scen, r_scen, dataset=updated_datasets[i], user_id=user_id, source=kwargs.get('app_name')) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
#lazy load resource grou items from the DB
scen.resourcegroupitems
if update_groups is True:
#Get all the exiting resource group items for this scenario.
#THen process all the items sent to this handler.
#Any in the DB that are not passed in here are removed.
for group_item in scenario.resourcegroupitems:
_add_resourcegroupitem(group_item, scenario.id) # depends on [control=['for'], data=['group_item']] # depends on [control=['if'], data=[]]
if flush is True:
db.DBSession.flush() # depends on [control=['if'], data=[]]
return scen |
def createSegment(self, cell):
"""
Adds a new segment on a cell.
:param cell: (int) Cell index
:returns: (int) New segment index
"""
cellData = self._cells[cell]
if len(self._freeFlatIdxs) > 0:
flatIdx = self._freeFlatIdxs.pop()
else:
flatIdx = self._nextFlatIdx
self._segmentForFlatIdx.append(None)
self._nextFlatIdx += 1
ordinal = self._nextSegmentOrdinal
self._nextSegmentOrdinal += 1
segment = Segment(cell, flatIdx, ordinal)
cellData._segments.append(segment)
self._segmentForFlatIdx[flatIdx] = segment
return segment | def function[createSegment, parameter[self, cell]]:
constant[
Adds a new segment on a cell.
:param cell: (int) Cell index
:returns: (int) New segment index
]
variable[cellData] assign[=] call[name[self]._cells][name[cell]]
if compare[call[name[len], parameter[name[self]._freeFlatIdxs]] greater[>] constant[0]] begin[:]
variable[flatIdx] assign[=] call[name[self]._freeFlatIdxs.pop, parameter[]]
variable[ordinal] assign[=] name[self]._nextSegmentOrdinal
<ast.AugAssign object at 0x7da18f09e950>
variable[segment] assign[=] call[name[Segment], parameter[name[cell], name[flatIdx], name[ordinal]]]
call[name[cellData]._segments.append, parameter[name[segment]]]
call[name[self]._segmentForFlatIdx][name[flatIdx]] assign[=] name[segment]
return[name[segment]] | keyword[def] identifier[createSegment] ( identifier[self] , identifier[cell] ):
literal[string]
identifier[cellData] = identifier[self] . identifier[_cells] [ identifier[cell] ]
keyword[if] identifier[len] ( identifier[self] . identifier[_freeFlatIdxs] )> literal[int] :
identifier[flatIdx] = identifier[self] . identifier[_freeFlatIdxs] . identifier[pop] ()
keyword[else] :
identifier[flatIdx] = identifier[self] . identifier[_nextFlatIdx]
identifier[self] . identifier[_segmentForFlatIdx] . identifier[append] ( keyword[None] )
identifier[self] . identifier[_nextFlatIdx] += literal[int]
identifier[ordinal] = identifier[self] . identifier[_nextSegmentOrdinal]
identifier[self] . identifier[_nextSegmentOrdinal] += literal[int]
identifier[segment] = identifier[Segment] ( identifier[cell] , identifier[flatIdx] , identifier[ordinal] )
identifier[cellData] . identifier[_segments] . identifier[append] ( identifier[segment] )
identifier[self] . identifier[_segmentForFlatIdx] [ identifier[flatIdx] ]= identifier[segment]
keyword[return] identifier[segment] | def createSegment(self, cell):
"""
Adds a new segment on a cell.
:param cell: (int) Cell index
:returns: (int) New segment index
"""
cellData = self._cells[cell]
if len(self._freeFlatIdxs) > 0:
flatIdx = self._freeFlatIdxs.pop() # depends on [control=['if'], data=[]]
else:
flatIdx = self._nextFlatIdx
self._segmentForFlatIdx.append(None)
self._nextFlatIdx += 1
ordinal = self._nextSegmentOrdinal
self._nextSegmentOrdinal += 1
segment = Segment(cell, flatIdx, ordinal)
cellData._segments.append(segment)
self._segmentForFlatIdx[flatIdx] = segment
return segment |
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ''
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = self.parent.command_path + ' ' + rv
return rv.lstrip() | def function[command_path, parameter[self]]:
constant[The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
]
variable[rv] assign[=] constant[]
if compare[name[self].info_name is_not constant[None]] begin[:]
variable[rv] assign[=] name[self].info_name
if compare[name[self].parent is_not constant[None]] begin[:]
variable[rv] assign[=] binary_operation[binary_operation[name[self].parent.command_path + constant[ ]] + name[rv]]
return[call[name[rv].lstrip, parameter[]]] | keyword[def] identifier[command_path] ( identifier[self] ):
literal[string]
identifier[rv] = literal[string]
keyword[if] identifier[self] . identifier[info_name] keyword[is] keyword[not] keyword[None] :
identifier[rv] = identifier[self] . identifier[info_name]
keyword[if] identifier[self] . identifier[parent] keyword[is] keyword[not] keyword[None] :
identifier[rv] = identifier[self] . identifier[parent] . identifier[command_path] + literal[string] + identifier[rv]
keyword[return] identifier[rv] . identifier[lstrip] () | def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ''
if self.info_name is not None:
rv = self.info_name # depends on [control=['if'], data=[]]
if self.parent is not None:
rv = self.parent.command_path + ' ' + rv # depends on [control=['if'], data=[]]
return rv.lstrip() |
def aggplot(df, projection=None,
hue=None,
by=None,
geometry=None,
nmax=None, nmin=None, nsig=0,
agg=np.mean,
cmap='viridis', vmin=None, vmax=None,
legend=True, legend_kwargs=None,
extent=None,
figsize=(8, 6), ax=None,
**kwargs):
"""
Self-aggregating quadtree plot.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
hue : None, Series, GeoSeries, iterable, or str
Applies a colormap to the output shapes. Required.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
by : iterable or str, optional
If specified, this data grouping will be used to aggregate points into `convex hulls
<https://en.wikipedia.org/wiki/Convex_hull>`_ or, if ``geometry`` is also specified, into polygons. If left
unspecified the data will be aggregated using a `quadtree <https://en.wikipedia.org/wiki/Quadtree>`_.
geometry : GeoDataFrame or GeoSeries, optional
A list of polygons to be used for spatial aggregation. Optional. See ``by``.
nmax : int or None, optional
Ignored if not plotting a quadtree. Otherwise, controls the maximum number of observations in a quadrangle.
If left unspecified, there is no maximum size.
nmin : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle.
If left unspecified, there is no minimum size.
nsig : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle
deemed significant. Insignificant quadrangles are removed from the plot. Defaults to 0 (empty patches).
agg : function, optional
The aggregation func used for the colormap. Defaults to ``np.mean``.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum.
legend : boolean, optional
Whether or not to include a legend.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
Returns
-------
``AxesSubplot`` or ``GeoAxesSubplot``
The plot axis
Examples
--------
This plot type accepts any geometry, including mixtures of polygons and points, averages the value of a certain
data parameter at their centroids, and plots the result, using a colormap is the visual variable.
For the purposes of comparison, this library's ``choropleth`` function takes some sort of data as input,
polygons as geospatial context, and combines themselves into a colorful map. This is useful if, for example,
you have data on the amount of crimes committed per neighborhood, and you want to plot that.
But suppose your original dataset came in terms of individual observations - instead of "n collisions happened
in this neighborhood", you have "one collision occured at this specific coordinate at this specific date".
This is obviously more useful data - it can be made to do more things - but in order to generate the same map,
you will first have to do all of the work of geolocating your points to neighborhoods (not trivial),
then aggregating them (by, in this case, taking a count).
``aggplot`` handles this work for you. It takes input in the form of observations, and outputs as useful as
possible a visualization of their "regional" statistics. What a "region" corresponds to depends on how much
geospatial information you can provide.
If you can't provide *any* geospatial context, ``aggplot`` will output what's known as a quadtree: it will break
your data down into recursive squares, and use them to aggregate the data. This is a very experimental format,
is very fiddly to make, and has not yet been optimized for speed; but it provides a useful baseline which
requires no additional work and can be used to expose interesting geospatial correlations right away. And,
if you have enough observations, it can be `a pretty good approximation
<../figures/aggplot/aggplot-initial.png>`_ (collisions in New York City pictured).
Our first few examples are of just such figures. A simple ``aggplot`` quadtree can be generated with just a
dataset, a data column of interest, and, optionally, a projection.
.. code-block:: python
import geoplot as gplt
import geoplot.crs as gcrs
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='LATDEP')
.. image:: ../figures/aggplot/aggplot-initial.png
To get the best output, you often need to tweak the ``nmin`` and ``nmax`` parameters, controlling the minimum and
maximum number of observations per box, respectively, yourself. In this case we'll also choose a different
`matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_, using the ``cmap``
parameter.
``aggplot`` will satisfy the ``nmax`` parameter before trying to satisfy ``nmin``, so you may result in spaces
without observations, or ones lacking a statistically significant number of observations. This is necessary in
order to break up "spaces" that the algorithm would otherwise end on. You can control the maximum number of
observations in the blank spaces using the ``nsig`` parameter.
.. code-block:: python
gplt.aggplot(collisions, nmin=20, nmax=500, nsig=5, projection=gcrs.PlateCarree(), hue='LATDEP', cmap='Reds')
.. image:: ../figures/aggplot/aggplot-quadtree-tuned.png
You'll have to play around with these parameters to get the clearest picture.
Usually, however, observations with a geospatial component will be provided with some form of spatial
categorization. In the case of our collisions example, this comes in the form of a postal zip code. With the
simple addition of this data column via the ``by`` parameter, our output changes radically, taking advantage of
the additional context we now have to sort and aggregate our observations by (hopefully) geospatially
meaningful, if still crude, grouped convex hulls.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH')
.. image:: ../figures/aggplot/aggplot-hulls.png
Finally, suppose you actually know exactly the geometries that you would like to aggregate by. Provide these in
the form of a ``geopandas`` ``GeoSeries``, one whose index matches the values in your ``by`` column (so
``BROOKLYN`` matches ``BROOKLYN`` for example), to the ``geometry`` parameter. Your output will now be an
ordinary choropleth.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH', geometry=boroughs)
.. image:: ../figures/aggplot/aggplot-by.png
Observations will be aggregated by average, by default. In our example case, our plot shows that accidents in
Manhattan tend to result in significantly fewer injuries than accidents occuring in other boroughs. Specify an
alternative aggregation using the ``agg`` parameter.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len)
.. image:: ../figures/aggplot/aggplot-agg.png
``legend`` toggles the legend. Additional keyword arguments for styling the `colorbar
<http://matplotlib.org/api/colorbar_api.html>`_ legend are passed using ``legend_kwargs``. Other additional keyword
arguments are passed to the underlying ``matplotlib`` `Polygon
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_ instances.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len, linewidth=0,
legend_kwargs={'orientation': 'horizontal'})
.. image:: ../figures/aggplot/aggplot-legend-kwargs.png
"""
fig = _init_figure(ax, figsize)
# Set up projection.
if projection:
projection = projection.load(df, {
'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])),
'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))
})
if not ax:
ax = plt.subplot(111, projection=projection)
else:
if not ax:
ax = plt.gca()
# Clean up patches.
_lay_out_axes(ax, projection)
# Immediately return if input geometry is empty.
if len(df.geometry) == 0:
return ax
# Up-convert input to a GeoDataFrame (necessary for quadtree comprehension).
df = gpd.GeoDataFrame(df, geometry=df.geometry)
# Validate hue.
if not isinstance(hue, str):
hue_col = hash(str(hue))
df[hue_col] = _validate_hue(df, hue)
else:
hue_col = hue
if geometry is not None and by is None:
raise NotImplementedError("Aggregation by geometry alone is not currently implemented and unlikely to be "
"implemented in the future - it is likely out-of-scope here due to the algorithmic "
"complexity involved.")
# The user wants us to classify our data geometries by their location within the passed world geometries
# ("sectors"), aggregate a statistic based on that, and return a plot. Unfortunately this seems to be too
# hard for the moment. Two reasons:
# 1. The Shapely API for doing so is just about as consistent as can be, but still a little bit inconsistent.
# In particular, it is not obvious what to do with invalid and self-intersecting geometric components passed
# to the algorithm.
# 2. Point-in-polygon and, worse, polygon-in-polygon algorithms are extremely slow, to the point that almost
# any optimizations that the user can make by doing classification "by hand" is worth it.
# There should perhaps be a separate library or ``geopandas`` function for doing this.
elif by is not None:
# Side-convert geometry for ease of use.
if geometry is not None:
# Downconvert GeoDataFrame to GeoSeries objects.
if isinstance(geometry, gpd.GeoDataFrame):
geometry = geometry.geometry
sectors = []
values = []
# The groupby operation does not take generators as inputs, so we duck test and convert them to lists.
if not isinstance(by, str):
try: len(by)
except TypeError: by = list(by)
for label, p in df.groupby(by):
if geometry is not None:
try:
sector = geometry.loc[label]
except KeyError:
raise KeyError("Data contains a '{0}' label which lacks a corresponding value in the provided "
"geometry.".format(label))
else:
xs = [c.x for c in p.geometry]
ys = [c.y for c in p.geometry]
coords = list(zip(xs, ys))
sector = shapely.geometry.MultiPoint(coords).convex_hull
sectors.append(sector)
values.append(agg(p[hue_col]))
# Because we have to set the extent ourselves, we have to do some bookkeeping to keep track of the
# extrema of the hulls we are generating.
bxmin = bxmax = bymin = bymax = None
if not extent:
for sector in sectors:
if not isinstance(sector.envelope, shapely.geometry.Point):
hxmin, hxmax, hymin, hymax = _get_envelopes_min_maxes(pd.Series(sector.envelope.exterior))
if not bxmin or hxmin < bxmin:
bxmin = hxmin
if not bxmax or hxmax > bxmax:
bxmax = hxmax
if not bymin or hymin < bymin:
bymin = hymin
if not bymax or hymax > bymax:
bymax = hymax
# By often creates overlapping polygons, to keep smaller polygons from being hidden by possibly overlapping
# larger ones we have to bring the smaller ones in front in the plotting order. This bit of code does that.
sorted_indices = np.array(sorted(enumerate(gpd.GeoSeries(sectors).area.values),
key=lambda tup: tup[1])[::-1])[:, 0].astype(int)
sectors = np.array(sectors)[sorted_indices]
values = np.array(values)[sorted_indices]
# Generate a colormap.
cmap = _continuous_colormap(values, cmap, vmin, vmax)
colors = [cmap.to_rgba(value) for value in values]
# Draw.
for sector, color in zip(sectors, colors):
if projection:
features = ShapelyFeature([sector], ccrs.PlateCarree())
ax.add_feature(features, facecolor=color, **kwargs)
else:
try: # Duck test for MultiPolygon.
for subgeom in sector:
feature = descartes.PolygonPatch(subgeom, facecolor=color, **kwargs)
ax.add_patch(feature)
except (TypeError, AssertionError): # Shapely Polygon.
feature = descartes.PolygonPatch(sector, facecolor=color, **kwargs)
ax.add_patch(feature)
# Set extent.
extrema = (bxmin, bxmax, bymin, bymax)
_set_extent(ax, projection, extent, extrema)
else:
# Set reasonable defaults for the n-params if appropriate.
nmax = nmax if nmax else len(df)
nmin = nmin if nmin else np.max([1, np.min([20, int(0.05 * len(df))])])
# Generate a quadtree.
quad = QuadTree(df)
bxmin, bxmax, bymin, bymax = quad.bounds
# Assert that nmin is not smaller than the largest number of co-located observations (otherwise the algorithm
# would continue running until the recursion limit).
max_coloc = np.max([len(l) for l in quad.agg.values()])
if max_coloc > nmin:
raise ValueError("nmin is set to {0}, but there is a coordinate containing {1} observations in the "
"dataset.".format(nmin, max_coloc))
# Run the partitions.
# partitions = quad.partition(nmin, nmax)
partitions = list(quad.partition(nmin, nmax))
# Generate colormap.
values = [agg(p.data[hue_col]) for p in partitions if p.n > nsig]
cmap = _continuous_colormap(values, cmap, vmin, vmax)
for p in partitions:
xmin, xmax, ymin, ymax = p.bounds
rect = shapely.geometry.Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)])
color = cmap.to_rgba(agg(p.data[hue_col])) if p.n > nsig else "white"
if projection:
feature = ShapelyFeature([rect], ccrs.PlateCarree())
ax.add_feature(feature, facecolor=color, **kwargs)
else:
feature = descartes.PolygonPatch(rect, facecolor=color, **kwargs)
ax.add_patch(feature)
# Set extent.
extrema = (bxmin, bxmax, bymin, bymax)
_set_extent(ax, projection, extent, extrema)
# Append a legend, if appropriate.
if legend:
_paint_colorbar_legend(ax, values, cmap, legend_kwargs)
return ax | def function[aggplot, parameter[df, projection, hue, by, geometry, nmax, nmin, nsig, agg, cmap, vmin, vmax, legend, legend_kwargs, extent, figsize, ax]]:
constant[
Self-aggregating quadtree plot.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
hue : None, Series, GeoSeries, iterable, or str
Applies a colormap to the output shapes. Required.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
by : iterable or str, optional
If specified, this data grouping will be used to aggregate points into `convex hulls
<https://en.wikipedia.org/wiki/Convex_hull>`_ or, if ``geometry`` is also specified, into polygons. If left
unspecified the data will be aggregated using a `quadtree <https://en.wikipedia.org/wiki/Quadtree>`_.
geometry : GeoDataFrame or GeoSeries, optional
A list of polygons to be used for spatial aggregation. Optional. See ``by``.
nmax : int or None, optional
Ignored if not plotting a quadtree. Otherwise, controls the maximum number of observations in a quadrangle.
If left unspecified, there is no maximum size.
nmin : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle.
If left unspecified, there is no minimum size.
nsig : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle
deemed significant. Insignificant quadrangles are removed from the plot. Defaults to 0 (empty patches).
agg : function, optional
The aggregation func used for the colormap. Defaults to ``np.mean``.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum.
legend : boolean, optional
Whether or not to include a legend.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
Returns
-------
``AxesSubplot`` or ``GeoAxesSubplot``
The plot axis
Examples
--------
This plot type accepts any geometry, including mixtures of polygons and points, averages the value of a certain
data parameter at their centroids, and plots the result, using a colormap is the visual variable.
For the purposes of comparison, this library's ``choropleth`` function takes some sort of data as input,
polygons as geospatial context, and combines themselves into a colorful map. This is useful if, for example,
you have data on the amount of crimes committed per neighborhood, and you want to plot that.
But suppose your original dataset came in terms of individual observations - instead of "n collisions happened
in this neighborhood", you have "one collision occured at this specific coordinate at this specific date".
This is obviously more useful data - it can be made to do more things - but in order to generate the same map,
you will first have to do all of the work of geolocating your points to neighborhoods (not trivial),
then aggregating them (by, in this case, taking a count).
``aggplot`` handles this work for you. It takes input in the form of observations, and outputs as useful as
possible a visualization of their "regional" statistics. What a "region" corresponds to depends on how much
geospatial information you can provide.
If you can't provide *any* geospatial context, ``aggplot`` will output what's known as a quadtree: it will break
your data down into recursive squares, and use them to aggregate the data. This is a very experimental format,
is very fiddly to make, and has not yet been optimized for speed; but it provides a useful baseline which
requires no additional work and can be used to expose interesting geospatial correlations right away. And,
if you have enough observations, it can be `a pretty good approximation
<../figures/aggplot/aggplot-initial.png>`_ (collisions in New York City pictured).
Our first few examples are of just such figures. A simple ``aggplot`` quadtree can be generated with just a
dataset, a data column of interest, and, optionally, a projection.
.. code-block:: python
import geoplot as gplt
import geoplot.crs as gcrs
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='LATDEP')
.. image:: ../figures/aggplot/aggplot-initial.png
To get the best output, you often need to tweak the ``nmin`` and ``nmax`` parameters, controlling the minimum and
maximum number of observations per box, respectively, yourself. In this case we'll also choose a different
`matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_, using the ``cmap``
parameter.
``aggplot`` will satisfy the ``nmax`` parameter before trying to satisfy ``nmin``, so you may result in spaces
without observations, or ones lacking a statistically significant number of observations. This is necessary in
order to break up "spaces" that the algorithm would otherwise end on. You can control the maximum number of
observations in the blank spaces using the ``nsig`` parameter.
.. code-block:: python
gplt.aggplot(collisions, nmin=20, nmax=500, nsig=5, projection=gcrs.PlateCarree(), hue='LATDEP', cmap='Reds')
.. image:: ../figures/aggplot/aggplot-quadtree-tuned.png
You'll have to play around with these parameters to get the clearest picture.
Usually, however, observations with a geospatial component will be provided with some form of spatial
categorization. In the case of our collisions example, this comes in the form of a postal zip code. With the
simple addition of this data column via the ``by`` parameter, our output changes radically, taking advantage of
the additional context we now have to sort and aggregate our observations by (hopefully) geospatially
meaningful, if still crude, grouped convex hulls.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH')
.. image:: ../figures/aggplot/aggplot-hulls.png
Finally, suppose you actually know exactly the geometries that you would like to aggregate by. Provide these in
the form of a ``geopandas`` ``GeoSeries``, one whose index matches the values in your ``by`` column (so
``BROOKLYN`` matches ``BROOKLYN`` for example), to the ``geometry`` parameter. Your output will now be an
ordinary choropleth.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH', geometry=boroughs)
.. image:: ../figures/aggplot/aggplot-by.png
Observations will be aggregated by average, by default. In our example case, our plot shows that accidents in
Manhattan tend to result in significantly fewer injuries than accidents occurring in other boroughs. Specify an
alternative aggregation using the ``agg`` parameter.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len)
.. image:: ../figures/aggplot/aggplot-agg.png
``legend`` toggles the legend. Additional keyword arguments for styling the `colorbar
<http://matplotlib.org/api/colorbar_api.html>`_ legend are passed using ``legend_kwargs``. Other additional keyword
arguments are passed to the underlying ``matplotlib`` `Polygon
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_ instances.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len, linewidth=0,
legend_kwargs={'orientation': 'horizontal'})
.. image:: ../figures/aggplot/aggplot-legend-kwargs.png
]
variable[fig] assign[=] call[name[_init_figure], parameter[name[ax], name[figsize]]]
if name[projection] begin[:]
variable[projection] assign[=] call[name[projection].load, parameter[name[df], dictionary[[<ast.Constant object at 0x7da18f00f790>, <ast.Constant object at 0x7da18f00fb80>], [<ast.Lambda object at 0x7da18f00eec0>, <ast.Lambda object at 0x7da18f00c7f0>]]]]
if <ast.UnaryOp object at 0x7da18f00c460> begin[:]
variable[ax] assign[=] call[name[plt].subplot, parameter[constant[111]]]
call[name[_lay_out_axes], parameter[name[ax], name[projection]]]
if compare[call[name[len], parameter[name[df].geometry]] equal[==] constant[0]] begin[:]
return[name[ax]]
variable[df] assign[=] call[name[gpd].GeoDataFrame, parameter[name[df]]]
if <ast.UnaryOp object at 0x7da18f00cd30> begin[:]
variable[hue_col] assign[=] call[name[hash], parameter[call[name[str], parameter[name[hue]]]]]
call[name[df]][name[hue_col]] assign[=] call[name[_validate_hue], parameter[name[df], name[hue]]]
if <ast.BoolOp object at 0x7da18f00d8d0> begin[:]
<ast.Raise object at 0x7da18f00d840>
if name[legend] begin[:]
call[name[_paint_colorbar_legend], parameter[name[ax], name[values], name[cmap], name[legend_kwargs]]]
return[name[ax]] | keyword[def] identifier[aggplot] ( identifier[df] , identifier[projection] = keyword[None] ,
identifier[hue] = keyword[None] ,
identifier[by] = keyword[None] ,
identifier[geometry] = keyword[None] ,
identifier[nmax] = keyword[None] , identifier[nmin] = keyword[None] , identifier[nsig] = literal[int] ,
identifier[agg] = identifier[np] . identifier[mean] ,
identifier[cmap] = literal[string] , identifier[vmin] = keyword[None] , identifier[vmax] = keyword[None] ,
identifier[legend] = keyword[True] , identifier[legend_kwargs] = keyword[None] ,
identifier[extent] = keyword[None] ,
identifier[figsize] =( literal[int] , literal[int] ), identifier[ax] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[fig] = identifier[_init_figure] ( identifier[ax] , identifier[figsize] )
keyword[if] identifier[projection] :
identifier[projection] = identifier[projection] . identifier[load] ( identifier[df] ,{
literal[string] : keyword[lambda] identifier[df] : identifier[np] . identifier[mean] ( identifier[np] . identifier[array] ([ identifier[p] . identifier[x] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ])),
literal[string] : keyword[lambda] identifier[df] : identifier[np] . identifier[mean] ( identifier[np] . identifier[array] ([ identifier[p] . identifier[y] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ]))
})
keyword[if] keyword[not] identifier[ax] :
identifier[ax] = identifier[plt] . identifier[subplot] ( literal[int] , identifier[projection] = identifier[projection] )
keyword[else] :
keyword[if] keyword[not] identifier[ax] :
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[_lay_out_axes] ( identifier[ax] , identifier[projection] )
keyword[if] identifier[len] ( identifier[df] . identifier[geometry] )== literal[int] :
keyword[return] identifier[ax]
identifier[df] = identifier[gpd] . identifier[GeoDataFrame] ( identifier[df] , identifier[geometry] = identifier[df] . identifier[geometry] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[hue] , identifier[str] ):
identifier[hue_col] = identifier[hash] ( identifier[str] ( identifier[hue] ))
identifier[df] [ identifier[hue_col] ]= identifier[_validate_hue] ( identifier[df] , identifier[hue] )
keyword[else] :
identifier[hue_col] = identifier[hue]
keyword[if] identifier[geometry] keyword[is] keyword[not] keyword[None] keyword[and] identifier[by] keyword[is] keyword[None] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string]
literal[string] )
keyword[elif] identifier[by] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[geometry] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[geometry] , identifier[gpd] . identifier[GeoDataFrame] ):
identifier[geometry] = identifier[geometry] . identifier[geometry]
identifier[sectors] =[]
identifier[values] =[]
keyword[if] keyword[not] identifier[isinstance] ( identifier[by] , identifier[str] ):
keyword[try] : identifier[len] ( identifier[by] )
keyword[except] identifier[TypeError] : identifier[by] = identifier[list] ( identifier[by] )
keyword[for] identifier[label] , identifier[p] keyword[in] identifier[df] . identifier[groupby] ( identifier[by] ):
keyword[if] identifier[geometry] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[sector] = identifier[geometry] . identifier[loc] [ identifier[label] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string]
literal[string] . identifier[format] ( identifier[label] ))
keyword[else] :
identifier[xs] =[ identifier[c] . identifier[x] keyword[for] identifier[c] keyword[in] identifier[p] . identifier[geometry] ]
identifier[ys] =[ identifier[c] . identifier[y] keyword[for] identifier[c] keyword[in] identifier[p] . identifier[geometry] ]
identifier[coords] = identifier[list] ( identifier[zip] ( identifier[xs] , identifier[ys] ))
identifier[sector] = identifier[shapely] . identifier[geometry] . identifier[MultiPoint] ( identifier[coords] ). identifier[convex_hull]
identifier[sectors] . identifier[append] ( identifier[sector] )
identifier[values] . identifier[append] ( identifier[agg] ( identifier[p] [ identifier[hue_col] ]))
identifier[bxmin] = identifier[bxmax] = identifier[bymin] = identifier[bymax] = keyword[None]
keyword[if] keyword[not] identifier[extent] :
keyword[for] identifier[sector] keyword[in] identifier[sectors] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[sector] . identifier[envelope] , identifier[shapely] . identifier[geometry] . identifier[Point] ):
identifier[hxmin] , identifier[hxmax] , identifier[hymin] , identifier[hymax] = identifier[_get_envelopes_min_maxes] ( identifier[pd] . identifier[Series] ( identifier[sector] . identifier[envelope] . identifier[exterior] ))
keyword[if] keyword[not] identifier[bxmin] keyword[or] identifier[hxmin] < identifier[bxmin] :
identifier[bxmin] = identifier[hxmin]
keyword[if] keyword[not] identifier[bxmax] keyword[or] identifier[hxmax] > identifier[bxmax] :
identifier[bxmax] = identifier[hxmax]
keyword[if] keyword[not] identifier[bymin] keyword[or] identifier[hymin] < identifier[bymin] :
identifier[bymin] = identifier[hymin]
keyword[if] keyword[not] identifier[bymax] keyword[or] identifier[hymax] > identifier[bymax] :
identifier[bymax] = identifier[hymax]
identifier[sorted_indices] = identifier[np] . identifier[array] ( identifier[sorted] ( identifier[enumerate] ( identifier[gpd] . identifier[GeoSeries] ( identifier[sectors] ). identifier[area] . identifier[values] ),
identifier[key] = keyword[lambda] identifier[tup] : identifier[tup] [ literal[int] ])[::- literal[int] ])[:, literal[int] ]. identifier[astype] ( identifier[int] )
identifier[sectors] = identifier[np] . identifier[array] ( identifier[sectors] )[ identifier[sorted_indices] ]
identifier[values] = identifier[np] . identifier[array] ( identifier[values] )[ identifier[sorted_indices] ]
identifier[cmap] = identifier[_continuous_colormap] ( identifier[values] , identifier[cmap] , identifier[vmin] , identifier[vmax] )
identifier[colors] =[ identifier[cmap] . identifier[to_rgba] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[values] ]
keyword[for] identifier[sector] , identifier[color] keyword[in] identifier[zip] ( identifier[sectors] , identifier[colors] ):
keyword[if] identifier[projection] :
identifier[features] = identifier[ShapelyFeature] ([ identifier[sector] ], identifier[ccrs] . identifier[PlateCarree] ())
identifier[ax] . identifier[add_feature] ( identifier[features] , identifier[facecolor] = identifier[color] ,** identifier[kwargs] )
keyword[else] :
keyword[try] :
keyword[for] identifier[subgeom] keyword[in] identifier[sector] :
identifier[feature] = identifier[descartes] . identifier[PolygonPatch] ( identifier[subgeom] , identifier[facecolor] = identifier[color] ,** identifier[kwargs] )
identifier[ax] . identifier[add_patch] ( identifier[feature] )
keyword[except] ( identifier[TypeError] , identifier[AssertionError] ):
identifier[feature] = identifier[descartes] . identifier[PolygonPatch] ( identifier[sector] , identifier[facecolor] = identifier[color] ,** identifier[kwargs] )
identifier[ax] . identifier[add_patch] ( identifier[feature] )
identifier[extrema] =( identifier[bxmin] , identifier[bxmax] , identifier[bymin] , identifier[bymax] )
identifier[_set_extent] ( identifier[ax] , identifier[projection] , identifier[extent] , identifier[extrema] )
keyword[else] :
identifier[nmax] = identifier[nmax] keyword[if] identifier[nmax] keyword[else] identifier[len] ( identifier[df] )
identifier[nmin] = identifier[nmin] keyword[if] identifier[nmin] keyword[else] identifier[np] . identifier[max] ([ literal[int] , identifier[np] . identifier[min] ([ literal[int] , identifier[int] ( literal[int] * identifier[len] ( identifier[df] ))])])
identifier[quad] = identifier[QuadTree] ( identifier[df] )
identifier[bxmin] , identifier[bxmax] , identifier[bymin] , identifier[bymax] = identifier[quad] . identifier[bounds]
identifier[max_coloc] = identifier[np] . identifier[max] ([ identifier[len] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[quad] . identifier[agg] . identifier[values] ()])
keyword[if] identifier[max_coloc] > identifier[nmin] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[nmin] , identifier[max_coloc] ))
identifier[partitions] = identifier[list] ( identifier[quad] . identifier[partition] ( identifier[nmin] , identifier[nmax] ))
identifier[values] =[ identifier[agg] ( identifier[p] . identifier[data] [ identifier[hue_col] ]) keyword[for] identifier[p] keyword[in] identifier[partitions] keyword[if] identifier[p] . identifier[n] > identifier[nsig] ]
identifier[cmap] = identifier[_continuous_colormap] ( identifier[values] , identifier[cmap] , identifier[vmin] , identifier[vmax] )
keyword[for] identifier[p] keyword[in] identifier[partitions] :
identifier[xmin] , identifier[xmax] , identifier[ymin] , identifier[ymax] = identifier[p] . identifier[bounds]
identifier[rect] = identifier[shapely] . identifier[geometry] . identifier[Polygon] ([( identifier[xmin] , identifier[ymin] ),( identifier[xmin] , identifier[ymax] ),( identifier[xmax] , identifier[ymax] ),( identifier[xmax] , identifier[ymin] )])
identifier[color] = identifier[cmap] . identifier[to_rgba] ( identifier[agg] ( identifier[p] . identifier[data] [ identifier[hue_col] ])) keyword[if] identifier[p] . identifier[n] > identifier[nsig] keyword[else] literal[string]
keyword[if] identifier[projection] :
identifier[feature] = identifier[ShapelyFeature] ([ identifier[rect] ], identifier[ccrs] . identifier[PlateCarree] ())
identifier[ax] . identifier[add_feature] ( identifier[feature] , identifier[facecolor] = identifier[color] ,** identifier[kwargs] )
keyword[else] :
identifier[feature] = identifier[descartes] . identifier[PolygonPatch] ( identifier[rect] , identifier[facecolor] = identifier[color] ,** identifier[kwargs] )
identifier[ax] . identifier[add_patch] ( identifier[feature] )
identifier[extrema] =( identifier[bxmin] , identifier[bxmax] , identifier[bymin] , identifier[bymax] )
identifier[_set_extent] ( identifier[ax] , identifier[projection] , identifier[extent] , identifier[extrema] )
keyword[if] identifier[legend] :
identifier[_paint_colorbar_legend] ( identifier[ax] , identifier[values] , identifier[cmap] , identifier[legend_kwargs] )
keyword[return] identifier[ax] | def aggplot(df, projection=None, hue=None, by=None, geometry=None, nmax=None, nmin=None, nsig=0, agg=np.mean, cmap='viridis', vmin=None, vmax=None, legend=True, legend_kwargs=None, extent=None, figsize=(8, 6), ax=None, **kwargs):
"""
Self-aggregating quadtree plot.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
hue : None, Series, GeoSeries, iterable, or str
Applies a colormap to the output shapes. Required.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
by : iterable or str, optional
If specified, this data grouping will be used to aggregate points into `convex hulls
<https://en.wikipedia.org/wiki/Convex_hull>`_ or, if ``geometry`` is also specified, into polygons. If left
unspecified the data will be aggregated using a `quadtree <https://en.wikipedia.org/wiki/Quadtree>`_.
geometry : GeoDataFrame or GeoSeries, optional
A list of polygons to be used for spatial aggregation. Optional. See ``by``.
nmax : int or None, optional
Ignored if not plotting a quadtree. Otherwise, controls the maximum number of observations in a quadrangle.
If left unspecified, there is no maximum size.
nmin : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle.
If left unspecified, there is no minimum size.
nsig : int, optional
Ignored if not plotting a quadtree. Otherwise, controls the minimum number of observations in a quadrangle
deemed significant. Insignificant quadrangles are removed from the plot. Defaults to 0 (empty patches).
agg : function, optional
The aggregation func used for the colormap. Defaults to ``np.mean``.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum.
legend : boolean, optional
Whether or not to include a legend.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
Returns
-------
``AxesSubplot`` or ``GeoAxesSubplot``
The plot axis
Examples
--------
This plot type accepts any geometry, including mixtures of polygons and points, averages the value of a certain
data parameter at their centroids, and plots the result, using a colormap is the visual variable.
For the purposes of comparison, this library's ``choropleth`` function takes some sort of data as input,
polygons as geospatial context, and combines themselves into a colorful map. This is useful if, for example,
you have data on the amount of crimes committed per neighborhood, and you want to plot that.
But suppose your original dataset came in terms of individual observations - instead of "n collisions happened
in this neighborhood", you have "one collision occured at this specific coordinate at this specific date".
This is obviously more useful data - it can be made to do more things - but in order to generate the same map,
you will first have to do all of the work of geolocating your points to neighborhoods (not trivial),
then aggregating them (by, in this case, taking a count).
``aggplot`` handles this work for you. It takes input in the form of observations, and outputs as useful as
possible a visualization of their "regional" statistics. What a "region" corresponds to depends on how much
geospatial information you can provide.
If you can't provide *any* geospatial context, ``aggplot`` will output what's known as a quadtree: it will break
your data down into recursive squares, and use them to aggregate the data. This is a very experimental format,
is very fiddly to make, and has not yet been optimized for speed; but it provides a useful baseline which
requires no additional work and can be used to expose interesting geospatial correlations right away. And,
if you have enough observations, it can be `a pretty good approximation
<../figures/aggplot/aggplot-initial.png>`_ (collisions in New York City pictured).
Our first few examples are of just such figures. A simple ``aggplot`` quadtree can be generated with just a
dataset, a data column of interest, and, optionally, a projection.
.. code-block:: python
import geoplot as gplt
import geoplot.crs as gcrs
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='LATDEP')
.. image:: ../figures/aggplot/aggplot-initial.png
To get the best output, you often need to tweak the ``nmin`` and ``nmax`` parameters, controlling the minimum and
maximum number of observations per box, respectively, yourself. In this case we'll also choose a different
`matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_, using the ``cmap``
parameter.
``aggplot`` will satisfy the ``nmax`` parameter before trying to satisfy ``nmin``, so you may result in spaces
without observations, or ones lacking a statistically significant number of observations. This is necessary in
order to break up "spaces" that the algorithm would otherwise end on. You can control the maximum number of
observations in the blank spaces using the ``nsig`` parameter.
.. code-block:: python
gplt.aggplot(collisions, nmin=20, nmax=500, nsig=5, projection=gcrs.PlateCarree(), hue='LATDEP', cmap='Reds')
.. image:: ../figures/aggplot/aggplot-quadtree-tuned.png
You'll have to play around with these parameters to get the clearest picture.
Usually, however, observations with a geospatial component will be provided with some form of spatial
categorization. In the case of our collisions example, this comes in the form of a postal zip code. With the
simple addition of this data column via the ``by`` parameter, our output changes radically, taking advantage of
the additional context we now have to sort and aggregate our observations by (hopefully) geospatially
meaningful, if still crude, grouped convex hulls.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH')
.. image:: ../figures/aggplot/aggplot-hulls.png
Finally, suppose you actually know exactly the geometries that you would like to aggregate by. Provide these in
the form of a ``geopandas`` ``GeoSeries``, one whose index matches the values in your ``by`` column (so
``BROOKLYN`` matches ``BROOKLYN`` for example), to the ``geometry`` parameter. Your output will now be an
ordinary choropleth.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
by='BOROUGH', geometry=boroughs)
.. image:: ../figures/aggplot/aggplot-by.png
Observations will be aggregated by average, by default. In our example case, our plot shows that accidents in
Manhattan tend to result in significantly fewer injuries than accidents occuring in other boroughs. Specify an
alternative aggregation using the ``agg`` parameter.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len)
.. image:: ../figures/aggplot/aggplot-agg.png
``legend`` toggles the legend. Additional keyword arguments for styling the `colorbar
<http://matplotlib.org/api/colorbar_api.html>`_ legend are passed using ``legend_kwargs``. Other additional keyword
arguments are passed to the underlying ``matplotlib`` `Polygon
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_ instances.
.. code-block:: python
gplt.aggplot(collisions, projection=gcrs.PlateCarree(), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
geometry=boroughs_2, by='BOROUGH', agg=len, linewidth=0,
legend_kwargs={'orientation': 'horizontal'})
.. image:: ../figures/aggplot/aggplot-legend-kwargs.png
"""
fig = _init_figure(ax, figsize)
# Set up projection.
if projection:
projection = projection.load(df, {'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])), 'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))})
if not ax:
ax = plt.subplot(111, projection=projection) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not ax:
ax = plt.gca() # depends on [control=['if'], data=[]]
# Clean up patches.
_lay_out_axes(ax, projection)
# Immediately return if input geometry is empty.
if len(df.geometry) == 0:
return ax # depends on [control=['if'], data=[]]
# Up-convert input to a GeoDataFrame (necessary for quadtree comprehension).
df = gpd.GeoDataFrame(df, geometry=df.geometry)
# Validate hue.
if not isinstance(hue, str):
hue_col = hash(str(hue))
df[hue_col] = _validate_hue(df, hue) # depends on [control=['if'], data=[]]
else:
hue_col = hue
if geometry is not None and by is None:
raise NotImplementedError('Aggregation by geometry alone is not currently implemented and unlikely to be implemented in the future - it is likely out-of-scope here due to the algorithmic complexity involved.') # depends on [control=['if'], data=[]]
# The user wants us to classify our data geometries by their location within the passed world geometries
# ("sectors"), aggregate a statistic based on that, and return a plot. Unfortunately this seems to be too
# hard for the moment. Two reasons:
# 1. The Shapely API for doing so is just about as consistent as can be, but still a little bit inconsistent.
# In particular, it is not obvious what to do with invalid and self-intersecting geometric components passed
# to the algorithm.
# 2. Point-in-polygon and, worse, polygon-in-polygon algorithms are extremely slow, to the point that almost
# any optimizations that the user can make by doing classification "by hand" is worth it.
# There should perhaps be a separate library or ``geopandas`` function for doing this.
elif by is not None:
# Side-convert geometry for ease of use.
if geometry is not None:
# Downconvert GeoDataFrame to GeoSeries objects.
if isinstance(geometry, gpd.GeoDataFrame):
geometry = geometry.geometry # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['geometry']]
sectors = []
values = []
# The groupby operation does not take generators as inputs, so we duck test and convert them to lists.
if not isinstance(by, str):
try:
len(by) # depends on [control=['try'], data=[]]
except TypeError:
by = list(by) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
for (label, p) in df.groupby(by):
if geometry is not None:
try:
sector = geometry.loc[label] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError("Data contains a '{0}' label which lacks a corresponding value in the provided geometry.".format(label)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['geometry']]
else:
xs = [c.x for c in p.geometry]
ys = [c.y for c in p.geometry]
coords = list(zip(xs, ys))
sector = shapely.geometry.MultiPoint(coords).convex_hull
sectors.append(sector)
values.append(agg(p[hue_col])) # depends on [control=['for'], data=[]]
# Because we have to set the extent ourselves, we have to do some bookkeeping to keep track of the
# extrema of the hulls we are generating.
bxmin = bxmax = bymin = bymax = None
if not extent:
for sector in sectors:
if not isinstance(sector.envelope, shapely.geometry.Point):
(hxmin, hxmax, hymin, hymax) = _get_envelopes_min_maxes(pd.Series(sector.envelope.exterior))
if not bxmin or hxmin < bxmin:
bxmin = hxmin # depends on [control=['if'], data=[]]
if not bxmax or hxmax > bxmax:
bxmax = hxmax # depends on [control=['if'], data=[]]
if not bymin or hymin < bymin:
bymin = hymin # depends on [control=['if'], data=[]]
if not bymax or hymax > bymax:
bymax = hymax # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sector']] # depends on [control=['if'], data=[]]
# By often creates overlapping polygons, to keep smaller polygons from being hidden by possibly overlapping
# larger ones we have to bring the smaller ones in front in the plotting order. This bit of code does that.
sorted_indices = np.array(sorted(enumerate(gpd.GeoSeries(sectors).area.values), key=lambda tup: tup[1])[::-1])[:, 0].astype(int)
sectors = np.array(sectors)[sorted_indices]
values = np.array(values)[sorted_indices]
# Generate a colormap.
cmap = _continuous_colormap(values, cmap, vmin, vmax)
colors = [cmap.to_rgba(value) for value in values]
# Draw.
for (sector, color) in zip(sectors, colors):
if projection:
features = ShapelyFeature([sector], ccrs.PlateCarree())
ax.add_feature(features, facecolor=color, **kwargs) # depends on [control=['if'], data=[]]
else:
try: # Duck test for MultiPolygon.
for subgeom in sector:
feature = descartes.PolygonPatch(subgeom, facecolor=color, **kwargs)
ax.add_patch(feature) # depends on [control=['for'], data=['subgeom']] # depends on [control=['try'], data=[]]
except (TypeError, AssertionError): # Shapely Polygon.
feature = descartes.PolygonPatch(sector, facecolor=color, **kwargs)
ax.add_patch(feature) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
# Set extent.
extrema = (bxmin, bxmax, bymin, bymax)
_set_extent(ax, projection, extent, extrema) # depends on [control=['if'], data=['by']]
else:
# Set reasonable defaults for the n-params if appropriate.
nmax = nmax if nmax else len(df)
nmin = nmin if nmin else np.max([1, np.min([20, int(0.05 * len(df))])])
# Generate a quadtree.
quad = QuadTree(df)
(bxmin, bxmax, bymin, bymax) = quad.bounds
# Assert that nmin is not smaller than the largest number of co-located observations (otherwise the algorithm
# would continue running until the recursion limit).
max_coloc = np.max([len(l) for l in quad.agg.values()])
if max_coloc > nmin:
raise ValueError('nmin is set to {0}, but there is a coordinate containing {1} observations in the dataset.'.format(nmin, max_coloc)) # depends on [control=['if'], data=['max_coloc', 'nmin']]
# Run the partitions.
# partitions = quad.partition(nmin, nmax)
partitions = list(quad.partition(nmin, nmax))
# Generate colormap.
values = [agg(p.data[hue_col]) for p in partitions if p.n > nsig]
cmap = _continuous_colormap(values, cmap, vmin, vmax)
for p in partitions:
(xmin, xmax, ymin, ymax) = p.bounds
rect = shapely.geometry.Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)])
color = cmap.to_rgba(agg(p.data[hue_col])) if p.n > nsig else 'white'
if projection:
feature = ShapelyFeature([rect], ccrs.PlateCarree())
ax.add_feature(feature, facecolor=color, **kwargs) # depends on [control=['if'], data=[]]
else:
feature = descartes.PolygonPatch(rect, facecolor=color, **kwargs)
ax.add_patch(feature) # depends on [control=['for'], data=['p']]
# Set extent.
extrema = (bxmin, bxmax, bymin, bymax)
_set_extent(ax, projection, extent, extrema)
# Append a legend, if appropriate.
if legend:
_paint_colorbar_legend(ax, values, cmap, legend_kwargs) # depends on [control=['if'], data=[]]
return ax |
def get_direct_band_gap(self):
    """
    Return the direct band gap of this band structure.

    Returns:
        float: the smallest direct gap value found across all k-points,
        or 0.0 for a metallic system (which has no gap by definition).
    """
    # Metals have no band gap, so short-circuit immediately.
    if self.is_metal():
        return 0.0
    # Each entry of the dict holds per-spin gap info; keep the minimum 'value'.
    gap_info = self.get_direct_band_gap_dict()
    smallest_gap = min(entry['value'] for entry in gap_info.values())
    return smallest_gap
constant[
Returns the direct band gap.
Returns:
the value of the direct band gap
]
if call[name[self].is_metal, parameter[]] begin[:]
return[constant[0.0]]
variable[dg] assign[=] call[name[self].get_direct_band_gap_dict, parameter[]]
return[call[name[min], parameter[<ast.GeneratorExp object at 0x7da2041dace0>]]] | keyword[def] identifier[get_direct_band_gap] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_metal] ():
keyword[return] literal[int]
identifier[dg] = identifier[self] . identifier[get_direct_band_gap_dict] ()
keyword[return] identifier[min] ( identifier[v] [ literal[string] ] keyword[for] identifier[v] keyword[in] identifier[dg] . identifier[values] ()) | def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0 # depends on [control=['if'], data=[]]
dg = self.get_direct_band_gap_dict()
return min((v['value'] for v in dg.values())) |
def distance_to_interval(self, start, end):
    """Return the distance between this exon's span and [start, end].

    The distance is the number of positions separating the nearer
    endpoints of the two intervals, or 0 when they overlap.
    """
    # The interval ends before this exon begins.
    if end < self.start:
        return self.start - end
    # The interval begins after this exon ends.
    if self.end < start:
        return start - self.end
    # Otherwise the intervals overlap.
    return 0
constant[
Find the distance between intervals [start1, end1] and [start2, end2].
If the intervals overlap then the distance is 0.
]
if compare[name[self].start greater[>] name[end]] begin[:]
return[binary_operation[name[self].start - name[end]]] | keyword[def] identifier[distance_to_interval] ( identifier[self] , identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[self] . identifier[start] > identifier[end] :
keyword[return] identifier[self] . identifier[start] - identifier[end]
keyword[elif] identifier[self] . identifier[end] < identifier[start] :
keyword[return] identifier[start] - identifier[self] . identifier[end]
keyword[else] :
keyword[return] literal[int] | def distance_to_interval(self, start, end):
"""
Find the distance between intervals [start1, end1] and [start2, end2].
If the intervals overlap then the distance is 0.
"""
if self.start > end:
# interval is before this exon
return self.start - end # depends on [control=['if'], data=['end']]
elif self.end < start:
# exon is before the interval
return start - self.end # depends on [control=['if'], data=['start']]
else:
return 0 |
def _process_config_item(item, dirname):
    """
    Process one item from the configuration file, which contains multiple items
    saved as dictionary.

    Reads additional data referenced by the config and performs some
    replacements - for example, if a URL is given, the HTML is downloaded
    from it.

    Args:
        item (dict): Item, which will be processed.
        dirname (str): Directory of the configuration file; relative HTML
            paths are resolved against it.

    Note:
        Returned data format::
            {
                "link": "link to html page/file",
                "html": "html code from file/url",
                "vars": {
                    "varname": {
                        "data": "matching data..",
                        ...
                    }
                }
            }

    Returns:
        dict: Dictionary in format showed above.
    """
    # Work on a copy so the caller's configuration stays untouched.
    item = copy.deepcopy(item)

    html = item.get("html", None)
    if not html:
        raise UserWarning("Can't find HTML source for item:\n%s" % str(item))

    # Absolute URLs are used verbatim; bare filenames are resolved
    # relative to the configuration file's directory.
    if "://" in html:
        link = html
    else:
        link = os.path.join(dirname, html)
    del item["html"]

    # Substitute the $name placeholder in `notfoundmsg` with the name of
    # the field it belongs to.
    for field_name, spec in item.items():
        if "notfoundmsg" in spec:
            spec["notfoundmsg"] = spec["notfoundmsg"].replace("$name", field_name)

    return {
        "html": _get_source(link),
        "link": link,
        "vars": item,
    }
constant[
Process one item from the configuration file, which contains multiple items
saved as dictionary.
This function reads additional data from the config and do some
replacements - for example, if you specify url, it will download data
from this url and so on.
Args:
item (dict): Item, which will be processed.
Note:
Returned data format::
{
"link": "link to html page/file",
"html": "html code from file/url",
"vars": {
"varname": {
"data": "matching data..",
...
}
}
}
Returns:
dict: Dictionary in format showed above.
]
variable[item] assign[=] call[name[copy].deepcopy, parameter[name[item]]]
variable[html] assign[=] call[name[item].get, parameter[constant[html], constant[None]]]
if <ast.UnaryOp object at 0x7da1b14e4580> begin[:]
<ast.Raise object at 0x7da1b14e7e50>
variable[link] assign[=] <ast.IfExp object at 0x7da1b14e4c40>
<ast.Delete object at 0x7da1b14e4430>
for taget[tuple[[<ast.Name object at 0x7da1b14e6fb0>, <ast.Name object at 0x7da1b14e6830>]]] in starred[call[name[item].items, parameter[]]] begin[:]
if compare[constant[notfoundmsg] in name[val]] begin[:]
call[name[val]][constant[notfoundmsg]] assign[=] call[call[name[val]][constant[notfoundmsg]].replace, parameter[constant[$name], name[key]]]
return[dictionary[[<ast.Constant object at 0x7da1b14e70d0>, <ast.Constant object at 0x7da1b14e68c0>, <ast.Constant object at 0x7da1b14e7be0>], [<ast.Call object at 0x7da1b14e7580>, <ast.Name object at 0x7da1b14e7970>, <ast.Name object at 0x7da1b14e43d0>]]] | keyword[def] identifier[_process_config_item] ( identifier[item] , identifier[dirname] ):
literal[string]
identifier[item] = identifier[copy] . identifier[deepcopy] ( identifier[item] )
identifier[html] = identifier[item] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[html] :
keyword[raise] identifier[UserWarning] ( literal[string] % identifier[str] ( identifier[item] ))
identifier[link] = identifier[html] keyword[if] literal[string] keyword[in] identifier[html] keyword[else] identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[html] )
keyword[del] identifier[item] [ literal[string] ]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[item] . identifier[items] ():
keyword[if] literal[string] keyword[in] identifier[val] :
identifier[val] [ literal[string] ]= identifier[val] [ literal[string] ]. identifier[replace] ( literal[string] , identifier[key] )
keyword[return] {
literal[string] : identifier[_get_source] ( identifier[link] ),
literal[string] : identifier[link] ,
literal[string] : identifier[item]
} | def _process_config_item(item, dirname):
"""
Process one item from the configuration file, which contains multiple items
saved as dictionary.
This function reads additional data from the config and do some
replacements - for example, if you specify url, it will download data
from this url and so on.
Args:
item (dict): Item, which will be processed.
Note:
Returned data format::
{
"link": "link to html page/file",
"html": "html code from file/url",
"vars": {
"varname": {
"data": "matching data..",
...
}
}
}
Returns:
dict: Dictionary in format showed above.
"""
item = copy.deepcopy(item)
html = item.get('html', None)
if not html:
raise UserWarning("Can't find HTML source for item:\n%s" % str(item)) # depends on [control=['if'], data=[]]
# process HTML link
link = html if '://' in html else os.path.join(dirname, html)
del item['html']
# replace $name with the actual name of the field
for (key, val) in item.items():
if 'notfoundmsg' in val:
val['notfoundmsg'] = val['notfoundmsg'].replace('$name', key) # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=[]]
return {'html': _get_source(link), 'link': link, 'vars': item} |
def run_validators(self, values):
    """Run the inherited validators on each parsed item separately.

    The parent field validates a single value, so it is invoked once per
    element of the comma-separated list.
    """
    parent = super(CommaSepFloatField, self)
    for item in values:
        parent.run_validators(item)
constant[Run validators for each item separately.]
for taget[name[val]] in starred[name[values]] begin[:]
call[call[name[super], parameter[name[CommaSepFloatField], name[self]]].run_validators, parameter[name[val]]] | keyword[def] identifier[run_validators] ( identifier[self] , identifier[values] ):
literal[string]
keyword[for] identifier[val] keyword[in] identifier[values] :
identifier[super] ( identifier[CommaSepFloatField] , identifier[self] ). identifier[run_validators] ( identifier[val] ) | def run_validators(self, values):
"""Run validators for each item separately."""
for val in values:
super(CommaSepFloatField, self).run_validators(val) # depends on [control=['for'], data=['val']] |
def col_strip(df, col_name, dest=False):
    """Strip surrounding whitespace from a string column of a DataFrame.

    Parameters:
        df - DataFrame
            DataFrame to operate on
        col_name - string
            Name of the column to strip
        dest - bool, default False
            When True, write the stripped values back into ``df`` in
            place (returning None); when False, return the stripped
            column and leave ``df`` untouched.
    """
    stripped = df[col_name].str.strip()
    if not dest:
        return stripped
    df[col_name] = stripped
constant[ Performs str.strip() a column of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
]
if name[dest] begin[:]
call[name[df]][name[col_name]] assign[=] call[call[name[df]][name[col_name]].str.strip, parameter[]] | keyword[def] identifier[col_strip] ( identifier[df] , identifier[col_name] , identifier[dest] = keyword[False] ):
literal[string]
keyword[if] identifier[dest] :
identifier[df] [ identifier[col_name] ]= identifier[df] [ identifier[col_name] ]. identifier[str] . identifier[strip] ()
keyword[else] :
keyword[return] identifier[df] [ identifier[col_name] ]. identifier[str] . identifier[strip] () | def col_strip(df, col_name, dest=False):
""" Performs str.strip() a column of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
"""
if dest:
df[col_name] = df[col_name].str.strip() # depends on [control=['if'], data=[]]
else:
return df[col_name].str.strip() |
def has_annotation(self, annotation: str) -> bool:
    """Return True if *annotation* is defined in any annotation registry
    (enumerated, regex, or local)."""
    checks = (
        self.has_enumerated_annotation,
        self.has_regex_annotation,
        self.has_local_annotation,
    )
    return any(check(annotation) for check in checks)
constant[Check if this annotation is defined.]
return[<ast.BoolOp object at 0x7da1b0ebe1d0>] | keyword[def] identifier[has_annotation] ( identifier[self] , identifier[annotation] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[return] (
identifier[self] . identifier[has_enumerated_annotation] ( identifier[annotation] ) keyword[or]
identifier[self] . identifier[has_regex_annotation] ( identifier[annotation] ) keyword[or]
identifier[self] . identifier[has_local_annotation] ( identifier[annotation] )
) | def has_annotation(self, annotation: str) -> bool:
"""Check if this annotation is defined."""
return self.has_enumerated_annotation(annotation) or self.has_regex_annotation(annotation) or self.has_local_annotation(annotation) |
def add_node_configuration(self, param_name, node_id, param_value):
    """
    Set a parameter for a given node

    :param param_name: parameter identifier (as specified by the chosen model)
    :param node_id: node identifier
    :param param_value: parameter value
    """
    # Create the per-parameter mapping on first use, then record the
    # value for this node (overwriting any previous value).
    per_node = self.config['nodes'].setdefault(param_name, {})
    per_node[node_id] = param_value
constant[
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
]
if compare[name[param_name] <ast.NotIn object at 0x7da2590d7190> call[name[self].config][constant[nodes]]] begin[:]
call[call[name[self].config][constant[nodes]]][name[param_name]] assign[=] dictionary[[<ast.Name object at 0x7da207f98400>], [<ast.Name object at 0x7da207f99720>]] | keyword[def] identifier[add_node_configuration] ( identifier[self] , identifier[param_name] , identifier[node_id] , identifier[param_value] ):
literal[string]
keyword[if] identifier[param_name] keyword[not] keyword[in] identifier[self] . identifier[config] [ literal[string] ]:
identifier[self] . identifier[config] [ literal[string] ][ identifier[param_name] ]={ identifier[node_id] : identifier[param_value] }
keyword[else] :
identifier[self] . identifier[config] [ literal[string] ][ identifier[param_name] ][ identifier[node_id] ]= identifier[param_value] | def add_node_configuration(self, param_name, node_id, param_value):
"""
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
"""
if param_name not in self.config['nodes']:
self.config['nodes'][param_name] = {node_id: param_value} # depends on [control=['if'], data=['param_name']]
else:
self.config['nodes'][param_name][node_id] = param_value |
def write(domain, key, value, type='string', user=None):
    '''
    Write a default to the system

    CLI Example:

    .. code-block:: bash

        salt '*' macdefaults.write com.apple.CrashReporter DialogType Server

        salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool

    domain
        The name of the domain to write to

    key
        The key of the given domain to write to

    value
        The value to write to the given key

    type
        The type of value to be written, valid types are string, data, int[eger],
        float, bool[ean], date, array, array-add, dict, dict-add

    user
        The user to write the defaults to
    '''
    # `defaults` expects literal TRUE/FALSE tokens for boolean values.
    if type in ('bool', 'boolean'):
        if value is True:
            value = 'TRUE'
        elif value is False:
            value = 'FALSE'

    cmd = 'defaults write "{0}" "{1}" -{2} "{3}"'.format(domain, key, type, value)
    return __salt__['cmd.run_all'](cmd, runas=user)
constant[
Write a default to the system
CLI Example:
.. code-block:: bash
salt '*' macdefaults.write com.apple.CrashReporter DialogType Server
salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool
domain
The name of the domain to write to
key
The key of the given domain to write to
value
The value to write to the given key
type
The type of value to be written, valid types are string, data, int[eger],
float, bool[ean], date, array, array-add, dict, dict-add
user
The user to write the defaults to
]
if <ast.BoolOp object at 0x7da1b1f2c3a0> begin[:]
if compare[name[value] is constant[True]] begin[:]
variable[value] assign[=] constant[TRUE]
variable[cmd] assign[=] call[constant[defaults write "{0}" "{1}" -{2} "{3}"].format, parameter[name[domain], name[key], name[type], name[value]]]
return[call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]] | keyword[def] identifier[write] ( identifier[domain] , identifier[key] , identifier[value] , identifier[type] = literal[string] , identifier[user] = keyword[None] ):
literal[string]
keyword[if] identifier[type] == literal[string] keyword[or] identifier[type] == literal[string] :
keyword[if] identifier[value] keyword[is] keyword[True] :
identifier[value] = literal[string]
keyword[elif] identifier[value] keyword[is] keyword[False] :
identifier[value] = literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[domain] , identifier[key] , identifier[type] , identifier[value] )
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[runas] = identifier[user] ) | def write(domain, key, value, type='string', user=None):
"""
Write a default to the system
CLI Example:
.. code-block:: bash
salt '*' macdefaults.write com.apple.CrashReporter DialogType Server
salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool
domain
The name of the domain to write to
key
The key of the given domain to write to
value
The value to write to the given key
type
The type of value to be written, valid types are string, data, int[eger],
float, bool[ean], date, array, array-add, dict, dict-add
user
The user to write the defaults to
"""
if type == 'bool' or type == 'boolean':
if value is True:
value = 'TRUE' # depends on [control=['if'], data=['value']]
elif value is False:
value = 'FALSE' # depends on [control=['if'], data=['value']] # depends on [control=['if'], data=[]]
cmd = 'defaults write "{0}" "{1}" -{2} "{3}"'.format(domain, key, type, value)
return __salt__['cmd.run_all'](cmd, runas=user) |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Apple Account entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    accounts = match.get('Accounts', {})
    for name_account, account in iter(accounts.items()):
      first_name = account.get('FirstName', '<FirstName>')
      last_name = account.get('LastName', '<LastName>')
      general_description = '{0:s} ({1:s} {2:s})'.format(
          name_account, first_name, last_name)

      event_data = plist_event.PlistTimeEventData()
      event_data.key = name_account
      event_data.root = '/Accounts'

      # Each plist timestamp key produces an event with its own description;
      # the same event_data object is reused with the description updated.
      timestamp_descriptions = (
          ('CreationDate', 'Configured Apple account {0:s}'),
          ('LastSuccessfulConnect', 'Connected Apple account {0:s}'),
          ('ValidationDate', 'Last validation Apple account {0:s}'))

      for plist_key, description_format in timestamp_descriptions:
        datetime_value = account.get(plist_key, None)
        if not datetime_value:
          continue

        event_data.desc = description_format.format(general_description)
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
constant[Extracts relevant Apple Account entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
]
variable[accounts] assign[=] call[name[match].get, parameter[constant[Accounts], dictionary[[], []]]]
for taget[tuple[[<ast.Name object at 0x7da18fe905b0>, <ast.Name object at 0x7da18fe939d0>]]] in starred[call[name[iter], parameter[call[name[accounts].items, parameter[]]]]] begin[:]
variable[first_name] assign[=] call[name[account].get, parameter[constant[FirstName], constant[<FirstName>]]]
variable[last_name] assign[=] call[name[account].get, parameter[constant[LastName], constant[<LastName>]]]
variable[general_description] assign[=] call[constant[{0:s} ({1:s} {2:s})].format, parameter[name[name_account], name[first_name], name[last_name]]]
variable[event_data] assign[=] call[name[plist_event].PlistTimeEventData, parameter[]]
name[event_data].key assign[=] name[name_account]
name[event_data].root assign[=] constant[/Accounts]
variable[datetime_value] assign[=] call[name[account].get, parameter[constant[CreationDate], constant[None]]]
if name[datetime_value] begin[:]
name[event_data].desc assign[=] call[constant[Configured Apple account {0:s}].format, parameter[name[general_description]]]
variable[event] assign[=] call[name[time_events].PythonDatetimeEvent, parameter[name[datetime_value], name[definitions].TIME_DESCRIPTION_WRITTEN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[datetime_value] assign[=] call[name[account].get, parameter[constant[LastSuccessfulConnect], constant[None]]]
if name[datetime_value] begin[:]
name[event_data].desc assign[=] call[constant[Connected Apple account {0:s}].format, parameter[name[general_description]]]
variable[event] assign[=] call[name[time_events].PythonDatetimeEvent, parameter[name[datetime_value], name[definitions].TIME_DESCRIPTION_WRITTEN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
variable[datetime_value] assign[=] call[name[account].get, parameter[constant[ValidationDate], constant[None]]]
if name[datetime_value] begin[:]
name[event_data].desc assign[=] call[constant[Last validation Apple account {0:s}].format, parameter[name[general_description]]]
variable[event] assign[=] call[name[time_events].PythonDatetimeEvent, parameter[name[datetime_value], name[definitions].TIME_DESCRIPTION_WRITTEN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[GetEntries] ( identifier[self] , identifier[parser_mediator] , identifier[match] = keyword[None] ,** identifier[unused_kwargs] ):
literal[string]
identifier[accounts] = identifier[match] . identifier[get] ( literal[string] ,{})
keyword[for] identifier[name_account] , identifier[account] keyword[in] identifier[iter] ( identifier[accounts] . identifier[items] ()):
identifier[first_name] = identifier[account] . identifier[get] ( literal[string] , literal[string] )
identifier[last_name] = identifier[account] . identifier[get] ( literal[string] , literal[string] )
identifier[general_description] = literal[string] . identifier[format] (
identifier[name_account] , identifier[first_name] , identifier[last_name] )
identifier[event_data] = identifier[plist_event] . identifier[PlistTimeEventData] ()
identifier[event_data] . identifier[key] = identifier[name_account]
identifier[event_data] . identifier[root] = literal[string]
identifier[datetime_value] = identifier[account] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[datetime_value] :
identifier[event_data] . identifier[desc] = literal[string] . identifier[format] (
identifier[general_description] )
identifier[event] = identifier[time_events] . identifier[PythonDatetimeEvent] (
identifier[datetime_value] , identifier[definitions] . identifier[TIME_DESCRIPTION_WRITTEN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[datetime_value] = identifier[account] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[datetime_value] :
identifier[event_data] . identifier[desc] = literal[string] . identifier[format] (
identifier[general_description] )
identifier[event] = identifier[time_events] . identifier[PythonDatetimeEvent] (
identifier[datetime_value] , identifier[definitions] . identifier[TIME_DESCRIPTION_WRITTEN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
identifier[datetime_value] = identifier[account] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[datetime_value] :
identifier[event_data] . identifier[desc] = literal[string] . identifier[format] (
identifier[general_description] )
identifier[event] = identifier[time_events] . identifier[PythonDatetimeEvent] (
identifier[datetime_value] , identifier[definitions] . identifier[TIME_DESCRIPTION_WRITTEN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Apple Account entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
accounts = match.get('Accounts', {})
for (name_account, account) in iter(accounts.items()):
first_name = account.get('FirstName', '<FirstName>')
last_name = account.get('LastName', '<LastName>')
general_description = '{0:s} ({1:s} {2:s})'.format(name_account, first_name, last_name)
event_data = plist_event.PlistTimeEventData()
event_data.key = name_account
event_data.root = '/Accounts'
datetime_value = account.get('CreationDate', None)
if datetime_value:
event_data.desc = 'Configured Apple account {0:s}'.format(general_description)
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]]
datetime_value = account.get('LastSuccessfulConnect', None)
if datetime_value:
event_data.desc = 'Connected Apple account {0:s}'.format(general_description)
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]]
datetime_value = account.get('ValidationDate', None)
if datetime_value:
event_data.desc = 'Last validation Apple account {0:s}'.format(general_description)
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def target_sequence(self):
    # type: () -> SeqRecord
    """Get the target sequence in the vector.

    The target sequence is the part of the plasmid that is not discarded
    during the assembly (everything except the placeholder sequence).
    """
    # A 3' overhang cutter shifts the relevant regex match groups by one.
    if self.cutter.is_3overhang():
        first_group, last_group = 2, 3
    else:
        first_group, last_group = 1, 2
    start = self._match.span(first_group)[0]
    end = self._match.span(last_group)[1]
    # `record << start` presumably rotates the circular record so the
    # matched region begins at index 0; the slice then drops it,
    # keeping only the target sequence — TODO confirm `<<` semantics.
    return add_as_source(self.record, (self.record << start)[end - start:])
constant[Get the target sequence in the vector.
The target sequence if the part of the plasmid that is not discarded
during the assembly (everything except the placeholder sequence).
]
if call[name[self].cutter.is_3overhang, parameter[]] begin[:]
<ast.Tuple object at 0x7da1b23601c0> assign[=] tuple[[<ast.Subscript object at 0x7da1b23616f0>, <ast.Subscript object at 0x7da1b2362320>]]
return[call[name[add_as_source], parameter[name[self].record, call[binary_operation[name[self].record <ast.LShift object at 0x7da2590d69e0> name[start]]][<ast.Slice object at 0x7da1b23610c0>]]]] | keyword[def] identifier[target_sequence] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[cutter] . identifier[is_3overhang] ():
identifier[start] , identifier[end] = identifier[self] . identifier[_match] . identifier[span] ( literal[int] )[ literal[int] ], identifier[self] . identifier[_match] . identifier[span] ( literal[int] )[ literal[int] ]
keyword[else] :
identifier[start] , identifier[end] = identifier[self] . identifier[_match] . identifier[span] ( literal[int] )[ literal[int] ], identifier[self] . identifier[_match] . identifier[span] ( literal[int] )[ literal[int] ]
keyword[return] identifier[add_as_source] ( identifier[self] . identifier[record] ,( identifier[self] . identifier[record] << identifier[start] )[ identifier[end] - identifier[start] :]) | def target_sequence(self):
# type: () -> SeqRecord
'Get the target sequence in the vector.\n\n The target sequence if the part of the plasmid that is not discarded\n during the assembly (everything except the placeholder sequence).\n '
if self.cutter.is_3overhang():
(start, end) = (self._match.span(2)[0], self._match.span(3)[1]) # depends on [control=['if'], data=[]]
else:
(start, end) = (self._match.span(1)[0], self._match.span(2)[1])
return add_as_source(self.record, (self.record << start)[end - start:]) |
def put_user(self, username, body, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html>`_

    :arg username: The username of the User
    :arg body: The user to add
    :arg refresh: If `true` (the default) then refresh the affected shards
        to make this operation visible to search, if `wait_for` then wait
        for a refresh to make this operation visible to search, if `false`
        then do nothing with refreshes., valid choices are: 'true', 'false',
        'wait_for'
    """
    # Both the username (path component) and the body are mandatory.
    if any(param in SKIP_IN_PATH for param in (username, body)):
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        "PUT", _make_path("_security", "user", username), params=params, body=body
    )
constant[
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html>`_
:arg username: The username of the User
:arg body: The user to add
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
]
for taget[name[param]] in starred[tuple[[<ast.Name object at 0x7da1b21e35e0>, <ast.Name object at 0x7da1b21e1e10>]]] begin[:]
if compare[name[param] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da1b21e1630>
return[call[name[self].transport.perform_request, parameter[constant[PUT], call[name[_make_path], parameter[constant[_security], constant[user], name[username]]]]]] | keyword[def] identifier[put_user] ( identifier[self] , identifier[username] , identifier[body] , identifier[params] = keyword[None] ):
literal[string]
keyword[for] identifier[param] keyword[in] ( identifier[username] , identifier[body] ):
keyword[if] identifier[param] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( literal[string] , literal[string] , identifier[username] ), identifier[params] = identifier[params] , identifier[body] = identifier[body]
) | def put_user(self, username, body, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html>`_
:arg username: The username of the User
:arg body: The user to add
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
for param in (username, body):
if param in SKIP_IN_PATH:
raise ValueError('Empty value passed for a required argument.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param']]
return self.transport.perform_request('PUT', _make_path('_security', 'user', username), params=params, body=body) |
def create_class_from_xml_string(target_class, xml_string):
    """Creates an instance of the target class from a string.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML. This class must have a c_tag and a
        c_namespace class variable.
    :param xml_string: A string which contains valid XML. The root element
        of the XML string should match the tag and namespace of the desired
        class.

    :return: An instance of the target class with members assigned according to
        the contents of the XML - or None if the root XML tag and namespace did
        not match those of the target class.
    """
    # The parser works on bytes; encode text input as UTF-8 first.
    if isinstance(xml_string, six.binary_type):
        raw = xml_string
    else:
        raw = xml_string.encode('utf-8')
    tree = defusedxml.ElementTree.fromstring(raw)
    return create_class_from_element_tree(target_class, tree)
constant[Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class.
]
if <ast.UnaryOp object at 0x7da18f811570> begin[:]
variable[xml_string] assign[=] call[name[xml_string].encode, parameter[constant[utf-8]]]
variable[tree] assign[=] call[name[defusedxml].ElementTree.fromstring, parameter[name[xml_string]]]
return[call[name[create_class_from_element_tree], parameter[name[target_class], name[tree]]]] | keyword[def] identifier[create_class_from_xml_string] ( identifier[target_class] , identifier[xml_string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[xml_string] , identifier[six] . identifier[binary_type] ):
identifier[xml_string] = identifier[xml_string] . identifier[encode] ( literal[string] )
identifier[tree] = identifier[defusedxml] . identifier[ElementTree] . identifier[fromstring] ( identifier[xml_string] )
keyword[return] identifier[create_class_from_element_tree] ( identifier[target_class] , identifier[tree] ) | def create_class_from_xml_string(target_class, xml_string):
"""Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class.
"""
if not isinstance(xml_string, six.binary_type):
xml_string = xml_string.encode('utf-8') # depends on [control=['if'], data=[]]
tree = defusedxml.ElementTree.fromstring(xml_string)
return create_class_from_element_tree(target_class, tree) |
def parseExtensionArgs(self, ax_args):
"""Given attribute exchange arguments, populate this FetchRequest.
@param ax_args: Attribute Exchange arguments from the request.
As returned from L{Message.getArgs<openid.message.Message.getArgs>}.
@type ax_args: dict
@raises KeyError: if the message is not consistent in its use
of namespace aliases.
@raises NotAXMessage: If ax_args does not include an Attribute Exchange
mode.
@raises AXError: If the data to be parsed does not follow the
attribute exchange specification. At least when
'if_available' or 'required' is not specified for a
particular attribute type.
"""
# Raises an exception if the mode is not the expected value
self._checkMode(ax_args)
aliases = NamespaceMap()
for key, value in ax_args.items():
if key.startswith('type.'):
alias = key[5:]
type_uri = value
aliases.addAlias(type_uri, alias)
count_key = 'count.' + alias
count_s = ax_args.get(count_key)
if count_s:
try:
count = int(count_s)
if count <= 0:
raise AXError(
"Count %r must be greater than zero, got %r" %
(count_key, count_s, ))
except ValueError:
if count_s != UNLIMITED_VALUES:
raise AXError("Invalid count value for %r: %r" %
(count_key, count_s, ))
count = count_s
else:
count = 1
self.add(AttrInfo(type_uri, alias=alias, count=count))
required = toTypeURIs(aliases, ax_args.get('required'))
for type_uri in required:
self.requested_attributes[type_uri].required = True
if_available = toTypeURIs(aliases, ax_args.get('if_available'))
all_type_uris = required + if_available
for type_uri in aliases.iterNamespaceURIs():
if type_uri not in all_type_uris:
raise AXError('Type URI %r was in the request but not '
'present in "required" or "if_available"' %
(type_uri, ))
self.update_url = ax_args.get('update_url') | def function[parseExtensionArgs, parameter[self, ax_args]]:
constant[Given attribute exchange arguments, populate this FetchRequest.
@param ax_args: Attribute Exchange arguments from the request.
As returned from L{Message.getArgs<openid.message.Message.getArgs>}.
@type ax_args: dict
@raises KeyError: if the message is not consistent in its use
of namespace aliases.
@raises NotAXMessage: If ax_args does not include an Attribute Exchange
mode.
@raises AXError: If the data to be parsed does not follow the
attribute exchange specification. At least when
'if_available' or 'required' is not specified for a
particular attribute type.
]
call[name[self]._checkMode, parameter[name[ax_args]]]
variable[aliases] assign[=] call[name[NamespaceMap], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0626020>, <ast.Name object at 0x7da1b0627a60>]]] in starred[call[name[ax_args].items, parameter[]]] begin[:]
if call[name[key].startswith, parameter[constant[type.]]] begin[:]
variable[alias] assign[=] call[name[key]][<ast.Slice object at 0x7da1b0627be0>]
variable[type_uri] assign[=] name[value]
call[name[aliases].addAlias, parameter[name[type_uri], name[alias]]]
variable[count_key] assign[=] binary_operation[constant[count.] + name[alias]]
variable[count_s] assign[=] call[name[ax_args].get, parameter[name[count_key]]]
if name[count_s] begin[:]
<ast.Try object at 0x7da1b0627370>
call[name[self].add, parameter[call[name[AttrInfo], parameter[name[type_uri]]]]]
variable[required] assign[=] call[name[toTypeURIs], parameter[name[aliases], call[name[ax_args].get, parameter[constant[required]]]]]
for taget[name[type_uri]] in starred[name[required]] begin[:]
call[name[self].requested_attributes][name[type_uri]].required assign[=] constant[True]
variable[if_available] assign[=] call[name[toTypeURIs], parameter[name[aliases], call[name[ax_args].get, parameter[constant[if_available]]]]]
variable[all_type_uris] assign[=] binary_operation[name[required] + name[if_available]]
for taget[name[type_uri]] in starred[call[name[aliases].iterNamespaceURIs, parameter[]]] begin[:]
if compare[name[type_uri] <ast.NotIn object at 0x7da2590d7190> name[all_type_uris]] begin[:]
<ast.Raise object at 0x7da1b0627190>
name[self].update_url assign[=] call[name[ax_args].get, parameter[constant[update_url]]] | keyword[def] identifier[parseExtensionArgs] ( identifier[self] , identifier[ax_args] ):
literal[string]
identifier[self] . identifier[_checkMode] ( identifier[ax_args] )
identifier[aliases] = identifier[NamespaceMap] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[ax_args] . identifier[items] ():
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[alias] = identifier[key] [ literal[int] :]
identifier[type_uri] = identifier[value]
identifier[aliases] . identifier[addAlias] ( identifier[type_uri] , identifier[alias] )
identifier[count_key] = literal[string] + identifier[alias]
identifier[count_s] = identifier[ax_args] . identifier[get] ( identifier[count_key] )
keyword[if] identifier[count_s] :
keyword[try] :
identifier[count] = identifier[int] ( identifier[count_s] )
keyword[if] identifier[count] <= literal[int] :
keyword[raise] identifier[AXError] (
literal[string] %
( identifier[count_key] , identifier[count_s] ,))
keyword[except] identifier[ValueError] :
keyword[if] identifier[count_s] != identifier[UNLIMITED_VALUES] :
keyword[raise] identifier[AXError] ( literal[string] %
( identifier[count_key] , identifier[count_s] ,))
identifier[count] = identifier[count_s]
keyword[else] :
identifier[count] = literal[int]
identifier[self] . identifier[add] ( identifier[AttrInfo] ( identifier[type_uri] , identifier[alias] = identifier[alias] , identifier[count] = identifier[count] ))
identifier[required] = identifier[toTypeURIs] ( identifier[aliases] , identifier[ax_args] . identifier[get] ( literal[string] ))
keyword[for] identifier[type_uri] keyword[in] identifier[required] :
identifier[self] . identifier[requested_attributes] [ identifier[type_uri] ]. identifier[required] = keyword[True]
identifier[if_available] = identifier[toTypeURIs] ( identifier[aliases] , identifier[ax_args] . identifier[get] ( literal[string] ))
identifier[all_type_uris] = identifier[required] + identifier[if_available]
keyword[for] identifier[type_uri] keyword[in] identifier[aliases] . identifier[iterNamespaceURIs] ():
keyword[if] identifier[type_uri] keyword[not] keyword[in] identifier[all_type_uris] :
keyword[raise] identifier[AXError] ( literal[string]
literal[string] %
( identifier[type_uri] ,))
identifier[self] . identifier[update_url] = identifier[ax_args] . identifier[get] ( literal[string] ) | def parseExtensionArgs(self, ax_args):
"""Given attribute exchange arguments, populate this FetchRequest.
@param ax_args: Attribute Exchange arguments from the request.
As returned from L{Message.getArgs<openid.message.Message.getArgs>}.
@type ax_args: dict
@raises KeyError: if the message is not consistent in its use
of namespace aliases.
@raises NotAXMessage: If ax_args does not include an Attribute Exchange
mode.
@raises AXError: If the data to be parsed does not follow the
attribute exchange specification. At least when
'if_available' or 'required' is not specified for a
particular attribute type.
"""
# Raises an exception if the mode is not the expected value
self._checkMode(ax_args)
aliases = NamespaceMap()
for (key, value) in ax_args.items():
if key.startswith('type.'):
alias = key[5:]
type_uri = value
aliases.addAlias(type_uri, alias)
count_key = 'count.' + alias
count_s = ax_args.get(count_key)
if count_s:
try:
count = int(count_s)
if count <= 0:
raise AXError('Count %r must be greater than zero, got %r' % (count_key, count_s)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
if count_s != UNLIMITED_VALUES:
raise AXError('Invalid count value for %r: %r' % (count_key, count_s)) # depends on [control=['if'], data=['count_s']]
count = count_s # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
count = 1
self.add(AttrInfo(type_uri, alias=alias, count=count)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
required = toTypeURIs(aliases, ax_args.get('required'))
for type_uri in required:
self.requested_attributes[type_uri].required = True # depends on [control=['for'], data=['type_uri']]
if_available = toTypeURIs(aliases, ax_args.get('if_available'))
all_type_uris = required + if_available
for type_uri in aliases.iterNamespaceURIs():
if type_uri not in all_type_uris:
raise AXError('Type URI %r was in the request but not present in "required" or "if_available"' % (type_uri,)) # depends on [control=['if'], data=['type_uri']] # depends on [control=['for'], data=['type_uri']]
self.update_url = ax_args.get('update_url') |
def delete_mockdata_url(service_name, implementation_name,
url, headers,
dir_base=dirname(__file__)):
"""
:param service_name:
possible "sws", "pws", "book", "hfs", etc.
:param implementation_name:
possible values: "file", etc.
"""
# Http response code 204 No Content:
# The server has fulfilled the request but does not need to
# return an entity-body
response = MockHTTP()
response.status = 204
return response | def function[delete_mockdata_url, parameter[service_name, implementation_name, url, headers, dir_base]]:
constant[
:param service_name:
possible "sws", "pws", "book", "hfs", etc.
:param implementation_name:
possible values: "file", etc.
]
variable[response] assign[=] call[name[MockHTTP], parameter[]]
name[response].status assign[=] constant[204]
return[name[response]] | keyword[def] identifier[delete_mockdata_url] ( identifier[service_name] , identifier[implementation_name] ,
identifier[url] , identifier[headers] ,
identifier[dir_base] = identifier[dirname] ( identifier[__file__] )):
literal[string]
identifier[response] = identifier[MockHTTP] ()
identifier[response] . identifier[status] = literal[int]
keyword[return] identifier[response] | def delete_mockdata_url(service_name, implementation_name, url, headers, dir_base=dirname(__file__)):
"""
:param service_name:
possible "sws", "pws", "book", "hfs", etc.
:param implementation_name:
possible values: "file", etc.
"""
# Http response code 204 No Content:
# The server has fulfilled the request but does not need to
# return an entity-body
response = MockHTTP()
response.status = 204
return response |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.