code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def complete_xml_element(self, xmlnode, doc):
"""Complete the XML node with `self` content.
:Parameters:
- `xmlnode`: XML node with the element being built. It has already
right name and namespace, but no attributes or content.
- `doc`: document to which the element belongs.
:Types:
- `xmlnode`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`"""
_unused = doc
if self.label is not None:
xmlnode.setProp("label", self.label.encode("utf-8"))
xmlnode.newTextChild(xmlnode.ns(), "value", self.value.encode("utf-8"))
return xmlnode | def function[complete_xml_element, parameter[self, xmlnode, doc]]:
constant[Complete the XML node with `self` content.
:Parameters:
- `xmlnode`: XML node with the element being built. It has already
right name and namespace, but no attributes or content.
- `doc`: document to which the element belongs.
:Types:
- `xmlnode`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`]
variable[_unused] assign[=] name[doc]
if compare[name[self].label is_not constant[None]] begin[:]
call[name[xmlnode].setProp, parameter[constant[label], call[name[self].label.encode, parameter[constant[utf-8]]]]]
call[name[xmlnode].newTextChild, parameter[call[name[xmlnode].ns, parameter[]], constant[value], call[name[self].value.encode, parameter[constant[utf-8]]]]]
return[name[xmlnode]] | keyword[def] identifier[complete_xml_element] ( identifier[self] , identifier[xmlnode] , identifier[doc] ):
literal[string]
identifier[_unused] = identifier[doc]
keyword[if] identifier[self] . identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[xmlnode] . identifier[setProp] ( literal[string] , identifier[self] . identifier[label] . identifier[encode] ( literal[string] ))
identifier[xmlnode] . identifier[newTextChild] ( identifier[xmlnode] . identifier[ns] (), literal[string] , identifier[self] . identifier[value] . identifier[encode] ( literal[string] ))
keyword[return] identifier[xmlnode] | def complete_xml_element(self, xmlnode, doc):
"""Complete the XML node with `self` content.
:Parameters:
- `xmlnode`: XML node with the element being built. It has already
right name and namespace, but no attributes or content.
- `doc`: document to which the element belongs.
:Types:
- `xmlnode`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`"""
_unused = doc
if self.label is not None:
xmlnode.setProp('label', self.label.encode('utf-8')) # depends on [control=['if'], data=[]]
xmlnode.newTextChild(xmlnode.ns(), 'value', self.value.encode('utf-8'))
return xmlnode |
def _pipe(self):
"""On Windows we use a pipe to emulate a Linux style character
buffer."""
if self._evdev:
return None
if not self.__pipe:
target_function = self._get_target_function()
if not target_function:
return None
self.__pipe, child_conn = Pipe(duplex=False)
self._listener = Process(target=target_function,
args=(child_conn,), daemon=True)
self._listener.start()
return self.__pipe | def function[_pipe, parameter[self]]:
constant[On Windows we use a pipe to emulate a Linux style character
buffer.]
if name[self]._evdev begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da1b084c2b0> begin[:]
variable[target_function] assign[=] call[name[self]._get_target_function, parameter[]]
if <ast.UnaryOp object at 0x7da1b084c130> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da1b084f640> assign[=] call[name[Pipe], parameter[]]
name[self]._listener assign[=] call[name[Process], parameter[]]
call[name[self]._listener.start, parameter[]]
return[name[self].__pipe] | keyword[def] identifier[_pipe] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_evdev] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[self] . identifier[__pipe] :
identifier[target_function] = identifier[self] . identifier[_get_target_function] ()
keyword[if] keyword[not] identifier[target_function] :
keyword[return] keyword[None]
identifier[self] . identifier[__pipe] , identifier[child_conn] = identifier[Pipe] ( identifier[duplex] = keyword[False] )
identifier[self] . identifier[_listener] = identifier[Process] ( identifier[target] = identifier[target_function] ,
identifier[args] =( identifier[child_conn] ,), identifier[daemon] = keyword[True] )
identifier[self] . identifier[_listener] . identifier[start] ()
keyword[return] identifier[self] . identifier[__pipe] | def _pipe(self):
"""On Windows we use a pipe to emulate a Linux style character
buffer."""
if self._evdev:
return None # depends on [control=['if'], data=[]]
if not self.__pipe:
target_function = self._get_target_function()
if not target_function:
return None # depends on [control=['if'], data=[]]
(self.__pipe, child_conn) = Pipe(duplex=False)
self._listener = Process(target=target_function, args=(child_conn,), daemon=True)
self._listener.start() # depends on [control=['if'], data=[]]
return self.__pipe |
def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({
'component': 'singleuser-storage'
})
annotations = self._build_common_annotations({})
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
storage=self.storage_capacity,
labels=labels,
annotations=annotations
) | def function[get_pvc_manifest, parameter[self]]:
constant[
Make a pvc manifest that will spawn current user's pvc.
]
variable[labels] assign[=] call[name[self]._build_common_labels, parameter[call[name[self]._expand_all, parameter[name[self].storage_extra_labels]]]]
call[name[labels].update, parameter[dictionary[[<ast.Constant object at 0x7da1b16e9390>], [<ast.Constant object at 0x7da1b16e8670>]]]]
variable[annotations] assign[=] call[name[self]._build_common_annotations, parameter[dictionary[[], []]]]
return[call[name[make_pvc], parameter[]]] | keyword[def] identifier[get_pvc_manifest] ( identifier[self] ):
literal[string]
identifier[labels] = identifier[self] . identifier[_build_common_labels] ( identifier[self] . identifier[_expand_all] ( identifier[self] . identifier[storage_extra_labels] ))
identifier[labels] . identifier[update] ({
literal[string] : literal[string]
})
identifier[annotations] = identifier[self] . identifier[_build_common_annotations] ({})
keyword[return] identifier[make_pvc] (
identifier[name] = identifier[self] . identifier[pvc_name] ,
identifier[storage_class] = identifier[self] . identifier[storage_class] ,
identifier[access_modes] = identifier[self] . identifier[storage_access_modes] ,
identifier[storage] = identifier[self] . identifier[storage_capacity] ,
identifier[labels] = identifier[labels] ,
identifier[annotations] = identifier[annotations]
) | def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({'component': 'singleuser-storage'})
annotations = self._build_common_annotations({})
return make_pvc(name=self.pvc_name, storage_class=self.storage_class, access_modes=self.storage_access_modes, storage=self.storage_capacity, labels=labels, annotations=annotations) |
def to_text(s, encoding="utf-8"):
"""
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
"""
if isinstance(s, six.text_type):
return s
else:
return six.binary_type(s).decode(encoding) | def function[to_text, parameter[s, encoding]]:
constant[
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
]
if call[name[isinstance], parameter[name[s], name[six].text_type]] begin[:]
return[name[s]] | keyword[def] identifier[to_text] ( identifier[s] , identifier[encoding] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[s]
keyword[else] :
keyword[return] identifier[six] . identifier[binary_type] ( identifier[s] ). identifier[decode] ( identifier[encoding] ) | def to_text(s, encoding='utf-8'):
"""
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
"""
if isinstance(s, six.text_type):
return s # depends on [control=['if'], data=[]]
else:
return six.binary_type(s).decode(encoding) |
def tovalues(self, element_value):
"""
Return the `Values` string for an element value, based upon this value
mapping.
Parameters:
element_value (:term:`integer` or :class:`~pywbem.CIMInt`):
The value of the CIM element (property, method, parameter).
Returns:
:term:`string`:
The `Values` string for the element value.
Raises:
ValueError: Element value outside of the set defined by `ValueMap`.
TypeError: Element value is not an integer type.
"""
if not isinstance(element_value, (six.integer_types, CIMInt)):
raise TypeError(
_format("The value for value-mapped {0} is not "
"integer-typed, but has Python type: {1}",
self._element_str(), type(element_value)))
# try single value
try:
return self._b2v_single_dict[element_value]
except KeyError:
pass
# try value ranges
for range_tuple in self._b2v_range_tuple_list:
lo, hi, values_str = range_tuple
if lo <= element_value <= hi:
return values_str
# try catch-all '..'
if self._b2v_unclaimed is not None:
return self._b2v_unclaimed
raise ValueError(
_format("The value for value-mapped {0} is outside of the set "
"defined by its ValueMap qualifier: {1!A}",
self._element_str(), element_value)) | def function[tovalues, parameter[self, element_value]]:
constant[
Return the `Values` string for an element value, based upon this value
mapping.
Parameters:
element_value (:term:`integer` or :class:`~pywbem.CIMInt`):
The value of the CIM element (property, method, parameter).
Returns:
:term:`string`:
The `Values` string for the element value.
Raises:
ValueError: Element value outside of the set defined by `ValueMap`.
TypeError: Element value is not an integer type.
]
if <ast.UnaryOp object at 0x7da18f810dc0> begin[:]
<ast.Raise object at 0x7da18f8135b0>
<ast.Try object at 0x7da20ed9bd30>
for taget[name[range_tuple]] in starred[name[self]._b2v_range_tuple_list] begin[:]
<ast.Tuple object at 0x7da18dc04ee0> assign[=] name[range_tuple]
if compare[name[lo] less_or_equal[<=] name[element_value]] begin[:]
return[name[values_str]]
if compare[name[self]._b2v_unclaimed is_not constant[None]] begin[:]
return[name[self]._b2v_unclaimed]
<ast.Raise object at 0x7da18dc04d00> | keyword[def] identifier[tovalues] ( identifier[self] , identifier[element_value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[element_value] ,( identifier[six] . identifier[integer_types] , identifier[CIMInt] )):
keyword[raise] identifier[TypeError] (
identifier[_format] ( literal[string]
literal[string] ,
identifier[self] . identifier[_element_str] (), identifier[type] ( identifier[element_value] )))
keyword[try] :
keyword[return] identifier[self] . identifier[_b2v_single_dict] [ identifier[element_value] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[for] identifier[range_tuple] keyword[in] identifier[self] . identifier[_b2v_range_tuple_list] :
identifier[lo] , identifier[hi] , identifier[values_str] = identifier[range_tuple]
keyword[if] identifier[lo] <= identifier[element_value] <= identifier[hi] :
keyword[return] identifier[values_str]
keyword[if] identifier[self] . identifier[_b2v_unclaimed] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_b2v_unclaimed]
keyword[raise] identifier[ValueError] (
identifier[_format] ( literal[string]
literal[string] ,
identifier[self] . identifier[_element_str] (), identifier[element_value] )) | def tovalues(self, element_value):
"""
Return the `Values` string for an element value, based upon this value
mapping.
Parameters:
element_value (:term:`integer` or :class:`~pywbem.CIMInt`):
The value of the CIM element (property, method, parameter).
Returns:
:term:`string`:
The `Values` string for the element value.
Raises:
ValueError: Element value outside of the set defined by `ValueMap`.
TypeError: Element value is not an integer type.
"""
if not isinstance(element_value, (six.integer_types, CIMInt)):
raise TypeError(_format('The value for value-mapped {0} is not integer-typed, but has Python type: {1}', self._element_str(), type(element_value))) # depends on [control=['if'], data=[]]
# try single value
try:
return self._b2v_single_dict[element_value] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
# try value ranges
for range_tuple in self._b2v_range_tuple_list:
(lo, hi, values_str) = range_tuple
if lo <= element_value <= hi:
return values_str # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['range_tuple']]
# try catch-all '..'
if self._b2v_unclaimed is not None:
return self._b2v_unclaimed # depends on [control=['if'], data=[]]
raise ValueError(_format('The value for value-mapped {0} is outside of the set defined by its ValueMap qualifier: {1!A}', self._element_str(), element_value)) |
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games] | def function[schedule, parameter[self, kind]]:
constant[Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
]
variable[kind] assign[=] call[call[name[kind].upper, parameter[]]][constant[0]]
variable[dfs] assign[=] list[[]]
for taget[name[month]] in starred[tuple[[<ast.Constant object at 0x7da20e9565c0>, <ast.Constant object at 0x7da20e955e40>, <ast.Constant object at 0x7da20e955060>, <ast.Constant object at 0x7da20e956a40>, <ast.Constant object at 0x7da20e956a10>, <ast.Constant object at 0x7da20e9567d0>, <ast.Constant object at 0x7da20e957160>, <ast.Constant object at 0x7da20e957940>, <ast.Constant object at 0x7da20e9540d0>]]] begin[:]
<ast.Try object at 0x7da20e956230>
variable[table] assign[=] call[name[doc], parameter[constant[table#schedule]]]
variable[df] assign[=] call[name[sportsref].utils.parse_table, parameter[name[table]]]
call[name[dfs].append, parameter[name[df]]]
variable[df] assign[=] call[call[name[pd].concat, parameter[name[dfs]]].reset_index, parameter[]]
<ast.Try object at 0x7da20e954250>
if name[is_past_season] begin[:]
variable[team_per_game] assign[=] call[name[self].team_stats_per_game, parameter[]]
variable[n_reg_games] assign[=] call[name[int], parameter[binary_operation[call[name[team_per_game].g.sum, parameter[]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]]
if compare[name[kind] equal[==] constant[P]] begin[:]
return[call[name[df].iloc][<ast.Slice object at 0x7da1b26adf30>]] | keyword[def] identifier[schedule] ( identifier[self] , identifier[kind] = literal[string] ):
literal[string]
identifier[kind] = identifier[kind] . identifier[upper] ()[ literal[int] ]
identifier[dfs] =[]
keyword[for] identifier[month] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[try] :
identifier[doc] = identifier[self] . identifier[get_sub_doc] ( literal[string] . identifier[format] ( identifier[month] ))
keyword[except] identifier[ValueError] :
keyword[continue]
identifier[table] = identifier[doc] ( literal[string] )
identifier[df] = identifier[sportsref] . identifier[utils] . identifier[parse_table] ( identifier[table] )
identifier[dfs] . identifier[append] ( identifier[df] )
identifier[df] = identifier[pd] . identifier[concat] ( identifier[dfs] ). identifier[reset_index] ( identifier[drop] = keyword[True] )
keyword[try] :
identifier[sportsref] . identifier[utils] . identifier[get_html] ( literal[string] . identifier[format] (
identifier[sportsref] . identifier[nba] . identifier[BASE_URL] , identifier[self] . identifier[yr] )
)
identifier[is_past_season] = keyword[True]
keyword[except] identifier[ValueError] :
identifier[is_past_season] = keyword[False]
keyword[if] identifier[is_past_season] :
identifier[team_per_game] = identifier[self] . identifier[team_stats_per_game] ()
identifier[n_reg_games] = identifier[int] ( identifier[team_per_game] . identifier[g] . identifier[sum] ()// literal[int] )
keyword[else] :
identifier[n_reg_games] = identifier[len] ( identifier[df] )
keyword[if] identifier[kind] == literal[string] :
keyword[return] identifier[df] . identifier[iloc] [ identifier[n_reg_games] :]
keyword[else] :
keyword[return] identifier[df] . identifier[iloc] [: identifier[n_reg_games] ] | def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february', 'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month)) # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df) # depends on [control=['for'], data=['month']]
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(sportsref.nba.BASE_URL, self.yr))
is_past_season = True # depends on [control=['try'], data=[]]
except ValueError:
is_past_season = False # depends on [control=['except'], data=[]]
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2) # depends on [control=['if'], data=[]]
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:] # depends on [control=['if'], data=[]]
else:
return df.iloc[:n_reg_games] |
def _writer(func):
"""
Decorator for a custom writer, but a default reader
"""
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func) | def function[_writer, parameter[func]]:
constant[
Decorator for a custom writer, but a default reader
]
variable[name] assign[=] name[func].__name__
return[call[name[property], parameter[]]] | keyword[def] identifier[_writer] ( identifier[func] ):
literal[string]
identifier[name] = identifier[func] . identifier[__name__]
keyword[return] identifier[property] ( identifier[fget] = keyword[lambda] identifier[self] : identifier[getattr] ( identifier[self] , literal[string] % identifier[name] ), identifier[fset] = identifier[func] ) | def _writer(func):
"""
Decorator for a custom writer, but a default reader
"""
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func) |
def use_pickle():
"""Revert to using stdlib pickle.
Reverts custom serialization enabled by use_dill|cloudpickle.
"""
from . import serialize
serialize.pickle = serialize._stdlib_pickle
# restore special function handling
can_map[FunctionType] = _original_can_map[FunctionType] | def function[use_pickle, parameter[]]:
constant[Revert to using stdlib pickle.
Reverts custom serialization enabled by use_dill|cloudpickle.
]
from relative_module[None] import module[serialize]
name[serialize].pickle assign[=] name[serialize]._stdlib_pickle
call[name[can_map]][name[FunctionType]] assign[=] call[name[_original_can_map]][name[FunctionType]] | keyword[def] identifier[use_pickle] ():
literal[string]
keyword[from] . keyword[import] identifier[serialize]
identifier[serialize] . identifier[pickle] = identifier[serialize] . identifier[_stdlib_pickle]
identifier[can_map] [ identifier[FunctionType] ]= identifier[_original_can_map] [ identifier[FunctionType] ] | def use_pickle():
"""Revert to using stdlib pickle.
Reverts custom serialization enabled by use_dill|cloudpickle.
"""
from . import serialize
serialize.pickle = serialize._stdlib_pickle
# restore special function handling
can_map[FunctionType] = _original_can_map[FunctionType] |
def print_code(co, lasti= -1, level=0):
"""Disassemble a code object."""
code = co.co_code
for constant in co.co_consts:
print( '| |' * level, end=' ')
print( 'constant:', constant)
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
have_inner = False
c = code[i]
op = co_ord(c)
if i in linestarts:
if i > 0:
print()
print( '| |' * level, end=' ')
print( "%3d" % linestarts[i], end=' ')
else:
print( '| |' * level, end=' ')
print(' ', end=' ')
if i == lasti: print( '-->',end=' ')
else: print( ' ', end=' ')
if i in labels: print( '>>', end=' ')
else: print( ' ',end=' ')
print(repr(i).rjust(4), end=' ')
print(opcode.opname[op].ljust(20), end=' ')
i = i + 1
if op >= opcode.HAVE_ARGUMENT:
oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536
print( repr(oparg).rjust(5), end=' ')
if op in opcode.hasconst:
print( '(' + repr(co.co_consts[oparg]) + ')', end=' ')
if type(co.co_consts[oparg]) == types.CodeType:
have_inner = co.co_consts[oparg]
elif op in opcode.hasname:
print( '(' + co.co_names[oparg] + ')',end=' ')
elif op in opcode.hasjrel:
print('(to ' + repr(i + oparg) + ')', end=' ')
elif op in opcode.haslocal:
print('(' + co.co_varnames[oparg] + ')', end=' ')
elif op in opcode.hascompare:
print('(' + opcode.cmp_op[oparg] + ')', end=' ')
elif op in opcode.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print('(' + free[oparg] + ')', end=' ')
print()
if have_inner is not False:
print_code(have_inner, level=level + 1) | def function[print_code, parameter[co, lasti, level]]:
constant[Disassemble a code object.]
variable[code] assign[=] name[co].co_code
for taget[name[constant]] in starred[name[co].co_consts] begin[:]
call[name[print], parameter[binary_operation[constant[| |] * name[level]]]]
call[name[print], parameter[constant[constant:], name[constant]]]
variable[labels] assign[=] call[name[findlabels], parameter[name[code]]]
variable[linestarts] assign[=] call[name[dict], parameter[call[name[findlinestarts], parameter[name[co]]]]]
variable[n] assign[=] call[name[len], parameter[name[code]]]
variable[i] assign[=] constant[0]
variable[extended_arg] assign[=] constant[0]
variable[free] assign[=] constant[None]
while compare[name[i] less[<] name[n]] begin[:]
variable[have_inner] assign[=] constant[False]
variable[c] assign[=] call[name[code]][name[i]]
variable[op] assign[=] call[name[co_ord], parameter[name[c]]]
if compare[name[i] in name[linestarts]] begin[:]
if compare[name[i] greater[>] constant[0]] begin[:]
call[name[print], parameter[]]
call[name[print], parameter[binary_operation[constant[| |] * name[level]]]]
call[name[print], parameter[binary_operation[constant[%3d] <ast.Mod object at 0x7da2590d6920> call[name[linestarts]][name[i]]]]]
if compare[name[i] equal[==] name[lasti]] begin[:]
call[name[print], parameter[constant[-->]]]
if compare[name[i] in name[labels]] begin[:]
call[name[print], parameter[constant[>>]]]
call[name[print], parameter[call[call[name[repr], parameter[name[i]]].rjust, parameter[constant[4]]]]]
call[name[print], parameter[call[call[name[opcode].opname][name[op]].ljust, parameter[constant[20]]]]]
variable[i] assign[=] binary_operation[name[i] + constant[1]]
if compare[name[op] greater_or_equal[>=] name[opcode].HAVE_ARGUMENT] begin[:]
variable[oparg] assign[=] binary_operation[binary_operation[call[name[co_ord], parameter[call[name[code]][name[i]]]] + binary_operation[call[name[co_ord], parameter[call[name[code]][binary_operation[name[i] + constant[1]]]]] * constant[256]]] + name[extended_arg]]
variable[extended_arg] assign[=] constant[0]
variable[i] assign[=] binary_operation[name[i] + constant[2]]
if compare[name[op] equal[==] name[opcode].EXTENDED_ARG] begin[:]
variable[extended_arg] assign[=] binary_operation[name[oparg] * constant[65536]]
call[name[print], parameter[call[call[name[repr], parameter[name[oparg]]].rjust, parameter[constant[5]]]]]
if compare[name[op] in name[opcode].hasconst] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[(] + call[name[repr], parameter[call[name[co].co_consts][name[oparg]]]]] + constant[)]]]]
if compare[call[name[type], parameter[call[name[co].co_consts][name[oparg]]]] equal[==] name[types].CodeType] begin[:]
variable[have_inner] assign[=] call[name[co].co_consts][name[oparg]]
call[name[print], parameter[]]
if compare[name[have_inner] is_not constant[False]] begin[:]
call[name[print_code], parameter[name[have_inner]]] | keyword[def] identifier[print_code] ( identifier[co] , identifier[lasti] =- literal[int] , identifier[level] = literal[int] ):
literal[string]
identifier[code] = identifier[co] . identifier[co_code]
keyword[for] identifier[constant] keyword[in] identifier[co] . identifier[co_consts] :
identifier[print] ( literal[string] * identifier[level] , identifier[end] = literal[string] )
identifier[print] ( literal[string] , identifier[constant] )
identifier[labels] = identifier[findlabels] ( identifier[code] )
identifier[linestarts] = identifier[dict] ( identifier[findlinestarts] ( identifier[co] ))
identifier[n] = identifier[len] ( identifier[code] )
identifier[i] = literal[int]
identifier[extended_arg] = literal[int]
identifier[free] = keyword[None]
keyword[while] identifier[i] < identifier[n] :
identifier[have_inner] = keyword[False]
identifier[c] = identifier[code] [ identifier[i] ]
identifier[op] = identifier[co_ord] ( identifier[c] )
keyword[if] identifier[i] keyword[in] identifier[linestarts] :
keyword[if] identifier[i] > literal[int] :
identifier[print] ()
identifier[print] ( literal[string] * identifier[level] , identifier[end] = literal[string] )
identifier[print] ( literal[string] % identifier[linestarts] [ identifier[i] ], identifier[end] = literal[string] )
keyword[else] :
identifier[print] ( literal[string] * identifier[level] , identifier[end] = literal[string] )
identifier[print] ( literal[string] , identifier[end] = literal[string] )
keyword[if] identifier[i] == identifier[lasti] : identifier[print] ( literal[string] , identifier[end] = literal[string] )
keyword[else] : identifier[print] ( literal[string] , identifier[end] = literal[string] )
keyword[if] identifier[i] keyword[in] identifier[labels] : identifier[print] ( literal[string] , identifier[end] = literal[string] )
keyword[else] : identifier[print] ( literal[string] , identifier[end] = literal[string] )
identifier[print] ( identifier[repr] ( identifier[i] ). identifier[rjust] ( literal[int] ), identifier[end] = literal[string] )
identifier[print] ( identifier[opcode] . identifier[opname] [ identifier[op] ]. identifier[ljust] ( literal[int] ), identifier[end] = literal[string] )
identifier[i] = identifier[i] + literal[int]
keyword[if] identifier[op] >= identifier[opcode] . identifier[HAVE_ARGUMENT] :
identifier[oparg] = identifier[co_ord] ( identifier[code] [ identifier[i] ])+ identifier[co_ord] ( identifier[code] [ identifier[i] + literal[int] ])* literal[int] + identifier[extended_arg]
identifier[extended_arg] = literal[int]
identifier[i] = identifier[i] + literal[int]
keyword[if] identifier[op] == identifier[opcode] . identifier[EXTENDED_ARG] :
identifier[extended_arg] = identifier[oparg] * literal[int]
identifier[print] ( identifier[repr] ( identifier[oparg] ). identifier[rjust] ( literal[int] ), identifier[end] = literal[string] )
keyword[if] identifier[op] keyword[in] identifier[opcode] . identifier[hasconst] :
identifier[print] ( literal[string] + identifier[repr] ( identifier[co] . identifier[co_consts] [ identifier[oparg] ])+ literal[string] , identifier[end] = literal[string] )
keyword[if] identifier[type] ( identifier[co] . identifier[co_consts] [ identifier[oparg] ])== identifier[types] . identifier[CodeType] :
identifier[have_inner] = identifier[co] . identifier[co_consts] [ identifier[oparg] ]
keyword[elif] identifier[op] keyword[in] identifier[opcode] . identifier[hasname] :
identifier[print] ( literal[string] + identifier[co] . identifier[co_names] [ identifier[oparg] ]+ literal[string] , identifier[end] = literal[string] )
keyword[elif] identifier[op] keyword[in] identifier[opcode] . identifier[hasjrel] :
identifier[print] ( literal[string] + identifier[repr] ( identifier[i] + identifier[oparg] )+ literal[string] , identifier[end] = literal[string] )
keyword[elif] identifier[op] keyword[in] identifier[opcode] . identifier[haslocal] :
identifier[print] ( literal[string] + identifier[co] . identifier[co_varnames] [ identifier[oparg] ]+ literal[string] , identifier[end] = literal[string] )
keyword[elif] identifier[op] keyword[in] identifier[opcode] . identifier[hascompare] :
identifier[print] ( literal[string] + identifier[opcode] . identifier[cmp_op] [ identifier[oparg] ]+ literal[string] , identifier[end] = literal[string] )
keyword[elif] identifier[op] keyword[in] identifier[opcode] . identifier[hasfree] :
keyword[if] identifier[free] keyword[is] keyword[None] :
identifier[free] = identifier[co] . identifier[co_cellvars] + identifier[co] . identifier[co_freevars]
identifier[print] ( literal[string] + identifier[free] [ identifier[oparg] ]+ literal[string] , identifier[end] = literal[string] )
identifier[print] ()
keyword[if] identifier[have_inner] keyword[is] keyword[not] keyword[False] :
identifier[print_code] ( identifier[have_inner] , identifier[level] = identifier[level] + literal[int] ) | def print_code(co, lasti=-1, level=0):
"""Disassemble a code object."""
code = co.co_code
for constant in co.co_consts:
print('| |' * level, end=' ')
print('constant:', constant) # depends on [control=['for'], data=['constant']]
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
have_inner = False
c = code[i]
op = co_ord(c)
if i in linestarts:
if i > 0:
print() # depends on [control=['if'], data=[]]
print('| |' * level, end=' ')
print('%3d' % linestarts[i], end=' ') # depends on [control=['if'], data=['i', 'linestarts']]
else:
print('| |' * level, end=' ')
print(' ', end=' ')
if i == lasti:
print('-->', end=' ') # depends on [control=['if'], data=[]]
else:
print(' ', end=' ')
if i in labels:
print('>>', end=' ') # depends on [control=['if'], data=[]]
else:
print(' ', end=' ')
print(repr(i).rjust(4), end=' ')
print(opcode.opname[op].ljust(20), end=' ')
i = i + 1
if op >= opcode.HAVE_ARGUMENT:
oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536 # depends on [control=['if'], data=[]]
print(repr(oparg).rjust(5), end=' ')
if op in opcode.hasconst:
print('(' + repr(co.co_consts[oparg]) + ')', end=' ')
if type(co.co_consts[oparg]) == types.CodeType:
have_inner = co.co_consts[oparg] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif op in opcode.hasname:
print('(' + co.co_names[oparg] + ')', end=' ') # depends on [control=['if'], data=[]]
elif op in opcode.hasjrel:
print('(to ' + repr(i + oparg) + ')', end=' ') # depends on [control=['if'], data=[]]
elif op in opcode.haslocal:
print('(' + co.co_varnames[oparg] + ')', end=' ') # depends on [control=['if'], data=[]]
elif op in opcode.hascompare:
print('(' + opcode.cmp_op[oparg] + ')', end=' ') # depends on [control=['if'], data=[]]
elif op in opcode.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars # depends on [control=['if'], data=['free']]
print('(' + free[oparg] + ')', end=' ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['op']]
print()
if have_inner is not False:
print_code(have_inner, level=level + 1) # depends on [control=['if'], data=['have_inner']] # depends on [control=['while'], data=['i']] |
def create_objects_for_type(self, raw_objects, o_type):
"""Generic function to create objects regarding the o_type
This function create real Alignak objects from the raw data got from the configuration.
:param raw_objects: Raw objects
:type raw_objects: dict
:param o_type: the object type we want to create
:type o_type: object
:return: None
"""
# Ex: the above code do for timeperiods:
# timeperiods = []
# for timeperiodcfg in objects['timeperiod']:
# t = Timeperiod(timeperiodcfg)
# timeperiods.append(t)
# self.timeperiods = Timeperiods(timeperiods)
types_creations = self.__class__.types_creations
(cls, clss, prop, initial_index, _) = types_creations[o_type]
# List to store the created objects
lst = []
try:
logger.info("- creating '%s' objects", o_type)
for obj_cfg in raw_objects[o_type]:
# We create the object
my_object = cls(obj_cfg)
# and append it to the list
lst.append(my_object)
if not lst:
logger.info(" none.")
except KeyError:
logger.info(" no %s objects in the configuration", o_type)
# Create the objects list and set it in our properties
setattr(self, prop, clss(lst, initial_index)) | def function[create_objects_for_type, parameter[self, raw_objects, o_type]]:
constant[Generic function to create objects regarding the o_type
This function create real Alignak objects from the raw data got from the configuration.
:param raw_objects: Raw objects
:type raw_objects: dict
:param o_type: the object type we want to create
:type o_type: object
:return: None
]
variable[types_creations] assign[=] name[self].__class__.types_creations
<ast.Tuple object at 0x7da1b26adc30> assign[=] call[name[types_creations]][name[o_type]]
variable[lst] assign[=] list[[]]
<ast.Try object at 0x7da1b26ad150>
call[name[setattr], parameter[name[self], name[prop], call[name[clss], parameter[name[lst], name[initial_index]]]]] | keyword[def] identifier[create_objects_for_type] ( identifier[self] , identifier[raw_objects] , identifier[o_type] ):
literal[string]
identifier[types_creations] = identifier[self] . identifier[__class__] . identifier[types_creations]
( identifier[cls] , identifier[clss] , identifier[prop] , identifier[initial_index] , identifier[_] )= identifier[types_creations] [ identifier[o_type] ]
identifier[lst] =[]
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] , identifier[o_type] )
keyword[for] identifier[obj_cfg] keyword[in] identifier[raw_objects] [ identifier[o_type] ]:
identifier[my_object] = identifier[cls] ( identifier[obj_cfg] )
identifier[lst] . identifier[append] ( identifier[my_object] )
keyword[if] keyword[not] identifier[lst] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[info] ( literal[string] , identifier[o_type] )
identifier[setattr] ( identifier[self] , identifier[prop] , identifier[clss] ( identifier[lst] , identifier[initial_index] )) | def create_objects_for_type(self, raw_objects, o_type):
"""Generic function to create objects regarding the o_type
This function create real Alignak objects from the raw data got from the configuration.
:param raw_objects: Raw objects
:type raw_objects: dict
:param o_type: the object type we want to create
:type o_type: object
:return: None
"""
# Ex: the above code do for timeperiods:
# timeperiods = []
# for timeperiodcfg in objects['timeperiod']:
# t = Timeperiod(timeperiodcfg)
# timeperiods.append(t)
# self.timeperiods = Timeperiods(timeperiods)
types_creations = self.__class__.types_creations
(cls, clss, prop, initial_index, _) = types_creations[o_type]
# List to store the created objects
lst = []
try:
logger.info("- creating '%s' objects", o_type)
for obj_cfg in raw_objects[o_type]:
# We create the object
my_object = cls(obj_cfg)
# and append it to the list
lst.append(my_object) # depends on [control=['for'], data=['obj_cfg']]
if not lst:
logger.info(' none.') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
logger.info(' no %s objects in the configuration', o_type) # depends on [control=['except'], data=[]]
# Create the objects list and set it in our properties
setattr(self, prop, clss(lst, initial_index)) |
def sqlmany(self, stringname, *args):
"""Wrapper for executing many SQL calls on my connection.
First arg is the name of a query, either a key in the
precompiled JSON or a method name in
``allegedb.alchemy.Alchemist``. Remaining arguments should be
tuples of argument sequences to be passed to the query.
"""
if hasattr(self, 'alchemist'):
return getattr(self.alchemist.many, stringname)(*args)
s = self.strings[stringname]
return self.connection.cursor().executemany(s, args) | def function[sqlmany, parameter[self, stringname]]:
constant[Wrapper for executing many SQL calls on my connection.
First arg is the name of a query, either a key in the
precompiled JSON or a method name in
``allegedb.alchemy.Alchemist``. Remaining arguments should be
tuples of argument sequences to be passed to the query.
]
if call[name[hasattr], parameter[name[self], constant[alchemist]]] begin[:]
return[call[call[name[getattr], parameter[name[self].alchemist.many, name[stringname]]], parameter[<ast.Starred object at 0x7da207f01900>]]]
variable[s] assign[=] call[name[self].strings][name[stringname]]
return[call[call[name[self].connection.cursor, parameter[]].executemany, parameter[name[s], name[args]]]] | keyword[def] identifier[sqlmany] ( identifier[self] , identifier[stringname] ,* identifier[args] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[getattr] ( identifier[self] . identifier[alchemist] . identifier[many] , identifier[stringname] )(* identifier[args] )
identifier[s] = identifier[self] . identifier[strings] [ identifier[stringname] ]
keyword[return] identifier[self] . identifier[connection] . identifier[cursor] (). identifier[executemany] ( identifier[s] , identifier[args] ) | def sqlmany(self, stringname, *args):
"""Wrapper for executing many SQL calls on my connection.
First arg is the name of a query, either a key in the
precompiled JSON or a method name in
``allegedb.alchemy.Alchemist``. Remaining arguments should be
tuples of argument sequences to be passed to the query.
"""
if hasattr(self, 'alchemist'):
return getattr(self.alchemist.many, stringname)(*args) # depends on [control=['if'], data=[]]
s = self.strings[stringname]
return self.connection.cursor().executemany(s, args) |
def dag_run_status(dag_id, execution_date):
"""
Returns a JSON with a dag_run's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
"""
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
info = get_dag_run_state(dag_id, execution_date)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(info) | def function[dag_run_status, parameter[dag_id, execution_date]]:
constant[
Returns a JSON with a dag_run's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
]
<ast.Try object at 0x7da1b05be5c0>
<ast.Try object at 0x7da20c6c6bc0>
return[call[name[jsonify], parameter[name[info]]]] | keyword[def] identifier[dag_run_status] ( identifier[dag_id] , identifier[execution_date] ):
literal[string]
keyword[try] :
identifier[execution_date] = identifier[timezone] . identifier[parse] ( identifier[execution_date] )
keyword[except] identifier[ValueError] :
identifier[error_message] =(
literal[string]
literal[string] . identifier[format] (
identifier[execution_date] ))
identifier[_log] . identifier[info] ( identifier[error_message] )
identifier[response] = identifier[jsonify] ({ literal[string] : identifier[error_message] })
identifier[response] . identifier[status_code] = literal[int]
keyword[return] identifier[response]
keyword[try] :
identifier[info] = identifier[get_dag_run_state] ( identifier[dag_id] , identifier[execution_date] )
keyword[except] identifier[AirflowException] keyword[as] identifier[err] :
identifier[_log] . identifier[info] ( identifier[err] )
identifier[response] = identifier[jsonify] ( identifier[error] = literal[string] . identifier[format] ( identifier[err] ))
identifier[response] . identifier[status_code] = identifier[err] . identifier[status_code]
keyword[return] identifier[response]
keyword[return] identifier[jsonify] ( identifier[info] ) | def dag_run_status(dag_id, execution_date):
"""
Returns a JSON with a dag_run's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
"""
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date) # depends on [control=['try'], data=[]]
except ValueError:
error_message = 'Given execution date, {}, could not be identified as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(execution_date)
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response # depends on [control=['except'], data=[]]
try:
info = get_dag_run_state(dag_id, execution_date) # depends on [control=['try'], data=[]]
except AirflowException as err:
_log.info(err)
response = jsonify(error='{}'.format(err))
response.status_code = err.status_code
return response # depends on [control=['except'], data=['err']]
return jsonify(info) |
def _is_plugin_disabled(plugin):
""" Determines if provided plugin is disabled from running for the
active task.
"""
item = _registered.get(plugin.name)
if not item:
return False
_, props = item
return bool(props.get('disabled')) | def function[_is_plugin_disabled, parameter[plugin]]:
constant[ Determines if provided plugin is disabled from running for the
active task.
]
variable[item] assign[=] call[name[_registered].get, parameter[name[plugin].name]]
if <ast.UnaryOp object at 0x7da1b15b2890> begin[:]
return[constant[False]]
<ast.Tuple object at 0x7da2041d8940> assign[=] name[item]
return[call[name[bool], parameter[call[name[props].get, parameter[constant[disabled]]]]]] | keyword[def] identifier[_is_plugin_disabled] ( identifier[plugin] ):
literal[string]
identifier[item] = identifier[_registered] . identifier[get] ( identifier[plugin] . identifier[name] )
keyword[if] keyword[not] identifier[item] :
keyword[return] keyword[False]
identifier[_] , identifier[props] = identifier[item]
keyword[return] identifier[bool] ( identifier[props] . identifier[get] ( literal[string] )) | def _is_plugin_disabled(plugin):
""" Determines if provided plugin is disabled from running for the
active task.
"""
item = _registered.get(plugin.name)
if not item:
return False # depends on [control=['if'], data=[]]
(_, props) = item
return bool(props.get('disabled')) |
def _is_master_running(self):
'''
Perform a lightweight check to see if the master daemon is running
Note, this will return an invalid success if the master crashed or was
not shut down cleanly.
'''
# Windows doesn't have IPC. Assume the master is running.
# At worse, it will error 500.
if salt.utils.platform.is_windows():
return True
if self.opts['transport'] == 'tcp':
ipc_file = 'publish_pull.ipc'
else:
ipc_file = 'workers.ipc'
return os.path.exists(os.path.join(
self.opts['sock_dir'],
ipc_file)) | def function[_is_master_running, parameter[self]]:
constant[
Perform a lightweight check to see if the master daemon is running
Note, this will return an invalid success if the master crashed or was
not shut down cleanly.
]
if call[name[salt].utils.platform.is_windows, parameter[]] begin[:]
return[constant[True]]
if compare[call[name[self].opts][constant[transport]] equal[==] constant[tcp]] begin[:]
variable[ipc_file] assign[=] constant[publish_pull.ipc]
return[call[name[os].path.exists, parameter[call[name[os].path.join, parameter[call[name[self].opts][constant[sock_dir]], name[ipc_file]]]]]] | keyword[def] identifier[_is_master_running] ( identifier[self] ):
literal[string]
keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] ():
keyword[return] keyword[True]
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]== literal[string] :
identifier[ipc_file] = literal[string]
keyword[else] :
identifier[ipc_file] = literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[opts] [ literal[string] ],
identifier[ipc_file] )) | def _is_master_running(self):
"""
Perform a lightweight check to see if the master daemon is running
Note, this will return an invalid success if the master crashed or was
not shut down cleanly.
"""
# Windows doesn't have IPC. Assume the master is running.
# At worse, it will error 500.
if salt.utils.platform.is_windows():
return True # depends on [control=['if'], data=[]]
if self.opts['transport'] == 'tcp':
ipc_file = 'publish_pull.ipc' # depends on [control=['if'], data=[]]
else:
ipc_file = 'workers.ipc'
return os.path.exists(os.path.join(self.opts['sock_dir'], ipc_file)) |
def delete(self):
""" Delete this Dagobah instance from the Backend. """
logger.debug('Deleting Dagobah instance with ID {0}'.format(self.dagobah_id))
self.jobs = []
self.created_jobs = 0
self.backend.delete_dagobah(self.dagobah_id) | def function[delete, parameter[self]]:
constant[ Delete this Dagobah instance from the Backend. ]
call[name[logger].debug, parameter[call[constant[Deleting Dagobah instance with ID {0}].format, parameter[name[self].dagobah_id]]]]
name[self].jobs assign[=] list[[]]
name[self].created_jobs assign[=] constant[0]
call[name[self].backend.delete_dagobah, parameter[name[self].dagobah_id]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[dagobah_id] ))
identifier[self] . identifier[jobs] =[]
identifier[self] . identifier[created_jobs] = literal[int]
identifier[self] . identifier[backend] . identifier[delete_dagobah] ( identifier[self] . identifier[dagobah_id] ) | def delete(self):
""" Delete this Dagobah instance from the Backend. """
logger.debug('Deleting Dagobah instance with ID {0}'.format(self.dagobah_id))
self.jobs = []
self.created_jobs = 0
self.backend.delete_dagobah(self.dagobah_id) |
def p_lpartselect_lpointer_plus(self, p):
'lpartselect : pointer LBRACKET expression PLUSCOLON expression RBRACKET'
p[0] = Partselect(p[1], p[3], Plus(p[3], p[5]), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | def function[p_lpartselect_lpointer_plus, parameter[self, p]]:
constant[lpartselect : pointer LBRACKET expression PLUSCOLON expression RBRACKET]
call[name[p]][constant[0]] assign[=] call[name[Partselect], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]], call[name[Plus], parameter[call[name[p]][constant[3]], call[name[p]][constant[5]]]]]]
call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]] | keyword[def] identifier[p_lpartselect_lpointer_plus] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[Partselect] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[Plus] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_lpartselect_lpointer_plus(self, p):
"""lpartselect : pointer LBRACKET expression PLUSCOLON expression RBRACKET"""
p[0] = Partselect(p[1], p[3], Plus(p[3], p[5]), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def relate_to(self, part, reltype):
"""
Return rId key of relationship to *part*, from the existing
relationship if there is one, otherwise a newly created one.
"""
rel = self.rels.get_or_add(reltype, part)
return rel.rId | def function[relate_to, parameter[self, part, reltype]]:
constant[
Return rId key of relationship to *part*, from the existing
relationship if there is one, otherwise a newly created one.
]
variable[rel] assign[=] call[name[self].rels.get_or_add, parameter[name[reltype], name[part]]]
return[name[rel].rId] | keyword[def] identifier[relate_to] ( identifier[self] , identifier[part] , identifier[reltype] ):
literal[string]
identifier[rel] = identifier[self] . identifier[rels] . identifier[get_or_add] ( identifier[reltype] , identifier[part] )
keyword[return] identifier[rel] . identifier[rId] | def relate_to(self, part, reltype):
"""
Return rId key of relationship to *part*, from the existing
relationship if there is one, otherwise a newly created one.
"""
rel = self.rels.get_or_add(reltype, part)
return rel.rId |
def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
'', b[0], b[1], self.name, cache_key=None
) for b in _getBranchHeads(self.repo).items()
] | def function[availableBranches, parameter[self]]:
constant[ return a list of GithubComponentVersion objects for the tip of each branch
]
return[<ast.ListComp object at 0x7da1b00895a0>] | keyword[def] identifier[availableBranches] ( identifier[self] ):
literal[string]
keyword[return] [
identifier[GithubComponentVersion] (
literal[string] , identifier[b] [ literal[int] ], identifier[b] [ literal[int] ], identifier[self] . identifier[name] , identifier[cache_key] = keyword[None]
) keyword[for] identifier[b] keyword[in] identifier[_getBranchHeads] ( identifier[self] . identifier[repo] ). identifier[items] ()
] | def availableBranches(self):
""" return a list of GithubComponentVersion objects for the tip of each branch
"""
return [GithubComponentVersion('', b[0], b[1], self.name, cache_key=None) for b in _getBranchHeads(self.repo).items()] |
def constant_fold(code, silent=True, ignore_errors=True):
"""Constant-folds simple expressions like 2 3 + to 5.
Args:
code: Code in non-native types.
silent: Flag that controls whether to print optimizations made.
ignore_errors: Whether to raise exceptions on found errors.
"""
# Loop until we haven't done any optimizations. E.g., "2 3 + 5 *" will be
# optimized to "5 5 *" and in the next iteration to 25. Yes, this is
# extremely slow, big-O wise. We'll fix that some other time. (TODO)
arithmetic = list(map(instructions.lookup, [
instructions.add,
instructions.bitwise_and,
instructions.bitwise_or,
instructions.bitwise_xor,
instructions.div,
instructions.equal,
instructions.greater,
instructions.less,
instructions.mod,
instructions.mul,
instructions.sub,
]))
divzero = map(instructions.lookup, [
instructions.div,
instructions.mod,
])
lookup = instructions.lookup
def isfunction(op):
try:
instructions.lookup(op)
return True
except KeyError:
return False
def isconstant(op):
return op is None or interpreter.isconstant(op, quoted=True) or not isfunction(op)
keep_running = True
while keep_running:
keep_running = False
# Find two consecutive numbes and an arithmetic operator
for i, a in enumerate(code):
b = code[i+1] if i+1 < len(code) else None
c = code[i+2] if i+2 < len(code) else None
# Constant fold arithmetic operations (TODO: Move to check-func)
if interpreter.isnumber(a, b) and c in arithmetic:
# Although we can detect division by zero at compile time, we
# don't report it here, because the surrounding system doesn't
# handle that very well. So just leave it for now. (NOTE: If
# we had an "error" instruction, we could actually transform
# the expression to an error, or exit instruction perhaps)
if b==0 and c in divzero:
if ignore_errors:
continue
else:
raise errors.CompileError(ZeroDivisionError(
"Division by zero"))
# Calculate result by running on a machine (lambda vm: ... is
# embedded pushes, see compiler)
result = interpreter.Machine([lambda vm: vm.push(a), lambda vm:
vm.push(b), instructions.lookup(c)]).run().top
del code[i:i+3]
code.insert(i, result)
if not silent:
print("Optimizer: Constant-folded %s %s %s to %s" % (a,b,c,result))
keep_running = True
break
# Translate <constant> dup to <constant> <constant>
if isconstant(a) and b == lookup(instructions.dup):
code[i+1] = a
if not silent:
print("Optimizer: Translated %s %s to %s %s" % (a,b,a,a))
keep_running = True
break
# Dead code removal: <constant> drop
if isconstant(a) and b == lookup(instructions.drop):
del code[i:i+2]
if not silent:
print("Optimizer: Removed dead code %s %s" % (a,b))
keep_running = True
break
if a == lookup(instructions.nop):
del code[i]
if not silent:
print("Optimizer: Removed dead code %s" % a)
keep_running = True
break
# Dead code removal: <integer> cast_int
if isinstance(a, int) and b == lookup(instructions.cast_int):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <float> cast_float
if isinstance(a, float) and b == lookup(instructions.cast_float):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <string> cast_str
if isinstance(a, str) and b == lookup(instructions.cast_str):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <boolean> cast_bool
if isinstance(a, bool) and b == lookup(instructions.cast_bool):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# <c1> <c2> swap -> <c2> <c1>
if isconstant(a) and isconstant(b) and c == lookup(instructions.swap):
del code[i:i+3]
code = code[:i] + [b, a] + code[i:]
if not silent:
print("Optimizer: Translated %s %s %s to %s %s" %
(a,b,c,b,a))
keep_running = True
break
# a b over -> a b a
if isconstant(a) and isconstant(b) and c == lookup(instructions.over):
code[i+2] = a
if not silent:
print("Optimizer: Translated %s %s %s to %s %s %s" %
(a,b,c,a,b,a))
keep_running = True
break
# "123" cast_int -> 123
if interpreter.isstring(a) and b == lookup(instructions.cast_int):
try:
number = int(a)
del code[i:i+2]
code.insert(i, number)
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b,
number))
keep_running = True
break
except ValueError:
pass
if isconstant(a) and b == lookup(instructions.cast_str):
del code[i:i+2]
code.insert(i, str(a)) # TODO: Try-except here
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, str(a)))
keep_running = True
break
if isconstant(a) and b == lookup(instructions.cast_bool):
del code[i:i+2]
code.insert(i, bool(a)) # TODO: Try-except here
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, bool(a)))
keep_running = True
break
if isconstant(a) and b == lookup(instructions.cast_float):
try:
v = float(a)
del code[i:i+2]
code.insert(i, v)
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, v))
keep_running = True
break
except ValueError:
pass
return code | def function[constant_fold, parameter[code, silent, ignore_errors]]:
constant[Constant-folds simple expressions like 2 3 + to 5.
Args:
code: Code in non-native types.
silent: Flag that controls whether to print optimizations made.
ignore_errors: Whether to raise exceptions on found errors.
]
variable[arithmetic] assign[=] call[name[list], parameter[call[name[map], parameter[name[instructions].lookup, list[[<ast.Attribute object at 0x7da1b25264a0>, <ast.Attribute object at 0x7da1b25263e0>, <ast.Attribute object at 0x7da1b2524b80>, <ast.Attribute object at 0x7da1b2524a90>, <ast.Attribute object at 0x7da1b2525b70>, <ast.Attribute object at 0x7da1b2527610>, <ast.Attribute object at 0x7da1b2527a60>, <ast.Attribute object at 0x7da1b25260e0>, <ast.Attribute object at 0x7da1b2526740>, <ast.Attribute object at 0x7da1b2526ef0>, <ast.Attribute object at 0x7da1b2524fa0>]]]]]]
variable[divzero] assign[=] call[name[map], parameter[name[instructions].lookup, list[[<ast.Attribute object at 0x7da1b2524370>, <ast.Attribute object at 0x7da1b2527820>]]]]
variable[lookup] assign[=] name[instructions].lookup
def function[isfunction, parameter[op]]:
<ast.Try object at 0x7da1b25273a0>
def function[isconstant, parameter[op]]:
return[<ast.BoolOp object at 0x7da1b2525450>]
variable[keep_running] assign[=] constant[True]
while name[keep_running] begin[:]
variable[keep_running] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b2525180>, <ast.Name object at 0x7da1b2527310>]]] in starred[call[name[enumerate], parameter[name[code]]]] begin[:]
variable[b] assign[=] <ast.IfExp object at 0x7da1b2525030>
variable[c] assign[=] <ast.IfExp object at 0x7da1b2524490>
if <ast.BoolOp object at 0x7da1b2525000> begin[:]
if <ast.BoolOp object at 0x7da1b2527430> begin[:]
if name[ignore_errors] begin[:]
continue
variable[result] assign[=] call[call[name[interpreter].Machine, parameter[list[[<ast.Lambda object at 0x7da1b25274f0>, <ast.Lambda object at 0x7da1b25279a0>, <ast.Call object at 0x7da1b2526800>]]]].run, parameter[]].top
<ast.Delete object at 0x7da1b25255d0>
call[name[code].insert, parameter[name[i], name[result]]]
if <ast.UnaryOp object at 0x7da1b2525090> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Constant-folded %s %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b25261d0>, <ast.Name object at 0x7da1b2524820>, <ast.Name object at 0x7da1b2526e30>, <ast.Name object at 0x7da1b2527d90>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da1b25246d0> begin[:]
call[name[code]][binary_operation[name[i] + constant[1]]] assign[=] name[a]
if <ast.UnaryOp object at 0x7da1b2524f10> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b25243a0>, <ast.Name object at 0x7da1b2526170>, <ast.Name object at 0x7da1b25257e0>, <ast.Name object at 0x7da1b2526200>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da1b25277c0> begin[:]
<ast.Delete object at 0x7da1b2526650>
if <ast.UnaryOp object at 0x7da1b2525360> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Removed dead code %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2524790>, <ast.Name object at 0x7da1b2525600>]]]]]
variable[keep_running] assign[=] constant[True]
break
if compare[name[a] equal[==] call[name[lookup], parameter[name[instructions].nop]]] begin[:]
<ast.Delete object at 0x7da1b25252a0>
if <ast.UnaryOp object at 0x7da1b2524f40> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Removed dead code %s] <ast.Mod object at 0x7da2590d6920> name[a]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c991cf0> begin[:]
<ast.Delete object at 0x7da20c992320>
if <ast.UnaryOp object at 0x7da20c9921d0> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9926b0>, <ast.Name object at 0x7da20c990f40>, <ast.Name object at 0x7da20c9915a0>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c993f70> begin[:]
<ast.Delete object at 0x7da20c990730>
if <ast.UnaryOp object at 0x7da20c991120> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c990160>, <ast.Name object at 0x7da20c991960>, <ast.Name object at 0x7da20c991ba0>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c991330> begin[:]
<ast.Delete object at 0x7da20c992050>
if <ast.UnaryOp object at 0x7da20c9934c0> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c991a50>, <ast.Name object at 0x7da20c992a70>, <ast.Name object at 0x7da20c990520>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c9930a0> begin[:]
<ast.Delete object at 0x7da20c9936d0>
if <ast.UnaryOp object at 0x7da20c993ca0> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c991fc0>, <ast.Name object at 0x7da20c9939d0>, <ast.Name object at 0x7da20c993e80>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c990c70> begin[:]
<ast.Delete object at 0x7da20c990d60>
variable[code] assign[=] binary_operation[binary_operation[call[name[code]][<ast.Slice object at 0x7da20c991ea0>] + list[[<ast.Name object at 0x7da20c990e80>, <ast.Name object at 0x7da20c9932e0>]]] + call[name[code]][<ast.Slice object at 0x7da20c992bc0>]]
if <ast.UnaryOp object at 0x7da20c992770> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s %s to %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c990d30>, <ast.Name object at 0x7da20c993340>, <ast.Name object at 0x7da20c9932b0>, <ast.Name object at 0x7da20c990df0>, <ast.Name object at 0x7da20c990a60>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c991270> begin[:]
call[name[code]][binary_operation[name[i] + constant[2]]] assign[=] name[a]
if <ast.UnaryOp object at 0x7da20c6c7670> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s %s to %s %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c47f0>, <ast.Name object at 0x7da20c6c6710>, <ast.Name object at 0x7da20c6c4100>, <ast.Name object at 0x7da20c6c5330>, <ast.Name object at 0x7da20c6c7370>, <ast.Name object at 0x7da20c6c6290>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c6c4040> begin[:]
<ast.Try object at 0x7da20c6c64a0>
if <ast.BoolOp object at 0x7da20c6c55a0> begin[:]
<ast.Delete object at 0x7da20c6c5b10>
call[name[code].insert, parameter[name[i], call[name[str], parameter[name[a]]]]]
if <ast.UnaryOp object at 0x7da20c6c5090> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c70d0>, <ast.Name object at 0x7da20c6c7910>, <ast.Call object at 0x7da20c6c4730>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c6c43d0> begin[:]
<ast.Delete object at 0x7da20c6c72e0>
call[name[code].insert, parameter[name[i], call[name[bool], parameter[name[a]]]]]
if <ast.UnaryOp object at 0x7da20c6c7490> begin[:]
call[name[print], parameter[binary_operation[constant[Optimizer: Translated %s %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c6110>, <ast.Name object at 0x7da20c6c4460>, <ast.Call object at 0x7da20c6c53f0>]]]]]
variable[keep_running] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da20c6c5720> begin[:]
<ast.Try object at 0x7da20c6c7e20>
return[name[code]] | keyword[def] identifier[constant_fold] ( identifier[code] , identifier[silent] = keyword[True] , identifier[ignore_errors] = keyword[True] ):
literal[string]
identifier[arithmetic] = identifier[list] ( identifier[map] ( identifier[instructions] . identifier[lookup] ,[
identifier[instructions] . identifier[add] ,
identifier[instructions] . identifier[bitwise_and] ,
identifier[instructions] . identifier[bitwise_or] ,
identifier[instructions] . identifier[bitwise_xor] ,
identifier[instructions] . identifier[div] ,
identifier[instructions] . identifier[equal] ,
identifier[instructions] . identifier[greater] ,
identifier[instructions] . identifier[less] ,
identifier[instructions] . identifier[mod] ,
identifier[instructions] . identifier[mul] ,
identifier[instructions] . identifier[sub] ,
]))
identifier[divzero] = identifier[map] ( identifier[instructions] . identifier[lookup] ,[
identifier[instructions] . identifier[div] ,
identifier[instructions] . identifier[mod] ,
])
identifier[lookup] = identifier[instructions] . identifier[lookup]
keyword[def] identifier[isfunction] ( identifier[op] ):
keyword[try] :
identifier[instructions] . identifier[lookup] ( identifier[op] )
keyword[return] keyword[True]
keyword[except] identifier[KeyError] :
keyword[return] keyword[False]
keyword[def] identifier[isconstant] ( identifier[op] ):
keyword[return] identifier[op] keyword[is] keyword[None] keyword[or] identifier[interpreter] . identifier[isconstant] ( identifier[op] , identifier[quoted] = keyword[True] ) keyword[or] keyword[not] identifier[isfunction] ( identifier[op] )
identifier[keep_running] = keyword[True]
keyword[while] identifier[keep_running] :
identifier[keep_running] = keyword[False]
keyword[for] identifier[i] , identifier[a] keyword[in] identifier[enumerate] ( identifier[code] ):
identifier[b] = identifier[code] [ identifier[i] + literal[int] ] keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[code] ) keyword[else] keyword[None]
identifier[c] = identifier[code] [ identifier[i] + literal[int] ] keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[code] ) keyword[else] keyword[None]
keyword[if] identifier[interpreter] . identifier[isnumber] ( identifier[a] , identifier[b] ) keyword[and] identifier[c] keyword[in] identifier[arithmetic] :
keyword[if] identifier[b] == literal[int] keyword[and] identifier[c] keyword[in] identifier[divzero] :
keyword[if] identifier[ignore_errors] :
keyword[continue]
keyword[else] :
keyword[raise] identifier[errors] . identifier[CompileError] ( identifier[ZeroDivisionError] (
literal[string] ))
identifier[result] = identifier[interpreter] . identifier[Machine] ([ keyword[lambda] identifier[vm] : identifier[vm] . identifier[push] ( identifier[a] ), keyword[lambda] identifier[vm] :
identifier[vm] . identifier[push] ( identifier[b] ), identifier[instructions] . identifier[lookup] ( identifier[c] )]). identifier[run] (). identifier[top]
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] . identifier[insert] ( identifier[i] , identifier[result] )
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[c] , identifier[result] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[dup] ):
identifier[code] [ identifier[i] + literal[int] ]= identifier[a]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[a] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[drop] ):
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[a] == identifier[lookup] ( identifier[instructions] . identifier[nop] ):
keyword[del] identifier[code] [ identifier[i] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] % identifier[a] )
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isinstance] ( identifier[a] , identifier[int] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_int] ):
keyword[del] identifier[code] [ identifier[i] + literal[int] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isinstance] ( identifier[a] , identifier[float] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_float] ):
keyword[del] identifier[code] [ identifier[i] + literal[int] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isinstance] ( identifier[a] , identifier[str] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_str] ):
keyword[del] identifier[code] [ identifier[i] + literal[int] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isinstance] ( identifier[a] , identifier[bool] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_bool] ):
keyword[del] identifier[code] [ identifier[i] + literal[int] ]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[isconstant] ( identifier[b] ) keyword[and] identifier[c] == identifier[lookup] ( identifier[instructions] . identifier[swap] ):
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] = identifier[code] [: identifier[i] ]+[ identifier[b] , identifier[a] ]+ identifier[code] [ identifier[i] :]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %
( identifier[a] , identifier[b] , identifier[c] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[isconstant] ( identifier[b] ) keyword[and] identifier[c] == identifier[lookup] ( identifier[instructions] . identifier[over] ):
identifier[code] [ identifier[i] + literal[int] ]= identifier[a]
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %
( identifier[a] , identifier[b] , identifier[c] , identifier[a] , identifier[b] , identifier[a] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[interpreter] . identifier[isstring] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_int] ):
keyword[try] :
identifier[number] = identifier[int] ( identifier[a] )
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] . identifier[insert] ( identifier[i] , identifier[number] )
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] ,
identifier[number] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_str] ):
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] . identifier[insert] ( identifier[i] , identifier[str] ( identifier[a] ))
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[str] ( identifier[a] )))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_bool] ):
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] . identifier[insert] ( identifier[i] , identifier[bool] ( identifier[a] ))
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[bool] ( identifier[a] )))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[if] identifier[isconstant] ( identifier[a] ) keyword[and] identifier[b] == identifier[lookup] ( identifier[instructions] . identifier[cast_float] ):
keyword[try] :
identifier[v] = identifier[float] ( identifier[a] )
keyword[del] identifier[code] [ identifier[i] : identifier[i] + literal[int] ]
identifier[code] . identifier[insert] ( identifier[i] , identifier[v] )
keyword[if] keyword[not] identifier[silent] :
identifier[print] ( literal[string] %( identifier[a] , identifier[b] , identifier[v] ))
identifier[keep_running] = keyword[True]
keyword[break]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[code] | def constant_fold(code, silent=True, ignore_errors=True):
"""Constant-folds simple expressions like 2 3 + to 5.
Args:
code: Code in non-native types.
silent: Flag that controls whether to print optimizations made.
ignore_errors: Whether to raise exceptions on found errors.
"""
# Loop until we haven't done any optimizations. E.g., "2 3 + 5 *" will be
# optimized to "5 5 *" and in the next iteration to 25. Yes, this is
# extremely slow, big-O wise. We'll fix that some other time. (TODO)
arithmetic = list(map(instructions.lookup, [instructions.add, instructions.bitwise_and, instructions.bitwise_or, instructions.bitwise_xor, instructions.div, instructions.equal, instructions.greater, instructions.less, instructions.mod, instructions.mul, instructions.sub]))
divzero = map(instructions.lookup, [instructions.div, instructions.mod])
lookup = instructions.lookup
def isfunction(op):
try:
instructions.lookup(op)
return True # depends on [control=['try'], data=[]]
except KeyError:
return False # depends on [control=['except'], data=[]]
def isconstant(op):
return op is None or interpreter.isconstant(op, quoted=True) or (not isfunction(op))
keep_running = True
while keep_running:
keep_running = False
# Find two consecutive numbes and an arithmetic operator
for (i, a) in enumerate(code):
b = code[i + 1] if i + 1 < len(code) else None
c = code[i + 2] if i + 2 < len(code) else None
# Constant fold arithmetic operations (TODO: Move to check-func)
if interpreter.isnumber(a, b) and c in arithmetic:
# Although we can detect division by zero at compile time, we
# don't report it here, because the surrounding system doesn't
# handle that very well. So just leave it for now. (NOTE: If
# we had an "error" instruction, we could actually transform
# the expression to an error, or exit instruction perhaps)
if b == 0 and c in divzero:
if ignore_errors:
continue # depends on [control=['if'], data=[]]
else:
raise errors.CompileError(ZeroDivisionError('Division by zero')) # depends on [control=['if'], data=[]]
# Calculate result by running on a machine (lambda vm: ... is
# embedded pushes, see compiler)
result = interpreter.Machine([lambda vm: vm.push(a), lambda vm: vm.push(b), instructions.lookup(c)]).run().top
del code[i:i + 3]
code.insert(i, result)
if not silent:
print('Optimizer: Constant-folded %s %s %s to %s' % (a, b, c, result)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# Translate <constant> dup to <constant> <constant>
if isconstant(a) and b == lookup(instructions.dup):
code[i + 1] = a
if not silent:
print('Optimizer: Translated %s %s to %s %s' % (a, b, a, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# Dead code removal: <constant> drop
if isconstant(a) and b == lookup(instructions.drop):
del code[i:i + 2]
if not silent:
print('Optimizer: Removed dead code %s %s' % (a, b)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
if a == lookup(instructions.nop):
del code[i]
if not silent:
print('Optimizer: Removed dead code %s' % a) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=['a']]
# Dead code removal: <integer> cast_int
if isinstance(a, int) and b == lookup(instructions.cast_int):
del code[i + 1]
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# Dead code removal: <float> cast_float
if isinstance(a, float) and b == lookup(instructions.cast_float):
del code[i + 1]
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# Dead code removal: <string> cast_str
if isinstance(a, str) and b == lookup(instructions.cast_str):
del code[i + 1]
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# Dead code removal: <boolean> cast_bool
if isinstance(a, bool) and b == lookup(instructions.cast_bool):
del code[i + 1]
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# <c1> <c2> swap -> <c2> <c1>
if isconstant(a) and isconstant(b) and (c == lookup(instructions.swap)):
del code[i:i + 3]
code = code[:i] + [b, a] + code[i:]
if not silent:
print('Optimizer: Translated %s %s %s to %s %s' % (a, b, c, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# a b over -> a b a
if isconstant(a) and isconstant(b) and (c == lookup(instructions.over)):
code[i + 2] = a
if not silent:
print('Optimizer: Translated %s %s %s to %s %s %s' % (a, b, c, a, b, a)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
# "123" cast_int -> 123
if interpreter.isstring(a) and b == lookup(instructions.cast_int):
try:
number = int(a)
del code[i:i + 2]
code.insert(i, number)
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, number)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if isconstant(a) and b == lookup(instructions.cast_str):
del code[i:i + 2]
code.insert(i, str(a)) # TODO: Try-except here
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, str(a))) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
if isconstant(a) and b == lookup(instructions.cast_bool):
del code[i:i + 2]
code.insert(i, bool(a)) # TODO: Try-except here
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, bool(a))) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['if'], data=[]]
if isconstant(a) and b == lookup(instructions.cast_float):
try:
v = float(a)
del code[i:i + 2]
code.insert(i, v)
if not silent:
print('Optimizer: Translated %s %s to %s' % (a, b, v)) # depends on [control=['if'], data=[]]
keep_running = True
break # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]]
return code |
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
if not continue_prompt:
self.shell.displayhook.prompt_count -= 1
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered | def function[prefilter_line, parameter[self, line, continue_prompt]]:
constant[Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
]
name[self].shell._last_input_line assign[=] name[line]
if <ast.UnaryOp object at 0x7da2041daec0> begin[:]
return[constant[]]
if <ast.BoolOp object at 0x7da2041dbbe0> begin[:]
variable[line] assign[=] call[name[self].transform_line, parameter[name[line], name[continue_prompt]]]
variable[line_info] assign[=] call[name[LineInfo], parameter[name[line], name[continue_prompt]]]
variable[stripped] assign[=] call[name[line].strip, parameter[]]
variable[normal_handler] assign[=] call[name[self].get_handler_by_name, parameter[constant[normal]]]
if <ast.UnaryOp object at 0x7da2041d96f0> begin[:]
if <ast.UnaryOp object at 0x7da2041d96c0> begin[:]
<ast.AugAssign object at 0x7da2041d8c70>
return[call[name[normal_handler].handle, parameter[name[line_info]]]]
if <ast.BoolOp object at 0x7da1b021cb80> begin[:]
return[call[name[normal_handler].handle, parameter[name[line_info]]]]
variable[prefiltered] assign[=] call[name[self].prefilter_line_info, parameter[name[line_info]]]
return[name[prefiltered]] | keyword[def] identifier[prefilter_line] ( identifier[self] , identifier[line] , identifier[continue_prompt] = keyword[False] ):
literal[string]
identifier[self] . identifier[shell] . identifier[_last_input_line] = identifier[line]
keyword[if] keyword[not] identifier[line] :
keyword[return] literal[string]
keyword[if] keyword[not] identifier[continue_prompt] keyword[or] ( identifier[continue_prompt] keyword[and] identifier[self] . identifier[multi_line_specials] ):
identifier[line] = identifier[self] . identifier[transform_line] ( identifier[line] , identifier[continue_prompt] )
identifier[line_info] = identifier[LineInfo] ( identifier[line] , identifier[continue_prompt] )
identifier[stripped] = identifier[line] . identifier[strip] ()
identifier[normal_handler] = identifier[self] . identifier[get_handler_by_name] ( literal[string] )
keyword[if] keyword[not] identifier[stripped] :
keyword[if] keyword[not] identifier[continue_prompt] :
identifier[self] . identifier[shell] . identifier[displayhook] . identifier[prompt_count] -= literal[int]
keyword[return] identifier[normal_handler] . identifier[handle] ( identifier[line_info] )
keyword[if] identifier[continue_prompt] keyword[and] keyword[not] identifier[self] . identifier[multi_line_specials] :
keyword[return] identifier[normal_handler] . identifier[handle] ( identifier[line_info] )
identifier[prefiltered] = identifier[self] . identifier[prefilter_line_info] ( identifier[line_info] )
keyword[return] identifier[prefiltered] | def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return '' # depends on [control=['if'], data=[]]
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt) # depends on [control=['if'], data=[]]
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
if not continue_prompt:
self.shell.displayhook.prompt_count -= 1 # depends on [control=['if'], data=[]]
return normal_handler.handle(line_info) # depends on [control=['if'], data=[]]
# special handlers are only allowed for single line statements
if continue_prompt and (not self.multi_line_specials):
return normal_handler.handle(line_info) # depends on [control=['if'], data=[]]
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered |
def get_access_token(self, code, client_id, client_secret):
'''
Exchange a temporary code for an access token allowing access to a user's account
See https://developer.wunderlist.com/documentation/concepts/authorization for more info
'''
headers = {
'Content-Type' : 'application/json'
}
data = {
'client_id' : client_id,
'client_secret' : client_secret,
'code' : code,
}
str_data = json.dumps(data)
response = requests.request(method='POST', url=ACCESS_TOKEN_URL, headers=headers, data=str_data)
status_code = response.status_code
if status_code != 200:
raise ValueError("{} -- {}".format(status_code, response.json()))
return body['access_token'] | def function[get_access_token, parameter[self, code, client_id, client_secret]]:
constant[
Exchange a temporary code for an access token allowing access to a user's account
See https://developer.wunderlist.com/documentation/concepts/authorization for more info
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20e961ab0>], [<ast.Constant object at 0x7da20e963b80>]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e962020>, <ast.Constant object at 0x7da20e960bb0>, <ast.Constant object at 0x7da20e962d70>], [<ast.Name object at 0x7da20e960610>, <ast.Name object at 0x7da20e963070>, <ast.Name object at 0x7da20e962f80>]]
variable[str_data] assign[=] call[name[json].dumps, parameter[name[data]]]
variable[response] assign[=] call[name[requests].request, parameter[]]
variable[status_code] assign[=] name[response].status_code
if compare[name[status_code] not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da204621cc0>
return[call[name[body]][constant[access_token]]] | keyword[def] identifier[get_access_token] ( identifier[self] , identifier[code] , identifier[client_id] , identifier[client_secret] ):
literal[string]
identifier[headers] ={
literal[string] : literal[string]
}
identifier[data] ={
literal[string] : identifier[client_id] ,
literal[string] : identifier[client_secret] ,
literal[string] : identifier[code] ,
}
identifier[str_data] = identifier[json] . identifier[dumps] ( identifier[data] )
identifier[response] = identifier[requests] . identifier[request] ( identifier[method] = literal[string] , identifier[url] = identifier[ACCESS_TOKEN_URL] , identifier[headers] = identifier[headers] , identifier[data] = identifier[str_data] )
identifier[status_code] = identifier[response] . identifier[status_code]
keyword[if] identifier[status_code] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[status_code] , identifier[response] . identifier[json] ()))
keyword[return] identifier[body] [ literal[string] ] | def get_access_token(self, code, client_id, client_secret):
"""
Exchange a temporary code for an access token allowing access to a user's account
See https://developer.wunderlist.com/documentation/concepts/authorization for more info
"""
headers = {'Content-Type': 'application/json'}
data = {'client_id': client_id, 'client_secret': client_secret, 'code': code}
str_data = json.dumps(data)
response = requests.request(method='POST', url=ACCESS_TOKEN_URL, headers=headers, data=str_data)
status_code = response.status_code
if status_code != 200:
raise ValueError('{} -- {}'.format(status_code, response.json())) # depends on [control=['if'], data=['status_code']]
return body['access_token'] |
def list_by_ids(self, ids):
"""
If you wish to retrieve a list of messages from this queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
"""
ids = utils.coerce_to_list(ids)
uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids))
# The API is not consistent in how it returns message lists, so this
# workaround is needed.
curr_prkey = self.plural_response_key
self.plural_response_key = ""
# BROKEN: API returns a list, not a dict.
ret = self._list(uri)
self.plural_response_key = curr_prkey
return ret | def function[list_by_ids, parameter[self, ids]]:
constant[
If you wish to retrieve a list of messages from this queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
]
variable[ids] assign[=] call[name[utils].coerce_to_list, parameter[name[ids]]]
variable[uri] assign[=] binary_operation[constant[/%s?ids=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b05586d0>, <ast.Call object at 0x7da1b0559300>]]]
variable[curr_prkey] assign[=] name[self].plural_response_key
name[self].plural_response_key assign[=] constant[]
variable[ret] assign[=] call[name[self]._list, parameter[name[uri]]]
name[self].plural_response_key assign[=] name[curr_prkey]
return[name[ret]] | keyword[def] identifier[list_by_ids] ( identifier[self] , identifier[ids] ):
literal[string]
identifier[ids] = identifier[utils] . identifier[coerce_to_list] ( identifier[ids] )
identifier[uri] = literal[string] %( identifier[self] . identifier[uri_base] , literal[string] . identifier[join] ( identifier[ids] ))
identifier[curr_prkey] = identifier[self] . identifier[plural_response_key]
identifier[self] . identifier[plural_response_key] = literal[string]
identifier[ret] = identifier[self] . identifier[_list] ( identifier[uri] )
identifier[self] . identifier[plural_response_key] = identifier[curr_prkey]
keyword[return] identifier[ret] | def list_by_ids(self, ids):
"""
If you wish to retrieve a list of messages from this queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
"""
ids = utils.coerce_to_list(ids)
uri = '/%s?ids=%s' % (self.uri_base, ','.join(ids))
# The API is not consistent in how it returns message lists, so this
# workaround is needed.
curr_prkey = self.plural_response_key
self.plural_response_key = ''
# BROKEN: API returns a list, not a dict.
ret = self._list(uri)
self.plural_response_key = curr_prkey
return ret |
def from_Composition(composition):
"""Return the LilyPond equivalent of a Composition in a string."""
# warning Throw exception
if not hasattr(composition, 'tracks'):
return False
result = '\\header { title = "%s" composer = "%s" opus = "%s" } '\
% (composition.title, composition.author, composition.subtitle)
for track in composition.tracks:
result += from_Track(track) + ' '
return result[:-1] | def function[from_Composition, parameter[composition]]:
constant[Return the LilyPond equivalent of a Composition in a string.]
if <ast.UnaryOp object at 0x7da18eb56f80> begin[:]
return[constant[False]]
variable[result] assign[=] binary_operation[constant[\header { title = "%s" composer = "%s" opus = "%s" } ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b13d5cf0>, <ast.Attribute object at 0x7da1b13d44f0>, <ast.Attribute object at 0x7da1b13d4910>]]]
for taget[name[track]] in starred[name[composition].tracks] begin[:]
<ast.AugAssign object at 0x7da1b13d7670>
return[call[name[result]][<ast.Slice object at 0x7da1b13d6da0>]] | keyword[def] identifier[from_Composition] ( identifier[composition] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[composition] , literal[string] ):
keyword[return] keyword[False]
identifier[result] = literal[string] %( identifier[composition] . identifier[title] , identifier[composition] . identifier[author] , identifier[composition] . identifier[subtitle] )
keyword[for] identifier[track] keyword[in] identifier[composition] . identifier[tracks] :
identifier[result] += identifier[from_Track] ( identifier[track] )+ literal[string]
keyword[return] identifier[result] [:- literal[int] ] | def from_Composition(composition):
"""Return the LilyPond equivalent of a Composition in a string."""
# warning Throw exception
if not hasattr(composition, 'tracks'):
return False # depends on [control=['if'], data=[]]
result = '\\header { title = "%s" composer = "%s" opus = "%s" } ' % (composition.title, composition.author, composition.subtitle)
for track in composition.tracks:
result += from_Track(track) + ' ' # depends on [control=['for'], data=['track']]
return result[:-1] |
def key_json(minion_id,
pillar, # pylint: disable=W0613
pillar_key=None):
'''
Pulls a string from redis and deserializes it from json. Deserialized
dictionary data loaded directly into top level if pillar_key is not set.
pillar_key
Pillar key to return data into
'''
key_data = __salt__['redis.get_key'](minion_id)
# Return nothing for non-existent keys
if not key_data:
return {}
data = salt.utils.json.loads(key_data)
# Return as requested
if isinstance(data, dict) and not pillar_key:
return data
elif not pillar_key:
return {'redis_pillar': data}
else:
return {pillar_key: data} | def function[key_json, parameter[minion_id, pillar, pillar_key]]:
constant[
Pulls a string from redis and deserializes it from json. Deserialized
dictionary data loaded directly into top level if pillar_key is not set.
pillar_key
Pillar key to return data into
]
variable[key_data] assign[=] call[call[name[__salt__]][constant[redis.get_key]], parameter[name[minion_id]]]
if <ast.UnaryOp object at 0x7da2047e8580> begin[:]
return[dictionary[[], []]]
variable[data] assign[=] call[name[salt].utils.json.loads, parameter[name[key_data]]]
if <ast.BoolOp object at 0x7da2047e8760> begin[:]
return[name[data]] | keyword[def] identifier[key_json] ( identifier[minion_id] ,
identifier[pillar] ,
identifier[pillar_key] = keyword[None] ):
literal[string]
identifier[key_data] = identifier[__salt__] [ literal[string] ]( identifier[minion_id] )
keyword[if] keyword[not] identifier[key_data] :
keyword[return] {}
identifier[data] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[key_data] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ) keyword[and] keyword[not] identifier[pillar_key] :
keyword[return] identifier[data]
keyword[elif] keyword[not] identifier[pillar_key] :
keyword[return] { literal[string] : identifier[data] }
keyword[else] :
keyword[return] { identifier[pillar_key] : identifier[data] } | def key_json(minion_id, pillar, pillar_key=None): # pylint: disable=W0613
'\n Pulls a string from redis and deserializes it from json. Deserialized\n dictionary data loaded directly into top level if pillar_key is not set.\n\n pillar_key\n Pillar key to return data into\n '
key_data = __salt__['redis.get_key'](minion_id)
# Return nothing for non-existent keys
if not key_data:
return {} # depends on [control=['if'], data=[]]
data = salt.utils.json.loads(key_data)
# Return as requested
if isinstance(data, dict) and (not pillar_key):
return data # depends on [control=['if'], data=[]]
elif not pillar_key:
return {'redis_pillar': data} # depends on [control=['if'], data=[]]
else:
return {pillar_key: data} |
def _rm_udf_link(self, rec):
# type: (udfmod.UDFFileEntry) -> int
'''
An internal method to remove a UDF File Entry link.
Parameters:
rec - The UDF File Entry to remove.
Returns:
The number of bytes to remove from the ISO.
'''
if not rec.is_file() and not rec.is_symlink():
raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)')
# To remove something from UDF, we have to:
# 1. Remove it from the list of linked_records on the Inode.
# 2. If the number of links to the Inode is now 0, remove the Inode.
# 3. If the number of links to the UDF File Entry this uses is 0,
# remove the UDF File Entry.
# 4. Remove the UDF File Identifier from the parent.
logical_block_size = self.pvd.logical_block_size()
num_bytes_to_remove = 0
if rec.inode is not None:
# Step 1.
found_index = None
for index, link in enumerate(rec.inode.linked_records):
if id(link) == id(rec):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del rec.inode.linked_records[found_index]
rec.inode.num_udf -= 1
# Step 2.
if not rec.inode.linked_records:
found_index = None
for index, ino in enumerate(self.inodes):
if id(ino) == id(rec.inode):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del self.inodes[found_index]
num_bytes_to_remove += rec.get_data_length()
# Step 3.
if rec.inode.num_udf == 0:
num_bytes_to_remove += logical_block_size
else:
# If rec.inode is None, then we are just removing the UDF File
# Entry.
num_bytes_to_remove += logical_block_size
# Step 4.
if rec.parent is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent')
if rec.file_ident is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier')
return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi) | def function[_rm_udf_link, parameter[self, rec]]:
constant[
An internal method to remove a UDF File Entry link.
Parameters:
rec - The UDF File Entry to remove.
Returns:
The number of bytes to remove from the ISO.
]
if <ast.BoolOp object at 0x7da18bc72d10> begin[:]
<ast.Raise object at 0x7da18bc715a0>
variable[logical_block_size] assign[=] call[name[self].pvd.logical_block_size, parameter[]]
variable[num_bytes_to_remove] assign[=] constant[0]
if compare[name[rec].inode is_not constant[None]] begin[:]
variable[found_index] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18bc73cd0>, <ast.Name object at 0x7da18bc72110>]]] in starred[call[name[enumerate], parameter[name[rec].inode.linked_records]]] begin[:]
if compare[call[name[id], parameter[name[link]]] equal[==] call[name[id], parameter[name[rec]]]] begin[:]
variable[found_index] assign[=] name[index]
break
<ast.Delete object at 0x7da18bc71690>
<ast.AugAssign object at 0x7da18bc739d0>
if <ast.UnaryOp object at 0x7da18bc72e30> begin[:]
variable[found_index] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18bc72fb0>, <ast.Name object at 0x7da18bc73df0>]]] in starred[call[name[enumerate], parameter[name[self].inodes]]] begin[:]
if compare[call[name[id], parameter[name[ino]]] equal[==] call[name[id], parameter[name[rec].inode]]] begin[:]
variable[found_index] assign[=] name[index]
break
<ast.Delete object at 0x7da18bc73d30>
<ast.AugAssign object at 0x7da18bc73d60>
if compare[name[rec].inode.num_udf equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0de1a20>
if compare[name[rec].parent is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0de3f70>
if compare[name[rec].file_ident is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0de3b20>
return[binary_operation[name[num_bytes_to_remove] + call[name[self]._rm_udf_file_ident, parameter[name[rec].parent, name[rec].file_ident.fi]]]] | keyword[def] identifier[_rm_udf_link] ( identifier[self] , identifier[rec] ):
literal[string]
keyword[if] keyword[not] identifier[rec] . identifier[is_file] () keyword[and] keyword[not] identifier[rec] . identifier[is_symlink] ():
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
identifier[logical_block_size] = identifier[self] . identifier[pvd] . identifier[logical_block_size] ()
identifier[num_bytes_to_remove] = literal[int]
keyword[if] identifier[rec] . identifier[inode] keyword[is] keyword[not] keyword[None] :
identifier[found_index] = keyword[None]
keyword[for] identifier[index] , identifier[link] keyword[in] identifier[enumerate] ( identifier[rec] . identifier[inode] . identifier[linked_records] ):
keyword[if] identifier[id] ( identifier[link] )== identifier[id] ( identifier[rec] ):
identifier[found_index] = identifier[index]
keyword[break]
keyword[else] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[del] identifier[rec] . identifier[inode] . identifier[linked_records] [ identifier[found_index] ]
identifier[rec] . identifier[inode] . identifier[num_udf] -= literal[int]
keyword[if] keyword[not] identifier[rec] . identifier[inode] . identifier[linked_records] :
identifier[found_index] = keyword[None]
keyword[for] identifier[index] , identifier[ino] keyword[in] identifier[enumerate] ( identifier[self] . identifier[inodes] ):
keyword[if] identifier[id] ( identifier[ino] )== identifier[id] ( identifier[rec] . identifier[inode] ):
identifier[found_index] = identifier[index]
keyword[break]
keyword[else] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[del] identifier[self] . identifier[inodes] [ identifier[found_index] ]
identifier[num_bytes_to_remove] += identifier[rec] . identifier[get_data_length] ()
keyword[if] identifier[rec] . identifier[inode] . identifier[num_udf] == literal[int] :
identifier[num_bytes_to_remove] += identifier[logical_block_size]
keyword[else] :
identifier[num_bytes_to_remove] += identifier[logical_block_size]
keyword[if] identifier[rec] . identifier[parent] keyword[is] keyword[None] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[if] identifier[rec] . identifier[file_ident] keyword[is] keyword[None] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[return] identifier[num_bytes_to_remove] + identifier[self] . identifier[_rm_udf_file_ident] ( identifier[rec] . identifier[parent] , identifier[rec] . identifier[file_ident] . identifier[fi] ) | def _rm_udf_link(self, rec):
# type: (udfmod.UDFFileEntry) -> int
'\n An internal method to remove a UDF File Entry link.\n\n Parameters:\n rec - The UDF File Entry to remove.\n Returns:\n The number of bytes to remove from the ISO.\n '
if not rec.is_file() and (not rec.is_symlink()):
raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)') # depends on [control=['if'], data=[]]
# To remove something from UDF, we have to:
# 1. Remove it from the list of linked_records on the Inode.
# 2. If the number of links to the Inode is now 0, remove the Inode.
# 3. If the number of links to the UDF File Entry this uses is 0,
# remove the UDF File Entry.
# 4. Remove the UDF File Identifier from the parent.
logical_block_size = self.pvd.logical_block_size()
num_bytes_to_remove = 0
if rec.inode is not None:
# Step 1.
found_index = None
for (index, link) in enumerate(rec.inode.linked_records):
if id(link) == id(rec):
found_index = index
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del rec.inode.linked_records[found_index]
rec.inode.num_udf -= 1
# Step 2.
if not rec.inode.linked_records:
found_index = None
for (index, ino) in enumerate(self.inodes):
if id(ino) == id(rec.inode):
found_index = index
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del self.inodes[found_index]
num_bytes_to_remove += rec.get_data_length() # depends on [control=['if'], data=[]]
# Step 3.
if rec.inode.num_udf == 0:
num_bytes_to_remove += logical_block_size # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# If rec.inode is None, then we are just removing the UDF File
# Entry.
num_bytes_to_remove += logical_block_size
# Step 4.
if rec.parent is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent') # depends on [control=['if'], data=[]]
if rec.file_ident is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier') # depends on [control=['if'], data=[]]
return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi) |
def magic_run(self, line):
"""
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
"""
i = float('inf')
if line.strip():
i = int(line)
try:
with warnings.catch_warnings(record=True) as w:
self.interpreter.run(i)
for warning_message in w:
# TODO should this be stdout or stderr
stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except iarm.exceptions.EndOfProgram as e:
f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
f_name = f_name[:f_name.find('_')]
message = "Error in {}: ".format(f_name)
stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Exception as e:
for err in e.args:
stream_content = {'name': 'stderr', 'text': str(err)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': type(e).__name__,
'evalue': str(e),
'traceback': '???'} | def function[magic_run, parameter[self, line]]:
constant[
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
]
variable[i] assign[=] call[name[float], parameter[constant[inf]]]
if call[name[line].strip, parameter[]] begin[:]
variable[i] assign[=] call[name[int], parameter[name[line]]]
<ast.Try object at 0x7da18f58d7b0> | keyword[def] identifier[magic_run] ( identifier[self] , identifier[line] ):
literal[string]
identifier[i] = identifier[float] ( literal[string] )
keyword[if] identifier[line] . identifier[strip] ():
identifier[i] = identifier[int] ( identifier[line] )
keyword[try] :
keyword[with] identifier[warnings] . identifier[catch_warnings] ( identifier[record] = keyword[True] ) keyword[as] identifier[w] :
identifier[self] . identifier[interpreter] . identifier[run] ( identifier[i] )
keyword[for] identifier[warning_message] keyword[in] identifier[w] :
identifier[stream_content] ={ literal[string] : literal[string] , literal[string] : literal[string] + identifier[str] ( identifier[warning_message] . identifier[message] )+ literal[string] }
identifier[self] . identifier[send_response] ( identifier[self] . identifier[iopub_socket] , literal[string] , identifier[stream_content] )
keyword[except] identifier[iarm] . identifier[exceptions] . identifier[EndOfProgram] keyword[as] identifier[e] :
identifier[f_name] = identifier[self] . identifier[interpreter] . identifier[program] [ identifier[self] . identifier[interpreter] . identifier[register] [ literal[string] ]- literal[int] ]. identifier[__name__]
identifier[f_name] = identifier[f_name] [: identifier[f_name] . identifier[find] ( literal[string] )]
identifier[message] = literal[string] . identifier[format] ( identifier[f_name] )
identifier[stream_content] ={ literal[string] : literal[string] , literal[string] : identifier[message] + identifier[str] ( identifier[e] )+ literal[string] }
identifier[self] . identifier[send_response] ( identifier[self] . identifier[iopub_socket] , literal[string] , identifier[stream_content] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[for] identifier[err] keyword[in] identifier[e] . identifier[args] :
identifier[stream_content] ={ literal[string] : literal[string] , literal[string] : identifier[str] ( identifier[err] )}
identifier[self] . identifier[send_response] ( identifier[self] . identifier[iopub_socket] , literal[string] , identifier[stream_content] )
keyword[return] { literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[execution_count] ,
literal[string] : identifier[type] ( identifier[e] ). identifier[__name__] ,
literal[string] : identifier[str] ( identifier[e] ),
literal[string] : literal[string] } | def magic_run(self, line):
"""
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
"""
i = float('inf')
if line.strip():
i = int(line) # depends on [control=['if'], data=[]]
try:
with warnings.catch_warnings(record=True) as w:
self.interpreter.run(i)
for warning_message in w:
# TODO should this be stdout or stderr
stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content) # depends on [control=['for'], data=['warning_message']] # depends on [control=['with'], data=['w']] # depends on [control=['try'], data=[]]
except iarm.exceptions.EndOfProgram as e:
f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
f_name = f_name[:f_name.find('_')]
message = 'Error in {}: '.format(f_name)
stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content) # depends on [control=['except'], data=['e']]
except Exception as e:
for err in e.args:
stream_content = {'name': 'stderr', 'text': str(err)}
self.send_response(self.iopub_socket, 'stream', stream_content) # depends on [control=['for'], data=['err']]
return {'status': 'error', 'execution_count': self.execution_count, 'ename': type(e).__name__, 'evalue': str(e), 'traceback': '???'} # depends on [control=['except'], data=['e']] |
def get_lang(tweet):
"""
Get the language that the Tweet is written in.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: 2-letter BCP 47 language code (or None if undefined)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_lang
>>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017",
... "lang": "en"}
>>> get_lang(original)
'en'
>>> activity = {"postedTime": "2017-05-24T20:17:19.000Z",
... "twitter_lang": "en"}
>>> get_lang(activity)
'en'
"""
if is_original_format(tweet):
lang_field = "lang"
else:
lang_field = "twitter_lang"
if tweet[lang_field] is not None and tweet[lang_field] != "und":
return tweet[lang_field]
else:
return None | def function[get_lang, parameter[tweet]]:
constant[
Get the language that the Tweet is written in.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: 2-letter BCP 47 language code (or None if undefined)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_lang
>>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017",
... "lang": "en"}
>>> get_lang(original)
'en'
>>> activity = {"postedTime": "2017-05-24T20:17:19.000Z",
... "twitter_lang": "en"}
>>> get_lang(activity)
'en'
]
if call[name[is_original_format], parameter[name[tweet]]] begin[:]
variable[lang_field] assign[=] constant[lang]
if <ast.BoolOp object at 0x7da1b0e47100> begin[:]
return[call[name[tweet]][name[lang_field]]] | keyword[def] identifier[get_lang] ( identifier[tweet] ):
literal[string]
keyword[if] identifier[is_original_format] ( identifier[tweet] ):
identifier[lang_field] = literal[string]
keyword[else] :
identifier[lang_field] = literal[string]
keyword[if] identifier[tweet] [ identifier[lang_field] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[tweet] [ identifier[lang_field] ]!= literal[string] :
keyword[return] identifier[tweet] [ identifier[lang_field] ]
keyword[else] :
keyword[return] keyword[None] | def get_lang(tweet):
"""
Get the language that the Tweet is written in.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: 2-letter BCP 47 language code (or None if undefined)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_lang
>>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017",
... "lang": "en"}
>>> get_lang(original)
'en'
>>> activity = {"postedTime": "2017-05-24T20:17:19.000Z",
... "twitter_lang": "en"}
>>> get_lang(activity)
'en'
"""
if is_original_format(tweet):
lang_field = 'lang' # depends on [control=['if'], data=[]]
else:
lang_field = 'twitter_lang'
if tweet[lang_field] is not None and tweet[lang_field] != 'und':
return tweet[lang_field] # depends on [control=['if'], data=[]]
else:
return None |
def _from_sql(self, soql):
"""Create Force.com SOQL tree structure from SOQL"""
# pylint:disable=too-many-branches,too-many-nested-blocks
assert not self.soql, "Don't use _from_sql method directly"
self.soql = soql
soql, self.subqueries = split_subquery(soql)
match_parse = re.match(r'SELECT (.*) FROM (\w+)\b(.*)$', soql, re.I)
if not match_parse:
raise ProgrammingError('Invalid SQL: %s' % self.soql)
fields_sql, self.root_table, self.extra_soql = match_parse.groups()
fields = [x.strip() for x in fields_sql.split(',')]
self.is_aggregation = bool(pattern_groupby.search(self.extra_soql) or
pattern_aggregation.search(fields[0]))
self.is_plain_count = fields[0].upper() == 'COUNT()'
consumed_subqueries = 0
expr_alias_counter = 0
#
if not self.is_plain_count:
for field in fields:
if self.is_aggregation:
match = re.search(r'\b\w+$', field)
if match:
alias = match.group()
assert alias not in RESERVED_WORDS, "invalid alias name"
if match.start() > 0 and field[match.start() - 1] == ' ':
field = field[match.start() - 1]
else:
alias = 'expr{}'.format(expr_alias_counter)
expr_alias_counter += 1
assert '&' not in field, "Subquery not expected as field in aggregation query"
elif '&' in field:
assert field == '(&)' # verify that the subquery was in parentheses
subquery = QQuery(self.subqueries[consumed_subqueries][0])
consumed_subqueries += 1
self.has_child_rel_field = True
field = subquery
# TODO more child relationships to the same table
alias = subquery.root_table
else:
alias = field
if '.' in alias:
if alias.split('.', 1)[0].lower() == self.root_table.lower():
alias = alias.split('.', 1)[1]
if '.' in alias:
# prepare paths for possible empty outer joins
subroots = self.subroots
root_crumbs = alias.lower().split('.')[:-1]
for scrumb in root_crumbs:
subroots.setdefault(scrumb, {})
subroots = subroots[scrumb]
self.aliases.append(alias)
self.fields.append(field) | def function[_from_sql, parameter[self, soql]]:
constant[Create Force.com SOQL tree structure from SOQL]
assert[<ast.UnaryOp object at 0x7da1b13a4a60>]
name[self].soql assign[=] name[soql]
<ast.Tuple object at 0x7da1b13a7a00> assign[=] call[name[split_subquery], parameter[name[soql]]]
variable[match_parse] assign[=] call[name[re].match, parameter[constant[SELECT (.*) FROM (\w+)\b(.*)$], name[soql], name[re].I]]
if <ast.UnaryOp object at 0x7da1b13a5fc0> begin[:]
<ast.Raise object at 0x7da1b13a5120>
<ast.Tuple object at 0x7da1b13a73d0> assign[=] call[name[match_parse].groups, parameter[]]
variable[fields] assign[=] <ast.ListComp object at 0x7da1b13a5db0>
name[self].is_aggregation assign[=] call[name[bool], parameter[<ast.BoolOp object at 0x7da1b13a53c0>]]
name[self].is_plain_count assign[=] compare[call[call[name[fields]][constant[0]].upper, parameter[]] equal[==] constant[COUNT()]]
variable[consumed_subqueries] assign[=] constant[0]
variable[expr_alias_counter] assign[=] constant[0]
if <ast.UnaryOp object at 0x7da1b13a6a40> begin[:]
for taget[name[field]] in starred[name[fields]] begin[:]
if name[self].is_aggregation begin[:]
variable[match] assign[=] call[name[re].search, parameter[constant[\b\w+$], name[field]]]
if name[match] begin[:]
variable[alias] assign[=] call[name[match].group, parameter[]]
assert[compare[name[alias] <ast.NotIn object at 0x7da2590d7190> name[RESERVED_WORDS]]]
if <ast.BoolOp object at 0x7da1b13a7e80> begin[:]
variable[field] assign[=] call[name[field]][binary_operation[call[name[match].start, parameter[]] - constant[1]]]
assert[compare[constant[&] <ast.NotIn object at 0x7da2590d7190> name[field]]]
call[name[self].aliases.append, parameter[name[alias]]]
call[name[self].fields.append, parameter[name[field]]] | keyword[def] identifier[_from_sql] ( identifier[self] , identifier[soql] ):
literal[string]
keyword[assert] keyword[not] identifier[self] . identifier[soql] , literal[string]
identifier[self] . identifier[soql] = identifier[soql]
identifier[soql] , identifier[self] . identifier[subqueries] = identifier[split_subquery] ( identifier[soql] )
identifier[match_parse] = identifier[re] . identifier[match] ( literal[string] , identifier[soql] , identifier[re] . identifier[I] )
keyword[if] keyword[not] identifier[match_parse] :
keyword[raise] identifier[ProgrammingError] ( literal[string] % identifier[self] . identifier[soql] )
identifier[fields_sql] , identifier[self] . identifier[root_table] , identifier[self] . identifier[extra_soql] = identifier[match_parse] . identifier[groups] ()
identifier[fields] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[fields_sql] . identifier[split] ( literal[string] )]
identifier[self] . identifier[is_aggregation] = identifier[bool] ( identifier[pattern_groupby] . identifier[search] ( identifier[self] . identifier[extra_soql] ) keyword[or]
identifier[pattern_aggregation] . identifier[search] ( identifier[fields] [ literal[int] ]))
identifier[self] . identifier[is_plain_count] = identifier[fields] [ literal[int] ]. identifier[upper] ()== literal[string]
identifier[consumed_subqueries] = literal[int]
identifier[expr_alias_counter] = literal[int]
keyword[if] keyword[not] identifier[self] . identifier[is_plain_count] :
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[if] identifier[self] . identifier[is_aggregation] :
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[field] )
keyword[if] identifier[match] :
identifier[alias] = identifier[match] . identifier[group] ()
keyword[assert] identifier[alias] keyword[not] keyword[in] identifier[RESERVED_WORDS] , literal[string]
keyword[if] identifier[match] . identifier[start] ()> literal[int] keyword[and] identifier[field] [ identifier[match] . identifier[start] ()- literal[int] ]== literal[string] :
identifier[field] = identifier[field] [ identifier[match] . identifier[start] ()- literal[int] ]
keyword[else] :
identifier[alias] = literal[string] . identifier[format] ( identifier[expr_alias_counter] )
identifier[expr_alias_counter] += literal[int]
keyword[assert] literal[string] keyword[not] keyword[in] identifier[field] , literal[string]
keyword[elif] literal[string] keyword[in] identifier[field] :
keyword[assert] identifier[field] == literal[string]
identifier[subquery] = identifier[QQuery] ( identifier[self] . identifier[subqueries] [ identifier[consumed_subqueries] ][ literal[int] ])
identifier[consumed_subqueries] += literal[int]
identifier[self] . identifier[has_child_rel_field] = keyword[True]
identifier[field] = identifier[subquery]
identifier[alias] = identifier[subquery] . identifier[root_table]
keyword[else] :
identifier[alias] = identifier[field]
keyword[if] literal[string] keyword[in] identifier[alias] :
keyword[if] identifier[alias] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[lower] ()== identifier[self] . identifier[root_table] . identifier[lower] ():
identifier[alias] = identifier[alias] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[alias] :
identifier[subroots] = identifier[self] . identifier[subroots]
identifier[root_crumbs] = identifier[alias] . identifier[lower] (). identifier[split] ( literal[string] )[:- literal[int] ]
keyword[for] identifier[scrumb] keyword[in] identifier[root_crumbs] :
identifier[subroots] . identifier[setdefault] ( identifier[scrumb] ,{})
identifier[subroots] = identifier[subroots] [ identifier[scrumb] ]
identifier[self] . identifier[aliases] . identifier[append] ( identifier[alias] )
identifier[self] . identifier[fields] . identifier[append] ( identifier[field] ) | def _from_sql(self, soql):
"""Create Force.com SOQL tree structure from SOQL"""
# pylint:disable=too-many-branches,too-many-nested-blocks
assert not self.soql, "Don't use _from_sql method directly"
self.soql = soql
(soql, self.subqueries) = split_subquery(soql)
match_parse = re.match('SELECT (.*) FROM (\\w+)\\b(.*)$', soql, re.I)
if not match_parse:
raise ProgrammingError('Invalid SQL: %s' % self.soql) # depends on [control=['if'], data=[]]
(fields_sql, self.root_table, self.extra_soql) = match_parse.groups()
fields = [x.strip() for x in fields_sql.split(',')]
self.is_aggregation = bool(pattern_groupby.search(self.extra_soql) or pattern_aggregation.search(fields[0]))
self.is_plain_count = fields[0].upper() == 'COUNT()'
consumed_subqueries = 0
expr_alias_counter = 0
#
if not self.is_plain_count:
for field in fields:
if self.is_aggregation:
match = re.search('\\b\\w+$', field)
if match:
alias = match.group()
assert alias not in RESERVED_WORDS, 'invalid alias name'
if match.start() > 0 and field[match.start() - 1] == ' ':
field = field[match.start() - 1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
alias = 'expr{}'.format(expr_alias_counter)
expr_alias_counter += 1
assert '&' not in field, 'Subquery not expected as field in aggregation query' # depends on [control=['if'], data=[]]
elif '&' in field:
assert field == '(&)' # verify that the subquery was in parentheses
subquery = QQuery(self.subqueries[consumed_subqueries][0])
consumed_subqueries += 1
self.has_child_rel_field = True
field = subquery
# TODO more child relationships to the same table
alias = subquery.root_table # depends on [control=['if'], data=['field']]
else:
alias = field
if '.' in alias:
if alias.split('.', 1)[0].lower() == self.root_table.lower():
alias = alias.split('.', 1)[1] # depends on [control=['if'], data=[]]
if '.' in alias:
# prepare paths for possible empty outer joins
subroots = self.subroots
root_crumbs = alias.lower().split('.')[:-1]
for scrumb in root_crumbs:
subroots.setdefault(scrumb, {})
subroots = subroots[scrumb] # depends on [control=['for'], data=['scrumb']] # depends on [control=['if'], data=['alias']] # depends on [control=['if'], data=['alias']]
self.aliases.append(alias)
self.fields.append(field) # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]] |
def default(self, obj):
"""
if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded
"""
if isinstance(obj, np.ndarray):
data_b64 = base64.b64encode(obj.data).decode('utf-8')
return dict(__ndarray__=data_b64,
dtype=str(obj.dtype),
shape=obj.shape)
elif sps.issparse(obj):
data_b64 = base64.b64encode(obj.data).decode('utf-8')
return dict(__ndarray__=data_b64,
dtype=str(obj.dtype),
shape=obj.shape,
indices=obj.indices,
indptr=obj.indptr)
elif hasattr(obj, '__dict__'):
return obj.__dict__
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj) | def function[default, parameter[self, obj]]:
constant[
if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded
]
if call[name[isinstance], parameter[name[obj], name[np].ndarray]] begin[:]
variable[data_b64] assign[=] call[call[name[base64].b64encode, parameter[name[obj].data]].decode, parameter[constant[utf-8]]]
return[call[name[dict], parameter[]]]
return[call[name[json].JSONEncoder.default, parameter[name[self], name[obj]]]] | keyword[def] identifier[default] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[np] . identifier[ndarray] ):
identifier[data_b64] = identifier[base64] . identifier[b64encode] ( identifier[obj] . identifier[data] ). identifier[decode] ( literal[string] )
keyword[return] identifier[dict] ( identifier[__ndarray__] = identifier[data_b64] ,
identifier[dtype] = identifier[str] ( identifier[obj] . identifier[dtype] ),
identifier[shape] = identifier[obj] . identifier[shape] )
keyword[elif] identifier[sps] . identifier[issparse] ( identifier[obj] ):
identifier[data_b64] = identifier[base64] . identifier[b64encode] ( identifier[obj] . identifier[data] ). identifier[decode] ( literal[string] )
keyword[return] identifier[dict] ( identifier[__ndarray__] = identifier[data_b64] ,
identifier[dtype] = identifier[str] ( identifier[obj] . identifier[dtype] ),
identifier[shape] = identifier[obj] . identifier[shape] ,
identifier[indices] = identifier[obj] . identifier[indices] ,
identifier[indptr] = identifier[obj] . identifier[indptr] )
keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ):
keyword[return] identifier[obj] . identifier[__dict__]
keyword[return] identifier[json] . identifier[JSONEncoder] . identifier[default] ( identifier[self] , identifier[obj] ) | def default(self, obj):
"""
if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded
"""
if isinstance(obj, np.ndarray):
data_b64 = base64.b64encode(obj.data).decode('utf-8')
return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape) # depends on [control=['if'], data=[]]
elif sps.issparse(obj):
data_b64 = base64.b64encode(obj.data).decode('utf-8')
return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape, indices=obj.indices, indptr=obj.indptr) # depends on [control=['if'], data=[]]
elif hasattr(obj, '__dict__'):
return obj.__dict__ # depends on [control=['if'], data=[]]
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj) |
def _compute_term_4(self, C, mag, R):
"""
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
"""
return (
(C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) +
C['a19'] * np.power(mag, 3)) * np.power(R, 3)
) | def function[_compute_term_4, parameter[self, C, mag, R]]:
constant[
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
]
return[binary_operation[binary_operation[binary_operation[binary_operation[call[name[C]][constant[a16]] + binary_operation[call[name[C]][constant[a17]] * name[mag]]] + binary_operation[call[name[C]][constant[a18]] * call[name[np].power, parameter[name[mag], constant[2]]]]] + binary_operation[call[name[C]][constant[a19]] * call[name[np].power, parameter[name[mag], constant[3]]]]] * call[name[np].power, parameter[name[R], constant[3]]]]] | keyword[def] identifier[_compute_term_4] ( identifier[self] , identifier[C] , identifier[mag] , identifier[R] ):
literal[string]
keyword[return] (
( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[mag] + identifier[C] [ literal[string] ]* identifier[np] . identifier[power] ( identifier[mag] , literal[int] )+
identifier[C] [ literal[string] ]* identifier[np] . identifier[power] ( identifier[mag] , literal[int] ))* identifier[np] . identifier[power] ( identifier[R] , literal[int] )
) | def _compute_term_4(self, C, mag, R):
"""
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
"""
return (C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) + C['a19'] * np.power(mag, 3)) * np.power(R, 3) |
def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
""" Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated
"""
dtrajs_new = []
for dtraj in dtrajs:
if len(dtraj) <= lag:
continue
if shift is None:
s = np.random.randint(min(lag, dtraj.size-lag))
else:
s = shift
if sliding:
if s > 0:
dtrajs_new.append(dtraj[0:lag+s])
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+2*lag])
else:
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+lag+1])
return dtrajs_new | def function[blocksplit_dtrajs, parameter[dtrajs, lag, sliding, shift]]:
constant[ Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated
]
variable[dtrajs_new] assign[=] list[[]]
for taget[name[dtraj]] in starred[name[dtrajs]] begin[:]
if compare[call[name[len], parameter[name[dtraj]]] less_or_equal[<=] name[lag]] begin[:]
continue
if compare[name[shift] is constant[None]] begin[:]
variable[s] assign[=] call[name[np].random.randint, parameter[call[name[min], parameter[name[lag], binary_operation[name[dtraj].size - name[lag]]]]]]
if name[sliding] begin[:]
if compare[name[s] greater[>] constant[0]] begin[:]
call[name[dtrajs_new].append, parameter[call[name[dtraj]][<ast.Slice object at 0x7da204565480>]]]
for taget[name[t0]] in starred[call[name[range], parameter[name[s], binary_operation[name[dtraj].size - name[lag]], name[lag]]]] begin[:]
call[name[dtrajs_new].append, parameter[call[name[dtraj]][<ast.Slice object at 0x7da204564610>]]]
return[name[dtrajs_new]] | keyword[def] identifier[blocksplit_dtrajs] ( identifier[dtrajs] , identifier[lag] = literal[int] , identifier[sliding] = keyword[True] , identifier[shift] = keyword[None] ):
literal[string]
identifier[dtrajs_new] =[]
keyword[for] identifier[dtraj] keyword[in] identifier[dtrajs] :
keyword[if] identifier[len] ( identifier[dtraj] )<= identifier[lag] :
keyword[continue]
keyword[if] identifier[shift] keyword[is] keyword[None] :
identifier[s] = identifier[np] . identifier[random] . identifier[randint] ( identifier[min] ( identifier[lag] , identifier[dtraj] . identifier[size] - identifier[lag] ))
keyword[else] :
identifier[s] = identifier[shift]
keyword[if] identifier[sliding] :
keyword[if] identifier[s] > literal[int] :
identifier[dtrajs_new] . identifier[append] ( identifier[dtraj] [ literal[int] : identifier[lag] + identifier[s] ])
keyword[for] identifier[t0] keyword[in] identifier[range] ( identifier[s] , identifier[dtraj] . identifier[size] - identifier[lag] , identifier[lag] ):
identifier[dtrajs_new] . identifier[append] ( identifier[dtraj] [ identifier[t0] : identifier[t0] + literal[int] * identifier[lag] ])
keyword[else] :
keyword[for] identifier[t0] keyword[in] identifier[range] ( identifier[s] , identifier[dtraj] . identifier[size] - identifier[lag] , identifier[lag] ):
identifier[dtrajs_new] . identifier[append] ( identifier[dtraj] [ identifier[t0] : identifier[t0] + identifier[lag] + literal[int] ])
keyword[return] identifier[dtrajs_new] | def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
""" Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated
"""
dtrajs_new = []
for dtraj in dtrajs:
if len(dtraj) <= lag:
continue # depends on [control=['if'], data=[]]
if shift is None:
s = np.random.randint(min(lag, dtraj.size - lag)) # depends on [control=['if'], data=[]]
else:
s = shift
if sliding:
if s > 0:
dtrajs_new.append(dtraj[0:lag + s]) # depends on [control=['if'], data=['s']]
for t0 in range(s, dtraj.size - lag, lag):
dtrajs_new.append(dtraj[t0:t0 + 2 * lag]) # depends on [control=['for'], data=['t0']] # depends on [control=['if'], data=[]]
else:
for t0 in range(s, dtraj.size - lag, lag):
dtrajs_new.append(dtraj[t0:t0 + lag + 1]) # depends on [control=['for'], data=['t0']] # depends on [control=['for'], data=['dtraj']]
return dtrajs_new |
def level(self):
"""Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
"""
m = re.match(
self.compliance_prefix +
r'(\d)' +
self.compliance_suffix +
r'$',
self.compliance)
if (m):
return int(m.group(1))
raise IIIFInfoError(
"Bad compliance profile URI, failed to extract level number") | def function[level, parameter[self]]:
constant[Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
]
variable[m] assign[=] call[name[re].match, parameter[binary_operation[binary_operation[binary_operation[name[self].compliance_prefix + constant[(\d)]] + name[self].compliance_suffix] + constant[$]], name[self].compliance]]
if name[m] begin[:]
return[call[name[int], parameter[call[name[m].group, parameter[constant[1]]]]]]
<ast.Raise object at 0x7da20c6c59c0> | keyword[def] identifier[level] ( identifier[self] ):
literal[string]
identifier[m] = identifier[re] . identifier[match] (
identifier[self] . identifier[compliance_prefix] +
literal[string] +
identifier[self] . identifier[compliance_suffix] +
literal[string] ,
identifier[self] . identifier[compliance] )
keyword[if] ( identifier[m] ):
keyword[return] identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[raise] identifier[IIIFInfoError] (
literal[string] ) | def level(self):
"""Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
"""
m = re.match(self.compliance_prefix + '(\\d)' + self.compliance_suffix + '$', self.compliance)
if m:
return int(m.group(1)) # depends on [control=['if'], data=[]]
raise IIIFInfoError('Bad compliance profile URI, failed to extract level number') |
def list(self, teamId, max=None, **request_parameters):
"""List team memberships for a team, by ID.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all team memberships returned by
the query. The generator will automatically request additional 'pages'
of responses from Webex as needed until all responses have been
returned. The container makes the generator safe for reuse. A new API
call will be made, using the same parameters that were specified when
the generator was created, every time a new iterator is requested from
the container.
Args:
teamId(basestring): List team memberships for a team, by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the team memberships returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(teamId, basestring, may_be_none=False)
check_type(max, int)
params = dict_from_items_with_values(
request_parameters,
teamId=teamId,
max=max,
)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield team membership objects created from the returned items JSON
# objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item) | def function[list, parameter[self, teamId, max]]:
constant[List team memberships for a team, by ID.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all team memberships returned by
the query. The generator will automatically request additional 'pages'
of responses from Webex as needed until all responses have been
returned. The container makes the generator safe for reuse. A new API
call will be made, using the same parameters that were specified when
the generator was created, every time a new iterator is requested from
the container.
Args:
teamId(basestring): List team memberships for a team, by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the team memberships returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
]
call[name[check_type], parameter[name[teamId], name[basestring]]]
call[name[check_type], parameter[name[max], name[int]]]
variable[params] assign[=] call[name[dict_from_items_with_values], parameter[name[request_parameters]]]
variable[items] assign[=] call[name[self]._session.get_items, parameter[name[API_ENDPOINT]]]
for taget[name[item]] in starred[name[items]] begin[:]
<ast.Yield object at 0x7da1b025c250> | keyword[def] identifier[list] ( identifier[self] , identifier[teamId] , identifier[max] = keyword[None] ,** identifier[request_parameters] ):
literal[string]
identifier[check_type] ( identifier[teamId] , identifier[basestring] , identifier[may_be_none] = keyword[False] )
identifier[check_type] ( identifier[max] , identifier[int] )
identifier[params] = identifier[dict_from_items_with_values] (
identifier[request_parameters] ,
identifier[teamId] = identifier[teamId] ,
identifier[max] = identifier[max] ,
)
identifier[items] = identifier[self] . identifier[_session] . identifier[get_items] ( identifier[API_ENDPOINT] , identifier[params] = identifier[params] )
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[yield] identifier[self] . identifier[_object_factory] ( identifier[OBJECT_TYPE] , identifier[item] ) | def list(self, teamId, max=None, **request_parameters):
"""List team memberships for a team, by ID.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all team memberships returned by
the query. The generator will automatically request additional 'pages'
of responses from Webex as needed until all responses have been
returned. The container makes the generator safe for reuse. A new API
call will be made, using the same parameters that were specified when
the generator was created, every time a new iterator is requested from
the container.
Args:
teamId(basestring): List team memberships for a team, by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the team memberships returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(teamId, basestring, may_be_none=False)
check_type(max, int)
params = dict_from_items_with_values(request_parameters, teamId=teamId, max=max)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield team membership objects created from the returned items JSON
# objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item) # depends on [control=['for'], data=['item']] |
def one_of(s):
'''Parser a char from specified string.'''
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser | def function[one_of, parameter[s]]:
constant[Parser a char from specified string.]
def function[one_of_parser, parameter[text, index]]:
if <ast.BoolOp object at 0x7da20c7c8130> begin[:]
return[call[name[Value].success, parameter[binary_operation[name[index] + constant[1]], call[name[text]][name[index]]]]]
return[name[one_of_parser]] | keyword[def] identifier[one_of] ( identifier[s] ):
literal[string]
@ identifier[Parser]
keyword[def] identifier[one_of_parser] ( identifier[text] , identifier[index] = literal[int] ):
keyword[if] identifier[index] < identifier[len] ( identifier[text] ) keyword[and] identifier[text] [ identifier[index] ] keyword[in] identifier[s] :
keyword[return] identifier[Value] . identifier[success] ( identifier[index] + literal[int] , identifier[text] [ identifier[index] ])
keyword[else] :
keyword[return] identifier[Value] . identifier[failure] ( identifier[index] , literal[string] . identifier[format] ( identifier[s] ))
keyword[return] identifier[one_of_parser] | def one_of(s):
"""Parser a char from specified string."""
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index]) # depends on [control=['if'], data=[]]
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser |
def run(ctx, project, file, name, tags, description, ttl, u, l): # pylint:disable=redefined-builtin
"""Run polyaxonfile specification.
Examples:
\b
```bash
$ polyaxon run -f file -f file_override ...
```
Upload before running
\b
```bash
$ polyaxon run -f file -u
```
Run and set description and tags for this run
\b
```bash
$ polyaxon run -f file -u --description="Description of the current run" --tags="foo, bar, moo"
```
Run and set a unique name for this run
\b
```bash
polyaxon run --name=foo
```
Run for a specific project
\b
```bash
$ polyaxon run -p project1 -f file.yaml
```
"""
if not file:
file = PolyaxonFile.check_default_path(path='.')
if not file:
file = ''
specification = check_polyaxonfile(file, log=False).specification
spec_cond = (specification.is_experiment or
specification.is_group or
specification.is_job or
specification.is_build)
if not spec_cond:
Printer.print_error(
'This command expects an experiment, a group, a job, or a build specification,'
'received instead a `{}` specification'.format(specification.kind))
if specification.is_notebook:
click.echo('Please check "polyaxon notebook --help" to start a notebook.')
elif specification.is_tensorboard:
click.echo('Please check: "polyaxon tensorboard --help" to start a tensorboard.')
sys.exit(1)
# Check if we need to upload
if u:
if project:
Printer.print_error('Uploading is not supported when switching project context!')
click.echo('Please, either omit the `-u` option or `-p` / `--project=` option.')
sys.exit(1)
ctx.invoke(upload, sync=False)
user, project_name = get_project_or_local(project)
project_client = PolyaxonClient().project
tags = validate_tags(tags)
def run_experiment():
click.echo('Creating an independent experiment.')
experiment = ExperimentConfig(
name=name,
description=description,
tags=tags,
config=specification.parsed_data,
ttl=ttl)
try:
response = PolyaxonClient().project.create_experiment(user,
project_name,
experiment)
cache.cache(config_manager=ExperimentManager, response=response)
Printer.print_success('Experiment `{}` was created'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create experiment.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def run_group():
click.echo('Creating an experiment group with the following definition:')
experiments_def = specification.experiments_def
get_group_experiments_info(**experiments_def)
experiment_group = ExperimentGroupConfig(
name=name,
description=description,
tags=tags,
content=specification._data) # pylint:disable=protected-access
try:
response = project_client.create_experiment_group(user,
project_name,
experiment_group)
cache.cache(config_manager=GroupManager, response=response)
Printer.print_success('Experiment group {} was created'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create experiment group.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def run_job():
click.echo('Creating a job.')
job = JobConfig(
name=name,
description=description,
tags=tags,
config=specification.parsed_data,
ttl=ttl)
try:
response = project_client.create_job(user,
project_name,
job)
cache.cache(config_manager=JobManager, response=response)
Printer.print_success('Job {} was created'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create job.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def run_build():
click.echo('Creating a build.')
job = JobConfig(
name=name,
description=description,
tags=tags,
config=specification.parsed_data,
ttl=ttl)
try:
response = project_client.create_build(user,
project_name,
job)
cache.cache(config_manager=BuildJobManager, response=response)
Printer.print_success('Build {} was created'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create build.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
logs = None
if specification.is_experiment:
run_experiment()
logs = experiment_logs
elif specification.is_group:
run_group()
elif specification.is_job:
run_job()
logs = job_logs
elif specification.is_build:
run_build()
logs = build_logs
# Check if we need to invoke logs
if l and logs:
ctx.obj = {'project': '{}/{}'.format(user, project_name)}
ctx.invoke(logs) | def function[run, parameter[ctx, project, file, name, tags, description, ttl, u, l]]:
constant[Run polyaxonfile specification.
Examples:
```bash
$ polyaxon run -f file -f file_override ...
```
Upload before running
```bash
$ polyaxon run -f file -u
```
Run and set description and tags for this run
```bash
$ polyaxon run -f file -u --description="Description of the current run" --tags="foo, bar, moo"
```
Run and set a unique name for this run
```bash
polyaxon run --name=foo
```
Run for a specific project
```bash
$ polyaxon run -p project1 -f file.yaml
```
]
if <ast.UnaryOp object at 0x7da1aff1c7f0> begin[:]
variable[file] assign[=] call[name[PolyaxonFile].check_default_path, parameter[]]
if <ast.UnaryOp object at 0x7da1aff1d000> begin[:]
variable[file] assign[=] constant[]
variable[specification] assign[=] call[name[check_polyaxonfile], parameter[name[file]]].specification
variable[spec_cond] assign[=] <ast.BoolOp object at 0x7da1aff1f130>
if <ast.UnaryOp object at 0x7da1aff1e440> begin[:]
call[name[Printer].print_error, parameter[call[constant[This command expects an experiment, a group, a job, or a build specification,received instead a `{}` specification].format, parameter[name[specification].kind]]]]
if name[specification].is_notebook begin[:]
call[name[click].echo, parameter[constant[Please check "polyaxon notebook --help" to start a notebook.]]]
call[name[sys].exit, parameter[constant[1]]]
if name[u] begin[:]
if name[project] begin[:]
call[name[Printer].print_error, parameter[constant[Uploading is not supported when switching project context!]]]
call[name[click].echo, parameter[constant[Please, either omit the `-u` option or `-p` / `--project=` option.]]]
call[name[sys].exit, parameter[constant[1]]]
call[name[ctx].invoke, parameter[name[upload]]]
<ast.Tuple object at 0x7da1aff1ef20> assign[=] call[name[get_project_or_local], parameter[name[project]]]
variable[project_client] assign[=] call[name[PolyaxonClient], parameter[]].project
variable[tags] assign[=] call[name[validate_tags], parameter[name[tags]]]
def function[run_experiment, parameter[]]:
call[name[click].echo, parameter[constant[Creating an independent experiment.]]]
variable[experiment] assign[=] call[name[ExperimentConfig], parameter[]]
<ast.Try object at 0x7da1aff1d9f0>
def function[run_group, parameter[]]:
call[name[click].echo, parameter[constant[Creating an experiment group with the following definition:]]]
variable[experiments_def] assign[=] name[specification].experiments_def
call[name[get_group_experiments_info], parameter[]]
variable[experiment_group] assign[=] call[name[ExperimentGroupConfig], parameter[]]
<ast.Try object at 0x7da1aff1ed40>
def function[run_job, parameter[]]:
call[name[click].echo, parameter[constant[Creating a job.]]]
variable[job] assign[=] call[name[JobConfig], parameter[]]
<ast.Try object at 0x7da1afe71450>
def function[run_build, parameter[]]:
call[name[click].echo, parameter[constant[Creating a build.]]]
variable[job] assign[=] call[name[JobConfig], parameter[]]
<ast.Try object at 0x7da1afe72080>
variable[logs] assign[=] constant[None]
if name[specification].is_experiment begin[:]
call[name[run_experiment], parameter[]]
variable[logs] assign[=] name[experiment_logs]
if <ast.BoolOp object at 0x7da1afe711e0> begin[:]
name[ctx].obj assign[=] dictionary[[<ast.Constant object at 0x7da1afe70eb0>], [<ast.Call object at 0x7da1afe70f10>]]
call[name[ctx].invoke, parameter[name[logs]]] | keyword[def] identifier[run] ( identifier[ctx] , identifier[project] , identifier[file] , identifier[name] , identifier[tags] , identifier[description] , identifier[ttl] , identifier[u] , identifier[l] ):
literal[string]
keyword[if] keyword[not] identifier[file] :
identifier[file] = identifier[PolyaxonFile] . identifier[check_default_path] ( identifier[path] = literal[string] )
keyword[if] keyword[not] identifier[file] :
identifier[file] = literal[string]
identifier[specification] = identifier[check_polyaxonfile] ( identifier[file] , identifier[log] = keyword[False] ). identifier[specification]
identifier[spec_cond] =( identifier[specification] . identifier[is_experiment] keyword[or]
identifier[specification] . identifier[is_group] keyword[or]
identifier[specification] . identifier[is_job] keyword[or]
identifier[specification] . identifier[is_build] )
keyword[if] keyword[not] identifier[spec_cond] :
identifier[Printer] . identifier[print_error] (
literal[string]
literal[string] . identifier[format] ( identifier[specification] . identifier[kind] ))
keyword[if] identifier[specification] . identifier[is_notebook] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[elif] identifier[specification] . identifier[is_tensorboard] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[u] :
keyword[if] identifier[project] :
identifier[Printer] . identifier[print_error] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[ctx] . identifier[invoke] ( identifier[upload] , identifier[sync] = keyword[False] )
identifier[user] , identifier[project_name] = identifier[get_project_or_local] ( identifier[project] )
identifier[project_client] = identifier[PolyaxonClient] (). identifier[project]
identifier[tags] = identifier[validate_tags] ( identifier[tags] )
keyword[def] identifier[run_experiment] ():
identifier[click] . identifier[echo] ( literal[string] )
identifier[experiment] = identifier[ExperimentConfig] (
identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[tags] = identifier[tags] ,
identifier[config] = identifier[specification] . identifier[parsed_data] ,
identifier[ttl] = identifier[ttl] )
keyword[try] :
identifier[response] = identifier[PolyaxonClient] (). identifier[project] . identifier[create_experiment] ( identifier[user] ,
identifier[project_name] ,
identifier[experiment] )
identifier[cache] . identifier[cache] ( identifier[config_manager] = identifier[ExperimentManager] , identifier[response] = identifier[response] )
identifier[Printer] . identifier[print_success] ( literal[string] . identifier[format] ( identifier[response] . identifier[id] ))
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] )
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[def] identifier[run_group] ():
identifier[click] . identifier[echo] ( literal[string] )
identifier[experiments_def] = identifier[specification] . identifier[experiments_def]
identifier[get_group_experiments_info] (** identifier[experiments_def] )
identifier[experiment_group] = identifier[ExperimentGroupConfig] (
identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[tags] = identifier[tags] ,
identifier[content] = identifier[specification] . identifier[_data] )
keyword[try] :
identifier[response] = identifier[project_client] . identifier[create_experiment_group] ( identifier[user] ,
identifier[project_name] ,
identifier[experiment_group] )
identifier[cache] . identifier[cache] ( identifier[config_manager] = identifier[GroupManager] , identifier[response] = identifier[response] )
identifier[Printer] . identifier[print_success] ( literal[string] . identifier[format] ( identifier[response] . identifier[id] ))
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] )
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[def] identifier[run_job] ():
identifier[click] . identifier[echo] ( literal[string] )
identifier[job] = identifier[JobConfig] (
identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[tags] = identifier[tags] ,
identifier[config] = identifier[specification] . identifier[parsed_data] ,
identifier[ttl] = identifier[ttl] )
keyword[try] :
identifier[response] = identifier[project_client] . identifier[create_job] ( identifier[user] ,
identifier[project_name] ,
identifier[job] )
identifier[cache] . identifier[cache] ( identifier[config_manager] = identifier[JobManager] , identifier[response] = identifier[response] )
identifier[Printer] . identifier[print_success] ( literal[string] . identifier[format] ( identifier[response] . identifier[id] ))
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] )
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[def] identifier[run_build] ():
identifier[click] . identifier[echo] ( literal[string] )
identifier[job] = identifier[JobConfig] (
identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[tags] = identifier[tags] ,
identifier[config] = identifier[specification] . identifier[parsed_data] ,
identifier[ttl] = identifier[ttl] )
keyword[try] :
identifier[response] = identifier[project_client] . identifier[create_build] ( identifier[user] ,
identifier[project_name] ,
identifier[job] )
identifier[cache] . identifier[cache] ( identifier[config_manager] = identifier[BuildJobManager] , identifier[response] = identifier[response] )
identifier[Printer] . identifier[print_success] ( literal[string] . identifier[format] ( identifier[response] . identifier[id] ))
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] )
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[logs] = keyword[None]
keyword[if] identifier[specification] . identifier[is_experiment] :
identifier[run_experiment] ()
identifier[logs] = identifier[experiment_logs]
keyword[elif] identifier[specification] . identifier[is_group] :
identifier[run_group] ()
keyword[elif] identifier[specification] . identifier[is_job] :
identifier[run_job] ()
identifier[logs] = identifier[job_logs]
keyword[elif] identifier[specification] . identifier[is_build] :
identifier[run_build] ()
identifier[logs] = identifier[build_logs]
keyword[if] identifier[l] keyword[and] identifier[logs] :
identifier[ctx] . identifier[obj] ={ literal[string] : literal[string] . identifier[format] ( identifier[user] , identifier[project_name] )}
identifier[ctx] . identifier[invoke] ( identifier[logs] ) | def run(ctx, project, file, name, tags, description, ttl, u, l): # pylint:disable=redefined-builtin
'Run polyaxonfile specification.\n\n Examples:\n\n \x08\n ```bash\n $ polyaxon run -f file -f file_override ...\n ```\n\n Upload before running\n\n \x08\n ```bash\n $ polyaxon run -f file -u\n ```\n\n Run and set description and tags for this run\n\n \x08\n ```bash\n $ polyaxon run -f file -u --description="Description of the current run" --tags="foo, bar, moo"\n ```\n Run and set a unique name for this run\n\n \x08\n ```bash\n polyaxon run --name=foo\n ```\n\n Run for a specific project\n\n \x08\n ```bash\n $ polyaxon run -p project1 -f file.yaml\n ```\n '
if not file:
file = PolyaxonFile.check_default_path(path='.') # depends on [control=['if'], data=[]]
if not file:
file = '' # depends on [control=['if'], data=[]]
specification = check_polyaxonfile(file, log=False).specification
spec_cond = specification.is_experiment or specification.is_group or specification.is_job or specification.is_build
if not spec_cond:
Printer.print_error('This command expects an experiment, a group, a job, or a build specification,received instead a `{}` specification'.format(specification.kind))
if specification.is_notebook:
click.echo('Please check "polyaxon notebook --help" to start a notebook.') # depends on [control=['if'], data=[]]
elif specification.is_tensorboard:
click.echo('Please check: "polyaxon tensorboard --help" to start a tensorboard.') # depends on [control=['if'], data=[]]
sys.exit(1) # depends on [control=['if'], data=[]]
# Check if we need to upload
if u:
if project:
Printer.print_error('Uploading is not supported when switching project context!')
click.echo('Please, either omit the `-u` option or `-p` / `--project=` option.')
sys.exit(1) # depends on [control=['if'], data=[]]
ctx.invoke(upload, sync=False) # depends on [control=['if'], data=[]]
(user, project_name) = get_project_or_local(project)
project_client = PolyaxonClient().project
tags = validate_tags(tags)
def run_experiment():
click.echo('Creating an independent experiment.')
experiment = ExperimentConfig(name=name, description=description, tags=tags, config=specification.parsed_data, ttl=ttl)
try:
response = PolyaxonClient().project.create_experiment(user, project_name, experiment)
cache.cache(config_manager=ExperimentManager, response=response)
Printer.print_success('Experiment `{}` was created'.format(response.id)) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create experiment.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
def run_group():
click.echo('Creating an experiment group with the following definition:')
experiments_def = specification.experiments_def
get_group_experiments_info(**experiments_def)
experiment_group = ExperimentGroupConfig(name=name, description=description, tags=tags, content=specification._data) # pylint:disable=protected-access
try:
response = project_client.create_experiment_group(user, project_name, experiment_group)
cache.cache(config_manager=GroupManager, response=response)
Printer.print_success('Experiment group {} was created'.format(response.id)) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create experiment group.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
def run_job():
click.echo('Creating a job.')
job = JobConfig(name=name, description=description, tags=tags, config=specification.parsed_data, ttl=ttl)
try:
response = project_client.create_job(user, project_name, job)
cache.cache(config_manager=JobManager, response=response)
Printer.print_success('Job {} was created'.format(response.id)) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create job.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
def run_build():
click.echo('Creating a build.')
job = JobConfig(name=name, description=description, tags=tags, config=specification.parsed_data, ttl=ttl)
try:
response = project_client.create_build(user, project_name, job)
cache.cache(config_manager=BuildJobManager, response=response)
Printer.print_success('Build {} was created'.format(response.id)) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not create build.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
logs = None
if specification.is_experiment:
run_experiment()
logs = experiment_logs # depends on [control=['if'], data=[]]
elif specification.is_group:
run_group() # depends on [control=['if'], data=[]]
elif specification.is_job:
run_job()
logs = job_logs # depends on [control=['if'], data=[]]
elif specification.is_build:
run_build()
logs = build_logs # depends on [control=['if'], data=[]]
# Check if we need to invoke logs
if l and logs:
ctx.obj = {'project': '{}/{}'.format(user, project_name)}
ctx.invoke(logs) # depends on [control=['if'], data=[]] |
def _authentication(request_fun):
"""Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try at all sensible places to
log in before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
def f():
c.get("/services")
return 42
print _authentication(f)
"""
@wraps(request_fun)
def wrapper(self, *args, **kwargs):
if self.token is _NoAuthenticationToken:
# Not yet logged in.
if self.autologin and self.username and self.password:
# This will throw an uncaught
# AuthenticationError if it fails.
self.login()
else:
# Try the request anyway without authentication.
# Most requests will fail. Some will succeed, such as
# 'GET server/info'.
with _handle_auth_error("Request aborted: not logged in."):
return request_fun(self, *args, **kwargs)
try:
# Issue the request
return request_fun(self, *args, **kwargs)
except HTTPError as he:
if he.status == 401 and self.autologin:
# Authentication failed. Try logging in, and then
# rerunning the request. If either step fails, throw
# an AuthenticationError and give up.
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
"Autologin succeeded, but there was an auth error on "
"next request. Something is very wrong."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
"Request failed: Session is not logged in.", he)
else:
raise
return wrapper | def function[_authentication, parameter[request_fun]]:
constant[Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try at all sensible places to
log in before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
def f():
c.get("/services")
return 42
print _authentication(f)
]
def function[wrapper, parameter[self]]:
if compare[name[self].token is name[_NoAuthenticationToken]] begin[:]
if <ast.BoolOp object at 0x7da1b1607d30> begin[:]
call[name[self].login, parameter[]]
<ast.Try object at 0x7da1b16051e0>
return[name[wrapper]] | keyword[def] identifier[_authentication] ( identifier[request_fun] ):
literal[string]
@ identifier[wraps] ( identifier[request_fun] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[self] . identifier[token] keyword[is] identifier[_NoAuthenticationToken] :
keyword[if] identifier[self] . identifier[autologin] keyword[and] identifier[self] . identifier[username] keyword[and] identifier[self] . identifier[password] :
identifier[self] . identifier[login] ()
keyword[else] :
keyword[with] identifier[_handle_auth_error] ( literal[string] ):
keyword[return] identifier[request_fun] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[try] :
keyword[return] identifier[request_fun] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[HTTPError] keyword[as] identifier[he] :
keyword[if] identifier[he] . identifier[status] == literal[int] keyword[and] identifier[self] . identifier[autologin] :
keyword[with] identifier[_handle_auth_error] ( literal[string] ):
identifier[self] . identifier[login] ()
keyword[with] identifier[_handle_auth_error] (
literal[string]
literal[string] ):
keyword[return] identifier[request_fun] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[he] . identifier[status] == literal[int] keyword[and] keyword[not] identifier[self] . identifier[autologin] :
keyword[raise] identifier[AuthenticationError] (
literal[string] , identifier[he] )
keyword[else] :
keyword[raise]
keyword[return] identifier[wrapper] | def _authentication(request_fun):
"""Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try at all sensible places to
log in before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
def f():
c.get("/services")
return 42
print _authentication(f)
"""
@wraps(request_fun)
def wrapper(self, *args, **kwargs):
if self.token is _NoAuthenticationToken:
# Not yet logged in.
if self.autologin and self.username and self.password:
# This will throw an uncaught
# AuthenticationError if it fails.
self.login() # depends on [control=['if'], data=[]]
else:
# Try the request anyway without authentication.
# Most requests will fail. Some will succeed, such as
# 'GET server/info'.
with _handle_auth_error('Request aborted: not logged in.'):
return request_fun(self, *args, **kwargs) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
try:
# Issue the request
return request_fun(self, *args, **kwargs) # depends on [control=['try'], data=[]]
except HTTPError as he:
if he.status == 401 and self.autologin:
# Authentication failed. Try logging in, and then
# rerunning the request. If either step fails, throw
# an AuthenticationError and give up.
with _handle_auth_error('Autologin failed.'):
self.login() # depends on [control=['with'], data=[]]
with _handle_auth_error('Autologin succeeded, but there was an auth error on next request. Something is very wrong.'):
return request_fun(self, *args, **kwargs) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
elif he.status == 401 and (not self.autologin):
raise AuthenticationError('Request failed: Session is not logged in.', he) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['he']]
return wrapper |
def _trim_external_pores(self, shape):
r'''
'''
# Find all pores within the domain
Ps = topotools.isoutside(coords=self['pore.coords'], shape=shape)
self['pore.external'] = False
self['pore.external'][Ps] = True
# Find which internal pores are delaunay
Ps = (~self['pore.external'])*self['pore.delaunay']
# Find all pores connected to an internal delaunay pore
Ps = self.find_neighbor_pores(pores=Ps, include_input=True)
# Mark them all as keepers
self['pore.keep'] = False
self['pore.keep'][Ps] = True
# Trim all bad pores
topotools.trim(network=self, pores=~self['pore.keep'])
# Now label boundary pores
self['pore.boundary'] = False
self['pore.boundary'] = self['pore.delaunay']*self['pore.external']
# Label Voronoi pores on boundary
Ps = self.find_neighbor_pores(pores=self.pores('boundary'))
Ps = self['pore.voronoi']*self.tomask(pores=Ps)
self['pore.boundary'][Ps] = True
# Label Voronoi and interconnect throats on boundary
self['throat.boundary'] = False
Ps = self.pores('boundary')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.boundary'][Ts] = True
# Trim throats between Delaunay boundary pores
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
topotools.trim(network=self, throats=Ts)
# Move Delaunay boundary pores to centroid of Voronoi facet
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
for P in Ps:
Ns = self.find_neighbor_pores(pores=P)
Ns = Ps = self['pore.voronoi']*self.tomask(pores=Ns)
coords = sp.mean(self['pore.coords'][Ns], axis=0)
self['pore.coords'][P] = coords
self['pore.internal'] = ~self['pore.boundary']
Ps = self.pores('internal')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.internal'] = False
self['throat.internal'][Ts] = True
# Label surface pores and throats between boundary and internal
Ts = self.throats(['boundary', 'internal'], mode='not')
self['throat.surface'] = False
self['throat.surface'][Ts] = True
surf_pores = self['throat.conns'][Ts].flatten()
surf_pores = sp.unique(surf_pores[~self['pore.boundary'][surf_pores]])
self['pore.surface'] = False
self['pore.surface'][surf_pores] = True
# Clean-up
del self['pore.external']
del self['pore.keep'] | def function[_trim_external_pores, parameter[self, shape]]:
constant[
]
variable[Ps] assign[=] call[name[topotools].isoutside, parameter[]]
call[name[self]][constant[pore.external]] assign[=] constant[False]
call[call[name[self]][constant[pore.external]]][name[Ps]] assign[=] constant[True]
variable[Ps] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b26ac340> * call[name[self]][constant[pore.delaunay]]]
variable[Ps] assign[=] call[name[self].find_neighbor_pores, parameter[]]
call[name[self]][constant[pore.keep]] assign[=] constant[False]
call[call[name[self]][constant[pore.keep]]][name[Ps]] assign[=] constant[True]
call[name[topotools].trim, parameter[]]
call[name[self]][constant[pore.boundary]] assign[=] constant[False]
call[name[self]][constant[pore.boundary]] assign[=] binary_operation[call[name[self]][constant[pore.delaunay]] * call[name[self]][constant[pore.external]]]
variable[Ps] assign[=] call[name[self].find_neighbor_pores, parameter[]]
variable[Ps] assign[=] binary_operation[call[name[self]][constant[pore.voronoi]] * call[name[self].tomask, parameter[]]]
call[call[name[self]][constant[pore.boundary]]][name[Ps]] assign[=] constant[True]
call[name[self]][constant[throat.boundary]] assign[=] constant[False]
variable[Ps] assign[=] call[name[self].pores, parameter[constant[boundary]]]
variable[Ts] assign[=] call[name[self].find_neighbor_throats, parameter[]]
call[call[name[self]][constant[throat.boundary]]][name[Ts]] assign[=] constant[True]
variable[Ps] assign[=] call[name[self].pores, parameter[]]
variable[Ts] assign[=] call[name[self].find_neighbor_throats, parameter[]]
call[name[topotools].trim, parameter[]]
variable[Ps] assign[=] call[name[self].pores, parameter[]]
for taget[name[P]] in starred[name[Ps]] begin[:]
variable[Ns] assign[=] call[name[self].find_neighbor_pores, parameter[]]
variable[Ns] assign[=] binary_operation[call[name[self]][constant[pore.voronoi]] * call[name[self].tomask, parameter[]]]
variable[coords] assign[=] call[name[sp].mean, parameter[call[call[name[self]][constant[pore.coords]]][name[Ns]]]]
call[call[name[self]][constant[pore.coords]]][name[P]] assign[=] name[coords]
call[name[self]][constant[pore.internal]] assign[=] <ast.UnaryOp object at 0x7da1b26adf90>
variable[Ps] assign[=] call[name[self].pores, parameter[constant[internal]]]
variable[Ts] assign[=] call[name[self].find_neighbor_throats, parameter[]]
call[name[self]][constant[throat.internal]] assign[=] constant[False]
call[call[name[self]][constant[throat.internal]]][name[Ts]] assign[=] constant[True]
variable[Ts] assign[=] call[name[self].throats, parameter[list[[<ast.Constant object at 0x7da18c4ce680>, <ast.Constant object at 0x7da18c4cf820>]]]]
call[name[self]][constant[throat.surface]] assign[=] constant[False]
call[call[name[self]][constant[throat.surface]]][name[Ts]] assign[=] constant[True]
variable[surf_pores] assign[=] call[call[call[name[self]][constant[throat.conns]]][name[Ts]].flatten, parameter[]]
variable[surf_pores] assign[=] call[name[sp].unique, parameter[call[name[surf_pores]][<ast.UnaryOp object at 0x7da18c4ce110>]]]
call[name[self]][constant[pore.surface]] assign[=] constant[False]
call[call[name[self]][constant[pore.surface]]][name[surf_pores]] assign[=] constant[True]
<ast.Delete object at 0x7da18c4ce4a0>
<ast.Delete object at 0x7da18c4cc340> | keyword[def] identifier[_trim_external_pores] ( identifier[self] , identifier[shape] ):
literal[string]
identifier[Ps] = identifier[topotools] . identifier[isoutside] ( identifier[coords] = identifier[self] [ literal[string] ], identifier[shape] = identifier[shape] )
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[Ps] ]= keyword[True]
identifier[Ps] =(~ identifier[self] [ literal[string] ])* identifier[self] [ literal[string] ]
identifier[Ps] = identifier[self] . identifier[find_neighbor_pores] ( identifier[pores] = identifier[Ps] , identifier[include_input] = keyword[True] )
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[Ps] ]= keyword[True]
identifier[topotools] . identifier[trim] ( identifier[network] = identifier[self] , identifier[pores] =~ identifier[self] [ literal[string] ])
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ]= identifier[self] [ literal[string] ]* identifier[self] [ literal[string] ]
identifier[Ps] = identifier[self] . identifier[find_neighbor_pores] ( identifier[pores] = identifier[self] . identifier[pores] ( literal[string] ))
identifier[Ps] = identifier[self] [ literal[string] ]* identifier[self] . identifier[tomask] ( identifier[pores] = identifier[Ps] )
identifier[self] [ literal[string] ][ identifier[Ps] ]= keyword[True]
identifier[self] [ literal[string] ]= keyword[False]
identifier[Ps] = identifier[self] . identifier[pores] ( literal[string] )
identifier[Ts] = identifier[self] . identifier[find_neighbor_throats] ( identifier[pores] = identifier[Ps] , identifier[mode] = literal[string] )
identifier[self] [ literal[string] ][ identifier[Ts] ]= keyword[True]
identifier[Ps] = identifier[self] . identifier[pores] ( identifier[labels] =[ literal[string] , literal[string] ], identifier[mode] = literal[string] )
identifier[Ts] = identifier[self] . identifier[find_neighbor_throats] ( identifier[pores] = identifier[Ps] , identifier[mode] = literal[string] )
identifier[topotools] . identifier[trim] ( identifier[network] = identifier[self] , identifier[throats] = identifier[Ts] )
identifier[Ps] = identifier[self] . identifier[pores] ( identifier[labels] =[ literal[string] , literal[string] ], identifier[mode] = literal[string] )
keyword[for] identifier[P] keyword[in] identifier[Ps] :
identifier[Ns] = identifier[self] . identifier[find_neighbor_pores] ( identifier[pores] = identifier[P] )
identifier[Ns] = identifier[Ps] = identifier[self] [ literal[string] ]* identifier[self] . identifier[tomask] ( identifier[pores] = identifier[Ns] )
identifier[coords] = identifier[sp] . identifier[mean] ( identifier[self] [ literal[string] ][ identifier[Ns] ], identifier[axis] = literal[int] )
identifier[self] [ literal[string] ][ identifier[P] ]= identifier[coords]
identifier[self] [ literal[string] ]=~ identifier[self] [ literal[string] ]
identifier[Ps] = identifier[self] . identifier[pores] ( literal[string] )
identifier[Ts] = identifier[self] . identifier[find_neighbor_throats] ( identifier[pores] = identifier[Ps] , identifier[mode] = literal[string] )
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[Ts] ]= keyword[True]
identifier[Ts] = identifier[self] . identifier[throats] ([ literal[string] , literal[string] ], identifier[mode] = literal[string] )
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[Ts] ]= keyword[True]
identifier[surf_pores] = identifier[self] [ literal[string] ][ identifier[Ts] ]. identifier[flatten] ()
identifier[surf_pores] = identifier[sp] . identifier[unique] ( identifier[surf_pores] [~ identifier[self] [ literal[string] ][ identifier[surf_pores] ]])
identifier[self] [ literal[string] ]= keyword[False]
identifier[self] [ literal[string] ][ identifier[surf_pores] ]= keyword[True]
keyword[del] identifier[self] [ literal[string] ]
keyword[del] identifier[self] [ literal[string] ] | def _trim_external_pores(self, shape):
"""
"""
# Find all pores within the domain
Ps = topotools.isoutside(coords=self['pore.coords'], shape=shape)
self['pore.external'] = False
self['pore.external'][Ps] = True
# Find which internal pores are delaunay
Ps = ~self['pore.external'] * self['pore.delaunay']
# Find all pores connected to an internal delaunay pore
Ps = self.find_neighbor_pores(pores=Ps, include_input=True)
# Mark them all as keepers
self['pore.keep'] = False
self['pore.keep'][Ps] = True
# Trim all bad pores
topotools.trim(network=self, pores=~self['pore.keep'])
# Now label boundary pores
self['pore.boundary'] = False
self['pore.boundary'] = self['pore.delaunay'] * self['pore.external']
# Label Voronoi pores on boundary
Ps = self.find_neighbor_pores(pores=self.pores('boundary'))
Ps = self['pore.voronoi'] * self.tomask(pores=Ps)
self['pore.boundary'][Ps] = True
# Label Voronoi and interconnect throats on boundary
self['throat.boundary'] = False
Ps = self.pores('boundary')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.boundary'][Ts] = True
# Trim throats between Delaunay boundary pores
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
topotools.trim(network=self, throats=Ts)
# Move Delaunay boundary pores to centroid of Voronoi facet
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
for P in Ps:
Ns = self.find_neighbor_pores(pores=P)
Ns = Ps = self['pore.voronoi'] * self.tomask(pores=Ns)
coords = sp.mean(self['pore.coords'][Ns], axis=0)
self['pore.coords'][P] = coords # depends on [control=['for'], data=['P']]
self['pore.internal'] = ~self['pore.boundary']
Ps = self.pores('internal')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.internal'] = False
self['throat.internal'][Ts] = True
# Label surface pores and throats between boundary and internal
Ts = self.throats(['boundary', 'internal'], mode='not')
self['throat.surface'] = False
self['throat.surface'][Ts] = True
surf_pores = self['throat.conns'][Ts].flatten()
surf_pores = sp.unique(surf_pores[~self['pore.boundary'][surf_pores]])
self['pore.surface'] = False
self['pore.surface'][surf_pores] = True
# Clean-up
del self['pore.external']
del self['pore.keep'] |
def print_javascript_error(self):
"""Print to the info log the gathered javascript error
If no error is found then nothing is printed
"""
errors = self.get_javascript_error(return_type='list')
if errors:
self.info_log("Javascript error:")
for error in errors:
self.info_log(error) | def function[print_javascript_error, parameter[self]]:
constant[Print to the info log the gathered javascript error
If no error is found then nothing is printed
]
variable[errors] assign[=] call[name[self].get_javascript_error, parameter[]]
if name[errors] begin[:]
call[name[self].info_log, parameter[constant[Javascript error:]]]
for taget[name[error]] in starred[name[errors]] begin[:]
call[name[self].info_log, parameter[name[error]]] | keyword[def] identifier[print_javascript_error] ( identifier[self] ):
literal[string]
identifier[errors] = identifier[self] . identifier[get_javascript_error] ( identifier[return_type] = literal[string] )
keyword[if] identifier[errors] :
identifier[self] . identifier[info_log] ( literal[string] )
keyword[for] identifier[error] keyword[in] identifier[errors] :
identifier[self] . identifier[info_log] ( identifier[error] ) | def print_javascript_error(self):
"""Print to the info log the gathered javascript error
If no error is found then nothing is printed
"""
errors = self.get_javascript_error(return_type='list')
if errors:
self.info_log('Javascript error:')
for error in errors:
self.info_log(error) # depends on [control=['for'], data=['error']] # depends on [control=['if'], data=[]] |
def name(self):
"""A unique name for this scraper."""
return ''.join('_%s' % c if c.isupper() else c for c in self.__class__.__name__).strip('_').lower() | def function[name, parameter[self]]:
constant[A unique name for this scraper.]
return[call[call[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da207f013c0>]].strip, parameter[constant[_]]].lower, parameter[]]] | keyword[def] identifier[name] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( literal[string] % identifier[c] keyword[if] identifier[c] . identifier[isupper] () keyword[else] identifier[c] keyword[for] identifier[c] keyword[in] identifier[self] . identifier[__class__] . identifier[__name__] ). identifier[strip] ( literal[string] ). identifier[lower] () | def name(self):
"""A unique name for this scraper."""
return ''.join(('_%s' % c if c.isupper() else c for c in self.__class__.__name__)).strip('_').lower() |
def matches_count(count, options):
"""
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
"""
if options.get("count") is not None:
return count == int(options["count"])
if options.get("maximum") is not None and int(options["maximum"]) < count:
return False
if options.get("minimum") is not None and int(options["minimum"]) > count:
return False
if options.get("between") is not None and count not in options["between"]:
return False
return True | def function[matches_count, parameter[count, options]]:
constant[
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
]
if compare[call[name[options].get, parameter[constant[count]]] is_not constant[None]] begin[:]
return[compare[name[count] equal[==] call[name[int], parameter[call[name[options]][constant[count]]]]]]
if <ast.BoolOp object at 0x7da1b02112d0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b033c910> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b033c070> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[matches_count] ( identifier[count] , identifier[options] ):
literal[string]
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[count] == identifier[int] ( identifier[options] [ literal[string] ])
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] identifier[int] ( identifier[options] [ literal[string] ])< identifier[count] :
keyword[return] keyword[False]
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] identifier[int] ( identifier[options] [ literal[string] ])> identifier[count] :
keyword[return] keyword[False]
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[and] identifier[count] keyword[not] keyword[in] identifier[options] [ literal[string] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def matches_count(count, options):
"""
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
"""
if options.get('count') is not None:
return count == int(options['count']) # depends on [control=['if'], data=[]]
if options.get('maximum') is not None and int(options['maximum']) < count:
return False # depends on [control=['if'], data=[]]
if options.get('minimum') is not None and int(options['minimum']) > count:
return False # depends on [control=['if'], data=[]]
if options.get('between') is not None and count not in options['between']:
return False # depends on [control=['if'], data=[]]
return True |
def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza <https://docs.python.org/2/library/__main__.html>`_ based on the value of
:code:`module_name`::
if module_name is None or module_name == '__main__':
# execute command
Call this function at module scope with :code:`module_name=__name__`, if you would like your module to act as either
a reusable module or a standalone program. Otherwise, if you wish this function to unconditionally instantiate and
execute :code:`command_class`, pass :const:`None` as the value of :code:`module_name`.
:param command_class: Search command class to instantiate and execute.
:type command_class: type
:param argv: List of arguments to the command.
:type argv: list or tuple
:param input_file: File from which the command will read data.
:type input_file: :code:`file`
:param output_file: File to which the command will write data.
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
:returns: :const:`None`
**Example**
.. code-block:: python
:linenos:
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand, module_name=__name__)
Dispatches the :code:`SomeStreamingCommand`, if and only if :code:`__name__` is equal to :code:`'__main__'`.
**Example**
.. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand)
Unconditionally dispatches :code:`SomeStreamingCommand`.
"""
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
command_class().process(argv, input_file, output_file) | def function[dispatch, parameter[command_class, argv, input_file, output_file, module_name]]:
constant[ Instantiates and executes a search command class
This function implements a `conditional script stanza <https://docs.python.org/2/library/__main__.html>`_ based on the value of
:code:`module_name`::
if module_name is None or module_name == '__main__':
# execute command
Call this function at module scope with :code:`module_name=__name__`, if you would like your module to act as either
a reusable module or a standalone program. Otherwise, if you wish this function to unconditionally instantiate and
execute :code:`command_class`, pass :const:`None` as the value of :code:`module_name`.
:param command_class: Search command class to instantiate and execute.
:type command_class: type
:param argv: List of arguments to the command.
:type argv: list or tuple
:param input_file: File from which the command will read data.
:type input_file: :code:`file`
:param output_file: File to which the command will write data.
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
:returns: :const:`None`
**Example**
.. code-block:: python
:linenos:
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand, module_name=__name__)
Dispatches the :code:`SomeStreamingCommand`, if and only if :code:`__name__` is equal to :code:`'__main__'`.
**Example**
.. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand)
Unconditionally dispatches :code:`SomeStreamingCommand`.
]
assert[call[name[issubclass], parameter[name[command_class], name[SearchCommand]]]]
if <ast.BoolOp object at 0x7da1b178c520> begin[:]
call[call[name[command_class], parameter[]].process, parameter[name[argv], name[input_file], name[output_file]]] | keyword[def] identifier[dispatch] ( identifier[command_class] , identifier[argv] = identifier[sys] . identifier[argv] , identifier[input_file] = identifier[sys] . identifier[stdin] , identifier[output_file] = identifier[sys] . identifier[stdout] , identifier[module_name] = keyword[None] ):
literal[string]
keyword[assert] identifier[issubclass] ( identifier[command_class] , identifier[SearchCommand] )
keyword[if] identifier[module_name] keyword[is] keyword[None] keyword[or] identifier[module_name] == literal[string] :
identifier[command_class] (). identifier[process] ( identifier[argv] , identifier[input_file] , identifier[output_file] ) | def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza <https://docs.python.org/2/library/__main__.html>`_ based on the value of
:code:`module_name`::
if module_name is None or module_name == '__main__':
# execute command
Call this function at module scope with :code:`module_name=__name__`, if you would like your module to act as either
a reusable module or a standalone program. Otherwise, if you wish this function to unconditionally instantiate and
execute :code:`command_class`, pass :const:`None` as the value of :code:`module_name`.
:param command_class: Search command class to instantiate and execute.
:type command_class: type
:param argv: List of arguments to the command.
:type argv: list or tuple
:param input_file: File from which the command will read data.
:type input_file: :code:`file`
:param output_file: File to which the command will write data.
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
:returns: :const:`None`
**Example**
.. code-block:: python
:linenos:
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand, module_name=__name__)
Dispatches the :code:`SomeStreamingCommand`, if and only if :code:`__name__` is equal to :code:`'__main__'`.
**Example**
.. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand)
Unconditionally dispatches :code:`SomeStreamingCommand`.
"""
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
command_class().process(argv, input_file, output_file) # depends on [control=['if'], data=[]] |
def _compute_progress_at_time(self, t):
"""
Calculate the modelled progress state for the given time moment.
:returns: tuple (x, v) of the progress level and progress speed.
"""
t0, x0, v0, ve = self._t0, self._x0, self._v0, self._ve
z = (v0 - ve) * math.exp(-self.BETA * (t - t0))
vt = ve + z
xt = clamp(x0 + ve * (t - t0) + (v0 - ve - z) / self.BETA, 0, 1)
return xt, vt | def function[_compute_progress_at_time, parameter[self, t]]:
constant[
Calculate the modelled progress state for the given time moment.
:returns: tuple (x, v) of the progress level and progress speed.
]
<ast.Tuple object at 0x7da1b05beb60> assign[=] tuple[[<ast.Attribute object at 0x7da1b05bd510>, <ast.Attribute object at 0x7da1b05bed40>, <ast.Attribute object at 0x7da1b05be920>, <ast.Attribute object at 0x7da1b05be980>]]
variable[z] assign[=] binary_operation[binary_operation[name[v0] - name[ve]] * call[name[math].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b05bfa00> * binary_operation[name[t] - name[t0]]]]]]
variable[vt] assign[=] binary_operation[name[ve] + name[z]]
variable[xt] assign[=] call[name[clamp], parameter[binary_operation[binary_operation[name[x0] + binary_operation[name[ve] * binary_operation[name[t] - name[t0]]]] + binary_operation[binary_operation[binary_operation[name[v0] - name[ve]] - name[z]] / name[self].BETA]], constant[0], constant[1]]]
return[tuple[[<ast.Name object at 0x7da20c6e63b0>, <ast.Name object at 0x7da20c6e60e0>]]] | keyword[def] identifier[_compute_progress_at_time] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t0] , identifier[x0] , identifier[v0] , identifier[ve] = identifier[self] . identifier[_t0] , identifier[self] . identifier[_x0] , identifier[self] . identifier[_v0] , identifier[self] . identifier[_ve]
identifier[z] =( identifier[v0] - identifier[ve] )* identifier[math] . identifier[exp] (- identifier[self] . identifier[BETA] *( identifier[t] - identifier[t0] ))
identifier[vt] = identifier[ve] + identifier[z]
identifier[xt] = identifier[clamp] ( identifier[x0] + identifier[ve] *( identifier[t] - identifier[t0] )+( identifier[v0] - identifier[ve] - identifier[z] )/ identifier[self] . identifier[BETA] , literal[int] , literal[int] )
keyword[return] identifier[xt] , identifier[vt] | def _compute_progress_at_time(self, t):
"""
Calculate the modelled progress state for the given time moment.
:returns: tuple (x, v) of the progress level and progress speed.
"""
(t0, x0, v0, ve) = (self._t0, self._x0, self._v0, self._ve)
z = (v0 - ve) * math.exp(-self.BETA * (t - t0))
vt = ve + z
xt = clamp(x0 + ve * (t - t0) + (v0 - ve - z) / self.BETA, 0, 1)
return (xt, vt) |
def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception("Exception while servicing timeout callback: ") | def function[service_timeouts, parameter[self]]:
constant[
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
]
variable[queue] assign[=] name[self]._queue
if name[self]._new_timers begin[:]
variable[new_timers] assign[=] name[self]._new_timers
while name[new_timers] begin[:]
call[name[heappush], parameter[name[queue], call[name[new_timers].pop, parameter[]]]]
if name[queue] begin[:]
variable[now] assign[=] call[name[time].time, parameter[]]
while name[queue] begin[:]
<ast.Try object at 0x7da1b22ba2f0> | keyword[def] identifier[service_timeouts] ( identifier[self] ):
literal[string]
identifier[queue] = identifier[self] . identifier[_queue]
keyword[if] identifier[self] . identifier[_new_timers] :
identifier[new_timers] = identifier[self] . identifier[_new_timers]
keyword[while] identifier[new_timers] :
identifier[heappush] ( identifier[queue] , identifier[new_timers] . identifier[pop] ())
keyword[if] identifier[queue] :
identifier[now] = identifier[time] . identifier[time] ()
keyword[while] identifier[queue] :
keyword[try] :
identifier[timer] = identifier[queue] [ literal[int] ][ literal[int] ]
keyword[if] identifier[timer] . identifier[finish] ( identifier[now] ):
identifier[heappop] ( identifier[queue] )
keyword[else] :
keyword[return] identifier[timer] . identifier[end]
keyword[except] identifier[Exception] :
identifier[log] . identifier[exception] ( literal[string] ) | def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop()) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue) # depends on [control=['if'], data=[]]
else:
return timer.end # depends on [control=['try'], data=[]]
except Exception:
log.exception('Exception while servicing timeout callback: ') # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] |
def get_year_season(day=None):
"""
Returns a guess of the year and season of the current semester.
"""
if day is None:
day = date.today()
year = day.year
if day.month > 3 and day.month <= 7:
season = Semester.SUMMER
elif day.month > 7 and day.month <= 10:
season = Semester.FALL
else:
season = Semester.SPRING
if day.month > 10:
year += 1
return year, season | def function[get_year_season, parameter[day]]:
constant[
Returns a guess of the year and season of the current semester.
]
if compare[name[day] is constant[None]] begin[:]
variable[day] assign[=] call[name[date].today, parameter[]]
variable[year] assign[=] name[day].year
if <ast.BoolOp object at 0x7da20cabd870> begin[:]
variable[season] assign[=] name[Semester].SUMMER
return[tuple[[<ast.Name object at 0x7da20cabcb20>, <ast.Name object at 0x7da20cabfb50>]]] | keyword[def] identifier[get_year_season] ( identifier[day] = keyword[None] ):
literal[string]
keyword[if] identifier[day] keyword[is] keyword[None] :
identifier[day] = identifier[date] . identifier[today] ()
identifier[year] = identifier[day] . identifier[year]
keyword[if] identifier[day] . identifier[month] > literal[int] keyword[and] identifier[day] . identifier[month] <= literal[int] :
identifier[season] = identifier[Semester] . identifier[SUMMER]
keyword[elif] identifier[day] . identifier[month] > literal[int] keyword[and] identifier[day] . identifier[month] <= literal[int] :
identifier[season] = identifier[Semester] . identifier[FALL]
keyword[else] :
identifier[season] = identifier[Semester] . identifier[SPRING]
keyword[if] identifier[day] . identifier[month] > literal[int] :
identifier[year] += literal[int]
keyword[return] identifier[year] , identifier[season] | def get_year_season(day=None):
"""
Returns a guess of the year and season of the current semester.
"""
if day is None:
day = date.today() # depends on [control=['if'], data=['day']]
year = day.year
if day.month > 3 and day.month <= 7:
season = Semester.SUMMER # depends on [control=['if'], data=[]]
elif day.month > 7 and day.month <= 10:
season = Semester.FALL # depends on [control=['if'], data=[]]
else:
season = Semester.SPRING
if day.month > 10:
year += 1 # depends on [control=['if'], data=[]]
return (year, season) |
def add():
"""
Add a blurb (a Misc/NEWS entry) to the current CPython repo.
"""
editor = find_editor()
handle, tmp_path = tempfile.mkstemp(".rst")
os.close(handle)
atexit.register(lambda : os.unlink(tmp_path))
def init_tmp_with_template():
with open(tmp_path, "wt", encoding="utf-8") as file:
# hack:
# my editor likes to strip trailing whitespace from lines.
# normally this is a good idea. but in the case of the template
# it's unhelpful.
# so, manually ensure there's a space at the end of the bpo line.
text = template
bpo_line = ".. bpo:"
without_space = "\n" + bpo_line + "\n"
with_space = "\n" + bpo_line + " \n"
if without_space not in text:
sys.exit("Can't find BPO line to ensure there's a space on the end!")
text = text.replace(without_space, with_space)
file.write(text)
init_tmp_with_template()
# We need to be clever about EDITOR.
# On the one hand, it might be a legitimate path to an
# executable containing spaces.
# On the other hand, it might be a partial command-line
# with options.
if shutil.which(editor):
args = [editor]
else:
args = list(shlex.split(editor))
if not shutil.which(args[0]):
sys.exit(f("Invalid GIT_EDITOR / EDITOR value: {editor}"))
args.append(tmp_path)
while True:
subprocess.run(args)
failure = None
blurb = Blurbs()
try:
blurb.load(tmp_path)
except BlurbError as e:
failure = str(e)
if not failure:
assert len(blurb) # if parse_blurb succeeds, we should always have a body
if len(blurb) > 1:
failure = "Too many entries! Don't specify '..' on a line by itself."
if failure:
print()
print(f("Error: {failure}"))
print()
try:
prompt("Hit return to retry (or Ctrl-C to abort)")
except KeyboardInterrupt:
print()
return
print()
continue
break
path = blurb.save_next()
git_add_files.append(path)
flush_git_add_files()
print("Ready for commit.") | def function[add, parameter[]]:
constant[
Add a blurb (a Misc/NEWS entry) to the current CPython repo.
]
variable[editor] assign[=] call[name[find_editor], parameter[]]
<ast.Tuple object at 0x7da1b1d4e050> assign[=] call[name[tempfile].mkstemp, parameter[constant[.rst]]]
call[name[os].close, parameter[name[handle]]]
call[name[atexit].register, parameter[<ast.Lambda object at 0x7da1b1d4e3e0>]]
def function[init_tmp_with_template, parameter[]]:
with call[name[open], parameter[name[tmp_path], constant[wt]]] begin[:]
variable[text] assign[=] name[template]
variable[bpo_line] assign[=] constant[.. bpo:]
variable[without_space] assign[=] binary_operation[binary_operation[constant[
] + name[bpo_line]] + constant[
]]
variable[with_space] assign[=] binary_operation[binary_operation[constant[
] + name[bpo_line]] + constant[
]]
if compare[name[without_space] <ast.NotIn object at 0x7da2590d7190> name[text]] begin[:]
call[name[sys].exit, parameter[constant[Can't find BPO line to ensure there's a space on the end!]]]
variable[text] assign[=] call[name[text].replace, parameter[name[without_space], name[with_space]]]
call[name[file].write, parameter[name[text]]]
call[name[init_tmp_with_template], parameter[]]
if call[name[shutil].which, parameter[name[editor]]] begin[:]
variable[args] assign[=] list[[<ast.Name object at 0x7da2041d80d0>]]
call[name[args].append, parameter[name[tmp_path]]]
while constant[True] begin[:]
call[name[subprocess].run, parameter[name[args]]]
variable[failure] assign[=] constant[None]
variable[blurb] assign[=] call[name[Blurbs], parameter[]]
<ast.Try object at 0x7da2041dace0>
if <ast.UnaryOp object at 0x7da2041dbcd0> begin[:]
assert[call[name[len], parameter[name[blurb]]]]
if compare[call[name[len], parameter[name[blurb]]] greater[>] constant[1]] begin[:]
variable[failure] assign[=] constant[Too many entries! Don't specify '..' on a line by itself.]
if name[failure] begin[:]
call[name[print], parameter[]]
call[name[print], parameter[call[name[f], parameter[constant[Error: {failure}]]]]]
call[name[print], parameter[]]
<ast.Try object at 0x7da1b2344d60>
call[name[print], parameter[]]
continue
break
variable[path] assign[=] call[name[blurb].save_next, parameter[]]
call[name[git_add_files].append, parameter[name[path]]]
call[name[flush_git_add_files], parameter[]]
call[name[print], parameter[constant[Ready for commit.]]] | keyword[def] identifier[add] ():
literal[string]
identifier[editor] = identifier[find_editor] ()
identifier[handle] , identifier[tmp_path] = identifier[tempfile] . identifier[mkstemp] ( literal[string] )
identifier[os] . identifier[close] ( identifier[handle] )
identifier[atexit] . identifier[register] ( keyword[lambda] : identifier[os] . identifier[unlink] ( identifier[tmp_path] ))
keyword[def] identifier[init_tmp_with_template] ():
keyword[with] identifier[open] ( identifier[tmp_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[file] :
identifier[text] = identifier[template]
identifier[bpo_line] = literal[string]
identifier[without_space] = literal[string] + identifier[bpo_line] + literal[string]
identifier[with_space] = literal[string] + identifier[bpo_line] + literal[string]
keyword[if] identifier[without_space] keyword[not] keyword[in] identifier[text] :
identifier[sys] . identifier[exit] ( literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( identifier[without_space] , identifier[with_space] )
identifier[file] . identifier[write] ( identifier[text] )
identifier[init_tmp_with_template] ()
keyword[if] identifier[shutil] . identifier[which] ( identifier[editor] ):
identifier[args] =[ identifier[editor] ]
keyword[else] :
identifier[args] = identifier[list] ( identifier[shlex] . identifier[split] ( identifier[editor] ))
keyword[if] keyword[not] identifier[shutil] . identifier[which] ( identifier[args] [ literal[int] ]):
identifier[sys] . identifier[exit] ( identifier[f] ( literal[string] ))
identifier[args] . identifier[append] ( identifier[tmp_path] )
keyword[while] keyword[True] :
identifier[subprocess] . identifier[run] ( identifier[args] )
identifier[failure] = keyword[None]
identifier[blurb] = identifier[Blurbs] ()
keyword[try] :
identifier[blurb] . identifier[load] ( identifier[tmp_path] )
keyword[except] identifier[BlurbError] keyword[as] identifier[e] :
identifier[failure] = identifier[str] ( identifier[e] )
keyword[if] keyword[not] identifier[failure] :
keyword[assert] identifier[len] ( identifier[blurb] )
keyword[if] identifier[len] ( identifier[blurb] )> literal[int] :
identifier[failure] = literal[string]
keyword[if] identifier[failure] :
identifier[print] ()
identifier[print] ( identifier[f] ( literal[string] ))
identifier[print] ()
keyword[try] :
identifier[prompt] ( literal[string] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[print] ()
keyword[return]
identifier[print] ()
keyword[continue]
keyword[break]
identifier[path] = identifier[blurb] . identifier[save_next] ()
identifier[git_add_files] . identifier[append] ( identifier[path] )
identifier[flush_git_add_files] ()
identifier[print] ( literal[string] ) | def add():
"""
Add a blurb (a Misc/NEWS entry) to the current CPython repo.
"""
editor = find_editor()
(handle, tmp_path) = tempfile.mkstemp('.rst')
os.close(handle)
atexit.register(lambda : os.unlink(tmp_path))
def init_tmp_with_template():
with open(tmp_path, 'wt', encoding='utf-8') as file:
# hack:
# my editor likes to strip trailing whitespace from lines.
# normally this is a good idea. but in the case of the template
# it's unhelpful.
# so, manually ensure there's a space at the end of the bpo line.
text = template
bpo_line = '.. bpo:'
without_space = '\n' + bpo_line + '\n'
with_space = '\n' + bpo_line + ' \n'
if without_space not in text:
sys.exit("Can't find BPO line to ensure there's a space on the end!") # depends on [control=['if'], data=[]]
text = text.replace(without_space, with_space)
file.write(text) # depends on [control=['with'], data=['file']]
init_tmp_with_template()
# We need to be clever about EDITOR.
# On the one hand, it might be a legitimate path to an
# executable containing spaces.
# On the other hand, it might be a partial command-line
# with options.
if shutil.which(editor):
args = [editor] # depends on [control=['if'], data=[]]
else:
args = list(shlex.split(editor))
if not shutil.which(args[0]):
sys.exit(f('Invalid GIT_EDITOR / EDITOR value: {editor}')) # depends on [control=['if'], data=[]]
args.append(tmp_path)
while True:
subprocess.run(args)
failure = None
blurb = Blurbs()
try:
blurb.load(tmp_path) # depends on [control=['try'], data=[]]
except BlurbError as e:
failure = str(e) # depends on [control=['except'], data=['e']]
if not failure:
assert len(blurb) # if parse_blurb succeeds, we should always have a body
if len(blurb) > 1:
failure = "Too many entries! Don't specify '..' on a line by itself." # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if failure:
print()
print(f('Error: {failure}'))
print()
try:
prompt('Hit return to retry (or Ctrl-C to abort)') # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
print()
return # depends on [control=['except'], data=[]]
print()
continue # depends on [control=['if'], data=[]]
break # depends on [control=['while'], data=[]]
path = blurb.save_next()
git_add_files.append(path)
flush_git_add_files()
print('Ready for commit.') |
def syr2(X, u, v, alpha = 1.0, beta = 1.0, reordered=False):
r"""
Computes the projected rank 2 update of a cspmatrix X
.. math::
X := \alpha*P(u v^T + v u^T) + \beta X.
"""
assert X.is_factor is False, "cspmatrix factor object"
symb = X.symb
n = symb.n
snptr = symb.snptr
snode = symb.snode
blkval = X.blkval
blkptr = symb.blkptr
relptr = symb.relptr
snrowidx = symb.snrowidx
sncolptr = symb.sncolptr
if symb.p is not None and reordered is False:
up = u[symb.p]
vp = v[symb.p]
else:
up = u
vp = v
for k in range(symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = na + nn
for i in range(nn): blas.scal(beta, blkval, n = nj-i, offset = blkptr[k]+(nj+1)*i)
uk = up[snrowidx[sncolptr[k]:sncolptr[k+1]]]
vk = vp[snrowidx[sncolptr[k]:sncolptr[k+1]]]
blas.syr2(uk, vk, blkval, n = nn, offsetA = blkptr[k], ldA = nj, alpha = alpha)
blas.ger(uk, vk, blkval, m = na, n = nn, offsetx = nn, offsetA = blkptr[k]+nn, ldA = nj, alpha = alpha)
blas.ger(vk, uk, blkval, m = na, n = nn, offsetx = nn, offsetA = blkptr[k]+nn, ldA = nj, alpha = alpha)
return | def function[syr2, parameter[X, u, v, alpha, beta, reordered]]:
constant[
Computes the projected rank 2 update of a cspmatrix X
.. math::
X := \alpha*P(u v^T + v u^T) + \beta X.
]
assert[compare[name[X].is_factor is constant[False]]]
variable[symb] assign[=] name[X].symb
variable[n] assign[=] name[symb].n
variable[snptr] assign[=] name[symb].snptr
variable[snode] assign[=] name[symb].snode
variable[blkval] assign[=] name[X].blkval
variable[blkptr] assign[=] name[symb].blkptr
variable[relptr] assign[=] name[symb].relptr
variable[snrowidx] assign[=] name[symb].snrowidx
variable[sncolptr] assign[=] name[symb].sncolptr
if <ast.BoolOp object at 0x7da1b253a9e0> begin[:]
variable[up] assign[=] call[name[u]][name[symb].p]
variable[vp] assign[=] call[name[v]][name[symb].p]
for taget[name[k]] in starred[call[name[range], parameter[name[symb].Nsn]]] begin[:]
variable[nn] assign[=] binary_operation[call[name[snptr]][binary_operation[name[k] + constant[1]]] - call[name[snptr]][name[k]]]
variable[na] assign[=] binary_operation[call[name[relptr]][binary_operation[name[k] + constant[1]]] - call[name[relptr]][name[k]]]
variable[nj] assign[=] binary_operation[name[na] + name[nn]]
for taget[name[i]] in starred[call[name[range], parameter[name[nn]]]] begin[:]
call[name[blas].scal, parameter[name[beta], name[blkval]]]
variable[uk] assign[=] call[name[up]][call[name[snrowidx]][<ast.Slice object at 0x7da1b2536710>]]
variable[vk] assign[=] call[name[vp]][call[name[snrowidx]][<ast.Slice object at 0x7da1b253bd00>]]
call[name[blas].syr2, parameter[name[uk], name[vk], name[blkval]]]
call[name[blas].ger, parameter[name[uk], name[vk], name[blkval]]]
call[name[blas].ger, parameter[name[vk], name[uk], name[blkval]]]
return[None] | keyword[def] identifier[syr2] ( identifier[X] , identifier[u] , identifier[v] , identifier[alpha] = literal[int] , identifier[beta] = literal[int] , identifier[reordered] = keyword[False] ):
literal[string]
keyword[assert] identifier[X] . identifier[is_factor] keyword[is] keyword[False] , literal[string]
identifier[symb] = identifier[X] . identifier[symb]
identifier[n] = identifier[symb] . identifier[n]
identifier[snptr] = identifier[symb] . identifier[snptr]
identifier[snode] = identifier[symb] . identifier[snode]
identifier[blkval] = identifier[X] . identifier[blkval]
identifier[blkptr] = identifier[symb] . identifier[blkptr]
identifier[relptr] = identifier[symb] . identifier[relptr]
identifier[snrowidx] = identifier[symb] . identifier[snrowidx]
identifier[sncolptr] = identifier[symb] . identifier[sncolptr]
keyword[if] identifier[symb] . identifier[p] keyword[is] keyword[not] keyword[None] keyword[and] identifier[reordered] keyword[is] keyword[False] :
identifier[up] = identifier[u] [ identifier[symb] . identifier[p] ]
identifier[vp] = identifier[v] [ identifier[symb] . identifier[p] ]
keyword[else] :
identifier[up] = identifier[u]
identifier[vp] = identifier[v]
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[symb] . identifier[Nsn] ):
identifier[nn] = identifier[snptr] [ identifier[k] + literal[int] ]- identifier[snptr] [ identifier[k] ]
identifier[na] = identifier[relptr] [ identifier[k] + literal[int] ]- identifier[relptr] [ identifier[k] ]
identifier[nj] = identifier[na] + identifier[nn]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nn] ): identifier[blas] . identifier[scal] ( identifier[beta] , identifier[blkval] , identifier[n] = identifier[nj] - identifier[i] , identifier[offset] = identifier[blkptr] [ identifier[k] ]+( identifier[nj] + literal[int] )* identifier[i] )
identifier[uk] = identifier[up] [ identifier[snrowidx] [ identifier[sncolptr] [ identifier[k] ]: identifier[sncolptr] [ identifier[k] + literal[int] ]]]
identifier[vk] = identifier[vp] [ identifier[snrowidx] [ identifier[sncolptr] [ identifier[k] ]: identifier[sncolptr] [ identifier[k] + literal[int] ]]]
identifier[blas] . identifier[syr2] ( identifier[uk] , identifier[vk] , identifier[blkval] , identifier[n] = identifier[nn] , identifier[offsetA] = identifier[blkptr] [ identifier[k] ], identifier[ldA] = identifier[nj] , identifier[alpha] = identifier[alpha] )
identifier[blas] . identifier[ger] ( identifier[uk] , identifier[vk] , identifier[blkval] , identifier[m] = identifier[na] , identifier[n] = identifier[nn] , identifier[offsetx] = identifier[nn] , identifier[offsetA] = identifier[blkptr] [ identifier[k] ]+ identifier[nn] , identifier[ldA] = identifier[nj] , identifier[alpha] = identifier[alpha] )
identifier[blas] . identifier[ger] ( identifier[vk] , identifier[uk] , identifier[blkval] , identifier[m] = identifier[na] , identifier[n] = identifier[nn] , identifier[offsetx] = identifier[nn] , identifier[offsetA] = identifier[blkptr] [ identifier[k] ]+ identifier[nn] , identifier[ldA] = identifier[nj] , identifier[alpha] = identifier[alpha] )
keyword[return] | def syr2(X, u, v, alpha=1.0, beta=1.0, reordered=False):
"""
Computes the projected rank 2 update of a cspmatrix X
.. math::
X := \\alpha*P(u v^T + v u^T) + \\beta X.
"""
assert X.is_factor is False, 'cspmatrix factor object'
symb = X.symb
n = symb.n
snptr = symb.snptr
snode = symb.snode
blkval = X.blkval
blkptr = symb.blkptr
relptr = symb.relptr
snrowidx = symb.snrowidx
sncolptr = symb.sncolptr
if symb.p is not None and reordered is False:
up = u[symb.p]
vp = v[symb.p] # depends on [control=['if'], data=[]]
else:
up = u
vp = v
for k in range(symb.Nsn):
nn = snptr[k + 1] - snptr[k]
na = relptr[k + 1] - relptr[k]
nj = na + nn
for i in range(nn):
blas.scal(beta, blkval, n=nj - i, offset=blkptr[k] + (nj + 1) * i) # depends on [control=['for'], data=['i']]
uk = up[snrowidx[sncolptr[k]:sncolptr[k + 1]]]
vk = vp[snrowidx[sncolptr[k]:sncolptr[k + 1]]]
blas.syr2(uk, vk, blkval, n=nn, offsetA=blkptr[k], ldA=nj, alpha=alpha)
blas.ger(uk, vk, blkval, m=na, n=nn, offsetx=nn, offsetA=blkptr[k] + nn, ldA=nj, alpha=alpha)
blas.ger(vk, uk, blkval, m=na, n=nn, offsetx=nn, offsetA=blkptr[k] + nn, ldA=nj, alpha=alpha) # depends on [control=['for'], data=['k']]
return |
def _get_env_data(self, reload=False):
"""Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
# This is called much too often and finding-process is really expensive :-(
if not reload and getattr(self, "_env_data_cache", {}):
return getattr(self, "_env_data_cache")
env_data = {}
for supplyer in ENV_SUPPLYER:
env_data.update(supplyer(self))
env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
new_kernels = [env for env in list(env_data.keys()) if env not in list(self._env_data_cache.keys())]
if new_kernels:
self.log.info("Found new kernels in environments: %s", ", ".join(new_kernels))
self._env_data_cache = env_data
return env_data | def function[_get_env_data, parameter[self, reload]]:
constant[Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
]
if <ast.BoolOp object at 0x7da1b10d5de0> begin[:]
return[call[name[getattr], parameter[name[self], constant[_env_data_cache]]]]
variable[env_data] assign[=] dictionary[[], []]
for taget[name[supplyer]] in starred[name[ENV_SUPPLYER]] begin[:]
call[name[env_data].update, parameter[call[name[supplyer], parameter[name[self]]]]]
variable[env_data] assign[=] <ast.DictComp object at 0x7da1b10d77c0>
variable[new_kernels] assign[=] <ast.ListComp object at 0x7da1b2347820>
if name[new_kernels] begin[:]
call[name[self].log.info, parameter[constant[Found new kernels in environments: %s], call[constant[, ].join, parameter[name[new_kernels]]]]]
name[self]._env_data_cache assign[=] name[env_data]
return[name[env_data]] | keyword[def] identifier[_get_env_data] ( identifier[self] , identifier[reload] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[reload] keyword[and] identifier[getattr] ( identifier[self] , literal[string] ,{}):
keyword[return] identifier[getattr] ( identifier[self] , literal[string] )
identifier[env_data] ={}
keyword[for] identifier[supplyer] keyword[in] identifier[ENV_SUPPLYER] :
identifier[env_data] . identifier[update] ( identifier[supplyer] ( identifier[self] ))
identifier[env_data] ={ identifier[name] : identifier[env_data] [ identifier[name] ] keyword[for] identifier[name] keyword[in] identifier[env_data] keyword[if] identifier[self] . identifier[validate_env] ( identifier[name] )}
identifier[new_kernels] =[ identifier[env] keyword[for] identifier[env] keyword[in] identifier[list] ( identifier[env_data] . identifier[keys] ()) keyword[if] identifier[env] keyword[not] keyword[in] identifier[list] ( identifier[self] . identifier[_env_data_cache] . identifier[keys] ())]
keyword[if] identifier[new_kernels] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[new_kernels] ))
identifier[self] . identifier[_env_data_cache] = identifier[env_data]
keyword[return] identifier[env_data] | def _get_env_data(self, reload=False):
"""Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
# This is called much too often and finding-process is really expensive :-(
if not reload and getattr(self, '_env_data_cache', {}):
return getattr(self, '_env_data_cache') # depends on [control=['if'], data=[]]
env_data = {}
for supplyer in ENV_SUPPLYER:
env_data.update(supplyer(self)) # depends on [control=['for'], data=['supplyer']]
env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
new_kernels = [env for env in list(env_data.keys()) if env not in list(self._env_data_cache.keys())]
if new_kernels:
self.log.info('Found new kernels in environments: %s', ', '.join(new_kernels)) # depends on [control=['if'], data=[]]
self._env_data_cache = env_data
return env_data |
def add(self, data, id='*', maxlen=None, approximate=True):
"""
Add data to a stream.
:param dict data: data to add to stream
:param id: identifier for message ('*' to automatically append)
:param maxlen: maximum length for stream
:param approximate: allow stream max length to be approximate
:returns: the added message id.
"""
return self.database.xadd(self.key, data, id, maxlen, approximate) | def function[add, parameter[self, data, id, maxlen, approximate]]:
constant[
Add data to a stream.
:param dict data: data to add to stream
:param id: identifier for message ('*' to automatically append)
:param maxlen: maximum length for stream
:param approximate: allow stream max length to be approximate
:returns: the added message id.
]
return[call[name[self].database.xadd, parameter[name[self].key, name[data], name[id], name[maxlen], name[approximate]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[data] , identifier[id] = literal[string] , identifier[maxlen] = keyword[None] , identifier[approximate] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[database] . identifier[xadd] ( identifier[self] . identifier[key] , identifier[data] , identifier[id] , identifier[maxlen] , identifier[approximate] ) | def add(self, data, id='*', maxlen=None, approximate=True):
"""
Add data to a stream.
:param dict data: data to add to stream
:param id: identifier for message ('*' to automatically append)
:param maxlen: maximum length for stream
:param approximate: allow stream max length to be approximate
:returns: the added message id.
"""
return self.database.xadd(self.key, data, id, maxlen, approximate) |
def create_match(self, matcher_name, classification):
"""
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
"""
if classification is None:
classification = ClassifiedFailure.objects.create()
TextLogErrorMatch.objects.create(
text_log_error=self,
classified_failure=classification,
matcher_name=matcher_name,
score=1,
) | def function[create_match, parameter[self, matcher_name, classification]]:
constant[
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
]
if compare[name[classification] is constant[None]] begin[:]
variable[classification] assign[=] call[name[ClassifiedFailure].objects.create, parameter[]]
call[name[TextLogErrorMatch].objects.create, parameter[]] | keyword[def] identifier[create_match] ( identifier[self] , identifier[matcher_name] , identifier[classification] ):
literal[string]
keyword[if] identifier[classification] keyword[is] keyword[None] :
identifier[classification] = identifier[ClassifiedFailure] . identifier[objects] . identifier[create] ()
identifier[TextLogErrorMatch] . identifier[objects] . identifier[create] (
identifier[text_log_error] = identifier[self] ,
identifier[classified_failure] = identifier[classification] ,
identifier[matcher_name] = identifier[matcher_name] ,
identifier[score] = literal[int] ,
) | def create_match(self, matcher_name, classification):
"""
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
"""
if classification is None:
classification = ClassifiedFailure.objects.create() # depends on [control=['if'], data=['classification']]
TextLogErrorMatch.objects.create(text_log_error=self, classified_failure=classification, matcher_name=matcher_name, score=1) |
def dayspan(startdate: datetime.date,
enddate: datetime.date,
include_end: bool = True) -> Optional["Interval"]:
"""
Returns an :class:`Interval` representing the date range given, from
midnight at the start of the first day to midnight at the end of the
last (i.e. at the start of the next day after the last), or if
include_end is False, 24h before that.
If the parameters are invalid, returns ``None``.
"""
if enddate < startdate:
return None
if enddate == startdate and include_end:
return None
start_dt = datetime.datetime.combine(startdate, datetime.time())
end_dt = datetime.datetime.combine(enddate, datetime.time())
if include_end:
end_dt += datetime.timedelta(days=1)
return Interval(start_dt, end_dt) | def function[dayspan, parameter[startdate, enddate, include_end]]:
constant[
Returns an :class:`Interval` representing the date range given, from
midnight at the start of the first day to midnight at the end of the
last (i.e. at the start of the next day after the last), or if
include_end is False, 24h before that.
If the parameters are invalid, returns ``None``.
]
if compare[name[enddate] less[<] name[startdate]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b18365f0> begin[:]
return[constant[None]]
variable[start_dt] assign[=] call[name[datetime].datetime.combine, parameter[name[startdate], call[name[datetime].time, parameter[]]]]
variable[end_dt] assign[=] call[name[datetime].datetime.combine, parameter[name[enddate], call[name[datetime].time, parameter[]]]]
if name[include_end] begin[:]
<ast.AugAssign object at 0x7da1b1836e30>
return[call[name[Interval], parameter[name[start_dt], name[end_dt]]]] | keyword[def] identifier[dayspan] ( identifier[startdate] : identifier[datetime] . identifier[date] ,
identifier[enddate] : identifier[datetime] . identifier[date] ,
identifier[include_end] : identifier[bool] = keyword[True] )-> identifier[Optional] [ literal[string] ]:
literal[string]
keyword[if] identifier[enddate] < identifier[startdate] :
keyword[return] keyword[None]
keyword[if] identifier[enddate] == identifier[startdate] keyword[and] identifier[include_end] :
keyword[return] keyword[None]
identifier[start_dt] = identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[startdate] , identifier[datetime] . identifier[time] ())
identifier[end_dt] = identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[enddate] , identifier[datetime] . identifier[time] ())
keyword[if] identifier[include_end] :
identifier[end_dt] += identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
keyword[return] identifier[Interval] ( identifier[start_dt] , identifier[end_dt] ) | def dayspan(startdate: datetime.date, enddate: datetime.date, include_end: bool=True) -> Optional['Interval']:
"""
Returns an :class:`Interval` representing the date range given, from
midnight at the start of the first day to midnight at the end of the
last (i.e. at the start of the next day after the last), or if
include_end is False, 24h before that.
If the parameters are invalid, returns ``None``.
"""
if enddate < startdate:
return None # depends on [control=['if'], data=[]]
if enddate == startdate and include_end:
return None # depends on [control=['if'], data=[]]
start_dt = datetime.datetime.combine(startdate, datetime.time())
end_dt = datetime.datetime.combine(enddate, datetime.time())
if include_end:
end_dt += datetime.timedelta(days=1) # depends on [control=['if'], data=[]]
return Interval(start_dt, end_dt) |
def _assign_taxonomy_with_diamond(self, base_list, db_search_results,
graftm_package, graftm_files):
'''Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected.
'''
runner = Diamond(graftm_package.diamond_database_path(),
self.args.threads,
self.args.evalue)
taxonomy_definition = Getaxnseq().read_taxtastic_taxonomy_and_seqinfo\
(open(graftm_package.taxtastic_taxonomy_path()),
open(graftm_package.taxtastic_seqinfo_path()))
results = {}
# For each of the search results,
for i, search_result in enumerate(db_search_results):
sequence_id_to_hit = {}
# Run diamond
logging.debug("Running diamond on %s" % search_result.hit_fasta())
diamond_result = runner.run(search_result.hit_fasta(),
UnpackRawReads.PROTEIN_SEQUENCE_TYPE,
daa_file_basename=graftm_files.diamond_assignment_output_basename(base_list[i]))
for res in diamond_result.each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.HIT_ID_FIELD]):
if res[0] in sequence_id_to_hit:
# do not accept duplicates
if sequence_id_to_hit[res[0]] != res[1]:
raise Exception("Diamond unexpectedly gave two hits for a single query sequence for %s" % res[0])
else:
sequence_id_to_hit[res[0]] = res[1]
# Extract taxonomy of the best hit, and add in the no hits
sequence_id_to_taxonomy = {}
for seqio in SequenceIO().read_fasta_file(search_result.hit_fasta()):
name = seqio.name
if name in sequence_id_to_hit:
# Add Root; to be in line with pplacer assignment method
sequence_id_to_taxonomy[name] = ['Root']+taxonomy_definition[sequence_id_to_hit[name]]
else:
# picked up in the initial search (by hmmsearch, say), but diamond misses it
sequence_id_to_taxonomy[name] = ['Root']
results[base_list[i]] = sequence_id_to_taxonomy
return results | def function[_assign_taxonomy_with_diamond, parameter[self, base_list, db_search_results, graftm_package, graftm_files]]:
constant[Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected.
]
variable[runner] assign[=] call[name[Diamond], parameter[call[name[graftm_package].diamond_database_path, parameter[]], name[self].args.threads, name[self].args.evalue]]
variable[taxonomy_definition] assign[=] call[call[name[Getaxnseq], parameter[]].read_taxtastic_taxonomy_and_seqinfo, parameter[call[name[open], parameter[call[name[graftm_package].taxtastic_taxonomy_path, parameter[]]]], call[name[open], parameter[call[name[graftm_package].taxtastic_seqinfo_path, parameter[]]]]]]
variable[results] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c7c9270>, <ast.Name object at 0x7da20c7c8100>]]] in starred[call[name[enumerate], parameter[name[db_search_results]]]] begin[:]
variable[sequence_id_to_hit] assign[=] dictionary[[], []]
call[name[logging].debug, parameter[binary_operation[constant[Running diamond on %s] <ast.Mod object at 0x7da2590d6920> call[name[search_result].hit_fasta, parameter[]]]]]
variable[diamond_result] assign[=] call[name[runner].run, parameter[call[name[search_result].hit_fasta, parameter[]], name[UnpackRawReads].PROTEIN_SEQUENCE_TYPE]]
for taget[name[res]] in starred[call[name[diamond_result].each, parameter[list[[<ast.Attribute object at 0x7da1b0c51d80>, <ast.Attribute object at 0x7da1b0c50ee0>]]]]] begin[:]
if compare[call[name[res]][constant[0]] in name[sequence_id_to_hit]] begin[:]
if compare[call[name[sequence_id_to_hit]][call[name[res]][constant[0]]] not_equal[!=] call[name[res]][constant[1]]] begin[:]
<ast.Raise object at 0x7da1b0c51570>
variable[sequence_id_to_taxonomy] assign[=] dictionary[[], []]
for taget[name[seqio]] in starred[call[call[name[SequenceIO], parameter[]].read_fasta_file, parameter[call[name[search_result].hit_fasta, parameter[]]]]] begin[:]
variable[name] assign[=] name[seqio].name
if compare[name[name] in name[sequence_id_to_hit]] begin[:]
call[name[sequence_id_to_taxonomy]][name[name]] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0c51510>]] + call[name[taxonomy_definition]][call[name[sequence_id_to_hit]][name[name]]]]
call[name[results]][call[name[base_list]][name[i]]] assign[=] name[sequence_id_to_taxonomy]
return[name[results]] | keyword[def] identifier[_assign_taxonomy_with_diamond] ( identifier[self] , identifier[base_list] , identifier[db_search_results] ,
identifier[graftm_package] , identifier[graftm_files] ):
literal[string]
identifier[runner] = identifier[Diamond] ( identifier[graftm_package] . identifier[diamond_database_path] (),
identifier[self] . identifier[args] . identifier[threads] ,
identifier[self] . identifier[args] . identifier[evalue] )
identifier[taxonomy_definition] = identifier[Getaxnseq] (). identifier[read_taxtastic_taxonomy_and_seqinfo] ( identifier[open] ( identifier[graftm_package] . identifier[taxtastic_taxonomy_path] ()),
identifier[open] ( identifier[graftm_package] . identifier[taxtastic_seqinfo_path] ()))
identifier[results] ={}
keyword[for] identifier[i] , identifier[search_result] keyword[in] identifier[enumerate] ( identifier[db_search_results] ):
identifier[sequence_id_to_hit] ={}
identifier[logging] . identifier[debug] ( literal[string] % identifier[search_result] . identifier[hit_fasta] ())
identifier[diamond_result] = identifier[runner] . identifier[run] ( identifier[search_result] . identifier[hit_fasta] (),
identifier[UnpackRawReads] . identifier[PROTEIN_SEQUENCE_TYPE] ,
identifier[daa_file_basename] = identifier[graftm_files] . identifier[diamond_assignment_output_basename] ( identifier[base_list] [ identifier[i] ]))
keyword[for] identifier[res] keyword[in] identifier[diamond_result] . identifier[each] ([ identifier[SequenceSearchResult] . identifier[QUERY_ID_FIELD] ,
identifier[SequenceSearchResult] . identifier[HIT_ID_FIELD] ]):
keyword[if] identifier[res] [ literal[int] ] keyword[in] identifier[sequence_id_to_hit] :
keyword[if] identifier[sequence_id_to_hit] [ identifier[res] [ literal[int] ]]!= identifier[res] [ literal[int] ]:
keyword[raise] identifier[Exception] ( literal[string] % identifier[res] [ literal[int] ])
keyword[else] :
identifier[sequence_id_to_hit] [ identifier[res] [ literal[int] ]]= identifier[res] [ literal[int] ]
identifier[sequence_id_to_taxonomy] ={}
keyword[for] identifier[seqio] keyword[in] identifier[SequenceIO] (). identifier[read_fasta_file] ( identifier[search_result] . identifier[hit_fasta] ()):
identifier[name] = identifier[seqio] . identifier[name]
keyword[if] identifier[name] keyword[in] identifier[sequence_id_to_hit] :
identifier[sequence_id_to_taxonomy] [ identifier[name] ]=[ literal[string] ]+ identifier[taxonomy_definition] [ identifier[sequence_id_to_hit] [ identifier[name] ]]
keyword[else] :
identifier[sequence_id_to_taxonomy] [ identifier[name] ]=[ literal[string] ]
identifier[results] [ identifier[base_list] [ identifier[i] ]]= identifier[sequence_id_to_taxonomy]
keyword[return] identifier[results] | def _assign_taxonomy_with_diamond(self, base_list, db_search_results, graftm_package, graftm_files):
"""Run diamond to assign taxonomy
Parameters
----------
base_list: list of str
list of sequence block names
db_search_results: list of DBSearchResult
the result of running hmmsearches
graftm_package: GraftMPackage object
Diamond is run against this database
graftm_files: GraftMFiles object
Result files are written here
Returns
-------
list of
1. time taken for assignment
2. assignments i.e. dict of base_list entry to dict of read names to
to taxonomies, or None if there was no hit detected.
"""
runner = Diamond(graftm_package.diamond_database_path(), self.args.threads, self.args.evalue)
taxonomy_definition = Getaxnseq().read_taxtastic_taxonomy_and_seqinfo(open(graftm_package.taxtastic_taxonomy_path()), open(graftm_package.taxtastic_seqinfo_path()))
results = {}
# For each of the search results,
for (i, search_result) in enumerate(db_search_results):
sequence_id_to_hit = {}
# Run diamond
logging.debug('Running diamond on %s' % search_result.hit_fasta())
diamond_result = runner.run(search_result.hit_fasta(), UnpackRawReads.PROTEIN_SEQUENCE_TYPE, daa_file_basename=graftm_files.diamond_assignment_output_basename(base_list[i]))
for res in diamond_result.each([SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HIT_ID_FIELD]):
if res[0] in sequence_id_to_hit:
# do not accept duplicates
if sequence_id_to_hit[res[0]] != res[1]:
raise Exception('Diamond unexpectedly gave two hits for a single query sequence for %s' % res[0]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['sequence_id_to_hit']]
else:
sequence_id_to_hit[res[0]] = res[1] # depends on [control=['for'], data=['res']]
# Extract taxonomy of the best hit, and add in the no hits
sequence_id_to_taxonomy = {}
for seqio in SequenceIO().read_fasta_file(search_result.hit_fasta()):
name = seqio.name
if name in sequence_id_to_hit:
# Add Root; to be in line with pplacer assignment method
sequence_id_to_taxonomy[name] = ['Root'] + taxonomy_definition[sequence_id_to_hit[name]] # depends on [control=['if'], data=['name', 'sequence_id_to_hit']]
else:
# picked up in the initial search (by hmmsearch, say), but diamond misses it
sequence_id_to_taxonomy[name] = ['Root'] # depends on [control=['for'], data=['seqio']]
results[base_list[i]] = sequence_id_to_taxonomy # depends on [control=['for'], data=[]]
return results |
def file_list(self, tgt_env):
'''
Get file list for the target environment using GitPython
'''
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env)
except KeyError:
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for file_blob in tree.traverse():
if not isinstance(file_blob, git.Blob):
continue
file_path = add_mountpoint(relpath(file_blob.path))
files.add(file_path)
if stat.S_ISLNK(file_blob.mode):
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
symlinks[file_path] = link_tgt
return files, symlinks | def function[file_list, parameter[self, tgt_env]]:
constant[
Get file list for the target environment using GitPython
]
variable[files] assign[=] call[name[set], parameter[]]
variable[symlinks] assign[=] dictionary[[], []]
variable[tree] assign[=] call[name[self].get_tree, parameter[name[tgt_env]]]
if <ast.UnaryOp object at 0x7da1b20b8a00> begin[:]
return[tuple[[<ast.Name object at 0x7da1b20b95a0>, <ast.Name object at 0x7da1b20ba350>]]]
if call[name[self].root, parameter[name[tgt_env]]] begin[:]
<ast.Try object at 0x7da1b21789d0>
variable[relpath] assign[=] <ast.Lambda object at 0x7da1b2344520>
variable[add_mountpoint] assign[=] <ast.Lambda object at 0x7da1b1f37130>
for taget[name[file_blob]] in starred[call[name[tree].traverse, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b217ce50> begin[:]
continue
variable[file_path] assign[=] call[name[add_mountpoint], parameter[call[name[relpath], parameter[name[file_blob].path]]]]
call[name[files].add, parameter[name[file_path]]]
if call[name[stat].S_ISLNK, parameter[name[file_blob].mode]] begin[:]
variable[stream] assign[=] call[name[six].StringIO, parameter[]]
call[name[file_blob].stream_data, parameter[name[stream]]]
call[name[stream].seek, parameter[constant[0]]]
variable[link_tgt] assign[=] call[name[stream].read, parameter[]]
call[name[stream].close, parameter[]]
call[name[symlinks]][name[file_path]] assign[=] name[link_tgt]
return[tuple[[<ast.Name object at 0x7da1b217c490>, <ast.Name object at 0x7da1b217c100>]]] | keyword[def] identifier[file_list] ( identifier[self] , identifier[tgt_env] ):
literal[string]
identifier[files] = identifier[set] ()
identifier[symlinks] ={}
identifier[tree] = identifier[self] . identifier[get_tree] ( identifier[tgt_env] )
keyword[if] keyword[not] identifier[tree] :
keyword[return] identifier[files] , identifier[symlinks]
keyword[if] identifier[self] . identifier[root] ( identifier[tgt_env] ):
keyword[try] :
identifier[tree] = identifier[tree] / identifier[self] . identifier[root] ( identifier[tgt_env] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[files] , identifier[symlinks]
identifier[relpath] = keyword[lambda] identifier[path] : identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] , identifier[self] . identifier[root] ( identifier[tgt_env] ))
keyword[else] :
identifier[relpath] = keyword[lambda] identifier[path] : identifier[path]
identifier[add_mountpoint] = keyword[lambda] identifier[path] : identifier[salt] . identifier[utils] . identifier[path] . identifier[join] (
identifier[self] . identifier[mountpoint] ( identifier[tgt_env] ), identifier[path] , identifier[use_posixpath] = keyword[True] )
keyword[for] identifier[file_blob] keyword[in] identifier[tree] . identifier[traverse] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[file_blob] , identifier[git] . identifier[Blob] ):
keyword[continue]
identifier[file_path] = identifier[add_mountpoint] ( identifier[relpath] ( identifier[file_blob] . identifier[path] ))
identifier[files] . identifier[add] ( identifier[file_path] )
keyword[if] identifier[stat] . identifier[S_ISLNK] ( identifier[file_blob] . identifier[mode] ):
identifier[stream] = identifier[six] . identifier[StringIO] ()
identifier[file_blob] . identifier[stream_data] ( identifier[stream] )
identifier[stream] . identifier[seek] ( literal[int] )
identifier[link_tgt] = identifier[stream] . identifier[read] ()
identifier[stream] . identifier[close] ()
identifier[symlinks] [ identifier[file_path] ]= identifier[link_tgt]
keyword[return] identifier[files] , identifier[symlinks] | def file_list(self, tgt_env):
"""
Get file list for the target environment using GitPython
"""
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return (files, symlinks) # depends on [control=['if'], data=[]]
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env) # depends on [control=['try'], data=[]]
except KeyError:
return (files, symlinks) # depends on [control=['except'], data=[]]
relpath = lambda path: os.path.relpath(path, self.root(tgt_env)) # depends on [control=['if'], data=[]]
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(self.mountpoint(tgt_env), path, use_posixpath=True)
for file_blob in tree.traverse():
if not isinstance(file_blob, git.Blob):
continue # depends on [control=['if'], data=[]]
file_path = add_mountpoint(relpath(file_blob.path))
files.add(file_path)
if stat.S_ISLNK(file_blob.mode):
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
symlinks[file_path] = link_tgt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_blob']]
return (files, symlinks) |
def raise_on_error(f):
"""
This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
retcode = GetLastError()
if retcode and retcode != ERROR_IO_PENDING:
err = WinError(code=retcode)
windll.kernel32.SetLastError(0) # clear error code so that we don't raise twice.
raise err
return result
return wrapper | def function[raise_on_error, parameter[f]]:
constant[
This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored.
]
def function[wrapper, parameter[]]:
variable[result] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da2045665c0>]]
variable[retcode] assign[=] call[name[GetLastError], parameter[]]
if <ast.BoolOp object at 0x7da204566230> begin[:]
variable[err] assign[=] call[name[WinError], parameter[]]
call[name[windll].kernel32.SetLastError, parameter[constant[0]]]
<ast.Raise object at 0x7da204565fc0>
return[name[result]]
return[name[wrapper]] | keyword[def] identifier[raise_on_error] ( identifier[f] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[result] = identifier[f] (* identifier[args] ,** identifier[kwargs] )
identifier[retcode] = identifier[GetLastError] ()
keyword[if] identifier[retcode] keyword[and] identifier[retcode] != identifier[ERROR_IO_PENDING] :
identifier[err] = identifier[WinError] ( identifier[code] = identifier[retcode] )
identifier[windll] . identifier[kernel32] . identifier[SetLastError] ( literal[int] )
keyword[raise] identifier[err]
keyword[return] identifier[result]
keyword[return] identifier[wrapper] | def raise_on_error(f):
"""
This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
retcode = GetLastError()
if retcode and retcode != ERROR_IO_PENDING:
err = WinError(code=retcode)
windll.kernel32.SetLastError(0) # clear error code so that we don't raise twice.
raise err # depends on [control=['if'], data=[]]
return result
return wrapper |
async def _init(self):
"""
Initialize the association.
"""
chunk = InitChunk()
chunk.initiate_tag = self._local_verification_tag
chunk.advertised_rwnd = self._advertised_rwnd
chunk.outbound_streams = self._outbound_streams_count
chunk.inbound_streams = self._inbound_streams_max
chunk.initial_tsn = self._local_tsn
self._set_extensions(chunk.params)
await self._send_chunk(chunk)
# start T1 timer and enter COOKIE-WAIT state
self._t1_start(chunk)
self._set_state(self.State.COOKIE_WAIT) | <ast.AsyncFunctionDef object at 0x7da2054a5cc0> | keyword[async] keyword[def] identifier[_init] ( identifier[self] ):
literal[string]
identifier[chunk] = identifier[InitChunk] ()
identifier[chunk] . identifier[initiate_tag] = identifier[self] . identifier[_local_verification_tag]
identifier[chunk] . identifier[advertised_rwnd] = identifier[self] . identifier[_advertised_rwnd]
identifier[chunk] . identifier[outbound_streams] = identifier[self] . identifier[_outbound_streams_count]
identifier[chunk] . identifier[inbound_streams] = identifier[self] . identifier[_inbound_streams_max]
identifier[chunk] . identifier[initial_tsn] = identifier[self] . identifier[_local_tsn]
identifier[self] . identifier[_set_extensions] ( identifier[chunk] . identifier[params] )
keyword[await] identifier[self] . identifier[_send_chunk] ( identifier[chunk] )
identifier[self] . identifier[_t1_start] ( identifier[chunk] )
identifier[self] . identifier[_set_state] ( identifier[self] . identifier[State] . identifier[COOKIE_WAIT] ) | async def _init(self):
"""
Initialize the association.
"""
chunk = InitChunk()
chunk.initiate_tag = self._local_verification_tag
chunk.advertised_rwnd = self._advertised_rwnd
chunk.outbound_streams = self._outbound_streams_count
chunk.inbound_streams = self._inbound_streams_max
chunk.initial_tsn = self._local_tsn
self._set_extensions(chunk.params)
await self._send_chunk(chunk)
# start T1 timer and enter COOKIE-WAIT state
self._t1_start(chunk)
self._set_state(self.State.COOKIE_WAIT) |
def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set to value `np.nan`"""
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name))
if name not in self.meta:
self.meta[name] = np.nan | def function[_new_meta_column, parameter[self, name]]:
constant[Add a column to meta if it doesn't exist, set to value `np.nan`]
if compare[name[name] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc04fd0>
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].meta] begin[:]
call[name[self].meta][name[name]] assign[=] name[np].nan | keyword[def] identifier[_new_meta_column] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[meta] :
identifier[self] . identifier[meta] [ identifier[name] ]= identifier[np] . identifier[nan] | def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set to value `np.nan`"""
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name)) # depends on [control=['if'], data=['name']]
if name not in self.meta:
self.meta[name] = np.nan # depends on [control=['if'], data=['name']] |
def to_csv(weekmatrices, filename, digits=5):
"""
Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file.
"""
with open(filename, 'w') as f:
w = csv.writer(f, lineterminator='\n')
w.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])
def make_repr(item):
if item is None:
return None
elif isinstance(item, float):
return repr(round(item, digits))
else:
return str(item)
for row in weekmatrices:
w.writerow([make_repr(item) for item in row]) | def function[to_csv, parameter[weekmatrices, filename, digits]]:
constant[
Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file.
]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
variable[w] assign[=] call[name[csv].writer, parameter[name[f]]]
call[name[w].writerow, parameter[list[[<ast.Constant object at 0x7da20c6e7910>, <ast.Constant object at 0x7da20c6e41c0>, <ast.Constant object at 0x7da20c6e5e10>, <ast.Constant object at 0x7da20c6e4a90>, <ast.Constant object at 0x7da20c6e7790>]]]]
def function[make_repr, parameter[item]]:
if compare[name[item] is constant[None]] begin[:]
return[constant[None]]
for taget[name[row]] in starred[name[weekmatrices]] begin[:]
call[name[w].writerow, parameter[<ast.ListComp object at 0x7da20c6e5a80>]] | keyword[def] identifier[to_csv] ( identifier[weekmatrices] , identifier[filename] , identifier[digits] = literal[int] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[w] = identifier[csv] . identifier[writer] ( identifier[f] , identifier[lineterminator] = literal[string] )
identifier[w] . identifier[writerow] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[def] identifier[make_repr] ( identifier[item] ):
keyword[if] identifier[item] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[float] ):
keyword[return] identifier[repr] ( identifier[round] ( identifier[item] , identifier[digits] ))
keyword[else] :
keyword[return] identifier[str] ( identifier[item] )
keyword[for] identifier[row] keyword[in] identifier[weekmatrices] :
identifier[w] . identifier[writerow] ([ identifier[make_repr] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[row] ]) | def to_csv(weekmatrices, filename, digits=5):
"""
Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file.
"""
with open(filename, 'w') as f:
w = csv.writer(f, lineterminator='\n')
w.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])
def make_repr(item):
if item is None:
return None # depends on [control=['if'], data=[]]
elif isinstance(item, float):
return repr(round(item, digits)) # depends on [control=['if'], data=[]]
else:
return str(item)
for row in weekmatrices:
w.writerow([make_repr(item) for item in row]) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['f']] |
def get_snapshot(nexus_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
'''
Gets snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
'''
log.debug('======================== MODULE FUNCTION: nexus.get_snapshot(nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)',
nexus_url, repository, group_id, artifact_id, packaging, version, target_dir, classifier)
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
snapshot_url, file_name = _get_snapshot_url(nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers) | def function[get_snapshot, parameter[nexus_url, repository, group_id, artifact_id, packaging, version, snapshot_version, target_dir, target_file, classifier, username, password]]:
constant[
Gets snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
]
call[name[log].debug, parameter[constant[======================== MODULE FUNCTION: nexus.get_snapshot(nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)], name[nexus_url], name[repository], name[group_id], name[artifact_id], name[packaging], name[version], name[target_dir], name[classifier]]]
variable[headers] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b1c48670> begin[:]
call[name[headers]][constant[Authorization]] assign[=] call[constant[Basic {0}].format, parameter[call[call[name[base64].encodestring, parameter[call[constant[{0}:{1}].format, parameter[name[username], name[password]]]]].replace, parameter[constant[
], constant[]]]]]
<ast.Tuple object at 0x7da1b1c4a530> assign[=] call[name[_get_snapshot_url], parameter[]]
variable[target_file] assign[=] call[name[__resolve_target_file], parameter[name[file_name], name[target_dir], name[target_file]]]
return[call[name[__save_artifact], parameter[name[snapshot_url], name[target_file], name[headers]]]] | keyword[def] identifier[get_snapshot] ( identifier[nexus_url] , identifier[repository] , identifier[group_id] , identifier[artifact_id] , identifier[packaging] , identifier[version] , identifier[snapshot_version] = keyword[None] , identifier[target_dir] = literal[string] , identifier[target_file] = keyword[None] , identifier[classifier] = keyword[None] , identifier[username] = keyword[None] , identifier[password] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] ,
identifier[nexus_url] , identifier[repository] , identifier[group_id] , identifier[artifact_id] , identifier[packaging] , identifier[version] , identifier[target_dir] , identifier[classifier] )
identifier[headers] ={}
keyword[if] identifier[username] keyword[and] identifier[password] :
identifier[headers] [ literal[string] ]= literal[string] . identifier[format] ( identifier[base64] . identifier[encodestring] ( literal[string] . identifier[format] ( identifier[username] , identifier[password] )). identifier[replace] ( literal[string] , literal[string] ))
identifier[snapshot_url] , identifier[file_name] = identifier[_get_snapshot_url] ( identifier[nexus_url] = identifier[nexus_url] , identifier[repository] = identifier[repository] , identifier[group_id] = identifier[group_id] , identifier[artifact_id] = identifier[artifact_id] , identifier[version] = identifier[version] , identifier[packaging] = identifier[packaging] , identifier[snapshot_version] = identifier[snapshot_version] , identifier[classifier] = identifier[classifier] , identifier[headers] = identifier[headers] )
identifier[target_file] = identifier[__resolve_target_file] ( identifier[file_name] , identifier[target_dir] , identifier[target_file] )
keyword[return] identifier[__save_artifact] ( identifier[snapshot_url] , identifier[target_file] , identifier[headers] ) | def get_snapshot(nexus_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
"""
Gets snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug('======================== MODULE FUNCTION: nexus.get_snapshot(nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)', nexus_url, repository, group_id, artifact_id, packaging, version, target_dir, classifier)
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) # depends on [control=['if'], data=[]]
(snapshot_url, file_name) = _get_snapshot_url(nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers) |
def metric(self, name, filter_=None, description=""):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description) | def function[metric, parameter[self, name, filter_, description]]:
constant[Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
]
return[call[name[Metric], parameter[name[name], name[filter_]]]] | keyword[def] identifier[metric] ( identifier[self] , identifier[name] , identifier[filter_] = keyword[None] , identifier[description] = literal[string] ):
literal[string]
keyword[return] identifier[Metric] ( identifier[name] , identifier[filter_] , identifier[client] = identifier[self] , identifier[description] = identifier[description] ) | def metric(self, name, filter_=None, description=''):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description) |
def is_answer_available(self, assessment_section_id, item_id):
"""Tests if an answer is available for the given item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (boolean) - ``true`` if an answer are available,
``false`` otherwise
raise: NotFound - ``assessment_section_id or item_id is not
found, or item_id not part of assessment_section_id``
raise: NullArgument - ``assessment_section_id or item_id is
null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Note: we need more settings elsewhere to indicate answer available conditions
# This makes the simple assumption that answers are available only when
# a response has been submitted for an Item.
try:
response = self.get_response(assessment_section_id, item_id)
# need to invoke something like .object_map before
# a "null" response throws IllegalState
response.object_map
except errors.IllegalState:
return False
else:
return True | def function[is_answer_available, parameter[self, assessment_section_id, item_id]]:
constant[Tests if an answer is available for the given item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (boolean) - ``true`` if an answer are available,
``false`` otherwise
raise: NotFound - ``assessment_section_id or item_id is not
found, or item_id not part of assessment_section_id``
raise: NullArgument - ``assessment_section_id or item_id is
null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
<ast.Try object at 0x7da1b26ae950> | keyword[def] identifier[is_answer_available] ( identifier[self] , identifier[assessment_section_id] , identifier[item_id] ):
literal[string]
keyword[try] :
identifier[response] = identifier[self] . identifier[get_response] ( identifier[assessment_section_id] , identifier[item_id] )
identifier[response] . identifier[object_map]
keyword[except] identifier[errors] . identifier[IllegalState] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def is_answer_available(self, assessment_section_id, item_id):
"""Tests if an answer is available for the given item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (boolean) - ``true`` if an answer are available,
``false`` otherwise
raise: NotFound - ``assessment_section_id or item_id is not
found, or item_id not part of assessment_section_id``
raise: NullArgument - ``assessment_section_id or item_id is
null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Note: we need more settings elsewhere to indicate answer available conditions
# This makes the simple assumption that answers are available only when
# a response has been submitted for an Item.
try:
response = self.get_response(assessment_section_id, item_id)
# need to invoke something like .object_map before
# a "null" response throws IllegalState
response.object_map # depends on [control=['try'], data=[]]
except errors.IllegalState:
return False # depends on [control=['except'], data=[]]
else:
return True |
def getPort(self):
"""
Helper method for testing; returns the TCP port used for this
registration, even if it was specified as 0 and thus allocated by the
OS.
"""
disp = self.pbmanager.dispatchers[self.portstr]
return disp.port.getHost().port | def function[getPort, parameter[self]]:
constant[
Helper method for testing; returns the TCP port used for this
registration, even if it was specified as 0 and thus allocated by the
OS.
]
variable[disp] assign[=] call[name[self].pbmanager.dispatchers][name[self].portstr]
return[call[name[disp].port.getHost, parameter[]].port] | keyword[def] identifier[getPort] ( identifier[self] ):
literal[string]
identifier[disp] = identifier[self] . identifier[pbmanager] . identifier[dispatchers] [ identifier[self] . identifier[portstr] ]
keyword[return] identifier[disp] . identifier[port] . identifier[getHost] (). identifier[port] | def getPort(self):
"""
Helper method for testing; returns the TCP port used for this
registration, even if it was specified as 0 and thus allocated by the
OS.
"""
disp = self.pbmanager.dispatchers[self.portstr]
return disp.port.getHost().port |
def preprocess_notebook(ntbk, cr):
"""
Process notebook object `ntbk` in preparation for conversion to an
rst document. This processing replaces links to online docs with
corresponding sphinx cross-references within the local docs.
Parameter `cr` is a CrossReferenceLookup object.
"""
# Iterate over cells in notebook
for n in range(len(ntbk['cells'])):
# Only process cells of type 'markdown'
if ntbk['cells'][n]['cell_type'] == 'markdown':
# Get text of markdown cell
txt = ntbk['cells'][n]['source']
# Replace links to online docs with sphinx cross-references
txt = cr.substitute_url_with_ref(txt)
# Replace current cell text with processed text
ntbk['cells'][n]['source'] = txt | def function[preprocess_notebook, parameter[ntbk, cr]]:
constant[
Process notebook object `ntbk` in preparation for conversion to an
rst document. This processing replaces links to online docs with
corresponding sphinx cross-references within the local docs.
Parameter `cr` is a CrossReferenceLookup object.
]
for taget[name[n]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[ntbk]][constant[cells]]]]]]] begin[:]
if compare[call[call[call[name[ntbk]][constant[cells]]][name[n]]][constant[cell_type]] equal[==] constant[markdown]] begin[:]
variable[txt] assign[=] call[call[call[name[ntbk]][constant[cells]]][name[n]]][constant[source]]
variable[txt] assign[=] call[name[cr].substitute_url_with_ref, parameter[name[txt]]]
call[call[call[name[ntbk]][constant[cells]]][name[n]]][constant[source]] assign[=] name[txt] | keyword[def] identifier[preprocess_notebook] ( identifier[ntbk] , identifier[cr] ):
literal[string]
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[len] ( identifier[ntbk] [ literal[string] ])):
keyword[if] identifier[ntbk] [ literal[string] ][ identifier[n] ][ literal[string] ]== literal[string] :
identifier[txt] = identifier[ntbk] [ literal[string] ][ identifier[n] ][ literal[string] ]
identifier[txt] = identifier[cr] . identifier[substitute_url_with_ref] ( identifier[txt] )
identifier[ntbk] [ literal[string] ][ identifier[n] ][ literal[string] ]= identifier[txt] | def preprocess_notebook(ntbk, cr):
"""
Process notebook object `ntbk` in preparation for conversion to an
rst document. This processing replaces links to online docs with
corresponding sphinx cross-references within the local docs.
Parameter `cr` is a CrossReferenceLookup object.
"""
# Iterate over cells in notebook
for n in range(len(ntbk['cells'])):
# Only process cells of type 'markdown'
if ntbk['cells'][n]['cell_type'] == 'markdown':
# Get text of markdown cell
txt = ntbk['cells'][n]['source']
# Replace links to online docs with sphinx cross-references
txt = cr.substitute_url_with_ref(txt)
# Replace current cell text with processed text
ntbk['cells'][n]['source'] = txt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']] |
def find_message_handler(self, handler_name, handler_type='primary'):
"""Returns the MessageHandler given its name and type for this class."""
ret = lib.EnvFindDefmessageHandler(
self._env, self._cls, handler_name.encode(), handler_type.encode())
if ret == 0:
raise CLIPSError(self._env)
return MessageHandler(self._env, self._cls, ret) | def function[find_message_handler, parameter[self, handler_name, handler_type]]:
constant[Returns the MessageHandler given its name and type for this class.]
variable[ret] assign[=] call[name[lib].EnvFindDefmessageHandler, parameter[name[self]._env, name[self]._cls, call[name[handler_name].encode, parameter[]], call[name[handler_type].encode, parameter[]]]]
if compare[name[ret] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18bcca350>
return[call[name[MessageHandler], parameter[name[self]._env, name[self]._cls, name[ret]]]] | keyword[def] identifier[find_message_handler] ( identifier[self] , identifier[handler_name] , identifier[handler_type] = literal[string] ):
literal[string]
identifier[ret] = identifier[lib] . identifier[EnvFindDefmessageHandler] (
identifier[self] . identifier[_env] , identifier[self] . identifier[_cls] , identifier[handler_name] . identifier[encode] (), identifier[handler_type] . identifier[encode] ())
keyword[if] identifier[ret] == literal[int] :
keyword[raise] identifier[CLIPSError] ( identifier[self] . identifier[_env] )
keyword[return] identifier[MessageHandler] ( identifier[self] . identifier[_env] , identifier[self] . identifier[_cls] , identifier[ret] ) | def find_message_handler(self, handler_name, handler_type='primary'):
"""Returns the MessageHandler given its name and type for this class."""
ret = lib.EnvFindDefmessageHandler(self._env, self._cls, handler_name.encode(), handler_type.encode())
if ret == 0:
raise CLIPSError(self._env) # depends on [control=['if'], data=[]]
return MessageHandler(self._env, self._cls, ret) |
async def get_tracks(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0) -> List[Track]:
"""get the albums tracks from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many tracks to retrieve for this album (default is 20).
offset : Optional[int]
The offset from where the api should start from in the tracks.
Returns
-------
tracks : List[Track]
The tracks of the artist.
"""
data = await self.__client.http.album_tracks(self.id, limit=limit, offset=offset)
return list(Track(self.__client, item) for item in data['items']) | <ast.AsyncFunctionDef object at 0x7da204621690> | keyword[async] keyword[def] identifier[get_tracks] ( identifier[self] ,*, identifier[limit] : identifier[Optional] [ identifier[int] ]= literal[int] , identifier[offset] : identifier[Optional] [ identifier[int] ]= literal[int] )-> identifier[List] [ identifier[Track] ]:
literal[string]
identifier[data] = keyword[await] identifier[self] . identifier[__client] . identifier[http] . identifier[album_tracks] ( identifier[self] . identifier[id] , identifier[limit] = identifier[limit] , identifier[offset] = identifier[offset] )
keyword[return] identifier[list] ( identifier[Track] ( identifier[self] . identifier[__client] , identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[data] [ literal[string] ]) | async def get_tracks(self, *, limit: Optional[int]=20, offset: Optional[int]=0) -> List[Track]:
"""get the albums tracks from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many tracks to retrieve for this album (default is 20).
offset : Optional[int]
The offset from where the api should start from in the tracks.
Returns
-------
tracks : List[Track]
The tracks of the artist.
"""
data = await self.__client.http.album_tracks(self.id, limit=limit, offset=offset)
return list((Track(self.__client, item) for item in data['items'])) |
def cee_map_priority_table_map_cos4_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos4_pgid = ET.SubElement(priority_table, "map-cos4-pgid")
map_cos4_pgid.text = kwargs.pop('map_cos4_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[cee_map_priority_table_map_cos4_pgid, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[cee_map] assign[=] call[name[ET].SubElement, parameter[name[config], constant[cee-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[priority_table] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[priority-table]]]
variable[map_cos4_pgid] assign[=] call[name[ET].SubElement, parameter[name[priority_table], constant[map-cos4-pgid]]]
name[map_cos4_pgid].text assign[=] call[name[kwargs].pop, parameter[constant[map_cos4_pgid]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[cee_map_priority_table_map_cos4_pgid] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[cee_map] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[priority_table] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[map_cos4_pgid] = identifier[ET] . identifier[SubElement] ( identifier[priority_table] , literal[string] )
identifier[map_cos4_pgid] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def cee_map_priority_table_map_cos4_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
cee_map = ET.SubElement(config, 'cee-map', xmlns='urn:brocade.com:mgmt:brocade-cee-map')
name_key = ET.SubElement(cee_map, 'name')
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, 'priority-table')
map_cos4_pgid = ET.SubElement(priority_table, 'map-cos4-pgid')
map_cos4_pgid.text = kwargs.pop('map_cos4_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence | def function[__check_spaces, parameter[sentence]]:
constant[
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
]
if compare[name[sentence] is_not constant[None]] begin[:]
variable[words] assign[=] call[name[sentence].split, parameter[]]
variable[new_sentence] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b10d7880>, <ast.Name object at 0x7da1b10d5b70>]]] in starred[call[name[enumerate], parameter[name[words]]]] begin[:]
if compare[call[name[word]][<ast.UnaryOp object at 0x7da1b10d4700>] in call[name[set], parameter[constant[.!?]]]] begin[:]
<ast.AugAssign object at 0x7da1b10d6ef0>
variable[new_word] assign[=] call[constant[].join, parameter[name[word]]]
<ast.AugAssign object at 0x7da1b10d4d60>
variable[new_sentence] assign[=] call[name[new_sentence].strip, parameter[]]
return[name[new_sentence]] | keyword[def] identifier[__check_spaces] ( identifier[sentence] ):
literal[string]
keyword[if] identifier[sentence] keyword[is] keyword[not] keyword[None] :
identifier[words] = identifier[sentence] . identifier[split] ()
identifier[new_sentence] = literal[string]
keyword[for] ( identifier[i] , identifier[word] ) keyword[in] identifier[enumerate] ( identifier[words] ):
keyword[if] identifier[word] [- literal[int] ] keyword[in] identifier[set] ( literal[string] ):
identifier[word] += literal[string]
identifier[new_word] = literal[string] . identifier[join] ( identifier[word] )
identifier[new_sentence] += literal[string] + identifier[new_word]
identifier[new_sentence] = identifier[new_sentence] . identifier[strip] ()
keyword[return] identifier[new_sentence] | def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' ' # depends on [control=['if'], data=[]]
new_word = ''.join(word)
new_sentence += ' ' + new_word # depends on [control=['for'], data=[]]
# remove any trailing whitespace
new_sentence = new_sentence.strip() # depends on [control=['if'], data=['sentence']]
return new_sentence |
def search_location(loc, locations=None,
critical=False, create_in=None, verbose=True):
'''
Locates files with a twist:
* Check the existence of a file using the full path in `loc`
* Search for the filename `loc` in `locations`
* Create it's enclosing folders if the file does not exist. \
use `create_in`
:param loc:
Filename to search
:param locations:
A list of possible locations to search within
(can be a dictionary, see note below)
:param critical:
|appteardown| if file was not found
:param create_in:
If `loc` was not found, the folder `create_in` is created.
If `locations` is a dictionary, `create_in` can also specify
a key of `locations`. The value will be used then.
:param verbose:
Pass verbose flag to :func:`make_locations`
:returns:
The full path of `loc` in matched location
.. note::
* |params_locations_dict|
* |param_locations_none|
'''
from photon.util.structures import to_list
from photon.util.system import shell_notify
if not locations:
locations = get_locations()
for p in reversed(sorted(to_list(locations))):
f = _path.join(p, loc)
if _path.exists(f):
return f
if _path.exists(_path.abspath(_path.expanduser(loc))):
return _path.abspath(_path.expanduser(loc))
if critical:
shell_notify('could not locate', state=True, more=dict(
file=loc, locations=locations
))
if create_in:
if isinstance(locations, dict):
create_in = locations.get(create_in, create_in)
make_locations(locations=[create_in], verbose=verbose)
return _path.join(create_in, loc) | def function[search_location, parameter[loc, locations, critical, create_in, verbose]]:
constant[
Locates files with a twist:
* Check the existence of a file using the full path in `loc`
* Search for the filename `loc` in `locations`
* Create it's enclosing folders if the file does not exist. use `create_in`
:param loc:
Filename to search
:param locations:
A list of possible locations to search within
(can be a dictionary, see note below)
:param critical:
|appteardown| if file was not found
:param create_in:
If `loc` was not found, the folder `create_in` is created.
If `locations` is a dictionary, `create_in` can also specify
a key of `locations`. The value will be used then.
:param verbose:
Pass verbose flag to :func:`make_locations`
:returns:
The full path of `loc` in matched location
.. note::
* |params_locations_dict|
* |param_locations_none|
]
from relative_module[photon.util.structures] import module[to_list]
from relative_module[photon.util.system] import module[shell_notify]
if <ast.UnaryOp object at 0x7da20c7c9e70> begin[:]
variable[locations] assign[=] call[name[get_locations], parameter[]]
for taget[name[p]] in starred[call[name[reversed], parameter[call[name[sorted], parameter[call[name[to_list], parameter[name[locations]]]]]]]] begin[:]
variable[f] assign[=] call[name[_path].join, parameter[name[p], name[loc]]]
if call[name[_path].exists, parameter[name[f]]] begin[:]
return[name[f]]
if call[name[_path].exists, parameter[call[name[_path].abspath, parameter[call[name[_path].expanduser, parameter[name[loc]]]]]]] begin[:]
return[call[name[_path].abspath, parameter[call[name[_path].expanduser, parameter[name[loc]]]]]]
if name[critical] begin[:]
call[name[shell_notify], parameter[constant[could not locate]]]
if name[create_in] begin[:]
if call[name[isinstance], parameter[name[locations], name[dict]]] begin[:]
variable[create_in] assign[=] call[name[locations].get, parameter[name[create_in], name[create_in]]]
call[name[make_locations], parameter[]]
return[call[name[_path].join, parameter[name[create_in], name[loc]]]] | keyword[def] identifier[search_location] ( identifier[loc] , identifier[locations] = keyword[None] ,
identifier[critical] = keyword[False] , identifier[create_in] = keyword[None] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[from] identifier[photon] . identifier[util] . identifier[structures] keyword[import] identifier[to_list]
keyword[from] identifier[photon] . identifier[util] . identifier[system] keyword[import] identifier[shell_notify]
keyword[if] keyword[not] identifier[locations] :
identifier[locations] = identifier[get_locations] ()
keyword[for] identifier[p] keyword[in] identifier[reversed] ( identifier[sorted] ( identifier[to_list] ( identifier[locations] ))):
identifier[f] = identifier[_path] . identifier[join] ( identifier[p] , identifier[loc] )
keyword[if] identifier[_path] . identifier[exists] ( identifier[f] ):
keyword[return] identifier[f]
keyword[if] identifier[_path] . identifier[exists] ( identifier[_path] . identifier[abspath] ( identifier[_path] . identifier[expanduser] ( identifier[loc] ))):
keyword[return] identifier[_path] . identifier[abspath] ( identifier[_path] . identifier[expanduser] ( identifier[loc] ))
keyword[if] identifier[critical] :
identifier[shell_notify] ( literal[string] , identifier[state] = keyword[True] , identifier[more] = identifier[dict] (
identifier[file] = identifier[loc] , identifier[locations] = identifier[locations]
))
keyword[if] identifier[create_in] :
keyword[if] identifier[isinstance] ( identifier[locations] , identifier[dict] ):
identifier[create_in] = identifier[locations] . identifier[get] ( identifier[create_in] , identifier[create_in] )
identifier[make_locations] ( identifier[locations] =[ identifier[create_in] ], identifier[verbose] = identifier[verbose] )
keyword[return] identifier[_path] . identifier[join] ( identifier[create_in] , identifier[loc] ) | def search_location(loc, locations=None, critical=False, create_in=None, verbose=True):
"""
Locates files with a twist:
* Check the existence of a file using the full path in `loc`
* Search for the filename `loc` in `locations`
* Create it's enclosing folders if the file does not exist. use `create_in`
:param loc:
Filename to search
:param locations:
A list of possible locations to search within
(can be a dictionary, see note below)
:param critical:
|appteardown| if file was not found
:param create_in:
If `loc` was not found, the folder `create_in` is created.
If `locations` is a dictionary, `create_in` can also specify
a key of `locations`. The value will be used then.
:param verbose:
Pass verbose flag to :func:`make_locations`
:returns:
The full path of `loc` in matched location
.. note::
* |params_locations_dict|
* |param_locations_none|
"""
from photon.util.structures import to_list
from photon.util.system import shell_notify
if not locations:
locations = get_locations() # depends on [control=['if'], data=[]]
for p in reversed(sorted(to_list(locations))):
f = _path.join(p, loc)
if _path.exists(f):
return f # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
if _path.exists(_path.abspath(_path.expanduser(loc))):
return _path.abspath(_path.expanduser(loc)) # depends on [control=['if'], data=[]]
if critical:
shell_notify('could not locate', state=True, more=dict(file=loc, locations=locations)) # depends on [control=['if'], data=[]]
if create_in:
if isinstance(locations, dict):
create_in = locations.get(create_in, create_in) # depends on [control=['if'], data=[]]
make_locations(locations=[create_in], verbose=verbose)
return _path.join(create_in, loc) # depends on [control=['if'], data=[]] |
def createEditor(self, parent, option, index):
"""Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
"""
editor = QtGui.QDoubleSpinBox(parent)
try:
editor.setMinimum(self.minimum)
editor.setMaximum(self.maximum)
editor.setSingleStep(self.singleStep)
editor.setDecimals(self.decimals)
except TypeError as err:
# initiate the spinbox with default values.
pass
return editor | def function[createEditor, parameter[self, parent, option, index]]:
constant[Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
]
variable[editor] assign[=] call[name[QtGui].QDoubleSpinBox, parameter[name[parent]]]
<ast.Try object at 0x7da1b0771cf0>
return[name[editor]] | keyword[def] identifier[createEditor] ( identifier[self] , identifier[parent] , identifier[option] , identifier[index] ):
literal[string]
identifier[editor] = identifier[QtGui] . identifier[QDoubleSpinBox] ( identifier[parent] )
keyword[try] :
identifier[editor] . identifier[setMinimum] ( identifier[self] . identifier[minimum] )
identifier[editor] . identifier[setMaximum] ( identifier[self] . identifier[maximum] )
identifier[editor] . identifier[setSingleStep] ( identifier[self] . identifier[singleStep] )
identifier[editor] . identifier[setDecimals] ( identifier[self] . identifier[decimals] )
keyword[except] identifier[TypeError] keyword[as] identifier[err] :
keyword[pass]
keyword[return] identifier[editor] | def createEditor(self, parent, option, index):
"""Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
"""
editor = QtGui.QDoubleSpinBox(parent)
try:
editor.setMinimum(self.minimum)
editor.setMaximum(self.maximum)
editor.setSingleStep(self.singleStep)
editor.setDecimals(self.decimals) # depends on [control=['try'], data=[]]
except TypeError as err:
# initiate the spinbox with default values.
pass # depends on [control=['except'], data=[]]
return editor |
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == (len(asset) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
) | def function[_compute_slices, parameter[self, start_idx, end_idx, assets]]:
constant[
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == (len(asset) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
]
return[call[name[_compute_row_slices], parameter[name[self]._first_rows, name[self]._last_rows, name[self]._calendar_offsets, name[start_idx], name[end_idx], name[assets]]]] | keyword[def] identifier[_compute_slices] ( identifier[self] , identifier[start_idx] , identifier[end_idx] , identifier[assets] ):
literal[string]
keyword[return] identifier[_compute_row_slices] (
identifier[self] . identifier[_first_rows] ,
identifier[self] . identifier[_last_rows] ,
identifier[self] . identifier[_calendar_offsets] ,
identifier[start_idx] ,
identifier[end_idx] ,
identifier[assets] ,
) | def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == (len(asset) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(self._first_rows, self._last_rows, self._calendar_offsets, start_idx, end_idx, assets) |
def overall():
""" The overall grammer for pulling apart the main input files. """
return ZeroOrMore(Grammar.comment) + Dict(ZeroOrMore(Group(
Grammar._section + ZeroOrMore(Group(Grammar.line)))
)) | def function[overall, parameter[]]:
constant[ The overall grammer for pulling apart the main input files. ]
return[binary_operation[call[name[ZeroOrMore], parameter[name[Grammar].comment]] + call[name[Dict], parameter[call[name[ZeroOrMore], parameter[call[name[Group], parameter[binary_operation[name[Grammar]._section + call[name[ZeroOrMore], parameter[call[name[Group], parameter[name[Grammar].line]]]]]]]]]]]]] | keyword[def] identifier[overall] ():
literal[string]
keyword[return] identifier[ZeroOrMore] ( identifier[Grammar] . identifier[comment] )+ identifier[Dict] ( identifier[ZeroOrMore] ( identifier[Group] (
identifier[Grammar] . identifier[_section] + identifier[ZeroOrMore] ( identifier[Group] ( identifier[Grammar] . identifier[line] )))
)) | def overall():
""" The overall grammer for pulling apart the main input files. """
return ZeroOrMore(Grammar.comment) + Dict(ZeroOrMore(Group(Grammar._section + ZeroOrMore(Group(Grammar.line))))) |
def reset_stats(self, pattern):
"""Reset VM statistics.
in pattern of type str
The selection pattern. A bit similar to filename globbing.
"""
if not isinstance(pattern, basestring):
raise TypeError("pattern can only be an instance of type basestring")
self._call("resetStats",
in_p=[pattern]) | def function[reset_stats, parameter[self, pattern]]:
constant[Reset VM statistics.
in pattern of type str
The selection pattern. A bit similar to filename globbing.
]
if <ast.UnaryOp object at 0x7da204344af0> begin[:]
<ast.Raise object at 0x7da204346380>
call[name[self]._call, parameter[constant[resetStats]]] | keyword[def] identifier[reset_stats] ( identifier[self] , identifier[pattern] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[pattern] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[pattern] ]) | def reset_stats(self, pattern):
"""Reset VM statistics.
in pattern of type str
The selection pattern. A bit similar to filename globbing.
"""
if not isinstance(pattern, basestring):
raise TypeError('pattern can only be an instance of type basestring') # depends on [control=['if'], data=[]]
self._call('resetStats', in_p=[pattern]) |
def registerParser(self, parser):
"""
Registers a parser to parse configuration inputs.
"""
if not isinstance(parser, Subparser):
raise TypeError("%s is not an instance of a subparser." % parser)
self.parsers.append(parser) | def function[registerParser, parameter[self, parser]]:
constant[
Registers a parser to parse configuration inputs.
]
if <ast.UnaryOp object at 0x7da20cabc460> begin[:]
<ast.Raise object at 0x7da20cabc700>
call[name[self].parsers.append, parameter[name[parser]]] | keyword[def] identifier[registerParser] ( identifier[self] , identifier[parser] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[parser] , identifier[Subparser] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[parser] )
identifier[self] . identifier[parsers] . identifier[append] ( identifier[parser] ) | def registerParser(self, parser):
"""
Registers a parser to parse configuration inputs.
"""
if not isinstance(parser, Subparser):
raise TypeError('%s is not an instance of a subparser.' % parser) # depends on [control=['if'], data=[]]
self.parsers.append(parser) |
def with_(self, replacement):
"""Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given
"""
ensure_string(replacement)
if is_mapping(self._replacements):
raise ReplacementError("string replacements already provided")
self._replacements = dict.fromkeys(self._replacements, replacement)
return self | def function[with_, parameter[self, replacement]]:
constant[Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given
]
call[name[ensure_string], parameter[name[replacement]]]
if call[name[is_mapping], parameter[name[self]._replacements]] begin[:]
<ast.Raise object at 0x7da1b2055270>
name[self]._replacements assign[=] call[name[dict].fromkeys, parameter[name[self]._replacements, name[replacement]]]
return[name[self]] | keyword[def] identifier[with_] ( identifier[self] , identifier[replacement] ):
literal[string]
identifier[ensure_string] ( identifier[replacement] )
keyword[if] identifier[is_mapping] ( identifier[self] . identifier[_replacements] ):
keyword[raise] identifier[ReplacementError] ( literal[string] )
identifier[self] . identifier[_replacements] = identifier[dict] . identifier[fromkeys] ( identifier[self] . identifier[_replacements] , identifier[replacement] )
keyword[return] identifier[self] | def with_(self, replacement):
"""Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given
"""
ensure_string(replacement)
if is_mapping(self._replacements):
raise ReplacementError('string replacements already provided') # depends on [control=['if'], data=[]]
self._replacements = dict.fromkeys(self._replacements, replacement)
return self |
def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration()
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, 0.001)
except Empty:
if self.call_args["iter_noblock"]:
return errno.EWOULDBLOCK
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration()
try:
return chunk.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
except UnicodeDecodeError:
return chunk | def function[next, parameter[self]]:
constant[ allow us to iterate over the output of our command ]
if name[self]._stopped_iteration begin[:]
<ast.Raise object at 0x7da18ede4520>
while constant[True] begin[:]
<ast.Try object at 0x7da18fe93bb0> | keyword[def] identifier[next] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_stopped_iteration] :
keyword[raise] identifier[StopIteration] ()
keyword[while] keyword[True] :
keyword[try] :
identifier[chunk] = identifier[self] . identifier[process] . identifier[_pipe_queue] . identifier[get] ( keyword[True] , literal[int] )
keyword[except] identifier[Empty] :
keyword[if] identifier[self] . identifier[call_args] [ literal[string] ]:
keyword[return] identifier[errno] . identifier[EWOULDBLOCK]
keyword[else] :
keyword[if] identifier[chunk] keyword[is] keyword[None] :
identifier[self] . identifier[wait] ()
identifier[self] . identifier[_stopped_iteration] = keyword[True]
keyword[raise] identifier[StopIteration] ()
keyword[try] :
keyword[return] identifier[chunk] . identifier[decode] ( identifier[self] . identifier[call_args] [ literal[string] ],
identifier[self] . identifier[call_args] [ literal[string] ])
keyword[except] identifier[UnicodeDecodeError] :
keyword[return] identifier[chunk] | def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration() # depends on [control=['if'], data=[]]
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, 0.001) # depends on [control=['try'], data=[]]
except Empty:
if self.call_args['iter_noblock']:
return errno.EWOULDBLOCK # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration() # depends on [control=['if'], data=[]]
try:
return chunk.decode(self.call_args['encoding'], self.call_args['decode_errors']) # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
return chunk # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def get_username():
"""
Try to retrieve the username from a variety of sources. First the
environment variable LOGNAME is tried, if that is not set the
environment variable USERNAME is tried, if that is not set the
password database is consulted (only on Unix systems, if the import
of the pwd module succeeds), finally if that fails KeyError is
raised.
"""
try:
return os.environ["LOGNAME"]
except KeyError:
pass
try:
return os.environ["USERNAME"]
except KeyError:
pass
try:
import pwd
return pwd.getpwuid(os.getuid())[0]
except (ImportError, KeyError):
raise KeyError | def function[get_username, parameter[]]:
constant[
Try to retrieve the username from a variety of sources. First the
environment variable LOGNAME is tried, if that is not set the
environment variable USERNAME is tried, if that is not set the
password database is consulted (only on Unix systems, if the import
of the pwd module succeeds), finally if that fails KeyError is
raised.
]
<ast.Try object at 0x7da18bcc95a0>
<ast.Try object at 0x7da18bcc9e40>
<ast.Try object at 0x7da18bcca440> | keyword[def] identifier[get_username] ():
literal[string]
keyword[try] :
keyword[return] identifier[os] . identifier[environ] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[os] . identifier[environ] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[try] :
keyword[import] identifier[pwd]
keyword[return] identifier[pwd] . identifier[getpwuid] ( identifier[os] . identifier[getuid] ())[ literal[int] ]
keyword[except] ( identifier[ImportError] , identifier[KeyError] ):
keyword[raise] identifier[KeyError] | def get_username():
"""
Try to retrieve the username from a variety of sources. First the
environment variable LOGNAME is tried, if that is not set the
environment variable USERNAME is tried, if that is not set the
password database is consulted (only on Unix systems, if the import
of the pwd module succeeds), finally if that fails KeyError is
raised.
"""
try:
return os.environ['LOGNAME'] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
try:
return os.environ['USERNAME'] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
try:
import pwd
return pwd.getpwuid(os.getuid())[0] # depends on [control=['try'], data=[]]
except (ImportError, KeyError):
raise KeyError # depends on [control=['except'], data=[]] |
def ResetHandler(self, name):
'''
Method which assigns handler to the tag encountered before the current, or else
sets it to None
:param name: name of the latest tag
:return:
'''
if name in self.tags:
if len(self.tags) > 1:
key = len(self.tags) - 2
self.handler = None
while key >= 0:
if self.tags[key] in self.structure:
self.handler = self.structure[self.tags[key]]
break
key -= 1
else:
self.handler = None | def function[ResetHandler, parameter[self, name]]:
constant[
Method which assigns handler to the tag encountered before the current, or else
sets it to None
:param name: name of the latest tag
:return:
]
if compare[name[name] in name[self].tags] begin[:]
if compare[call[name[len], parameter[name[self].tags]] greater[>] constant[1]] begin[:]
variable[key] assign[=] binary_operation[call[name[len], parameter[name[self].tags]] - constant[2]]
name[self].handler assign[=] constant[None]
while compare[name[key] greater_or_equal[>=] constant[0]] begin[:]
if compare[call[name[self].tags][name[key]] in name[self].structure] begin[:]
name[self].handler assign[=] call[name[self].structure][call[name[self].tags][name[key]]]
break
<ast.AugAssign object at 0x7da1b2507a90> | keyword[def] identifier[ResetHandler] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[tags] :
keyword[if] identifier[len] ( identifier[self] . identifier[tags] )> literal[int] :
identifier[key] = identifier[len] ( identifier[self] . identifier[tags] )- literal[int]
identifier[self] . identifier[handler] = keyword[None]
keyword[while] identifier[key] >= literal[int] :
keyword[if] identifier[self] . identifier[tags] [ identifier[key] ] keyword[in] identifier[self] . identifier[structure] :
identifier[self] . identifier[handler] = identifier[self] . identifier[structure] [ identifier[self] . identifier[tags] [ identifier[key] ]]
keyword[break]
identifier[key] -= literal[int]
keyword[else] :
identifier[self] . identifier[handler] = keyword[None] | def ResetHandler(self, name):
"""
Method which assigns handler to the tag encountered before the current, or else
sets it to None
:param name: name of the latest tag
:return:
"""
if name in self.tags:
if len(self.tags) > 1:
key = len(self.tags) - 2
self.handler = None
while key >= 0:
if self.tags[key] in self.structure:
self.handler = self.structure[self.tags[key]]
break # depends on [control=['if'], data=[]]
key -= 1 # depends on [control=['while'], data=['key']] # depends on [control=['if'], data=[]]
else:
self.handler = None # depends on [control=['if'], data=[]] |
def get_process_runtime_cache(self, key, pid=None):
'''
get_process_runtime_cache(self, key, pid=None)
Get a pre-defined run time parameter value
:Parameters:
* *key* (`string`) -- Identifier of the runtime cache
* *pid* (`string`) -- Identifier of an existing process
'''
value = None
pid = self._get_pid(pid)
value = self._call_rest_api('get', '/processes/'+pid+'/cache?key=%s'%key, error='Failed to fetch process runtime cache')
return value | def function[get_process_runtime_cache, parameter[self, key, pid]]:
constant[
get_process_runtime_cache(self, key, pid=None)
Get a pre-defined run time parameter value
:Parameters:
* *key* (`string`) -- Identifier of the runtime cache
* *pid* (`string`) -- Identifier of an existing process
]
variable[value] assign[=] constant[None]
variable[pid] assign[=] call[name[self]._get_pid, parameter[name[pid]]]
variable[value] assign[=] call[name[self]._call_rest_api, parameter[constant[get], binary_operation[binary_operation[constant[/processes/] + name[pid]] + binary_operation[constant[/cache?key=%s] <ast.Mod object at 0x7da2590d6920> name[key]]]]]
return[name[value]] | keyword[def] identifier[get_process_runtime_cache] ( identifier[self] , identifier[key] , identifier[pid] = keyword[None] ):
literal[string]
identifier[value] = keyword[None]
identifier[pid] = identifier[self] . identifier[_get_pid] ( identifier[pid] )
identifier[value] = identifier[self] . identifier[_call_rest_api] ( literal[string] , literal[string] + identifier[pid] + literal[string] % identifier[key] , identifier[error] = literal[string] )
keyword[return] identifier[value] | def get_process_runtime_cache(self, key, pid=None):
"""
get_process_runtime_cache(self, key, pid=None)
Get a pre-defined run time parameter value
:Parameters:
* *key* (`string`) -- Identifier of the runtime cache
* *pid* (`string`) -- Identifier of an existing process
"""
value = None
pid = self._get_pid(pid)
value = self._call_rest_api('get', '/processes/' + pid + '/cache?key=%s' % key, error='Failed to fetch process runtime cache')
return value |
def image_function(f='sin(5*x)*cos(5*y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p='x,y', g=None, **kwargs):
"""
Plots a 2-d function over the specified range
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments.
"""
default_kwargs = dict(clabel=str(f), xlabel='x', ylabel='y')
default_kwargs.update(kwargs)
# aggregate globals
if not g: g = {}
for k in list(globals().keys()):
if k not in g: g[k] = globals()[k]
if type(f) == str:
f = eval('lambda ' + p + ': ' + f, g)
# generate the grid x and y coordinates
xones = _n.linspace(1,1,ysteps)
x = _n.linspace(xmin, xmax, xsteps)
xgrid = _n.outer(xones, x)
yones = _n.linspace(1,1,xsteps)
y = _n.linspace(ymin, ymax, ysteps)
ygrid = _n.outer(y, yones)
# now get the z-grid
try:
# try it the fast numpy way. Add 0 to assure dimensions
zgrid = f(xgrid, ygrid) + xgrid*0.0
except:
print("Notice: function is not rocking hardcore. Generating grid the slow way...")
# manually loop over the data to generate the z-grid
zgrid = []
for ny in range(0, len(y)):
zgrid.append([])
for nx in range(0, len(x)):
zgrid[ny].append(f(x[nx], y[ny]))
zgrid = _n.array(zgrid)
# now plot!
image_data(zgrid.transpose(), x, y, **default_kwargs) | def function[image_function, parameter[f, xmin, xmax, ymin, ymax, xsteps, ysteps, p, g]]:
constant[
Plots a 2-d function over the specified range
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments.
]
variable[default_kwargs] assign[=] call[name[dict], parameter[]]
call[name[default_kwargs].update, parameter[name[kwargs]]]
if <ast.UnaryOp object at 0x7da20c6c6050> begin[:]
variable[g] assign[=] dictionary[[], []]
for taget[name[k]] in starred[call[name[list], parameter[call[call[name[globals], parameter[]].keys, parameter[]]]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[g]] begin[:]
call[name[g]][name[k]] assign[=] call[call[name[globals], parameter[]]][name[k]]
if compare[call[name[type], parameter[name[f]]] equal[==] name[str]] begin[:]
variable[f] assign[=] call[name[eval], parameter[binary_operation[binary_operation[binary_operation[constant[lambda ] + name[p]] + constant[: ]] + name[f]], name[g]]]
variable[xones] assign[=] call[name[_n].linspace, parameter[constant[1], constant[1], name[ysteps]]]
variable[x] assign[=] call[name[_n].linspace, parameter[name[xmin], name[xmax], name[xsteps]]]
variable[xgrid] assign[=] call[name[_n].outer, parameter[name[xones], name[x]]]
variable[yones] assign[=] call[name[_n].linspace, parameter[constant[1], constant[1], name[xsteps]]]
variable[y] assign[=] call[name[_n].linspace, parameter[name[ymin], name[ymax], name[ysteps]]]
variable[ygrid] assign[=] call[name[_n].outer, parameter[name[y], name[yones]]]
<ast.Try object at 0x7da20c6c40d0>
call[name[image_data], parameter[call[name[zgrid].transpose, parameter[]], name[x], name[y]]] | keyword[def] identifier[image_function] ( identifier[f] = literal[string] , identifier[xmin] =- literal[int] , identifier[xmax] = literal[int] , identifier[ymin] =- literal[int] , identifier[ymax] = literal[int] , identifier[xsteps] = literal[int] , identifier[ysteps] = literal[int] , identifier[p] = literal[string] , identifier[g] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[default_kwargs] = identifier[dict] ( identifier[clabel] = identifier[str] ( identifier[f] ), identifier[xlabel] = literal[string] , identifier[ylabel] = literal[string] )
identifier[default_kwargs] . identifier[update] ( identifier[kwargs] )
keyword[if] keyword[not] identifier[g] : identifier[g] ={}
keyword[for] identifier[k] keyword[in] identifier[list] ( identifier[globals] (). identifier[keys] ()):
keyword[if] identifier[k] keyword[not] keyword[in] identifier[g] : identifier[g] [ identifier[k] ]= identifier[globals] ()[ identifier[k] ]
keyword[if] identifier[type] ( identifier[f] )== identifier[str] :
identifier[f] = identifier[eval] ( literal[string] + identifier[p] + literal[string] + identifier[f] , identifier[g] )
identifier[xones] = identifier[_n] . identifier[linspace] ( literal[int] , literal[int] , identifier[ysteps] )
identifier[x] = identifier[_n] . identifier[linspace] ( identifier[xmin] , identifier[xmax] , identifier[xsteps] )
identifier[xgrid] = identifier[_n] . identifier[outer] ( identifier[xones] , identifier[x] )
identifier[yones] = identifier[_n] . identifier[linspace] ( literal[int] , literal[int] , identifier[xsteps] )
identifier[y] = identifier[_n] . identifier[linspace] ( identifier[ymin] , identifier[ymax] , identifier[ysteps] )
identifier[ygrid] = identifier[_n] . identifier[outer] ( identifier[y] , identifier[yones] )
keyword[try] :
identifier[zgrid] = identifier[f] ( identifier[xgrid] , identifier[ygrid] )+ identifier[xgrid] * literal[int]
keyword[except] :
identifier[print] ( literal[string] )
identifier[zgrid] =[]
keyword[for] identifier[ny] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[y] )):
identifier[zgrid] . identifier[append] ([])
keyword[for] identifier[nx] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[x] )):
identifier[zgrid] [ identifier[ny] ]. identifier[append] ( identifier[f] ( identifier[x] [ identifier[nx] ], identifier[y] [ identifier[ny] ]))
identifier[zgrid] = identifier[_n] . identifier[array] ( identifier[zgrid] )
identifier[image_data] ( identifier[zgrid] . identifier[transpose] (), identifier[x] , identifier[y] ,** identifier[default_kwargs] ) | def image_function(f='sin(5*x)*cos(5*y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p='x,y', g=None, **kwargs):
"""
Plots a 2-d function over the specified range
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments.
"""
default_kwargs = dict(clabel=str(f), xlabel='x', ylabel='y')
default_kwargs.update(kwargs)
# aggregate globals
if not g:
g = {} # depends on [control=['if'], data=[]]
for k in list(globals().keys()):
if k not in g:
g[k] = globals()[k] # depends on [control=['if'], data=['k', 'g']] # depends on [control=['for'], data=['k']]
if type(f) == str:
f = eval('lambda ' + p + ': ' + f, g) # depends on [control=['if'], data=[]]
# generate the grid x and y coordinates
xones = _n.linspace(1, 1, ysteps)
x = _n.linspace(xmin, xmax, xsteps)
xgrid = _n.outer(xones, x)
yones = _n.linspace(1, 1, xsteps)
y = _n.linspace(ymin, ymax, ysteps)
ygrid = _n.outer(y, yones)
# now get the z-grid
try:
# try it the fast numpy way. Add 0 to assure dimensions
zgrid = f(xgrid, ygrid) + xgrid * 0.0 # depends on [control=['try'], data=[]]
except:
print('Notice: function is not rocking hardcore. Generating grid the slow way...')
# manually loop over the data to generate the z-grid
zgrid = []
for ny in range(0, len(y)):
zgrid.append([])
for nx in range(0, len(x)):
zgrid[ny].append(f(x[nx], y[ny])) # depends on [control=['for'], data=['nx']] # depends on [control=['for'], data=['ny']]
zgrid = _n.array(zgrid) # depends on [control=['except'], data=[]]
# now plot!
image_data(zgrid.transpose(), x, y, **default_kwargs) |
def resize_bytes(fobj, old_size, new_size, offset):
"""Resize an area in a file adding and deleting at the end of it.
Does nothing if no resizing is needed.
Args:
fobj (fileobj)
old_size (int): The area starting at offset
new_size (int): The new size of the area
offset (int): The start of the area
Raises:
IOError
"""
if new_size < old_size:
delete_size = old_size - new_size
delete_at = offset + new_size
delete_bytes(fobj, delete_size, delete_at)
elif new_size > old_size:
insert_size = new_size - old_size
insert_at = offset + old_size
insert_bytes(fobj, insert_size, insert_at) | def function[resize_bytes, parameter[fobj, old_size, new_size, offset]]:
constant[Resize an area in a file adding and deleting at the end of it.
Does nothing if no resizing is needed.
Args:
fobj (fileobj)
old_size (int): The area starting at offset
new_size (int): The new size of the area
offset (int): The start of the area
Raises:
IOError
]
if compare[name[new_size] less[<] name[old_size]] begin[:]
variable[delete_size] assign[=] binary_operation[name[old_size] - name[new_size]]
variable[delete_at] assign[=] binary_operation[name[offset] + name[new_size]]
call[name[delete_bytes], parameter[name[fobj], name[delete_size], name[delete_at]]] | keyword[def] identifier[resize_bytes] ( identifier[fobj] , identifier[old_size] , identifier[new_size] , identifier[offset] ):
literal[string]
keyword[if] identifier[new_size] < identifier[old_size] :
identifier[delete_size] = identifier[old_size] - identifier[new_size]
identifier[delete_at] = identifier[offset] + identifier[new_size]
identifier[delete_bytes] ( identifier[fobj] , identifier[delete_size] , identifier[delete_at] )
keyword[elif] identifier[new_size] > identifier[old_size] :
identifier[insert_size] = identifier[new_size] - identifier[old_size]
identifier[insert_at] = identifier[offset] + identifier[old_size]
identifier[insert_bytes] ( identifier[fobj] , identifier[insert_size] , identifier[insert_at] ) | def resize_bytes(fobj, old_size, new_size, offset):
"""Resize an area in a file adding and deleting at the end of it.
Does nothing if no resizing is needed.
Args:
fobj (fileobj)
old_size (int): The area starting at offset
new_size (int): The new size of the area
offset (int): The start of the area
Raises:
IOError
"""
if new_size < old_size:
delete_size = old_size - new_size
delete_at = offset + new_size
delete_bytes(fobj, delete_size, delete_at) # depends on [control=['if'], data=['new_size', 'old_size']]
elif new_size > old_size:
insert_size = new_size - old_size
insert_at = offset + old_size
insert_bytes(fobj, insert_size, insert_at) # depends on [control=['if'], data=['new_size', 'old_size']] |
def filer(filelist, extension='fastq', returndict=False):
"""
Helper script that creates a set of the stain names created by stripping off parts of the filename.
Hopefully handles different naming conventions (e.g. 2015-SEQ-001_S1_L001_R1_001.fastq(.gz),
2015-SEQ-001_R1_001.fastq.gz, 2015-SEQ-001_R1.fastq.gz, 2015-SEQ-001_1.fastq.gz, and 2015-SEQ-001_1.fastq.gz
all become 2015-SEQ-001)
:param filelist: List of files to parse
:param extension: the file extension to use. Default value is 'fastq
:param returndict: type BOOL: Option to return a dictionary of file name: fastq files associated with that name
rather than a set of the file names
"""
# Initialise the variables
fileset = set()
filedict = dict()
for seqfile in filelist:
# Search for the conventional motifs present following strain names
# _S\d+_L001_R\d_001.fastq(.gz) is a typical unprocessed Illumina fastq file
if re.search("_S\\d+_L001", seqfile):
file_name = re.split("_S\\d+_L001", seqfile)[0]
# Files with _R\d_001.fastq(.gz) are created in the SPAdes assembly pipeline
elif re.search("_R\\d_001", seqfile):
file_name = re.split("_R\\d_001", seqfile)[0]
# _R\d.fastq(.gz) represents a simple naming scheme for paired end reads
elif re.search("R\\d.{}".format(extension), seqfile):
file_name = re.split("_R\\d.{}".format(extension), seqfile)[0]
# _\d.fastq is always possible
elif re.search("[-_]\\d.{}".format(extension), seqfile):
file_name = re.split("[-_]\\d.{}".format(extension), seqfile)[0]
# .fastq is the last option
else:
file_name = re.split(".{}".format(extension), seqfile)[0]
# Add the calculated file name to the set
fileset.add(file_name)
# Populate the dictionary with the file name: seq file pair
try:
filedict[file_name].append(seqfile)
except KeyError:
filedict[file_name] = [seqfile]
# Return the appropriate variable
if not returndict:
return fileset
else:
return filedict | def function[filer, parameter[filelist, extension, returndict]]:
constant[
Helper script that creates a set of the stain names created by stripping off parts of the filename.
Hopefully handles different naming conventions (e.g. 2015-SEQ-001_S1_L001_R1_001.fastq(.gz),
2015-SEQ-001_R1_001.fastq.gz, 2015-SEQ-001_R1.fastq.gz, 2015-SEQ-001_1.fastq.gz, and 2015-SEQ-001_1.fastq.gz
all become 2015-SEQ-001)
:param filelist: List of files to parse
:param extension: the file extension to use. Default value is 'fastq
:param returndict: type BOOL: Option to return a dictionary of file name: fastq files associated with that name
rather than a set of the file names
]
variable[fileset] assign[=] call[name[set], parameter[]]
variable[filedict] assign[=] call[name[dict], parameter[]]
for taget[name[seqfile]] in starred[name[filelist]] begin[:]
if call[name[re].search, parameter[constant[_S\d+_L001], name[seqfile]]] begin[:]
variable[file_name] assign[=] call[call[name[re].split, parameter[constant[_S\d+_L001], name[seqfile]]]][constant[0]]
call[name[fileset].add, parameter[name[file_name]]]
<ast.Try object at 0x7da2044c02b0>
if <ast.UnaryOp object at 0x7da2044c2800> begin[:]
return[name[fileset]] | keyword[def] identifier[filer] ( identifier[filelist] , identifier[extension] = literal[string] , identifier[returndict] = keyword[False] ):
literal[string]
identifier[fileset] = identifier[set] ()
identifier[filedict] = identifier[dict] ()
keyword[for] identifier[seqfile] keyword[in] identifier[filelist] :
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[seqfile] ):
identifier[file_name] = identifier[re] . identifier[split] ( literal[string] , identifier[seqfile] )[ literal[int] ]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[seqfile] ):
identifier[file_name] = identifier[re] . identifier[split] ( literal[string] , identifier[seqfile] )[ literal[int] ]
keyword[elif] identifier[re] . identifier[search] ( literal[string] . identifier[format] ( identifier[extension] ), identifier[seqfile] ):
identifier[file_name] = identifier[re] . identifier[split] ( literal[string] . identifier[format] ( identifier[extension] ), identifier[seqfile] )[ literal[int] ]
keyword[elif] identifier[re] . identifier[search] ( literal[string] . identifier[format] ( identifier[extension] ), identifier[seqfile] ):
identifier[file_name] = identifier[re] . identifier[split] ( literal[string] . identifier[format] ( identifier[extension] ), identifier[seqfile] )[ literal[int] ]
keyword[else] :
identifier[file_name] = identifier[re] . identifier[split] ( literal[string] . identifier[format] ( identifier[extension] ), identifier[seqfile] )[ literal[int] ]
identifier[fileset] . identifier[add] ( identifier[file_name] )
keyword[try] :
identifier[filedict] [ identifier[file_name] ]. identifier[append] ( identifier[seqfile] )
keyword[except] identifier[KeyError] :
identifier[filedict] [ identifier[file_name] ]=[ identifier[seqfile] ]
keyword[if] keyword[not] identifier[returndict] :
keyword[return] identifier[fileset]
keyword[else] :
keyword[return] identifier[filedict] | def filer(filelist, extension='fastq', returndict=False):
"""
Helper script that creates a set of the stain names created by stripping off parts of the filename.
Hopefully handles different naming conventions (e.g. 2015-SEQ-001_S1_L001_R1_001.fastq(.gz),
2015-SEQ-001_R1_001.fastq.gz, 2015-SEQ-001_R1.fastq.gz, 2015-SEQ-001_1.fastq.gz, and 2015-SEQ-001_1.fastq.gz
all become 2015-SEQ-001)
:param filelist: List of files to parse
:param extension: the file extension to use. Default value is 'fastq
:param returndict: type BOOL: Option to return a dictionary of file name: fastq files associated with that name
rather than a set of the file names
"""
# Initialise the variables
fileset = set()
filedict = dict()
for seqfile in filelist:
# Search for the conventional motifs present following strain names
# _S\d+_L001_R\d_001.fastq(.gz) is a typical unprocessed Illumina fastq file
if re.search('_S\\d+_L001', seqfile):
file_name = re.split('_S\\d+_L001', seqfile)[0] # depends on [control=['if'], data=[]]
# Files with _R\d_001.fastq(.gz) are created in the SPAdes assembly pipeline
elif re.search('_R\\d_001', seqfile):
file_name = re.split('_R\\d_001', seqfile)[0] # depends on [control=['if'], data=[]]
# _R\d.fastq(.gz) represents a simple naming scheme for paired end reads
elif re.search('R\\d.{}'.format(extension), seqfile):
file_name = re.split('_R\\d.{}'.format(extension), seqfile)[0] # depends on [control=['if'], data=[]]
# _\d.fastq is always possible
elif re.search('[-_]\\d.{}'.format(extension), seqfile):
file_name = re.split('[-_]\\d.{}'.format(extension), seqfile)[0] # depends on [control=['if'], data=[]]
else:
# .fastq is the last option
file_name = re.split('.{}'.format(extension), seqfile)[0]
# Add the calculated file name to the set
fileset.add(file_name)
# Populate the dictionary with the file name: seq file pair
try:
filedict[file_name].append(seqfile) # depends on [control=['try'], data=[]]
except KeyError:
filedict[file_name] = [seqfile] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['seqfile']]
# Return the appropriate variable
if not returndict:
return fileset # depends on [control=['if'], data=[]]
else:
return filedict |
def put_readme(self, content):
"""Store the readme descriptive metadata."""
logger.debug("Putting readme")
key = self.get_readme_key()
self.put_text(key, content) | def function[put_readme, parameter[self, content]]:
constant[Store the readme descriptive metadata.]
call[name[logger].debug, parameter[constant[Putting readme]]]
variable[key] assign[=] call[name[self].get_readme_key, parameter[]]
call[name[self].put_text, parameter[name[key], name[content]]] | keyword[def] identifier[put_readme] ( identifier[self] , identifier[content] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[key] = identifier[self] . identifier[get_readme_key] ()
identifier[self] . identifier[put_text] ( identifier[key] , identifier[content] ) | def put_readme(self, content):
"""Store the readme descriptive metadata."""
logger.debug('Putting readme')
key = self.get_readme_key()
self.put_text(key, content) |
def feed_backend(url, clean, fetch_archive, backend_name, backend_params,
es_index=None, es_index_enrich=None, project=None, arthur=False,
es_aliases=None, projects_json_repo=None):
""" Feed Ocean with backend data """
backend = None
repo = {'backend_name': backend_name, 'backend_params': backend_params} # repository data to be stored in conf
if es_index:
clean = False # don't remove index, it could be shared
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
logger.info("Feeding Ocean from %s (%s)", backend_name, es_index)
if not es_index:
logger.error("Raw index not defined for %s", backend_name)
repo['repo_update_start'] = datetime.now().isoformat()
# perceval backends fetch params
offset = None
from_date = None
category = None
latest_items = None
filter_classified = None
backend_cmd = klass(*backend_params)
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND,
parsed_args)
if backend_cmd.archive_manager and fetch_archive:
archive = Archive(parsed_args['archive_path'])
else:
archive = backend_cmd.archive_manager.create_archive() if backend_cmd.archive_manager else None
init_args['archive'] = archive
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
backend = backend_cmd.backend
ocean_backend = connector[1](backend, fetch_archive=fetch_archive, project=project)
elastic_ocean = get_elastic(url, es_index, clean, ocean_backend, es_aliases)
ocean_backend.set_elastic(elastic_ocean)
ocean_backend.set_projects_json_repo(projects_json_repo)
if fetch_archive:
signature = inspect.signature(backend.fetch_from_archive)
else:
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
try:
# Support perceval pre and post BackendCommand refactoring
from_date = backend_cmd.from_date
except AttributeError:
from_date = backend_cmd.parsed_args.from_date
if 'offset' in signature.parameters:
try:
offset = backend_cmd.offset
except AttributeError:
offset = backend_cmd.parsed_args.offset
if 'category' in signature.parameters:
try:
category = backend_cmd.category
except AttributeError:
try:
category = backend_cmd.parsed_args.category
except AttributeError:
pass
if 'filter_classified' in signature.parameters:
try:
filter_classified = backend_cmd.parsed_args.filter_classified
except AttributeError:
pass
if 'latest_items' in signature.parameters:
try:
latest_items = backend_cmd.latest_items
except AttributeError:
latest_items = backend_cmd.parsed_args.latest_items
# fetch params support
if arthur:
# If using arthur just provide the items generator to be used
# to collect the items and upload to Elasticsearch
aitems = feed_backend_arthur(backend_name, backend_params)
ocean_backend.feed(arthur_items=aitems)
else:
params = {}
if latest_items:
params['latest_items'] = latest_items
if category:
params['category'] = category
if filter_classified:
params['filter_classified'] = filter_classified
if from_date and (from_date.replace(tzinfo=None) != parser.parse("1970-01-01")):
params['from_date'] = from_date
if offset:
params['from_offset'] = offset
ocean_backend.feed(**params)
except Exception as ex:
if backend:
logger.error("Error feeding ocean from %s (%s): %s", backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error feeding ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) | def function[feed_backend, parameter[url, clean, fetch_archive, backend_name, backend_params, es_index, es_index_enrich, project, arthur, es_aliases, projects_json_repo]]:
constant[ Feed Ocean with backend data ]
variable[backend] assign[=] constant[None]
variable[repo] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f6aaa0>, <ast.Constant object at 0x7da1b0f6aad0>], [<ast.Name object at 0x7da1b0f6ab00>, <ast.Name object at 0x7da1b0f6b520>]]
if name[es_index] begin[:]
variable[clean] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b0f6b2e0> begin[:]
<ast.Raise object at 0x7da1b0f6ac20>
variable[connector] assign[=] call[name[get_connector_from_name], parameter[name[backend_name]]]
variable[klass] assign[=] call[name[connector]][constant[3]]
<ast.Try object at 0x7da1b0f6bbe0>
call[name[logger].info, parameter[constant[Done %s ], name[backend_name]]] | keyword[def] identifier[feed_backend] ( identifier[url] , identifier[clean] , identifier[fetch_archive] , identifier[backend_name] , identifier[backend_params] ,
identifier[es_index] = keyword[None] , identifier[es_index_enrich] = keyword[None] , identifier[project] = keyword[None] , identifier[arthur] = keyword[False] ,
identifier[es_aliases] = keyword[None] , identifier[projects_json_repo] = keyword[None] ):
literal[string]
identifier[backend] = keyword[None]
identifier[repo] ={ literal[string] : identifier[backend_name] , literal[string] : identifier[backend_params] }
keyword[if] identifier[es_index] :
identifier[clean] = keyword[False]
keyword[if] keyword[not] identifier[get_connector_from_name] ( identifier[backend_name] ):
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[backend_name] )
identifier[connector] = identifier[get_connector_from_name] ( identifier[backend_name] )
identifier[klass] = identifier[connector] [ literal[int] ]
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] , identifier[backend_name] , identifier[es_index] )
keyword[if] keyword[not] identifier[es_index] :
identifier[logger] . identifier[error] ( literal[string] , identifier[backend_name] )
identifier[repo] [ literal[string] ]= identifier[datetime] . identifier[now] (). identifier[isoformat] ()
identifier[offset] = keyword[None]
identifier[from_date] = keyword[None]
identifier[category] = keyword[None]
identifier[latest_items] = keyword[None]
identifier[filter_classified] = keyword[None]
identifier[backend_cmd] = identifier[klass] (* identifier[backend_params] )
identifier[parsed_args] = identifier[vars] ( identifier[backend_cmd] . identifier[parsed_args] )
identifier[init_args] = identifier[find_signature_parameters] ( identifier[backend_cmd] . identifier[BACKEND] ,
identifier[parsed_args] )
keyword[if] identifier[backend_cmd] . identifier[archive_manager] keyword[and] identifier[fetch_archive] :
identifier[archive] = identifier[Archive] ( identifier[parsed_args] [ literal[string] ])
keyword[else] :
identifier[archive] = identifier[backend_cmd] . identifier[archive_manager] . identifier[create_archive] () keyword[if] identifier[backend_cmd] . identifier[archive_manager] keyword[else] keyword[None]
identifier[init_args] [ literal[string] ]= identifier[archive]
identifier[backend_cmd] . identifier[backend] = identifier[backend_cmd] . identifier[BACKEND] (** identifier[init_args] )
identifier[backend] = identifier[backend_cmd] . identifier[backend]
identifier[ocean_backend] = identifier[connector] [ literal[int] ]( identifier[backend] , identifier[fetch_archive] = identifier[fetch_archive] , identifier[project] = identifier[project] )
identifier[elastic_ocean] = identifier[get_elastic] ( identifier[url] , identifier[es_index] , identifier[clean] , identifier[ocean_backend] , identifier[es_aliases] )
identifier[ocean_backend] . identifier[set_elastic] ( identifier[elastic_ocean] )
identifier[ocean_backend] . identifier[set_projects_json_repo] ( identifier[projects_json_repo] )
keyword[if] identifier[fetch_archive] :
identifier[signature] = identifier[inspect] . identifier[signature] ( identifier[backend] . identifier[fetch_from_archive] )
keyword[else] :
identifier[signature] = identifier[inspect] . identifier[signature] ( identifier[backend] . identifier[fetch] )
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[try] :
identifier[from_date] = identifier[backend_cmd] . identifier[from_date]
keyword[except] identifier[AttributeError] :
identifier[from_date] = identifier[backend_cmd] . identifier[parsed_args] . identifier[from_date]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[try] :
identifier[offset] = identifier[backend_cmd] . identifier[offset]
keyword[except] identifier[AttributeError] :
identifier[offset] = identifier[backend_cmd] . identifier[parsed_args] . identifier[offset]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[try] :
identifier[category] = identifier[backend_cmd] . identifier[category]
keyword[except] identifier[AttributeError] :
keyword[try] :
identifier[category] = identifier[backend_cmd] . identifier[parsed_args] . identifier[category]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[try] :
identifier[filter_classified] = identifier[backend_cmd] . identifier[parsed_args] . identifier[filter_classified]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[try] :
identifier[latest_items] = identifier[backend_cmd] . identifier[latest_items]
keyword[except] identifier[AttributeError] :
identifier[latest_items] = identifier[backend_cmd] . identifier[parsed_args] . identifier[latest_items]
keyword[if] identifier[arthur] :
identifier[aitems] = identifier[feed_backend_arthur] ( identifier[backend_name] , identifier[backend_params] )
identifier[ocean_backend] . identifier[feed] ( identifier[arthur_items] = identifier[aitems] )
keyword[else] :
identifier[params] ={}
keyword[if] identifier[latest_items] :
identifier[params] [ literal[string] ]= identifier[latest_items]
keyword[if] identifier[category] :
identifier[params] [ literal[string] ]= identifier[category]
keyword[if] identifier[filter_classified] :
identifier[params] [ literal[string] ]= identifier[filter_classified]
keyword[if] identifier[from_date] keyword[and] ( identifier[from_date] . identifier[replace] ( identifier[tzinfo] = keyword[None] )!= identifier[parser] . identifier[parse] ( literal[string] )):
identifier[params] [ literal[string] ]= identifier[from_date]
keyword[if] identifier[offset] :
identifier[params] [ literal[string] ]= identifier[offset]
identifier[ocean_backend] . identifier[feed] (** identifier[params] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[backend] :
identifier[logger] . identifier[error] ( literal[string] , identifier[backend_name] , identifier[backend] . identifier[origin] , identifier[ex] , identifier[exc_info] = keyword[True] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] , identifier[ex] , identifier[exc_info] = keyword[True] )
identifier[logger] . identifier[info] ( literal[string] , identifier[backend_name] ) | def feed_backend(url, clean, fetch_archive, backend_name, backend_params, es_index=None, es_index_enrich=None, project=None, arthur=False, es_aliases=None, projects_json_repo=None):
""" Feed Ocean with backend data """
backend = None
repo = {'backend_name': backend_name, 'backend_params': backend_params} # repository data to be stored in conf
if es_index:
clean = False # don't remove index, it could be shared # depends on [control=['if'], data=[]]
if not get_connector_from_name(backend_name):
raise RuntimeError('Unknown backend %s' % backend_name) # depends on [control=['if'], data=[]]
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
logger.info('Feeding Ocean from %s (%s)', backend_name, es_index)
if not es_index:
logger.error('Raw index not defined for %s', backend_name) # depends on [control=['if'], data=[]]
repo['repo_update_start'] = datetime.now().isoformat()
# perceval backends fetch params
offset = None
from_date = None
category = None
latest_items = None
filter_classified = None
backend_cmd = klass(*backend_params)
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND, parsed_args)
if backend_cmd.archive_manager and fetch_archive:
archive = Archive(parsed_args['archive_path']) # depends on [control=['if'], data=[]]
else:
archive = backend_cmd.archive_manager.create_archive() if backend_cmd.archive_manager else None
init_args['archive'] = archive
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
backend = backend_cmd.backend
ocean_backend = connector[1](backend, fetch_archive=fetch_archive, project=project)
elastic_ocean = get_elastic(url, es_index, clean, ocean_backend, es_aliases)
ocean_backend.set_elastic(elastic_ocean)
ocean_backend.set_projects_json_repo(projects_json_repo)
if fetch_archive:
signature = inspect.signature(backend.fetch_from_archive) # depends on [control=['if'], data=[]]
else:
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
try:
# Support perceval pre and post BackendCommand refactoring
from_date = backend_cmd.from_date # depends on [control=['try'], data=[]]
except AttributeError:
from_date = backend_cmd.parsed_args.from_date # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if 'offset' in signature.parameters:
try:
offset = backend_cmd.offset # depends on [control=['try'], data=[]]
except AttributeError:
offset = backend_cmd.parsed_args.offset # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if 'category' in signature.parameters:
try:
category = backend_cmd.category # depends on [control=['try'], data=[]]
except AttributeError:
try:
category = backend_cmd.parsed_args.category # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if 'filter_classified' in signature.parameters:
try:
filter_classified = backend_cmd.parsed_args.filter_classified # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if 'latest_items' in signature.parameters:
try:
latest_items = backend_cmd.latest_items # depends on [control=['try'], data=[]]
except AttributeError:
latest_items = backend_cmd.parsed_args.latest_items # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# fetch params support
if arthur:
# If using arthur just provide the items generator to be used
# to collect the items and upload to Elasticsearch
aitems = feed_backend_arthur(backend_name, backend_params)
ocean_backend.feed(arthur_items=aitems) # depends on [control=['if'], data=[]]
else:
params = {}
if latest_items:
params['latest_items'] = latest_items # depends on [control=['if'], data=[]]
if category:
params['category'] = category # depends on [control=['if'], data=[]]
if filter_classified:
params['filter_classified'] = filter_classified # depends on [control=['if'], data=[]]
if from_date and from_date.replace(tzinfo=None) != parser.parse('1970-01-01'):
params['from_date'] = from_date # depends on [control=['if'], data=[]]
if offset:
params['from_offset'] = offset # depends on [control=['if'], data=[]]
ocean_backend.feed(**params) # depends on [control=['try'], data=[]]
except Exception as ex:
if backend:
logger.error('Error feeding ocean from %s (%s): %s', backend_name, backend.origin, ex, exc_info=True) # depends on [control=['if'], data=[]]
else:
logger.error('Error feeding ocean %s', ex, exc_info=True) # depends on [control=['except'], data=['ex']]
logger.info('Done %s ', backend_name) |
def _dict_to_row(keyword_value, keyword_property=None):
"""Helper to make a message row from a keyword where value is a dict.
.. versionadded:: 3.2
Use this when constructing a table from keywords to display as
part of a message object. This variant will unpack the dict and
present it nicely in the keyword value area as a nested table in the
cell.
We are expecting keyword value would be something like this:
"{'high': ['Kawasan Rawan Bencana III'], "
"'medium': ['Kawasan Rawan Bencana II'], "
"'low': ['Kawasan Rawan Bencana I']}"
Or by passing a python dict object with similar layout to above.
i.e. A string representation of a dict where the values are lists.
:param keyword_value: Value of the keyword to be rendered. This must
be a string representation of a dict, or a dict.
:type keyword_value: basestring, dict
:param keyword_property: The definition of the keyword property.
:type keyword_property: dict, None
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
"""
if isinstance(keyword_value, str):
keyword_value = literal_eval(keyword_value)
table = m.Table(style_class='table table-condensed')
# Sorting the key
for key in sorted(keyword_value.keys()):
value = keyword_value[key]
row = m.Row()
# First the heading
if keyword_property is None:
if definition(key):
name = definition(key)['name']
else:
name = tr(key.replace('_', ' ').capitalize())
else:
default_name = tr(key.replace('_', ' ').capitalize())
name = keyword_property.get('member_names', {}).get(
key, default_name)
row.add(m.Cell(m.ImportantText(name)))
# Then the value. If it contains more than one element we
# present it as a bullet list, otherwise just as simple text
if isinstance(value, (tuple, list, dict, set)):
if len(value) > 1:
bullets = m.BulletedList()
for item in value:
bullets.add(item)
row.add(m.Cell(bullets))
elif len(value) == 0:
row.add(m.Cell(""))
else:
row.add(m.Cell(value[0]))
else:
if keyword_property == property_extra_keywords:
key_definition = definition(key)
if key_definition and key_definition.get('options'):
value_definition = definition(value)
if value_definition:
value = value_definition.get('name', value)
elif key_definition and key_definition.get(
'type') == datetime:
try:
value = datetime.strptime(value, key_definition[
'store_format'])
value = value.strftime(
key_definition['show_format'])
except ValueError:
try:
value = datetime.strptime(
value, key_definition['store_format2'])
value = value.strftime(
key_definition['show_format'])
except ValueError:
pass
row.add(m.Cell(value))
table.add(row)
return table | def function[_dict_to_row, parameter[keyword_value, keyword_property]]:
constant[Helper to make a message row from a keyword where value is a dict.
.. versionadded:: 3.2
Use this when constructing a table from keywords to display as
part of a message object. This variant will unpack the dict and
present it nicely in the keyword value area as a nested table in the
cell.
We are expecting keyword value would be something like this:
"{'high': ['Kawasan Rawan Bencana III'], "
"'medium': ['Kawasan Rawan Bencana II'], "
"'low': ['Kawasan Rawan Bencana I']}"
Or by passing a python dict object with similar layout to above.
i.e. A string representation of a dict where the values are lists.
:param keyword_value: Value of the keyword to be rendered. This must
be a string representation of a dict, or a dict.
:type keyword_value: basestring, dict
:param keyword_property: The definition of the keyword property.
:type keyword_property: dict, None
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
]
if call[name[isinstance], parameter[name[keyword_value], name[str]]] begin[:]
variable[keyword_value] assign[=] call[name[literal_eval], parameter[name[keyword_value]]]
variable[table] assign[=] call[name[m].Table, parameter[]]
for taget[name[key]] in starred[call[name[sorted], parameter[call[name[keyword_value].keys, parameter[]]]]] begin[:]
variable[value] assign[=] call[name[keyword_value]][name[key]]
variable[row] assign[=] call[name[m].Row, parameter[]]
if compare[name[keyword_property] is constant[None]] begin[:]
if call[name[definition], parameter[name[key]]] begin[:]
variable[name] assign[=] call[call[name[definition], parameter[name[key]]]][constant[name]]
call[name[row].add, parameter[call[name[m].Cell, parameter[call[name[m].ImportantText, parameter[name[name]]]]]]]
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b0ce1930>, <ast.Name object at 0x7da1b0ce15a0>, <ast.Name object at 0x7da1b0ce2ad0>, <ast.Name object at 0x7da1b0ce2e30>]]]] begin[:]
if compare[call[name[len], parameter[name[value]]] greater[>] constant[1]] begin[:]
variable[bullets] assign[=] call[name[m].BulletedList, parameter[]]
for taget[name[item]] in starred[name[value]] begin[:]
call[name[bullets].add, parameter[name[item]]]
call[name[row].add, parameter[call[name[m].Cell, parameter[name[bullets]]]]]
call[name[table].add, parameter[name[row]]]
return[name[table]] | keyword[def] identifier[_dict_to_row] ( identifier[keyword_value] , identifier[keyword_property] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[keyword_value] , identifier[str] ):
identifier[keyword_value] = identifier[literal_eval] ( identifier[keyword_value] )
identifier[table] = identifier[m] . identifier[Table] ( identifier[style_class] = literal[string] )
keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[keyword_value] . identifier[keys] ()):
identifier[value] = identifier[keyword_value] [ identifier[key] ]
identifier[row] = identifier[m] . identifier[Row] ()
keyword[if] identifier[keyword_property] keyword[is] keyword[None] :
keyword[if] identifier[definition] ( identifier[key] ):
identifier[name] = identifier[definition] ( identifier[key] )[ literal[string] ]
keyword[else] :
identifier[name] = identifier[tr] ( identifier[key] . identifier[replace] ( literal[string] , literal[string] ). identifier[capitalize] ())
keyword[else] :
identifier[default_name] = identifier[tr] ( identifier[key] . identifier[replace] ( literal[string] , literal[string] ). identifier[capitalize] ())
identifier[name] = identifier[keyword_property] . identifier[get] ( literal[string] ,{}). identifier[get] (
identifier[key] , identifier[default_name] )
identifier[row] . identifier[add] ( identifier[m] . identifier[Cell] ( identifier[m] . identifier[ImportantText] ( identifier[name] )))
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] , identifier[dict] , identifier[set] )):
keyword[if] identifier[len] ( identifier[value] )> literal[int] :
identifier[bullets] = identifier[m] . identifier[BulletedList] ()
keyword[for] identifier[item] keyword[in] identifier[value] :
identifier[bullets] . identifier[add] ( identifier[item] )
identifier[row] . identifier[add] ( identifier[m] . identifier[Cell] ( identifier[bullets] ))
keyword[elif] identifier[len] ( identifier[value] )== literal[int] :
identifier[row] . identifier[add] ( identifier[m] . identifier[Cell] ( literal[string] ))
keyword[else] :
identifier[row] . identifier[add] ( identifier[m] . identifier[Cell] ( identifier[value] [ literal[int] ]))
keyword[else] :
keyword[if] identifier[keyword_property] == identifier[property_extra_keywords] :
identifier[key_definition] = identifier[definition] ( identifier[key] )
keyword[if] identifier[key_definition] keyword[and] identifier[key_definition] . identifier[get] ( literal[string] ):
identifier[value_definition] = identifier[definition] ( identifier[value] )
keyword[if] identifier[value_definition] :
identifier[value] = identifier[value_definition] . identifier[get] ( literal[string] , identifier[value] )
keyword[elif] identifier[key_definition] keyword[and] identifier[key_definition] . identifier[get] (
literal[string] )== identifier[datetime] :
keyword[try] :
identifier[value] = identifier[datetime] . identifier[strptime] ( identifier[value] , identifier[key_definition] [
literal[string] ])
identifier[value] = identifier[value] . identifier[strftime] (
identifier[key_definition] [ literal[string] ])
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[value] = identifier[datetime] . identifier[strptime] (
identifier[value] , identifier[key_definition] [ literal[string] ])
identifier[value] = identifier[value] . identifier[strftime] (
identifier[key_definition] [ literal[string] ])
keyword[except] identifier[ValueError] :
keyword[pass]
identifier[row] . identifier[add] ( identifier[m] . identifier[Cell] ( identifier[value] ))
identifier[table] . identifier[add] ( identifier[row] )
keyword[return] identifier[table] | def _dict_to_row(keyword_value, keyword_property=None):
"""Helper to make a message row from a keyword where value is a dict.
.. versionadded:: 3.2
Use this when constructing a table from keywords to display as
part of a message object. This variant will unpack the dict and
present it nicely in the keyword value area as a nested table in the
cell.
We are expecting keyword value would be something like this:
"{'high': ['Kawasan Rawan Bencana III'], "
"'medium': ['Kawasan Rawan Bencana II'], "
"'low': ['Kawasan Rawan Bencana I']}"
Or by passing a python dict object with similar layout to above.
i.e. A string representation of a dict where the values are lists.
:param keyword_value: Value of the keyword to be rendered. This must
be a string representation of a dict, or a dict.
:type keyword_value: basestring, dict
:param keyword_property: The definition of the keyword property.
:type keyword_property: dict, None
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
"""
if isinstance(keyword_value, str):
keyword_value = literal_eval(keyword_value) # depends on [control=['if'], data=[]]
table = m.Table(style_class='table table-condensed')
# Sorting the key
for key in sorted(keyword_value.keys()):
value = keyword_value[key]
row = m.Row()
# First the heading
if keyword_property is None:
if definition(key):
name = definition(key)['name'] # depends on [control=['if'], data=[]]
else:
name = tr(key.replace('_', ' ').capitalize()) # depends on [control=['if'], data=[]]
else:
default_name = tr(key.replace('_', ' ').capitalize())
name = keyword_property.get('member_names', {}).get(key, default_name)
row.add(m.Cell(m.ImportantText(name)))
# Then the value. If it contains more than one element we
# present it as a bullet list, otherwise just as simple text
if isinstance(value, (tuple, list, dict, set)):
if len(value) > 1:
bullets = m.BulletedList()
for item in value:
bullets.add(item) # depends on [control=['for'], data=['item']]
row.add(m.Cell(bullets)) # depends on [control=['if'], data=[]]
elif len(value) == 0:
row.add(m.Cell('')) # depends on [control=['if'], data=[]]
else:
row.add(m.Cell(value[0])) # depends on [control=['if'], data=[]]
else:
if keyword_property == property_extra_keywords:
key_definition = definition(key)
if key_definition and key_definition.get('options'):
value_definition = definition(value)
if value_definition:
value = value_definition.get('name', value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key_definition and key_definition.get('type') == datetime:
try:
value = datetime.strptime(value, key_definition['store_format'])
value = value.strftime(key_definition['show_format']) # depends on [control=['try'], data=[]]
except ValueError:
try:
value = datetime.strptime(value, key_definition['store_format2'])
value = value.strftime(key_definition['show_format']) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
row.add(m.Cell(value))
table.add(row) # depends on [control=['for'], data=['key']]
return table |
def is_valid(self):
"""
Check whether this SteamID is valid
:rtype: :py:class:`bool`
"""
if self.type == EType.Invalid or self.type >= EType.Max:
return False
if self.universe == EUniverse.Invalid or self.universe >= EUniverse.Max:
return False
if self.type == EType.Individual:
if self.id == 0 or self.instance > 4:
return False
if self.type == EType.Clan:
if self.id == 0 or self.instance != 0:
return False
if self.type == EType.GameServer:
if self.id == 0:
return False
if self.type == EType.AnonGameServer:
if self.id == 0 and self.instance == 0:
return False
return True | def function[is_valid, parameter[self]]:
constant[
Check whether this SteamID is valid
:rtype: :py:class:`bool`
]
if <ast.BoolOp object at 0x7da1b2346b30> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b23444c0> begin[:]
return[constant[False]]
if compare[name[self].type equal[==] name[EType].Individual] begin[:]
if <ast.BoolOp object at 0x7da1b2347400> begin[:]
return[constant[False]]
if compare[name[self].type equal[==] name[EType].Clan] begin[:]
if <ast.BoolOp object at 0x7da1b2346170> begin[:]
return[constant[False]]
if compare[name[self].type equal[==] name[EType].GameServer] begin[:]
if compare[name[self].id equal[==] constant[0]] begin[:]
return[constant[False]]
if compare[name[self].type equal[==] name[EType].AnonGameServer] begin[:]
if <ast.BoolOp object at 0x7da18f00fbb0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_valid] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[type] == identifier[EType] . identifier[Invalid] keyword[or] identifier[self] . identifier[type] >= identifier[EType] . identifier[Max] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[universe] == identifier[EUniverse] . identifier[Invalid] keyword[or] identifier[self] . identifier[universe] >= identifier[EUniverse] . identifier[Max] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[type] == identifier[EType] . identifier[Individual] :
keyword[if] identifier[self] . identifier[id] == literal[int] keyword[or] identifier[self] . identifier[instance] > literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[type] == identifier[EType] . identifier[Clan] :
keyword[if] identifier[self] . identifier[id] == literal[int] keyword[or] identifier[self] . identifier[instance] != literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[type] == identifier[EType] . identifier[GameServer] :
keyword[if] identifier[self] . identifier[id] == literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[type] == identifier[EType] . identifier[AnonGameServer] :
keyword[if] identifier[self] . identifier[id] == literal[int] keyword[and] identifier[self] . identifier[instance] == literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_valid(self):
"""
Check whether this SteamID is valid
:rtype: :py:class:`bool`
"""
if self.type == EType.Invalid or self.type >= EType.Max:
return False # depends on [control=['if'], data=[]]
if self.universe == EUniverse.Invalid or self.universe >= EUniverse.Max:
return False # depends on [control=['if'], data=[]]
if self.type == EType.Individual:
if self.id == 0 or self.instance > 4:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.type == EType.Clan:
if self.id == 0 or self.instance != 0:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.type == EType.GameServer:
if self.id == 0:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.type == EType.AnonGameServer:
if self.id == 0 and self.instance == 0:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
async def blob(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps blob
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
if hasattr(elem_type, 'kv_serialize'):
elem = elem_type() if elem is None else elem
return await elem.kv_serialize(self, elem=elem, elem_type=elem_type, params=params)
if self.writing:
elem_is_blob = isinstance(elem, x.BlobType)
data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
if data is None:
return NoSetSentinel()
if len(data) == 0:
return b''
fval = Modeler.to_bytes(data)
if self.hexlify:
return binascii.hexlify(fval).decode('ascii')
else:
return fval
else:
if elem is None:
return NoSetSentinel()
if self.hexlify:
return bytes(binascii.unhexlify(elem))
else:
return bytes(elem) | <ast.AsyncFunctionDef object at 0x7da1b2453fd0> | keyword[async] keyword[def] identifier[blob] ( identifier[self] , identifier[elem] = keyword[None] , identifier[elem_type] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
identifier[elem_type] = identifier[elem_type] keyword[if] identifier[elem_type] keyword[else] identifier[elem] . identifier[__class__]
keyword[if] identifier[hasattr] ( identifier[elem_type] , literal[string] ):
identifier[elem] = identifier[elem_type] () keyword[if] identifier[elem] keyword[is] keyword[None] keyword[else] identifier[elem]
keyword[return] keyword[await] identifier[elem] . identifier[kv_serialize] ( identifier[self] , identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] )
keyword[if] identifier[self] . identifier[writing] :
identifier[elem_is_blob] = identifier[isinstance] ( identifier[elem] , identifier[x] . identifier[BlobType] )
identifier[data] = identifier[getattr] ( identifier[elem] , identifier[x] . identifier[BlobType] . identifier[DATA_ATTR] ) keyword[if] identifier[elem_is_blob] keyword[else] identifier[elem]
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] identifier[NoSetSentinel] ()
keyword[if] identifier[len] ( identifier[data] )== literal[int] :
keyword[return] literal[string]
identifier[fval] = identifier[Modeler] . identifier[to_bytes] ( identifier[data] )
keyword[if] identifier[self] . identifier[hexlify] :
keyword[return] identifier[binascii] . identifier[hexlify] ( identifier[fval] ). identifier[decode] ( literal[string] )
keyword[else] :
keyword[return] identifier[fval]
keyword[else] :
keyword[if] identifier[elem] keyword[is] keyword[None] :
keyword[return] identifier[NoSetSentinel] ()
keyword[if] identifier[self] . identifier[hexlify] :
keyword[return] identifier[bytes] ( identifier[binascii] . identifier[unhexlify] ( identifier[elem] ))
keyword[else] :
keyword[return] identifier[bytes] ( identifier[elem] ) | async def blob(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps blob
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
if hasattr(elem_type, 'kv_serialize'):
elem = elem_type() if elem is None else elem
return await elem.kv_serialize(self, elem=elem, elem_type=elem_type, params=params) # depends on [control=['if'], data=[]]
if self.writing:
elem_is_blob = isinstance(elem, x.BlobType)
data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
if data is None:
return NoSetSentinel() # depends on [control=['if'], data=[]]
if len(data) == 0:
return b'' # depends on [control=['if'], data=[]]
fval = Modeler.to_bytes(data)
if self.hexlify:
return binascii.hexlify(fval).decode('ascii') # depends on [control=['if'], data=[]]
else:
return fval # depends on [control=['if'], data=[]]
else:
if elem is None:
return NoSetSentinel() # depends on [control=['if'], data=[]]
if self.hexlify:
return bytes(binascii.unhexlify(elem)) # depends on [control=['if'], data=[]]
else:
return bytes(elem) |
def set_idle_priority(pid=None):
"""
Puts a process in the idle io priority class.
If pid is omitted, applies to the current process.
"""
if pid is None:
pid = os.getpid()
lib.ioprio_set(
lib.IOPRIO_WHO_PROCESS, pid,
lib.IOPRIO_PRIO_VALUE(lib.IOPRIO_CLASS_IDLE, 0)) | def function[set_idle_priority, parameter[pid]]:
constant[
Puts a process in the idle io priority class.
If pid is omitted, applies to the current process.
]
if compare[name[pid] is constant[None]] begin[:]
variable[pid] assign[=] call[name[os].getpid, parameter[]]
call[name[lib].ioprio_set, parameter[name[lib].IOPRIO_WHO_PROCESS, name[pid], call[name[lib].IOPRIO_PRIO_VALUE, parameter[name[lib].IOPRIO_CLASS_IDLE, constant[0]]]]] | keyword[def] identifier[set_idle_priority] ( identifier[pid] = keyword[None] ):
literal[string]
keyword[if] identifier[pid] keyword[is] keyword[None] :
identifier[pid] = identifier[os] . identifier[getpid] ()
identifier[lib] . identifier[ioprio_set] (
identifier[lib] . identifier[IOPRIO_WHO_PROCESS] , identifier[pid] ,
identifier[lib] . identifier[IOPRIO_PRIO_VALUE] ( identifier[lib] . identifier[IOPRIO_CLASS_IDLE] , literal[int] )) | def set_idle_priority(pid=None):
"""
Puts a process in the idle io priority class.
If pid is omitted, applies to the current process.
"""
if pid is None:
pid = os.getpid() # depends on [control=['if'], data=['pid']]
lib.ioprio_set(lib.IOPRIO_WHO_PROCESS, pid, lib.IOPRIO_PRIO_VALUE(lib.IOPRIO_CLASS_IDLE, 0)) |
def _get_binary(data, position, obj_end, opts, dummy1):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
end = position + length
if length < 0 or end > obj_end:
raise InvalidBSON('bad binary object length')
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end])
# Python
else:
value = uuid.UUID(bytes=data[position:end])
return value, end
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end]
else:
value = Binary(data[position:end], subtype)
return value, end | def function[_get_binary, parameter[data, position, obj_end, opts, dummy1]]:
constant[Decode a BSON binary to bson.binary.Binary or python UUID.]
<ast.Tuple object at 0x7da20c6aac20> assign[=] call[name[_UNPACK_LENGTH_SUBTYPE], parameter[call[name[data]][<ast.Slice object at 0x7da20c6a8940>]]]
<ast.AugAssign object at 0x7da20c6a96f0>
if compare[name[subtype] equal[==] constant[2]] begin[:]
variable[length2] assign[=] call[call[name[_UNPACK_INT], parameter[call[name[data]][<ast.Slice object at 0x7da20c6a8d60>]]]][constant[0]]
<ast.AugAssign object at 0x7da20c6abe80>
if compare[name[length2] not_equal[!=] binary_operation[name[length] - constant[4]]] begin[:]
<ast.Raise object at 0x7da20c6a9e10>
variable[length] assign[=] name[length2]
variable[end] assign[=] binary_operation[name[position] + name[length]]
if <ast.BoolOp object at 0x7da20c6a9900> begin[:]
<ast.Raise object at 0x7da20c6a94b0>
if compare[name[subtype] in tuple[[<ast.Constant object at 0x7da20c6ab340>, <ast.Constant object at 0x7da20c6a8eb0>]]] begin[:]
variable[uuid_representation] assign[=] name[opts].uuid_representation
if compare[name[uuid_representation] equal[==] name[JAVA_LEGACY]] begin[:]
variable[java] assign[=] call[name[data]][<ast.Slice object at 0x7da20c6a9810>]
variable[value] assign[=] call[name[uuid].UUID, parameter[]]
return[tuple[[<ast.Name object at 0x7da20c6a8910>, <ast.Name object at 0x7da20c6aa860>]]]
if <ast.BoolOp object at 0x7da20c6aa6e0> begin[:]
variable[value] assign[=] call[name[data]][<ast.Slice object at 0x7da20c6a9f60>]
return[tuple[[<ast.Name object at 0x7da20c6aa260>, <ast.Name object at 0x7da20c6ab3d0>]]] | keyword[def] identifier[_get_binary] ( identifier[data] , identifier[position] , identifier[obj_end] , identifier[opts] , identifier[dummy1] ):
literal[string]
identifier[length] , identifier[subtype] = identifier[_UNPACK_LENGTH_SUBTYPE] ( identifier[data] [ identifier[position] : identifier[position] + literal[int] ])
identifier[position] += literal[int]
keyword[if] identifier[subtype] == literal[int] :
identifier[length2] = identifier[_UNPACK_INT] ( identifier[data] [ identifier[position] : identifier[position] + literal[int] ])[ literal[int] ]
identifier[position] += literal[int]
keyword[if] identifier[length2] != identifier[length] - literal[int] :
keyword[raise] identifier[InvalidBSON] ( literal[string] )
identifier[length] = identifier[length2]
identifier[end] = identifier[position] + identifier[length]
keyword[if] identifier[length] < literal[int] keyword[or] identifier[end] > identifier[obj_end] :
keyword[raise] identifier[InvalidBSON] ( literal[string] )
keyword[if] identifier[subtype] keyword[in] ( literal[int] , literal[int] ):
identifier[uuid_representation] = identifier[opts] . identifier[uuid_representation]
keyword[if] identifier[uuid_representation] == identifier[JAVA_LEGACY] :
identifier[java] = identifier[data] [ identifier[position] : identifier[end] ]
identifier[value] = identifier[uuid] . identifier[UUID] ( identifier[bytes] = identifier[java] [ literal[int] : literal[int] ][::- literal[int] ]+ identifier[java] [ literal[int] : literal[int] ][::- literal[int] ])
keyword[elif] identifier[uuid_representation] == identifier[CSHARP_LEGACY] :
identifier[value] = identifier[uuid] . identifier[UUID] ( identifier[bytes_le] = identifier[data] [ identifier[position] : identifier[end] ])
keyword[else] :
identifier[value] = identifier[uuid] . identifier[UUID] ( identifier[bytes] = identifier[data] [ identifier[position] : identifier[end] ])
keyword[return] identifier[value] , identifier[end]
keyword[if] identifier[PY3] keyword[and] identifier[subtype] == literal[int] :
identifier[value] = identifier[data] [ identifier[position] : identifier[end] ]
keyword[else] :
identifier[value] = identifier[Binary] ( identifier[data] [ identifier[position] : identifier[end] ], identifier[subtype] )
keyword[return] identifier[value] , identifier[end] | def _get_binary(data, position, obj_end, opts, dummy1):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
(length, subtype) = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!") # depends on [control=['if'], data=[]]
length = length2 # depends on [control=['if'], data=[]]
end = position + length
if length < 0 or end > obj_end:
raise InvalidBSON('bad binary object length') # depends on [control=['if'], data=[]]
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) # depends on [control=['if'], data=[]]
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end]) # depends on [control=['if'], data=[]]
else:
# Python
value = uuid.UUID(bytes=data[position:end])
return (value, end) # depends on [control=['if'], data=[]]
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end] # depends on [control=['if'], data=[]]
else:
value = Binary(data[position:end], subtype)
return (value, end) |
def int2roman(val):
"""Code roman number
:param val: integer between 1 and 9999
:returns: the corresponding roman number
:complexity: linear (if that makes sense for constant bounded input size)
"""
s = ''
pos10 = 1000
for pos in range(3, -1, -1):
digit = val // pos10
s += roman[pos][digit]
val %= pos10
pos10 //= 10
return s | def function[int2roman, parameter[val]]:
constant[Code roman number
:param val: integer between 1 and 9999
:returns: the corresponding roman number
:complexity: linear (if that makes sense for constant bounded input size)
]
variable[s] assign[=] constant[]
variable[pos10] assign[=] constant[1000]
for taget[name[pos]] in starred[call[name[range], parameter[constant[3], <ast.UnaryOp object at 0x7da1b07cc670>, <ast.UnaryOp object at 0x7da1b07ce7d0>]]] begin[:]
variable[digit] assign[=] binary_operation[name[val] <ast.FloorDiv object at 0x7da2590d6bc0> name[pos10]]
<ast.AugAssign object at 0x7da1b07cfd00>
<ast.AugAssign object at 0x7da1b07cc130>
<ast.AugAssign object at 0x7da1b07ce110>
return[name[s]] | keyword[def] identifier[int2roman] ( identifier[val] ):
literal[string]
identifier[s] = literal[string]
identifier[pos10] = literal[int]
keyword[for] identifier[pos] keyword[in] identifier[range] ( literal[int] ,- literal[int] ,- literal[int] ):
identifier[digit] = identifier[val] // identifier[pos10]
identifier[s] += identifier[roman] [ identifier[pos] ][ identifier[digit] ]
identifier[val] %= identifier[pos10]
identifier[pos10] //= literal[int]
keyword[return] identifier[s] | def int2roman(val):
"""Code roman number
:param val: integer between 1 and 9999
:returns: the corresponding roman number
:complexity: linear (if that makes sense for constant bounded input size)
"""
s = ''
pos10 = 1000
for pos in range(3, -1, -1):
digit = val // pos10
s += roman[pos][digit]
val %= pos10
pos10 //= 10 # depends on [control=['for'], data=['pos']]
return s |
def yeasttruth(args):
"""
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
"""
p = OptionParser(yeasttruth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
pillars = args[0]
gffiles = args[1:]
aliases = {}
pivot = {}
for gffile in gffiles:
is_pivot = op.basename(gffile).startswith("Saccharomyces_cerevisiae")
gff = Gff(gffile)
for g in gff:
if g.type != "gene":
continue
for a in g.attributes["Alias"]:
aliases[a] = g.accn
if is_pivot:
pivot[a] = g.accn
logging.debug("Aliases imported: {0}".format(len(aliases)))
logging.debug("Pivot imported: {0}".format(len(pivot)))
fw = open("yeast.aliases", "w")
for k, v in sorted(aliases.items()):
print("\t".join((k, v)), file=fw)
fw.close()
fp = open(pillars)
pairs = set()
fw = must_open(opts.outfile, "w")
for row in fp:
atoms = [x for x in row.split() if x != "---"]
pps = [pivot[x] for x in atoms if x in pivot]
atoms = [aliases[x] for x in atoms if x in aliases]
for p in pps:
for a in atoms:
if p == a:
continue
pairs.add(tuple(sorted((p, a))))
for a, b in sorted(pairs):
print("\t".join((a, b)), file=fw)
fw.close() | def function[yeasttruth, parameter[args]]:
constant[
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[yeasttruth].__doc__]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da1b080d120> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b080dc30>]]
variable[pillars] assign[=] call[name[args]][constant[0]]
variable[gffiles] assign[=] call[name[args]][<ast.Slice object at 0x7da1b080dde0>]
variable[aliases] assign[=] dictionary[[], []]
variable[pivot] assign[=] dictionary[[], []]
for taget[name[gffile]] in starred[name[gffiles]] begin[:]
variable[is_pivot] assign[=] call[call[name[op].basename, parameter[name[gffile]]].startswith, parameter[constant[Saccharomyces_cerevisiae]]]
variable[gff] assign[=] call[name[Gff], parameter[name[gffile]]]
for taget[name[g]] in starred[name[gff]] begin[:]
if compare[name[g].type not_equal[!=] constant[gene]] begin[:]
continue
for taget[name[a]] in starred[call[name[g].attributes][constant[Alias]]] begin[:]
call[name[aliases]][name[a]] assign[=] name[g].accn
if name[is_pivot] begin[:]
call[name[pivot]][name[a]] assign[=] name[g].accn
call[name[logging].debug, parameter[call[constant[Aliases imported: {0}].format, parameter[call[name[len], parameter[name[aliases]]]]]]]
call[name[logging].debug, parameter[call[constant[Pivot imported: {0}].format, parameter[call[name[len], parameter[name[pivot]]]]]]]
variable[fw] assign[=] call[name[open], parameter[constant[yeast.aliases], constant[w]]]
for taget[tuple[[<ast.Name object at 0x7da1b080f820>, <ast.Name object at 0x7da1b080f910>]]] in starred[call[name[sorted], parameter[call[name[aliases].items, parameter[]]]]] begin[:]
call[name[print], parameter[call[constant[ ].join, parameter[tuple[[<ast.Name object at 0x7da1b080eef0>, <ast.Name object at 0x7da1b080ee90>]]]]]]
call[name[fw].close, parameter[]]
variable[fp] assign[=] call[name[open], parameter[name[pillars]]]
variable[pairs] assign[=] call[name[set], parameter[]]
variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]]
for taget[name[row]] in starred[name[fp]] begin[:]
variable[atoms] assign[=] <ast.ListComp object at 0x7da1b080e200>
variable[pps] assign[=] <ast.ListComp object at 0x7da1b080e590>
variable[atoms] assign[=] <ast.ListComp object at 0x7da1b080de40>
for taget[name[p]] in starred[name[pps]] begin[:]
for taget[name[a]] in starred[name[atoms]] begin[:]
if compare[name[p] equal[==] name[a]] begin[:]
continue
call[name[pairs].add, parameter[call[name[tuple], parameter[call[name[sorted], parameter[tuple[[<ast.Name object at 0x7da1b080e710>, <ast.Name object at 0x7da1b080e860>]]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b09ea5c0>, <ast.Name object at 0x7da1b09ea6b0>]]] in starred[call[name[sorted], parameter[name[pairs]]]] begin[:]
call[name[print], parameter[call[constant[ ].join, parameter[tuple[[<ast.Name object at 0x7da1b09e89a0>, <ast.Name object at 0x7da1b09e83a0>]]]]]]
call[name[fw].close, parameter[]] | keyword[def] identifier[yeasttruth] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[yeasttruth] . identifier[__doc__] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[pillars] = identifier[args] [ literal[int] ]
identifier[gffiles] = identifier[args] [ literal[int] :]
identifier[aliases] ={}
identifier[pivot] ={}
keyword[for] identifier[gffile] keyword[in] identifier[gffiles] :
identifier[is_pivot] = identifier[op] . identifier[basename] ( identifier[gffile] ). identifier[startswith] ( literal[string] )
identifier[gff] = identifier[Gff] ( identifier[gffile] )
keyword[for] identifier[g] keyword[in] identifier[gff] :
keyword[if] identifier[g] . identifier[type] != literal[string] :
keyword[continue]
keyword[for] identifier[a] keyword[in] identifier[g] . identifier[attributes] [ literal[string] ]:
identifier[aliases] [ identifier[a] ]= identifier[g] . identifier[accn]
keyword[if] identifier[is_pivot] :
identifier[pivot] [ identifier[a] ]= identifier[g] . identifier[accn]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[aliases] )))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[pivot] )))
identifier[fw] = identifier[open] ( literal[string] , literal[string] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[aliases] . identifier[items] ()):
identifier[print] ( literal[string] . identifier[join] (( identifier[k] , identifier[v] )), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
identifier[fp] = identifier[open] ( identifier[pillars] )
identifier[pairs] = identifier[set] ()
identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] )
keyword[for] identifier[row] keyword[in] identifier[fp] :
identifier[atoms] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[row] . identifier[split] () keyword[if] identifier[x] != literal[string] ]
identifier[pps] =[ identifier[pivot] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[atoms] keyword[if] identifier[x] keyword[in] identifier[pivot] ]
identifier[atoms] =[ identifier[aliases] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[atoms] keyword[if] identifier[x] keyword[in] identifier[aliases] ]
keyword[for] identifier[p] keyword[in] identifier[pps] :
keyword[for] identifier[a] keyword[in] identifier[atoms] :
keyword[if] identifier[p] == identifier[a] :
keyword[continue]
identifier[pairs] . identifier[add] ( identifier[tuple] ( identifier[sorted] (( identifier[p] , identifier[a] ))))
keyword[for] identifier[a] , identifier[b] keyword[in] identifier[sorted] ( identifier[pairs] ):
identifier[print] ( literal[string] . identifier[join] (( identifier[a] , identifier[b] )), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] () | def yeasttruth(args):
"""
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
"""
p = OptionParser(yeasttruth.__doc__)
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
pillars = args[0]
gffiles = args[1:]
aliases = {}
pivot = {}
for gffile in gffiles:
is_pivot = op.basename(gffile).startswith('Saccharomyces_cerevisiae')
gff = Gff(gffile)
for g in gff:
if g.type != 'gene':
continue # depends on [control=['if'], data=[]]
for a in g.attributes['Alias']:
aliases[a] = g.accn
if is_pivot:
pivot[a] = g.accn # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']] # depends on [control=['for'], data=['g']] # depends on [control=['for'], data=['gffile']]
logging.debug('Aliases imported: {0}'.format(len(aliases)))
logging.debug('Pivot imported: {0}'.format(len(pivot)))
fw = open('yeast.aliases', 'w')
for (k, v) in sorted(aliases.items()):
print('\t'.join((k, v)), file=fw) # depends on [control=['for'], data=[]]
fw.close()
fp = open(pillars)
pairs = set()
fw = must_open(opts.outfile, 'w')
for row in fp:
atoms = [x for x in row.split() if x != '---']
pps = [pivot[x] for x in atoms if x in pivot]
atoms = [aliases[x] for x in atoms if x in aliases]
for p in pps:
for a in atoms:
if p == a:
continue # depends on [control=['if'], data=[]]
pairs.add(tuple(sorted((p, a)))) # depends on [control=['for'], data=['a']] # depends on [control=['for'], data=['p']] # depends on [control=['for'], data=['row']]
for (a, b) in sorted(pairs):
print('\t'.join((a, b)), file=fw) # depends on [control=['for'], data=[]]
fw.close() |
def woe(df, feature_name, target_name):
"""Calculate weight of evidence.
Parameters
----------
df: Dataframe
feature_name: str
Column name to encode.
target_name: str
Target column name.
Returns
-------
Series
"""
def group_woe(group):
event = float(group.sum())
non_event = group.shape[0] - event
rel_event = event / event_total
rel_non_event = non_event / non_event_total
return np.log(rel_non_event / rel_event) * 100
if df[target_name].nunique() > 2:
raise ValueError('Target column should be binary (1/0).')
event_total = float(df[df[target_name] == 1.0].shape[0])
non_event_total = float(df.shape[0] - event_total)
woe_vals = df.groupby(feature_name)[target_name].transform(group_woe)
return woe_vals | def function[woe, parameter[df, feature_name, target_name]]:
constant[Calculate weight of evidence.
Parameters
----------
df: Dataframe
feature_name: str
Column name to encode.
target_name: str
Target column name.
Returns
-------
Series
]
def function[group_woe, parameter[group]]:
variable[event] assign[=] call[name[float], parameter[call[name[group].sum, parameter[]]]]
variable[non_event] assign[=] binary_operation[call[name[group].shape][constant[0]] - name[event]]
variable[rel_event] assign[=] binary_operation[name[event] / name[event_total]]
variable[rel_non_event] assign[=] binary_operation[name[non_event] / name[non_event_total]]
return[binary_operation[call[name[np].log, parameter[binary_operation[name[rel_non_event] / name[rel_event]]]] * constant[100]]]
if compare[call[call[name[df]][name[target_name]].nunique, parameter[]] greater[>] constant[2]] begin[:]
<ast.Raise object at 0x7da18f722d10>
variable[event_total] assign[=] call[name[float], parameter[call[call[name[df]][compare[call[name[df]][name[target_name]] equal[==] constant[1.0]]].shape][constant[0]]]]
variable[non_event_total] assign[=] call[name[float], parameter[binary_operation[call[name[df].shape][constant[0]] - name[event_total]]]]
variable[woe_vals] assign[=] call[call[call[name[df].groupby, parameter[name[feature_name]]]][name[target_name]].transform, parameter[name[group_woe]]]
return[name[woe_vals]] | keyword[def] identifier[woe] ( identifier[df] , identifier[feature_name] , identifier[target_name] ):
literal[string]
keyword[def] identifier[group_woe] ( identifier[group] ):
identifier[event] = identifier[float] ( identifier[group] . identifier[sum] ())
identifier[non_event] = identifier[group] . identifier[shape] [ literal[int] ]- identifier[event]
identifier[rel_event] = identifier[event] / identifier[event_total]
identifier[rel_non_event] = identifier[non_event] / identifier[non_event_total]
keyword[return] identifier[np] . identifier[log] ( identifier[rel_non_event] / identifier[rel_event] )* literal[int]
keyword[if] identifier[df] [ identifier[target_name] ]. identifier[nunique] ()> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[event_total] = identifier[float] ( identifier[df] [ identifier[df] [ identifier[target_name] ]== literal[int] ]. identifier[shape] [ literal[int] ])
identifier[non_event_total] = identifier[float] ( identifier[df] . identifier[shape] [ literal[int] ]- identifier[event_total] )
identifier[woe_vals] = identifier[df] . identifier[groupby] ( identifier[feature_name] )[ identifier[target_name] ]. identifier[transform] ( identifier[group_woe] )
keyword[return] identifier[woe_vals] | def woe(df, feature_name, target_name):
"""Calculate weight of evidence.
Parameters
----------
df: Dataframe
feature_name: str
Column name to encode.
target_name: str
Target column name.
Returns
-------
Series
"""
def group_woe(group):
event = float(group.sum())
non_event = group.shape[0] - event
rel_event = event / event_total
rel_non_event = non_event / non_event_total
return np.log(rel_non_event / rel_event) * 100
if df[target_name].nunique() > 2:
raise ValueError('Target column should be binary (1/0).') # depends on [control=['if'], data=[]]
event_total = float(df[df[target_name] == 1.0].shape[0])
non_event_total = float(df.shape[0] - event_total)
woe_vals = df.groupby(feature_name)[target_name].transform(group_woe)
return woe_vals |
def _do_check(self, handlers, terminate_func=None):
"""
checks the specified handlers. Returns True iff errors caught
"""
corrections = []
for h in handlers:
try:
if h.check():
if h.max_num_corrections is not None \
and h.n_applied_corrections >= h.max_num_corrections:
msg = "Maximum number of corrections {} reached " \
"for handler {}".format(h.max_num_corrections, h)
if h.raise_on_max:
self.run_log[-1]["handler"] = h
self.run_log[-1]["max_errors_per_handler"] = True
raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h)
else:
logger.warning(msg+" Correction not applied.")
continue
if terminate_func is not None and h.is_terminating:
logger.info("Terminating job")
terminate_func()
# make sure we don't terminate twice
terminate_func = None
d = h.correct()
d["handler"] = h
logger.error("\n" + pformat(d, indent=2, width=-1))
corrections.append(d)
h.n_applied_corrections += 1
except Exception:
if not self.skip_over_errors:
raise
else:
import traceback
logger.error("Bad handler %s " % h)
logger.error(traceback.format_exc())
corrections.append(
{"errors": ["Bad handler %s " % h],
"actions": []})
self.total_errors += len(corrections)
self.errors_current_job += len(corrections)
self.run_log[-1]["corrections"].extend(corrections)
# We do a dump of the run log after each check.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
return len(corrections) > 0 | def function[_do_check, parameter[self, handlers, terminate_func]]:
constant[
checks the specified handlers. Returns True iff errors caught
]
variable[corrections] assign[=] list[[]]
for taget[name[h]] in starred[name[handlers]] begin[:]
<ast.Try object at 0x7da18dc9a3e0>
<ast.AugAssign object at 0x7da2054a7310>
<ast.AugAssign object at 0x7da2054a4bb0>
call[call[call[name[self].run_log][<ast.UnaryOp object at 0x7da2054a5c00>]][constant[corrections]].extend, parameter[name[corrections]]]
call[name[dumpfn], parameter[name[self].run_log, name[Custodian].LOG_FILE]]
return[compare[call[name[len], parameter[name[corrections]]] greater[>] constant[0]]] | keyword[def] identifier[_do_check] ( identifier[self] , identifier[handlers] , identifier[terminate_func] = keyword[None] ):
literal[string]
identifier[corrections] =[]
keyword[for] identifier[h] keyword[in] identifier[handlers] :
keyword[try] :
keyword[if] identifier[h] . identifier[check] ():
keyword[if] identifier[h] . identifier[max_num_corrections] keyword[is] keyword[not] keyword[None] keyword[and] identifier[h] . identifier[n_applied_corrections] >= identifier[h] . identifier[max_num_corrections] :
identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[h] . identifier[max_num_corrections] , identifier[h] )
keyword[if] identifier[h] . identifier[raise_on_max] :
identifier[self] . identifier[run_log] [- literal[int] ][ literal[string] ]= identifier[h]
identifier[self] . identifier[run_log] [- literal[int] ][ literal[string] ]= keyword[True]
keyword[raise] identifier[MaxCorrectionsPerHandlerError] ( identifier[msg] , keyword[True] , identifier[h] . identifier[max_num_corrections] , identifier[h] )
keyword[else] :
identifier[logger] . identifier[warning] ( identifier[msg] + literal[string] )
keyword[continue]
keyword[if] identifier[terminate_func] keyword[is] keyword[not] keyword[None] keyword[and] identifier[h] . identifier[is_terminating] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[terminate_func] ()
identifier[terminate_func] = keyword[None]
identifier[d] = identifier[h] . identifier[correct] ()
identifier[d] [ literal[string] ]= identifier[h]
identifier[logger] . identifier[error] ( literal[string] + identifier[pformat] ( identifier[d] , identifier[indent] = literal[int] , identifier[width] =- literal[int] ))
identifier[corrections] . identifier[append] ( identifier[d] )
identifier[h] . identifier[n_applied_corrections] += literal[int]
keyword[except] identifier[Exception] :
keyword[if] keyword[not] identifier[self] . identifier[skip_over_errors] :
keyword[raise]
keyword[else] :
keyword[import] identifier[traceback]
identifier[logger] . identifier[error] ( literal[string] % identifier[h] )
identifier[logger] . identifier[error] ( identifier[traceback] . identifier[format_exc] ())
identifier[corrections] . identifier[append] (
{ literal[string] :[ literal[string] % identifier[h] ],
literal[string] :[]})
identifier[self] . identifier[total_errors] += identifier[len] ( identifier[corrections] )
identifier[self] . identifier[errors_current_job] += identifier[len] ( identifier[corrections] )
identifier[self] . identifier[run_log] [- literal[int] ][ literal[string] ]. identifier[extend] ( identifier[corrections] )
identifier[dumpfn] ( identifier[self] . identifier[run_log] , identifier[Custodian] . identifier[LOG_FILE] , identifier[cls] = identifier[MontyEncoder] ,
identifier[indent] = literal[int] )
keyword[return] identifier[len] ( identifier[corrections] )> literal[int] | def _do_check(self, handlers, terminate_func=None):
"""
checks the specified handlers. Returns True iff errors caught
"""
corrections = []
for h in handlers:
try:
if h.check():
if h.max_num_corrections is not None and h.n_applied_corrections >= h.max_num_corrections:
msg = 'Maximum number of corrections {} reached for handler {}'.format(h.max_num_corrections, h)
if h.raise_on_max:
self.run_log[-1]['handler'] = h
self.run_log[-1]['max_errors_per_handler'] = True
raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h) # depends on [control=['if'], data=[]]
else:
logger.warning(msg + ' Correction not applied.')
continue # depends on [control=['if'], data=[]]
if terminate_func is not None and h.is_terminating:
logger.info('Terminating job')
terminate_func()
# make sure we don't terminate twice
terminate_func = None # depends on [control=['if'], data=[]]
d = h.correct()
d['handler'] = h
logger.error('\n' + pformat(d, indent=2, width=-1))
corrections.append(d)
h.n_applied_corrections += 1 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
if not self.skip_over_errors:
raise # depends on [control=['if'], data=[]]
else:
import traceback
logger.error('Bad handler %s ' % h)
logger.error(traceback.format_exc())
corrections.append({'errors': ['Bad handler %s ' % h], 'actions': []}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['h']]
self.total_errors += len(corrections)
self.errors_current_job += len(corrections)
self.run_log[-1]['corrections'].extend(corrections)
# We do a dump of the run log after each check.
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)
return len(corrections) > 0 |
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0) | def function[write, parameter[self, bucket, rows, keyed, as_generator, update_keys]]:
constant[https://github.com/frictionlessdata/tableschema-sql-py#storage
]
if <ast.BoolOp object at 0x7da18f09fd60> begin[:]
variable[message] assign[=] constant[Argument "update_keys" cannot be an empty list]
<ast.Raise object at 0x7da18f09f730>
variable[table] assign[=] call[name[self].__get_table, parameter[name[bucket]]]
variable[schema] assign[=] call[name[tableschema].Schema, parameter[call[name[self].describe, parameter[name[bucket]]]]]
variable[fallbacks] assign[=] call[name[self].__fallbacks.get, parameter[name[bucket], list[[]]]]
variable[convert_row] assign[=] call[name[partial], parameter[name[self].__mapper.convert_row]]
variable[writer] assign[=] call[name[Writer], parameter[name[table], name[schema], name[update_keys], name[self].__autoincrement, name[convert_row]]]
with call[name[self].__connection.begin, parameter[]] begin[:]
variable[gen] assign[=] call[name[writer].write, parameter[name[rows]]]
if name[as_generator] begin[:]
return[name[gen]]
call[name[collections].deque, parameter[name[gen]]] | keyword[def] identifier[write] ( identifier[self] , identifier[bucket] , identifier[rows] , identifier[keyed] = keyword[False] , identifier[as_generator] = keyword[False] , identifier[update_keys] = keyword[None] ):
literal[string]
keyword[if] identifier[update_keys] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[update_keys] )== literal[int] :
identifier[message] = literal[string]
keyword[raise] identifier[tableschema] . identifier[exceptions] . identifier[StorageError] ( identifier[message] )
identifier[table] = identifier[self] . identifier[__get_table] ( identifier[bucket] )
identifier[schema] = identifier[tableschema] . identifier[Schema] ( identifier[self] . identifier[describe] ( identifier[bucket] ))
identifier[fallbacks] = identifier[self] . identifier[__fallbacks] . identifier[get] ( identifier[bucket] ,[])
identifier[convert_row] = identifier[partial] ( identifier[self] . identifier[__mapper] . identifier[convert_row] , identifier[schema] = identifier[schema] , identifier[fallbacks] = identifier[fallbacks] )
identifier[writer] = identifier[Writer] ( identifier[table] , identifier[schema] , identifier[update_keys] , identifier[self] . identifier[__autoincrement] , identifier[convert_row] )
keyword[with] identifier[self] . identifier[__connection] . identifier[begin] ():
identifier[gen] = identifier[writer] . identifier[write] ( identifier[rows] , identifier[keyed] = identifier[keyed] )
keyword[if] identifier[as_generator] :
keyword[return] identifier[gen]
identifier[collections] . identifier[deque] ( identifier[gen] , identifier[maxlen] = literal[int] ) | def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message) # depends on [control=['if'], data=[]]
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen # depends on [control=['if'], data=[]]
collections.deque(gen, maxlen=0) # depends on [control=['with'], data=[]] |
async def close_connection(self) -> None:
"""
7.1.1. Close the WebSocket Connection
When the opening handshake succeeds, :meth:`connection_open` starts
this coroutine in a task. It waits for the data transfer phase to
complete then it closes the TCP connection cleanly.
When the opening handshake fails, :meth:`fail_connection` does the
same. There's no data transfer phase in that case.
"""
try:
# Wait for the data transfer phase to complete.
if hasattr(self, "transfer_data_task"):
try:
await self.transfer_data_task
except asyncio.CancelledError:
pass
# Cancel the keepalive ping task.
if hasattr(self, "keepalive_ping_task"):
self.keepalive_ping_task.cancel()
# A client should wait for a TCP close from the server.
if self.is_client and hasattr(self, "transfer_data_task"):
if await self.wait_for_connection_lost():
return
logger.debug("%s ! timed out waiting for TCP close", self.side)
# Half-close the TCP connection if possible (when there's no TLS).
if self.writer.can_write_eof():
logger.debug("%s x half-closing TCP connection", self.side)
self.writer.write_eof()
if await self.wait_for_connection_lost():
return
logger.debug("%s ! timed out waiting for TCP close", self.side)
finally:
# The try/finally ensures that the transport never remains open,
# even if this coroutine is canceled (for example).
# If connection_lost() was called, the TCP connection is closed.
# However, if TLS is enabled, the transport still needs closing.
# Else asyncio complains: ResourceWarning: unclosed transport.
if self.connection_lost_waiter.done() and not self.secure:
return
# Close the TCP connection. Buffers are flushed asynchronously.
logger.debug("%s x closing TCP connection", self.side)
self.writer.close()
if await self.wait_for_connection_lost():
return
logger.debug("%s ! timed out waiting for TCP close", self.side)
# Abort the TCP connection. Buffers are discarded.
logger.debug("%s x aborting TCP connection", self.side)
# mypy thinks self.writer.transport is a BaseTransport, not a Transport.
self.writer.transport.abort() # type: ignore
# connection_lost() is called quickly after aborting.
await self.wait_for_connection_lost() | <ast.AsyncFunctionDef object at 0x7da20c794e80> | keyword[async] keyword[def] identifier[close_connection] ( identifier[self] )-> keyword[None] :
literal[string]
keyword[try] :
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[try] :
keyword[await] identifier[self] . identifier[transfer_data_task]
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[pass]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[keepalive_ping_task] . identifier[cancel] ()
keyword[if] identifier[self] . identifier[is_client] keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] keyword[await] identifier[self] . identifier[wait_for_connection_lost] ():
keyword[return]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
keyword[if] identifier[self] . identifier[writer] . identifier[can_write_eof] ():
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
identifier[self] . identifier[writer] . identifier[write_eof] ()
keyword[if] keyword[await] identifier[self] . identifier[wait_for_connection_lost] ():
keyword[return]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
keyword[finally] :
keyword[if] identifier[self] . identifier[connection_lost_waiter] . identifier[done] () keyword[and] keyword[not] identifier[self] . identifier[secure] :
keyword[return]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
identifier[self] . identifier[writer] . identifier[close] ()
keyword[if] keyword[await] identifier[self] . identifier[wait_for_connection_lost] ():
keyword[return]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[side] )
identifier[self] . identifier[writer] . identifier[transport] . identifier[abort] ()
keyword[await] identifier[self] . identifier[wait_for_connection_lost] () | async def close_connection(self) -> None:
"""
7.1.1. Close the WebSocket Connection
When the opening handshake succeeds, :meth:`connection_open` starts
this coroutine in a task. It waits for the data transfer phase to
complete then it closes the TCP connection cleanly.
When the opening handshake fails, :meth:`fail_connection` does the
same. There's no data transfer phase in that case.
"""
try:
# Wait for the data transfer phase to complete.
if hasattr(self, 'transfer_data_task'):
try:
await self.transfer_data_task # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Cancel the keepalive ping task.
if hasattr(self, 'keepalive_ping_task'):
self.keepalive_ping_task.cancel() # depends on [control=['if'], data=[]]
# A client should wait for a TCP close from the server.
if self.is_client and hasattr(self, 'transfer_data_task'):
if await self.wait_for_connection_lost():
return # depends on [control=['if'], data=[]]
logger.debug('%s ! timed out waiting for TCP close', self.side) # depends on [control=['if'], data=[]]
# Half-close the TCP connection if possible (when there's no TLS).
if self.writer.can_write_eof():
logger.debug('%s x half-closing TCP connection', self.side)
self.writer.write_eof()
if await self.wait_for_connection_lost():
return # depends on [control=['if'], data=[]]
logger.debug('%s ! timed out waiting for TCP close', self.side) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
# The try/finally ensures that the transport never remains open,
# even if this coroutine is canceled (for example).
# If connection_lost() was called, the TCP connection is closed.
# However, if TLS is enabled, the transport still needs closing.
# Else asyncio complains: ResourceWarning: unclosed transport.
if self.connection_lost_waiter.done() and (not self.secure):
return # depends on [control=['if'], data=[]]
# Close the TCP connection. Buffers are flushed asynchronously.
logger.debug('%s x closing TCP connection', self.side)
self.writer.close()
if await self.wait_for_connection_lost():
return # depends on [control=['if'], data=[]]
logger.debug('%s ! timed out waiting for TCP close', self.side)
# Abort the TCP connection. Buffers are discarded.
logger.debug('%s x aborting TCP connection', self.side)
# mypy thinks self.writer.transport is a BaseTransport, not a Transport.
self.writer.transport.abort() # type: ignore
# connection_lost() is called quickly after aborting.
await self.wait_for_connection_lost() |
def __get_edge_by_two_vertices(self, vertex1, vertex2, key=None):
""" Returns an instance of :class:`bg.edge.BBGEdge` edge between to supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BBGEdge` instance about specified edge).
Checks that both specified vertices are in current :class:`BreakpointGraph` and then depending on ``key`` argument, creates a new :class:`bg.edge.BBGEdge` instance and incorporates respective multi-color information into it.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param key: unique identifier of edge of interested to be retrieved from current :class:`BreakpointGraph`
:type key: any python object. ``None`` or ``int`` is expected
:return: edge between two specified edges respecting a ``key`` argument.
:rtype: :class:`bg.edge.BGEdge`
"""
if vertex1 in self.bg and vertex2 in self.bg[vertex1]:
if key is None:
key = min(self.bg[vertex1][vertex2])
return BGEdge(vertex1=vertex1, vertex2=vertex2,
multicolor=self.bg[vertex1][vertex2][key]["attr_dict"]["multicolor"],
data=self.bg[vertex1][vertex2][key]["attr_dict"]["data"])
return None | def function[__get_edge_by_two_vertices, parameter[self, vertex1, vertex2, key]]:
constant[ Returns an instance of :class:`bg.edge.BBGEdge` edge between to supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BBGEdge` instance about specified edge).
Checks that both specified vertices are in current :class:`BreakpointGraph` and then depending on ``key`` argument, creates a new :class:`bg.edge.BBGEdge` instance and incorporates respective multi-color information into it.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param key: unique identifier of edge of interested to be retrieved from current :class:`BreakpointGraph`
:type key: any python object. ``None`` or ``int`` is expected
:return: edge between two specified edges respecting a ``key`` argument.
:rtype: :class:`bg.edge.BGEdge`
]
if <ast.BoolOp object at 0x7da18fe93940> begin[:]
if compare[name[key] is constant[None]] begin[:]
variable[key] assign[=] call[name[min], parameter[call[call[name[self].bg][name[vertex1]]][name[vertex2]]]]
return[call[name[BGEdge], parameter[]]]
return[constant[None]] | keyword[def] identifier[__get_edge_by_two_vertices] ( identifier[self] , identifier[vertex1] , identifier[vertex2] , identifier[key] = keyword[None] ):
literal[string]
keyword[if] identifier[vertex1] keyword[in] identifier[self] . identifier[bg] keyword[and] identifier[vertex2] keyword[in] identifier[self] . identifier[bg] [ identifier[vertex1] ]:
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[key] = identifier[min] ( identifier[self] . identifier[bg] [ identifier[vertex1] ][ identifier[vertex2] ])
keyword[return] identifier[BGEdge] ( identifier[vertex1] = identifier[vertex1] , identifier[vertex2] = identifier[vertex2] ,
identifier[multicolor] = identifier[self] . identifier[bg] [ identifier[vertex1] ][ identifier[vertex2] ][ identifier[key] ][ literal[string] ][ literal[string] ],
identifier[data] = identifier[self] . identifier[bg] [ identifier[vertex1] ][ identifier[vertex2] ][ identifier[key] ][ literal[string] ][ literal[string] ])
keyword[return] keyword[None] | def __get_edge_by_two_vertices(self, vertex1, vertex2, key=None):
""" Returns an instance of :class:`bg.edge.BBGEdge` edge between to supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BBGEdge` instance about specified edge).
Checks that both specified vertices are in current :class:`BreakpointGraph` and then depending on ``key`` argument, creates a new :class:`bg.edge.BBGEdge` instance and incorporates respective multi-color information into it.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param key: unique identifier of edge of interested to be retrieved from current :class:`BreakpointGraph`
:type key: any python object. ``None`` or ``int`` is expected
:return: edge between two specified edges respecting a ``key`` argument.
:rtype: :class:`bg.edge.BGEdge`
"""
if vertex1 in self.bg and vertex2 in self.bg[vertex1]:
if key is None:
key = min(self.bg[vertex1][vertex2]) # depends on [control=['if'], data=['key']]
return BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=self.bg[vertex1][vertex2][key]['attr_dict']['multicolor'], data=self.bg[vertex1][vertex2][key]['attr_dict']['data']) # depends on [control=['if'], data=[]]
return None |
def getFooter():
"""return a header string with command line options and
timestamp.
"""
return "# job finished in %i seconds at %s -- %s -- %s" %\
(time.time() - global_starting_time,
time.asctime(time.localtime(time.time())),
" ".join(map(lambda x: "%5.2f" % x, os.times()[:4])),
global_id) | def function[getFooter, parameter[]]:
constant[return a header string with command line options and
timestamp.
]
return[binary_operation[constant[# job finished in %i seconds at %s -- %s -- %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18f811300>, <ast.Call object at 0x7da18f812b90>, <ast.Call object at 0x7da18f8122f0>, <ast.Name object at 0x7da18f812c50>]]]] | keyword[def] identifier[getFooter] ():
literal[string]
keyword[return] literal[string] %( identifier[time] . identifier[time] ()- identifier[global_starting_time] ,
identifier[time] . identifier[asctime] ( identifier[time] . identifier[localtime] ( identifier[time] . identifier[time] ())),
literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[x] : literal[string] % identifier[x] , identifier[os] . identifier[times] ()[: literal[int] ])),
identifier[global_id] ) | def getFooter():
"""return a header string with command line options and
timestamp.
"""
return '# job finished in %i seconds at %s -- %s -- %s' % (time.time() - global_starting_time, time.asctime(time.localtime(time.time())), ' '.join(map(lambda x: '%5.2f' % x, os.times()[:4])), global_id) |
async def deploy(
self, entity_url, application_name=None, bind=None, budget=None,
channel=None, config=None, constraints=None, force=False,
num_units=1, plan=None, resources=None, series=None, storage=None,
to=None, devices=None):
"""Deploy a new service or bundle.
:param str entity_url: Charm or bundle url
:param str application_name: Name to give the service
:param dict bind: <charm endpoint>:<network space> pairs
:param dict budget: <budget name>:<limit> pairs
:param str channel: Charm store channel from which to retrieve
the charm or bundle, e.g. 'edge'
:param dict config: Charm configuration dictionary
:param constraints: Service constraints
:type constraints: :class:`juju.Constraints`
:param bool force: Allow charm to be deployed to a machine running
an unsupported series
:param int num_units: Number of units to deploy
:param str plan: Plan under which to deploy charm
:param dict resources: <resource name>:<file path> pairs
:param str series: Series on which to deploy
:param dict storage: Storage constraints TODO how do these look?
:param to: Placement directive as a string. For example:
'23' - place on machine 23
'lxd:7' - place in new lxd container on machine 7
'24/lxd/3' - place in container 3 on machine 24
If None, a new machine is provisioned.
TODO::
- support local resources
"""
if storage:
storage = {
k: client.Constraints(**v)
for k, v in storage.items()
}
entity_path = Path(entity_url.replace('local:', ''))
bundle_path = entity_path / 'bundle.yaml'
metadata_path = entity_path / 'metadata.yaml'
is_local = (
entity_url.startswith('local:') or
entity_path.is_dir() or
entity_path.is_file()
)
if is_local:
entity_id = entity_url.replace('local:', '')
else:
entity = await self.charmstore.entity(entity_url, channel=channel,
include_stats=False)
entity_id = entity['Id']
client_facade = client.ClientFacade.from_connection(self.connection())
is_bundle = ((is_local and
(entity_id.endswith('.yaml') and entity_path.exists()) or
bundle_path.exists()) or
(not is_local and 'bundle/' in entity_id))
if is_bundle:
handler = BundleHandler(self)
await handler.fetch_plan(entity_id)
await handler.execute_plan()
extant_apps = {app for app in self.applications}
pending_apps = set(handler.applications) - extant_apps
if pending_apps:
# new apps will usually be in the model by now, but if some
# haven't made it yet we'll need to wait on them to be added
await asyncio.gather(*[
asyncio.ensure_future(
self._wait_for_new('application', app_name),
loop=self._connector.loop)
for app_name in pending_apps
], loop=self._connector.loop)
return [app for name, app in self.applications.items()
if name in handler.applications]
else:
if not is_local:
if not application_name:
application_name = entity['Meta']['charm-metadata']['Name']
if not series:
series = self._get_series(entity_url, entity)
await client_facade.AddCharm(channel, entity_id)
# XXX: we're dropping local resources here, but we don't
# actually support them yet anyway
resources = await self._add_store_resources(application_name,
entity_id,
entity=entity)
else:
if not application_name:
metadata = yaml.load(metadata_path.read_text())
application_name = metadata['name']
# We have a local charm dir that needs to be uploaded
charm_dir = os.path.abspath(
os.path.expanduser(entity_id))
series = series or get_charm_series(charm_dir)
if not series:
raise JujuError(
"Couldn't determine series for charm at {}. "
"Pass a 'series' kwarg to Model.deploy().".format(
charm_dir))
entity_id = await self.add_local_charm_dir(charm_dir, series)
return await self._deploy(
charm_url=entity_id,
application=application_name,
series=series,
config=config or {},
constraints=constraints,
endpoint_bindings=bind,
resources=resources,
storage=storage,
channel=channel,
num_units=num_units,
placement=parse_placement(to),
devices=devices,
) | <ast.AsyncFunctionDef object at 0x7da1b0e23fa0> | keyword[async] keyword[def] identifier[deploy] (
identifier[self] , identifier[entity_url] , identifier[application_name] = keyword[None] , identifier[bind] = keyword[None] , identifier[budget] = keyword[None] ,
identifier[channel] = keyword[None] , identifier[config] = keyword[None] , identifier[constraints] = keyword[None] , identifier[force] = keyword[False] ,
identifier[num_units] = literal[int] , identifier[plan] = keyword[None] , identifier[resources] = keyword[None] , identifier[series] = keyword[None] , identifier[storage] = keyword[None] ,
identifier[to] = keyword[None] , identifier[devices] = keyword[None] ):
literal[string]
keyword[if] identifier[storage] :
identifier[storage] ={
identifier[k] : identifier[client] . identifier[Constraints] (** identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[storage] . identifier[items] ()
}
identifier[entity_path] = identifier[Path] ( identifier[entity_url] . identifier[replace] ( literal[string] , literal[string] ))
identifier[bundle_path] = identifier[entity_path] / literal[string]
identifier[metadata_path] = identifier[entity_path] / literal[string]
identifier[is_local] =(
identifier[entity_url] . identifier[startswith] ( literal[string] ) keyword[or]
identifier[entity_path] . identifier[is_dir] () keyword[or]
identifier[entity_path] . identifier[is_file] ()
)
keyword[if] identifier[is_local] :
identifier[entity_id] = identifier[entity_url] . identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[entity] = keyword[await] identifier[self] . identifier[charmstore] . identifier[entity] ( identifier[entity_url] , identifier[channel] = identifier[channel] ,
identifier[include_stats] = keyword[False] )
identifier[entity_id] = identifier[entity] [ literal[string] ]
identifier[client_facade] = identifier[client] . identifier[ClientFacade] . identifier[from_connection] ( identifier[self] . identifier[connection] ())
identifier[is_bundle] =(( identifier[is_local] keyword[and]
( identifier[entity_id] . identifier[endswith] ( literal[string] ) keyword[and] identifier[entity_path] . identifier[exists] ()) keyword[or]
identifier[bundle_path] . identifier[exists] ()) keyword[or]
( keyword[not] identifier[is_local] keyword[and] literal[string] keyword[in] identifier[entity_id] ))
keyword[if] identifier[is_bundle] :
identifier[handler] = identifier[BundleHandler] ( identifier[self] )
keyword[await] identifier[handler] . identifier[fetch_plan] ( identifier[entity_id] )
keyword[await] identifier[handler] . identifier[execute_plan] ()
identifier[extant_apps] ={ identifier[app] keyword[for] identifier[app] keyword[in] identifier[self] . identifier[applications] }
identifier[pending_apps] = identifier[set] ( identifier[handler] . identifier[applications] )- identifier[extant_apps]
keyword[if] identifier[pending_apps] :
keyword[await] identifier[asyncio] . identifier[gather] (*[
identifier[asyncio] . identifier[ensure_future] (
identifier[self] . identifier[_wait_for_new] ( literal[string] , identifier[app_name] ),
identifier[loop] = identifier[self] . identifier[_connector] . identifier[loop] )
keyword[for] identifier[app_name] keyword[in] identifier[pending_apps]
], identifier[loop] = identifier[self] . identifier[_connector] . identifier[loop] )
keyword[return] [ identifier[app] keyword[for] identifier[name] , identifier[app] keyword[in] identifier[self] . identifier[applications] . identifier[items] ()
keyword[if] identifier[name] keyword[in] identifier[handler] . identifier[applications] ]
keyword[else] :
keyword[if] keyword[not] identifier[is_local] :
keyword[if] keyword[not] identifier[application_name] :
identifier[application_name] = identifier[entity] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[if] keyword[not] identifier[series] :
identifier[series] = identifier[self] . identifier[_get_series] ( identifier[entity_url] , identifier[entity] )
keyword[await] identifier[client_facade] . identifier[AddCharm] ( identifier[channel] , identifier[entity_id] )
identifier[resources] = keyword[await] identifier[self] . identifier[_add_store_resources] ( identifier[application_name] ,
identifier[entity_id] ,
identifier[entity] = identifier[entity] )
keyword[else] :
keyword[if] keyword[not] identifier[application_name] :
identifier[metadata] = identifier[yaml] . identifier[load] ( identifier[metadata_path] . identifier[read_text] ())
identifier[application_name] = identifier[metadata] [ literal[string] ]
identifier[charm_dir] = identifier[os] . identifier[path] . identifier[abspath] (
identifier[os] . identifier[path] . identifier[expanduser] ( identifier[entity_id] ))
identifier[series] = identifier[series] keyword[or] identifier[get_charm_series] ( identifier[charm_dir] )
keyword[if] keyword[not] identifier[series] :
keyword[raise] identifier[JujuError] (
literal[string]
literal[string] . identifier[format] (
identifier[charm_dir] ))
identifier[entity_id] = keyword[await] identifier[self] . identifier[add_local_charm_dir] ( identifier[charm_dir] , identifier[series] )
keyword[return] keyword[await] identifier[self] . identifier[_deploy] (
identifier[charm_url] = identifier[entity_id] ,
identifier[application] = identifier[application_name] ,
identifier[series] = identifier[series] ,
identifier[config] = identifier[config] keyword[or] {},
identifier[constraints] = identifier[constraints] ,
identifier[endpoint_bindings] = identifier[bind] ,
identifier[resources] = identifier[resources] ,
identifier[storage] = identifier[storage] ,
identifier[channel] = identifier[channel] ,
identifier[num_units] = identifier[num_units] ,
identifier[placement] = identifier[parse_placement] ( identifier[to] ),
identifier[devices] = identifier[devices] ,
) | async def deploy(self, entity_url, application_name=None, bind=None, budget=None, channel=None, config=None, constraints=None, force=False, num_units=1, plan=None, resources=None, series=None, storage=None, to=None, devices=None):
"""Deploy a new service or bundle.
:param str entity_url: Charm or bundle url
:param str application_name: Name to give the service
:param dict bind: <charm endpoint>:<network space> pairs
:param dict budget: <budget name>:<limit> pairs
:param str channel: Charm store channel from which to retrieve
the charm or bundle, e.g. 'edge'
:param dict config: Charm configuration dictionary
:param constraints: Service constraints
:type constraints: :class:`juju.Constraints`
:param bool force: Allow charm to be deployed to a machine running
an unsupported series
:param int num_units: Number of units to deploy
:param str plan: Plan under which to deploy charm
:param dict resources: <resource name>:<file path> pairs
:param str series: Series on which to deploy
:param dict storage: Storage constraints TODO how do these look?
:param to: Placement directive as a string. For example:
'23' - place on machine 23
'lxd:7' - place in new lxd container on machine 7
'24/lxd/3' - place in container 3 on machine 24
If None, a new machine is provisioned.
TODO::
- support local resources
"""
if storage:
storage = {k: client.Constraints(**v) for (k, v) in storage.items()} # depends on [control=['if'], data=[]]
entity_path = Path(entity_url.replace('local:', ''))
bundle_path = entity_path / 'bundle.yaml'
metadata_path = entity_path / 'metadata.yaml'
is_local = entity_url.startswith('local:') or entity_path.is_dir() or entity_path.is_file()
if is_local:
entity_id = entity_url.replace('local:', '') # depends on [control=['if'], data=[]]
else:
entity = await self.charmstore.entity(entity_url, channel=channel, include_stats=False)
entity_id = entity['Id']
client_facade = client.ClientFacade.from_connection(self.connection())
is_bundle = (is_local and (entity_id.endswith('.yaml') and entity_path.exists()) or bundle_path.exists()) or (not is_local and 'bundle/' in entity_id)
if is_bundle:
handler = BundleHandler(self)
await handler.fetch_plan(entity_id)
await handler.execute_plan()
extant_apps = {app for app in self.applications}
pending_apps = set(handler.applications) - extant_apps
if pending_apps:
# new apps will usually be in the model by now, but if some
# haven't made it yet we'll need to wait on them to be added
await asyncio.gather(*[asyncio.ensure_future(self._wait_for_new('application', app_name), loop=self._connector.loop) for app_name in pending_apps], loop=self._connector.loop) # depends on [control=['if'], data=[]]
return [app for (name, app) in self.applications.items() if name in handler.applications] # depends on [control=['if'], data=[]]
else:
if not is_local:
if not application_name:
application_name = entity['Meta']['charm-metadata']['Name'] # depends on [control=['if'], data=[]]
if not series:
series = self._get_series(entity_url, entity) # depends on [control=['if'], data=[]]
await client_facade.AddCharm(channel, entity_id)
# XXX: we're dropping local resources here, but we don't
# actually support them yet anyway
resources = await self._add_store_resources(application_name, entity_id, entity=entity) # depends on [control=['if'], data=[]]
else:
if not application_name:
metadata = yaml.load(metadata_path.read_text())
application_name = metadata['name'] # depends on [control=['if'], data=[]]
# We have a local charm dir that needs to be uploaded
charm_dir = os.path.abspath(os.path.expanduser(entity_id))
series = series or get_charm_series(charm_dir)
if not series:
raise JujuError("Couldn't determine series for charm at {}. Pass a 'series' kwarg to Model.deploy().".format(charm_dir)) # depends on [control=['if'], data=[]]
entity_id = await self.add_local_charm_dir(charm_dir, series)
return await self._deploy(charm_url=entity_id, application=application_name, series=series, config=config or {}, constraints=constraints, endpoint_bindings=bind, resources=resources, storage=storage, channel=channel, num_units=num_units, placement=parse_placement(to), devices=devices) |
def get_root_gradebook_ids(self):
"""Gets the root gradebook ``Ids`` in this hierarchy.
return: (osid.id.IdList) - the root gradebook ``Ids``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_root_catalog_ids()
return self._hierarchy_session.get_roots() | def function[get_root_gradebook_ids, parameter[self]]:
constant[Gets the root gradebook ``Ids`` in this hierarchy.
return: (osid.id.IdList) - the root gradebook ``Ids``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.get_root_catalog_ids, parameter[]]]
return[call[name[self]._hierarchy_session.get_roots, parameter[]]] | keyword[def] identifier[get_root_gradebook_ids] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[get_root_catalog_ids] ()
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[get_roots] () | def get_root_gradebook_ids(self):
"""Gets the root gradebook ``Ids`` in this hierarchy.
return: (osid.id.IdList) - the root gradebook ``Ids``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_root_catalog_ids() # depends on [control=['if'], data=[]]
return self._hierarchy_session.get_roots() |
def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station** instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
id = d.get('ID', None) or d.get('id', None)
external_id = d.get('external_id', None)
lon = d.get('longitude', None)
lat = d.get('latitude', None)
alt = d.get('altitude', None)
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
name = d.get('name', None)
rank = d.get('rank', None)
created_at = d.get('created_at', None)
updated_at = d.get('updated_at', None)
return Station(id, created_at, updated_at, external_id, name, lon, lat,
alt, rank) | def function[parse_JSON, parameter[self, JSON_string]]:
constant[
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station** instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
]
if compare[name[JSON_string] is constant[None]] begin[:]
<ast.Raise object at 0x7da2054a7f40>
variable[d] assign[=] call[name[json].loads, parameter[name[JSON_string]]]
<ast.Try object at 0x7da2054a5b70>
variable[name] assign[=] call[name[d].get, parameter[constant[name], constant[None]]]
variable[rank] assign[=] call[name[d].get, parameter[constant[rank], constant[None]]]
variable[created_at] assign[=] call[name[d].get, parameter[constant[created_at], constant[None]]]
variable[updated_at] assign[=] call[name[d].get, parameter[constant[updated_at], constant[None]]]
return[call[name[Station], parameter[name[id], name[created_at], name[updated_at], name[external_id], name[name], name[lon], name[lat], name[alt], name[rank]]]] | keyword[def] identifier[parse_JSON] ( identifier[self] , identifier[JSON_string] ):
literal[string]
keyword[if] identifier[JSON_string] keyword[is] keyword[None] :
keyword[raise] identifier[parse_response_error] . identifier[ParseResponseError] ( literal[string] )
identifier[d] = identifier[json] . identifier[loads] ( identifier[JSON_string] )
keyword[try] :
identifier[id] = identifier[d] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[external_id] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[lon] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[lat] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[alt] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
keyword[raise] identifier[parse_response_error] . identifier[ParseResponseError] ( literal[string] % identifier[e] )
identifier[name] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[rank] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[created_at] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
identifier[updated_at] = identifier[d] . identifier[get] ( literal[string] , keyword[None] )
keyword[return] identifier[Station] ( identifier[id] , identifier[created_at] , identifier[updated_at] , identifier[external_id] , identifier[name] , identifier[lon] , identifier[lat] ,
identifier[alt] , identifier[rank] ) | def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station** instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None') # depends on [control=['if'], data=[]]
d = json.loads(JSON_string)
try:
id = d.get('ID', None) or d.get('id', None)
external_id = d.get('external_id', None)
lon = d.get('longitude', None)
lat = d.get('latitude', None)
alt = d.get('altitude', None) # depends on [control=['try'], data=[]]
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e) # depends on [control=['except'], data=['e']]
name = d.get('name', None)
rank = d.get('rank', None)
created_at = d.get('created_at', None)
updated_at = d.get('updated_at', None)
return Station(id, created_at, updated_at, external_id, name, lon, lat, alt, rank) |
def resource_update_list(self, reset=False):
"""
Update internal struct of resource, hash list and get diff
(Warning: Resource names have to be unique!!)
:param reset: Should resources be rebuild from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)]
"""
if not self._resource_path:
raise PluginException("No resource path set")
if not os.path.isdir(self._resource_path):
raise PluginException(
u"Resource path directory '{}' not found".format(
self._resource_path
)
)
res = []
with self._resource_lock:
if reset:
self._resources = {}
old = dict(self._resources)
for dirname, dirnames, filenames in os.walk(self._resource_path):
for file_name in filenames:
file_ext = os.path.splitext(file_name)[1].lower()[1:]
if file_ext not in self._resource_file_types:
self.debug(u"Skipping '{}'".format(file_name))
continue
file_path = os.path.join(dirname, file_name)
try:
file_hash = get_file_hash(file_path)
except:
self.exception(
u"Failed to hash '{}'".format(file_path)
)
continue
self._resources[file_name] = {
'name': file_name,
'path': file_path,
'hash': file_hash,
'checked': datetime.datetime.utcnow()
}
# generate diff
for key in self._resources:
resource = self._resources[key]
if key not in old or old[key]['hash'] != resource['hash']:
# new file or hash changed
res.append((key, resource['hash']))
return res | def function[resource_update_list, parameter[self, reset]]:
constant[
Update internal struct of resource, hash list and get diff
(Warning: Resource names have to be unique!!)
:param reset: Should resources be rebuild from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)]
]
if <ast.UnaryOp object at 0x7da20c794fa0> begin[:]
<ast.Raise object at 0x7da20c795cc0>
if <ast.UnaryOp object at 0x7da20c796620> begin[:]
<ast.Raise object at 0x7da20c796a70>
variable[res] assign[=] list[[]]
with name[self]._resource_lock begin[:]
if name[reset] begin[:]
name[self]._resources assign[=] dictionary[[], []]
variable[old] assign[=] call[name[dict], parameter[name[self]._resources]]
for taget[tuple[[<ast.Name object at 0x7da20c794700>, <ast.Name object at 0x7da20c7956c0>, <ast.Name object at 0x7da20c795060>]]] in starred[call[name[os].walk, parameter[name[self]._resource_path]]] begin[:]
for taget[name[file_name]] in starred[name[filenames]] begin[:]
variable[file_ext] assign[=] call[call[call[call[name[os].path.splitext, parameter[name[file_name]]]][constant[1]].lower, parameter[]]][<ast.Slice object at 0x7da20c7959f0>]
if compare[name[file_ext] <ast.NotIn object at 0x7da2590d7190> name[self]._resource_file_types] begin[:]
call[name[self].debug, parameter[call[constant[Skipping '{}'].format, parameter[name[file_name]]]]]
continue
variable[file_path] assign[=] call[name[os].path.join, parameter[name[dirname], name[file_name]]]
<ast.Try object at 0x7da20c794dc0>
call[name[self]._resources][name[file_name]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7951e0>, <ast.Constant object at 0x7da20c794bb0>, <ast.Constant object at 0x7da20c7965f0>, <ast.Constant object at 0x7da20c796a40>], [<ast.Name object at 0x7da20c7966b0>, <ast.Name object at 0x7da20c796230>, <ast.Name object at 0x7da20c7952a0>, <ast.Call object at 0x7da20c794e20>]]
for taget[name[key]] in starred[name[self]._resources] begin[:]
variable[resource] assign[=] call[name[self]._resources][name[key]]
if <ast.BoolOp object at 0x7da20c795540> begin[:]
call[name[res].append, parameter[tuple[[<ast.Name object at 0x7da20c6a9570>, <ast.Subscript object at 0x7da20c6abd00>]]]]
return[name[res]] | keyword[def] identifier[resource_update_list] ( identifier[self] , identifier[reset] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_resource_path] :
keyword[raise] identifier[PluginException] ( literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[_resource_path] ):
keyword[raise] identifier[PluginException] (
literal[string] . identifier[format] (
identifier[self] . identifier[_resource_path]
)
)
identifier[res] =[]
keyword[with] identifier[self] . identifier[_resource_lock] :
keyword[if] identifier[reset] :
identifier[self] . identifier[_resources] ={}
identifier[old] = identifier[dict] ( identifier[self] . identifier[_resources] )
keyword[for] identifier[dirname] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[_resource_path] ):
keyword[for] identifier[file_name] keyword[in] identifier[filenames] :
identifier[file_ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_name] )[ literal[int] ]. identifier[lower] ()[ literal[int] :]
keyword[if] identifier[file_ext] keyword[not] keyword[in] identifier[self] . identifier[_resource_file_types] :
identifier[self] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file_name] ))
keyword[continue]
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[file_name] )
keyword[try] :
identifier[file_hash] = identifier[get_file_hash] ( identifier[file_path] )
keyword[except] :
identifier[self] . identifier[exception] (
literal[string] . identifier[format] ( identifier[file_path] )
)
keyword[continue]
identifier[self] . identifier[_resources] [ identifier[file_name] ]={
literal[string] : identifier[file_name] ,
literal[string] : identifier[file_path] ,
literal[string] : identifier[file_hash] ,
literal[string] : identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
}
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_resources] :
identifier[resource] = identifier[self] . identifier[_resources] [ identifier[key] ]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[old] keyword[or] identifier[old] [ identifier[key] ][ literal[string] ]!= identifier[resource] [ literal[string] ]:
identifier[res] . identifier[append] (( identifier[key] , identifier[resource] [ literal[string] ]))
keyword[return] identifier[res] | def resource_update_list(self, reset=False):
"""
Update internal struct of resource, hash list and get diff
(Warning: Resource names have to be unique!!)
:param reset: Should resources be rebuild from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)]
"""
if not self._resource_path:
raise PluginException('No resource path set') # depends on [control=['if'], data=[]]
if not os.path.isdir(self._resource_path):
raise PluginException(u"Resource path directory '{}' not found".format(self._resource_path)) # depends on [control=['if'], data=[]]
res = []
with self._resource_lock:
if reset:
self._resources = {} # depends on [control=['if'], data=[]]
old = dict(self._resources)
for (dirname, dirnames, filenames) in os.walk(self._resource_path):
for file_name in filenames:
file_ext = os.path.splitext(file_name)[1].lower()[1:]
if file_ext not in self._resource_file_types:
self.debug(u"Skipping '{}'".format(file_name))
continue # depends on [control=['if'], data=[]]
file_path = os.path.join(dirname, file_name)
try:
file_hash = get_file_hash(file_path) # depends on [control=['try'], data=[]]
except:
self.exception(u"Failed to hash '{}'".format(file_path))
continue # depends on [control=['except'], data=[]]
self._resources[file_name] = {'name': file_name, 'path': file_path, 'hash': file_hash, 'checked': datetime.datetime.utcnow()} # depends on [control=['for'], data=['file_name']] # depends on [control=['for'], data=[]]
# generate diff
for key in self._resources:
resource = self._resources[key]
if key not in old or old[key]['hash'] != resource['hash']:
# new file or hash changed
res.append((key, resource['hash'])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['with'], data=[]]
return res |
def xpat_gen(self, header, msgid_range, *pattern):
"""Generator for the XPAT command.
"""
args = " ".join(
[header, utils.unparse_msgid_range(msgid_range)] + list(pattern)
)
code, message = self.command("XPAT", args)
if code != 221:
raise NNTPReplyError(code, message)
for line in self.info_gen(code, message):
yield line.strip() | def function[xpat_gen, parameter[self, header, msgid_range]]:
constant[Generator for the XPAT command.
]
variable[args] assign[=] call[constant[ ].join, parameter[binary_operation[list[[<ast.Name object at 0x7da1b02111b0>, <ast.Call object at 0x7da1b0211960>]] + call[name[list], parameter[name[pattern]]]]]]
<ast.Tuple object at 0x7da1b013fee0> assign[=] call[name[self].command, parameter[constant[XPAT], name[args]]]
if compare[name[code] not_equal[!=] constant[221]] begin[:]
<ast.Raise object at 0x7da1b013d4b0>
for taget[name[line]] in starred[call[name[self].info_gen, parameter[name[code], name[message]]]] begin[:]
<ast.Yield object at 0x7da1b013e830> | keyword[def] identifier[xpat_gen] ( identifier[self] , identifier[header] , identifier[msgid_range] ,* identifier[pattern] ):
literal[string]
identifier[args] = literal[string] . identifier[join] (
[ identifier[header] , identifier[utils] . identifier[unparse_msgid_range] ( identifier[msgid_range] )]+ identifier[list] ( identifier[pattern] )
)
identifier[code] , identifier[message] = identifier[self] . identifier[command] ( literal[string] , identifier[args] )
keyword[if] identifier[code] != literal[int] :
keyword[raise] identifier[NNTPReplyError] ( identifier[code] , identifier[message] )
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[info_gen] ( identifier[code] , identifier[message] ):
keyword[yield] identifier[line] . identifier[strip] () | def xpat_gen(self, header, msgid_range, *pattern):
"""Generator for the XPAT command.
"""
args = ' '.join([header, utils.unparse_msgid_range(msgid_range)] + list(pattern))
(code, message) = self.command('XPAT', args)
if code != 221:
raise NNTPReplyError(code, message) # depends on [control=['if'], data=['code']]
for line in self.info_gen(code, message):
yield line.strip() # depends on [control=['for'], data=['line']] |
def write_pdf(self, html):
""" Tries to write a PDF export from the command line using Prince if
available.
"""
try:
f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
f.write(html.encode('utf_8', 'xmlcharrefreplace'))
f.close()
except Exception:
raise IOError(u"Unable to create temporary file, aborting")
dummy_fh = open(os.path.devnull, 'w')
try:
command = ["prince", f.name, "-o", self.destination_file]
Popen(command, stderr=dummy_fh).communicate()
except Exception:
raise EnvironmentError(u"Unable to generate PDF file using "
"prince. Is it installed and available?")
finally:
dummy_fh.close() | def function[write_pdf, parameter[self, html]]:
constant[ Tries to write a PDF export from the command line using Prince if
available.
]
<ast.Try object at 0x7da1b22eabc0>
variable[dummy_fh] assign[=] call[name[open], parameter[name[os].path.devnull, constant[w]]]
<ast.Try object at 0x7da1b22ebbe0> | keyword[def] identifier[write_pdf] ( identifier[self] , identifier[html] ):
literal[string]
keyword[try] :
identifier[f] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] , identifier[suffix] = literal[string] )
identifier[f] . identifier[write] ( identifier[html] . identifier[encode] ( literal[string] , literal[string] ))
identifier[f] . identifier[close] ()
keyword[except] identifier[Exception] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[dummy_fh] = identifier[open] ( identifier[os] . identifier[path] . identifier[devnull] , literal[string] )
keyword[try] :
identifier[command] =[ literal[string] , identifier[f] . identifier[name] , literal[string] , identifier[self] . identifier[destination_file] ]
identifier[Popen] ( identifier[command] , identifier[stderr] = identifier[dummy_fh] ). identifier[communicate] ()
keyword[except] identifier[Exception] :
keyword[raise] identifier[EnvironmentError] ( literal[string]
literal[string] )
keyword[finally] :
identifier[dummy_fh] . identifier[close] () | def write_pdf(self, html):
""" Tries to write a PDF export from the command line using Prince if
available.
"""
try:
f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
f.write(html.encode('utf_8', 'xmlcharrefreplace'))
f.close() # depends on [control=['try'], data=[]]
except Exception:
raise IOError(u'Unable to create temporary file, aborting') # depends on [control=['except'], data=[]]
dummy_fh = open(os.path.devnull, 'w')
try:
command = ['prince', f.name, '-o', self.destination_file]
Popen(command, stderr=dummy_fh).communicate() # depends on [control=['try'], data=[]]
except Exception:
raise EnvironmentError(u'Unable to generate PDF file using prince. Is it installed and available?') # depends on [control=['except'], data=[]]
finally:
dummy_fh.close() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.