code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def unify(self, result):
    '''
    Normalize a state return dictionary for backward compatibility.

    A list-valued 'comment' is collapsed into a single newline-separated
    string (see https://github.com/saltstack/salt/pull/43070), and a
    non-None 'result' is coerced to a plain bool.

    :param result: state return dictionary, mutated in place
    :return: the same dictionary
    '''
    comment = result.get('comment')
    if isinstance(comment, list):
        pieces = [salt.utils.stringutils.to_unicode(piece) for piece in comment]
        result['comment'] = u'\n'.join(pieces)
    if result.get('result') is not None:
        result['result'] = bool(result['result'])
    return result
constant[
While comments as a list are allowed,
comments needs to be strings for backward compatibility.
See such claim here: https://github.com/saltstack/salt/pull/43070
Rules applied:
- 'comment' is joined into a multi-line string, in case the value is a list.
- 'result' should be always either True, False or None.
:param result:
:return:
]
if call[name[isinstance], parameter[call[name[result].get, parameter[constant[comment]]], name[list]]] begin[:]
call[name[result]][constant[comment]] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b1c05ba0>]]
if compare[call[name[result].get, parameter[constant[result]]] is_not constant[None]] begin[:]
call[name[result]][constant[result]] assign[=] call[name[bool], parameter[call[name[result]][constant[result]]]]
return[name[result]] | keyword[def] identifier[unify] ( identifier[self] , identifier[result] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[result] . identifier[get] ( literal[string] ), identifier[list] ):
identifier[result] [ literal[string] ]= literal[string] . identifier[join] ([
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[elm] ) keyword[for] identifier[elm] keyword[in] identifier[result] [ literal[string] ]
])
keyword[if] identifier[result] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[result] [ literal[string] ]= identifier[bool] ( identifier[result] [ literal[string] ])
keyword[return] identifier[result] | def unify(self, result):
"""
While comments as a list are allowed,
comments needs to be strings for backward compatibility.
See such claim here: https://github.com/saltstack/salt/pull/43070
Rules applied:
- 'comment' is joined into a multi-line string, in case the value is a list.
- 'result' should be always either True, False or None.
:param result:
:return:
"""
if isinstance(result.get('comment'), list):
result['comment'] = u'\n'.join([salt.utils.stringutils.to_unicode(elm) for elm in result['comment']]) # depends on [control=['if'], data=[]]
if result.get('result') is not None:
result['result'] = bool(result['result']) # depends on [control=['if'], data=[]]
return result |
def read_command(self, prompt=''):
    '''Script interface to read a command.

    The `prompt' parameter exists only for interface compatibility and
    is ignored. Returns the line read, after bumping the input line
    counter and, in verbose mode, echoing the line with its location.'''
    self.input_lineno += 1
    line = self.readline()
    if self.verbose:
        where = "%s line %s" % (self.script_name, self.input_lineno)
        self.msg('+ %s: %s' % (where, line))
    # Do something with history?
    return line
constant[Script interface to read a command. `prompt' is a parameter for
compatibilty and is ignored.]
<ast.AugAssign object at 0x7da1b05c7580>
variable[line] assign[=] call[name[self].readline, parameter[]]
if name[self].verbose begin[:]
variable[location] assign[=] binary_operation[constant[%s line %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b05c5db0>, <ast.Attribute object at 0x7da1b05c50f0>]]]
call[name[self].msg, parameter[binary_operation[constant[+ %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b05c7f70>, <ast.Name object at 0x7da1b05c5930>]]]]]
pass
return[name[line]] | keyword[def] identifier[read_command] ( identifier[self] , identifier[prompt] = literal[string] ):
literal[string]
identifier[self] . identifier[input_lineno] += literal[int]
identifier[line] = identifier[self] . identifier[readline] ()
keyword[if] identifier[self] . identifier[verbose] :
identifier[location] = literal[string] %( identifier[self] . identifier[script_name] , identifier[self] . identifier[input_lineno] )
identifier[self] . identifier[msg] ( literal[string] %( identifier[location] , identifier[line] ))
keyword[pass]
keyword[return] identifier[line] | def read_command(self, prompt=''):
"""Script interface to read a command. `prompt' is a parameter for
compatibilty and is ignored."""
self.input_lineno += 1
line = self.readline()
if self.verbose:
location = '%s line %s' % (self.script_name, self.input_lineno)
self.msg('+ %s: %s' % (location, line))
pass # depends on [control=['if'], data=[]]
# Do something with history?
return line |
def load_csv_stream(ctx, model, data,
                    header=None, header_exclude=None, **fmtparams):
    """Load a CSV from a stream.
    :param ctx: current anthem context
    :param model: model name as string or model klass
    :param data: csv data to load
    :param header: csv fieldnames whitelist
    :param header_exclude: csv fieldnames blacklist
    :param fmtparams: extra csv.reader formatting parameters
    Usage example::
        from pkg_resources import Requirement, resource_stream
        req = Requirement.parse('my-project')
        load_csv_stream(ctx, ctx.env['res.users'],
                        resource_stream(req, 'data/users.csv'),
                        delimiter=',')
    """
    _header, _rows = read_csv(data, **fmtparams)
    header = header if header else _header
    if _rows:
        # If the passed header is a strict subset of the file header and no
        # explicit blacklist was given, exclude the remaining fields.
        if header != _header and not header_exclude:
            header_exclude = [x for x in _header if x not in header]
        if header_exclude:
            # Exclude fields from the header as well as their values.
            header = [x for x in header if x not in header_exclude]
            # Odoo's import works with plain row lists (not dicts), so the
            # matching values must be pruned by position in every row.
            # Use a set for O(1) membership tests while filtering columns.
            pop_idxs = {_header.index(x) for x in header_exclude}
            rows = [
                [value for idx, value in enumerate(row) if idx not in pop_idxs]
                for row in _rows
            ]
        else:
            rows = list(_rows)
        if rows:
            load_rows(ctx, model, header, rows)
constant[Load a CSV from a stream.
:param ctx: current anthem context
:param model: model name as string or model klass
:param data: csv data to load
:param header: csv fieldnames whitelist
:param header_exclude: csv fieldnames blacklist
Usage example::
from pkg_resources import Requirement, resource_stream
req = Requirement.parse('my-project')
load_csv_stream(ctx, ctx.env['res.users'],
resource_stream(req, 'data/users.csv'),
delimiter=',')
]
<ast.Tuple object at 0x7da1b27bb3d0> assign[=] call[name[read_csv], parameter[name[data]]]
variable[header] assign[=] <ast.IfExp object at 0x7da1b27b85b0>
if name[_rows] begin[:]
if <ast.BoolOp object at 0x7da1b27bb520> begin[:]
variable[header_exclude] assign[=] <ast.ListComp object at 0x7da1b27b9ff0>
if name[header_exclude] begin[:]
variable[header] assign[=] <ast.ListComp object at 0x7da1b27bafb0>
variable[pop_idxs] assign[=] <ast.ListComp object at 0x7da1b27baa40>
variable[rows] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b27bbf10>, <ast.Name object at 0x7da1b27b81f0>]]] in starred[call[name[enumerate], parameter[name[_rows]]]] begin[:]
call[name[rows].append, parameter[<ast.ListComp object at 0x7da1b27bb040>]]
if name[rows] begin[:]
call[name[load_rows], parameter[name[ctx], name[model], name[header], name[rows]]] | keyword[def] identifier[load_csv_stream] ( identifier[ctx] , identifier[model] , identifier[data] ,
identifier[header] = keyword[None] , identifier[header_exclude] = keyword[None] ,** identifier[fmtparams] ):
literal[string]
identifier[_header] , identifier[_rows] = identifier[read_csv] ( identifier[data] ,** identifier[fmtparams] )
identifier[header] = identifier[header] keyword[if] identifier[header] keyword[else] identifier[_header]
keyword[if] identifier[_rows] :
keyword[if] identifier[header] != identifier[_header] keyword[and] keyword[not] identifier[header_exclude] :
identifier[header_exclude] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[_header] keyword[if] identifier[x] keyword[not] keyword[in] identifier[header] ]
keyword[if] identifier[header_exclude] :
identifier[header] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[header] keyword[if] identifier[x] keyword[not] keyword[in] identifier[header_exclude] ]
identifier[pop_idxs] =[ identifier[_header] . identifier[index] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[header_exclude] ]
identifier[rows] =[]
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[_rows] ):
identifier[rows] . identifier[append] (
[ identifier[x] keyword[for] identifier[j] , identifier[x] keyword[in] identifier[enumerate] ( identifier[row] ) keyword[if] identifier[j] keyword[not] keyword[in] identifier[pop_idxs] ]
)
keyword[else] :
identifier[rows] = identifier[list] ( identifier[_rows] )
keyword[if] identifier[rows] :
identifier[load_rows] ( identifier[ctx] , identifier[model] , identifier[header] , identifier[rows] ) | def load_csv_stream(ctx, model, data, header=None, header_exclude=None, **fmtparams):
"""Load a CSV from a stream.
:param ctx: current anthem context
:param model: model name as string or model klass
:param data: csv data to load
:param header: csv fieldnames whitelist
:param header_exclude: csv fieldnames blacklist
Usage example::
from pkg_resources import Requirement, resource_stream
req = Requirement.parse('my-project')
load_csv_stream(ctx, ctx.env['res.users'],
resource_stream(req, 'data/users.csv'),
delimiter=',')
"""
(_header, _rows) = read_csv(data, **fmtparams)
header = header if header else _header
if _rows:
# check if passed header contains all the fields
if header != _header and (not header_exclude):
# if not, we exclude the rest of the fields
header_exclude = [x for x in _header if x not in header] # depends on [control=['if'], data=[]]
if header_exclude:
# exclude fields from header as well as respective values
header = [x for x in header if x not in header_exclude]
# we must loop trough all the rows too to pop values
# since odoo import works only w/ reader and not w/ dictreader
pop_idxs = [_header.index(x) for x in header_exclude]
rows = []
for (i, row) in enumerate(_rows):
rows.append([x for (j, x) in enumerate(row) if j not in pop_idxs]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
rows = list(_rows)
if rows:
load_rows(ctx, model, header, rows) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def tabulate(json_model):
    '''
    a function to add the tabulate method to a jsonModel object
    :param json_model: jsonModel object
    :return: jsonModel object (same object, with a bound ``tabulate`` method)
    '''
    import types
    from jsonmodel._extensions import tabulate as _tabulate
    try:
        from tabulate import tabulate
    except ImportError:
        # Only a missing module should trigger the install hint; the previous
        # bare `except:` also swallowed SystemExit and KeyboardInterrupt.
        import sys
        print('jsonmodel.extensions.tabulate requires the tabulate module. try: pip install tabulate')
        sys.exit(1)
    # Bind _tabulate to this instance so json_model.tabulate() works.
    setattr(json_model, 'tabulate', _tabulate.__get__(json_model, types.MethodType))
    return json_model
constant[
a function to add the tabulate method to a jsonModel object
:param json_model: jsonModel object
:return: jsonModel object
]
import module[types]
from relative_module[jsonmodel._extensions] import module[tabulate]
<ast.Try object at 0x7da1b0aa6770>
call[name[setattr], parameter[name[json_model], constant[tabulate], call[name[_tabulate].__get__, parameter[name[json_model], name[types].MethodType]]]]
return[name[json_model]] | keyword[def] identifier[tabulate] ( identifier[json_model] ):
literal[string]
keyword[import] identifier[types]
keyword[from] identifier[jsonmodel] . identifier[_extensions] keyword[import] identifier[tabulate] keyword[as] identifier[_tabulate]
keyword[try] :
keyword[from] identifier[tabulate] keyword[import] identifier[tabulate]
keyword[except] :
keyword[import] identifier[sys]
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[setattr] ( identifier[json_model] , literal[string] , identifier[_tabulate] . identifier[__get__] ( identifier[json_model] , identifier[types] . identifier[MethodType] ))
keyword[return] identifier[json_model] | def tabulate(json_model):
"""
a function to add the tabulate method to a jsonModel object
:param json_model: jsonModel object
:return: jsonModel object
"""
import types
from jsonmodel._extensions import tabulate as _tabulate
try:
from tabulate import tabulate # depends on [control=['try'], data=[]]
except:
import sys
print('jsonmodel.extensions.tabulate requires the tabulate module. try: pip install tabulate')
sys.exit(1) # depends on [control=['except'], data=[]]
setattr(json_model, 'tabulate', _tabulate.__get__(json_model, types.MethodType))
return json_model |
def strel_line(length, angle):
    """Create a line structuring element for morphological operations.

    length - distance between first and last pixels of the line, rounded down
    angle - angle from the horizontal, counter-clockwise in degrees.
    Note: uses draw_line's Bresenham algorithm to select points.
    """
    theta = float(angle) * np.pi / 180.
    eps = np.finfo(float).eps
    dx = int(eps + np.cos(theta) * length / 2)
    # Row offsets grow downward, so "up" needs a negated sine term.
    dy = -int(eps + np.sin(theta) * length / 2)
    cx = abs(dx)
    cy = abs(dy)
    strel = np.zeros((2 * cy + 1, 2 * cx + 1), bool)
    # Draw both half-lines outward from the center so the element is
    # symmetric about its middle pixel.
    draw_line(strel, (cy - dy, cx - dx), (cy, cx), True)
    draw_line(strel, (cy + dy, cx + dx), (cy, cx), True)
    return strel
constant[Create a line structuring element for morphological operations
length - distance between first and last pixels of the line, rounded down
angle - angle from the horizontal, counter-clockwise in degrees.
Note: uses draw_line's Bresenham algorithm to select points.
]
variable[angle] assign[=] binary_operation[binary_operation[call[name[float], parameter[name[angle]]] * name[np].pi] / constant[180.0]]
variable[x_off] assign[=] call[name[int], parameter[binary_operation[call[name[np].finfo, parameter[name[float]]].eps + binary_operation[binary_operation[call[name[np].cos, parameter[name[angle]]] * name[length]] / constant[2]]]]]
variable[y_off] assign[=] <ast.UnaryOp object at 0x7da1b2344040>
variable[x_center] assign[=] call[name[abs], parameter[name[x_off]]]
variable[y_center] assign[=] call[name[abs], parameter[name[y_off]]]
variable[strel] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b2346680>, <ast.BinOp object at 0x7da1b23465f0>]], name[bool]]]
call[name[draw_line], parameter[name[strel], tuple[[<ast.BinOp object at 0x7da1b2347940>, <ast.BinOp object at 0x7da1b2344f40>]], tuple[[<ast.Name object at 0x7da1b2347730>, <ast.Name object at 0x7da1b2345150>]], constant[True]]]
call[name[draw_line], parameter[name[strel], tuple[[<ast.BinOp object at 0x7da1b2344dc0>, <ast.BinOp object at 0x7da1b23477f0>]], tuple[[<ast.Name object at 0x7da1b2347430>, <ast.Name object at 0x7da1b23474c0>]], constant[True]]]
return[name[strel]] | keyword[def] identifier[strel_line] ( identifier[length] , identifier[angle] ):
literal[string]
identifier[angle] = identifier[float] ( identifier[angle] )* identifier[np] . identifier[pi] / literal[int]
identifier[x_off] = identifier[int] ( identifier[np] . identifier[finfo] ( identifier[float] ). identifier[eps] + identifier[np] . identifier[cos] ( identifier[angle] )* identifier[length] / literal[int] )
identifier[y_off] =- identifier[int] ( identifier[np] . identifier[finfo] ( identifier[float] ). identifier[eps] + identifier[np] . identifier[sin] ( identifier[angle] )* identifier[length] / literal[int] )
identifier[x_center] = identifier[abs] ( identifier[x_off] )
identifier[y_center] = identifier[abs] ( identifier[y_off] )
identifier[strel] = identifier[np] . identifier[zeros] (( identifier[y_center] * literal[int] + literal[int] ,
identifier[x_center] * literal[int] + literal[int] ), identifier[bool] )
identifier[draw_line] ( identifier[strel] ,
( identifier[y_center] - identifier[y_off] , identifier[x_center] - identifier[x_off] ),
( identifier[y_center] , identifier[x_center] ), keyword[True] )
identifier[draw_line] ( identifier[strel] ,
( identifier[y_center] + identifier[y_off] , identifier[x_center] + identifier[x_off] ),
( identifier[y_center] , identifier[x_center] ), keyword[True] )
keyword[return] identifier[strel] | def strel_line(length, angle):
"""Create a line structuring element for morphological operations
length - distance between first and last pixels of the line, rounded down
angle - angle from the horizontal, counter-clockwise in degrees.
Note: uses draw_line's Bresenham algorithm to select points.
"""
angle = float(angle) * np.pi / 180.0
x_off = int(np.finfo(float).eps + np.cos(angle) * length / 2)
# Y is flipped here because "up" is negative
y_off = -int(np.finfo(float).eps + np.sin(angle) * length / 2)
x_center = abs(x_off)
y_center = abs(y_off)
strel = np.zeros((y_center * 2 + 1, x_center * 2 + 1), bool)
draw_line(strel, (y_center - y_off, x_center - x_off), (y_center, x_center), True)
draw_line(strel, (y_center + y_off, x_center + x_off), (y_center, x_center), True)
return strel |
def get_max_posteriors(self, parameters=None, squeeze=True, chains=None):
    """ Gets the maximum posterior point in parameter space from the passed parameters.
    Requires the chains to have set `posterior` values.
    Parameters
    ----------
    parameters : str|list[str]
        The parameters to find. Defaults to every parameter a chain has.
    squeeze : bool, optional
        Squeeze the summaries. If you only have one chain, squeeze will not return
        a length one list, just the single summary. If this is false, you will
        get a length one list.
    chains : list[int|str], optional
        A list of the chains to get a summary of.
    Returns
    -------
    list of (dict or None)
        One entry per chain: a dict mapping parameter name to its value at
        the chain's posterior maximum, or None for a chain with no
        posterior set. With ``squeeze`` and a single chain, the bare entry
        is returned instead of a one-element list.
    """
    results = []
    if chains is None:
        chains = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        # Resolve each requested chain (given by index or name) to the
        # actual chain objects held by the parent.
        chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]
    if isinstance(parameters, str):
        parameters = [parameters]
    for chain in chains:
        # A chain without posterior values has no max-posterior point.
        if chain.posterior_max_index is None:
            results.append(None)
            continue
        res = {}
        params_to_find = parameters if parameters is not None else chain.parameters
        for p in params_to_find:
            # Silently skip parameters this chain does not track.
            if p in chain.parameters:
                res[p] = chain.posterior_max_params[p]
        results.append(res)
    if squeeze and len(results) == 1:
        return results[0]
    return results | def function[get_max_posteriors, parameter[self, parameters, squeeze, chains]]:
constant[ Gets the maximum posterior point in parameter space from the passed parameters.
Requires the chains to have set `posterior` values.
Parameters
----------
parameters : str|list[str]
The parameters to find
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of two-tuples
One entry per chain, two-tuple represents the max-likelihood coordinate
]
variable[results] assign[=] list[[]]
if compare[name[chains] is constant[None]] begin[:]
variable[chains] assign[=] name[self].parent.chains
if call[name[isinstance], parameter[name[parameters], name[str]]] begin[:]
variable[parameters] assign[=] list[[<ast.Name object at 0x7da1b10df790>]]
for taget[name[chain]] in starred[name[chains]] begin[:]
if compare[name[chain].posterior_max_index is constant[None]] begin[:]
call[name[results].append, parameter[constant[None]]]
continue
variable[res] assign[=] dictionary[[], []]
variable[params_to_find] assign[=] <ast.IfExp object at 0x7da18f09cd60>
for taget[name[p]] in starred[name[params_to_find]] begin[:]
if compare[name[p] in name[chain].parameters] begin[:]
call[name[res]][name[p]] assign[=] call[name[chain].posterior_max_params][name[p]]
call[name[results].append, parameter[name[res]]]
if <ast.BoolOp object at 0x7da18f09cb20> begin[:]
return[call[name[results]][constant[0]]]
return[name[results]] | keyword[def] identifier[get_max_posteriors] ( identifier[self] , identifier[parameters] = keyword[None] , identifier[squeeze] = keyword[True] , identifier[chains] = keyword[None] ):
literal[string]
identifier[results] =[]
keyword[if] identifier[chains] keyword[is] keyword[None] :
identifier[chains] = identifier[self] . identifier[parent] . identifier[chains]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[chains] ,( identifier[int] , identifier[str] )):
identifier[chains] =[ identifier[chains] ]
identifier[chains] =[ identifier[self] . identifier[parent] . identifier[chains] [ identifier[i] ] keyword[for] identifier[c] keyword[in] identifier[chains] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[parent] . identifier[_get_chain] ( identifier[c] )]
keyword[if] identifier[isinstance] ( identifier[parameters] , identifier[str] ):
identifier[parameters] =[ identifier[parameters] ]
keyword[for] identifier[chain] keyword[in] identifier[chains] :
keyword[if] identifier[chain] . identifier[posterior_max_index] keyword[is] keyword[None] :
identifier[results] . identifier[append] ( keyword[None] )
keyword[continue]
identifier[res] ={}
identifier[params_to_find] = identifier[parameters] keyword[if] identifier[parameters] keyword[is] keyword[not] keyword[None] keyword[else] identifier[chain] . identifier[parameters]
keyword[for] identifier[p] keyword[in] identifier[params_to_find] :
keyword[if] identifier[p] keyword[in] identifier[chain] . identifier[parameters] :
identifier[res] [ identifier[p] ]= identifier[chain] . identifier[posterior_max_params] [ identifier[p] ]
identifier[results] . identifier[append] ( identifier[res] )
keyword[if] identifier[squeeze] keyword[and] identifier[len] ( identifier[results] )== literal[int] :
keyword[return] identifier[results] [ literal[int] ]
keyword[return] identifier[results] | def get_max_posteriors(self, parameters=None, squeeze=True, chains=None):
""" Gets the maximum posterior point in parameter space from the passed parameters.
Requires the chains to have set `posterior` values.
Parameters
----------
parameters : str|list[str]
The parameters to find
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of two-tuples
One entry per chain, two-tuple represents the max-likelihood coordinate
"""
results = []
if chains is None:
chains = self.parent.chains # depends on [control=['if'], data=['chains']]
else:
if isinstance(chains, (int, str)):
chains = [chains] # depends on [control=['if'], data=[]]
chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]
if isinstance(parameters, str):
parameters = [parameters] # depends on [control=['if'], data=[]]
for chain in chains:
if chain.posterior_max_index is None:
results.append(None)
continue # depends on [control=['if'], data=[]]
res = {}
params_to_find = parameters if parameters is not None else chain.parameters
for p in params_to_find:
if p in chain.parameters:
res[p] = chain.posterior_max_params[p] # depends on [control=['if'], data=['p']] # depends on [control=['for'], data=['p']]
results.append(res) # depends on [control=['for'], data=['chain']]
if squeeze and len(results) == 1:
return results[0] # depends on [control=['if'], data=[]]
return results |
def get_variable(self, name):
    """
    Get a variable used in this tower.

    The name should not contain the variable scope prefix of the tower.
    When the tower has the same variable scope and name scope, this is
    equivalent to :meth:`get_tensor`.

    Args:
        name (str): variable name without the tower's scope prefix.

    Returns:
        The op or tensor matching the scoped name.
    """
    # Strip any ":0"-style tensor suffix; keep only the canonical name.
    name = get_op_tensor_name(name)[1]
    # Prepend the tower's variable-scope prefix, when it has one
    # (idiomatic truthiness check instead of len() on the string).
    if self.vs_name:
        name_with_vs = self.vs_name + "/" + name
    else:
        name_with_vs = name
    return get_op_or_tensor_by_name(name_with_vs)
constant[
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
]
variable[name] assign[=] call[call[name[get_op_tensor_name], parameter[name[name]]]][constant[1]]
if call[name[len], parameter[name[self].vs_name]] begin[:]
variable[name_with_vs] assign[=] binary_operation[binary_operation[name[self].vs_name + constant[/]] + name[name]]
return[call[name[get_op_or_tensor_by_name], parameter[name[name_with_vs]]]] | keyword[def] identifier[get_variable] ( identifier[self] , identifier[name] ):
literal[string]
identifier[name] = identifier[get_op_tensor_name] ( identifier[name] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[self] . identifier[vs_name] ):
identifier[name_with_vs] = identifier[self] . identifier[vs_name] + literal[string] + identifier[name]
keyword[else] :
identifier[name_with_vs] = identifier[name]
keyword[return] identifier[get_op_or_tensor_by_name] ( identifier[name_with_vs] ) | def get_variable(self, name):
"""
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
"""
name = get_op_tensor_name(name)[1]
if len(self.vs_name):
name_with_vs = self.vs_name + '/' + name # depends on [control=['if'], data=[]]
else:
name_with_vs = name
return get_op_or_tensor_by_name(name_with_vs) |
def dateJDN(year, month, day, calendar):
    """ Converts date to Julian Day Number.

    Standard Fliegel & Van Flandern integer arithmetic: the year is
    shifted to begin in March, pushing any leap day to the end of the
    counting year.

    year, month, day -- date components (month in 1..12)
    calendar -- GREGORIAN applies the Gregorian leap rule; any other
                value uses the Julian calendar.
    """
    a = (14 - month) // 12    # 1 for January/February, 0 otherwise
    y = year + 4800 - a       # March-based year, offset to keep JDN positive
    m = month + 12*a - 3      # month index with March=0 ... February=11
    if calendar == GREGORIAN:
        # (153*m + 2)//5 counts the days in the months preceding m; the
        # -y//100 + y//400 terms apply the Gregorian century leap rule.
        return day + (153*m + 2)//5 + 365*y + y//4 - y//100 + y//400 - 32045
    else:
        # Julian calendar: every fourth year leaps, no century correction.
        return day + (153*m + 2)//5 + 365*y + y//4 - 32083 | def function[dateJDN, parameter[year, month, day, calendar]]:
constant[ Converts date to Julian Day Number. ]
variable[a] assign[=] binary_operation[binary_operation[constant[14] - name[month]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[12]]
variable[y] assign[=] binary_operation[binary_operation[name[year] + constant[4800]] - name[a]]
variable[m] assign[=] binary_operation[binary_operation[name[month] + binary_operation[constant[12] * name[a]]] - constant[3]]
if compare[name[calendar] equal[==] name[GREGORIAN]] begin[:]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[day] + binary_operation[binary_operation[binary_operation[constant[153] * name[m]] + constant[2]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[5]]] + binary_operation[constant[365] * name[y]]] + binary_operation[name[y] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]]] - binary_operation[name[y] <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]]] + binary_operation[name[y] <ast.FloorDiv object at 0x7da2590d6bc0> constant[400]]] - constant[32045]]] | keyword[def] identifier[dateJDN] ( identifier[year] , identifier[month] , identifier[day] , identifier[calendar] ):
literal[string]
identifier[a] =( literal[int] - identifier[month] )// literal[int]
identifier[y] = identifier[year] + literal[int] - identifier[a]
identifier[m] = identifier[month] + literal[int] * identifier[a] - literal[int]
keyword[if] identifier[calendar] == identifier[GREGORIAN] :
keyword[return] identifier[day] +( literal[int] * identifier[m] + literal[int] )// literal[int] + literal[int] * identifier[y] + identifier[y] // literal[int] - identifier[y] // literal[int] + identifier[y] // literal[int] - literal[int]
keyword[else] :
keyword[return] identifier[day] +( literal[int] * identifier[m] + literal[int] )// literal[int] + literal[int] * identifier[y] + identifier[y] // literal[int] - literal[int] | def dateJDN(year, month, day, calendar):
""" Converts date to Julian Day Number. """
a = (14 - month) // 12
y = year + 4800 - a
m = month + 12 * a - 3
if calendar == GREGORIAN:
return day + (153 * m + 2) // 5 + 365 * y + y // 4 - y // 100 + y // 400 - 32045 # depends on [control=['if'], data=[]]
else:
return day + (153 * m + 2) // 5 + 365 * y + y // 4 - 32083 |
def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):
    """
    This is a helper method for get_removals_int_oxid!

    Recursively oxidizes `oxid_el` by one integer oxidation step, records
    the charge-balancing amount of the working cation, then tries the
    remaining oxidizable elements on the updated composition.

    Args:
        spec_amts_oxi - a dict of species to their amounts in the structure
        oxid_el - the element to oxidize
        oxid_els - the full list of elements that might be oxidized
        numa - a running set of numbers of A cation at integer oxidation steps

    Returns:
        a set of numbers A; steps for for oxidizing oxid_el first, then the other oxid_els in this list
    """
    # If Mn is the oxid_el, we have a mixture of Mn2+, Mn3+; determine the
    # minimum oxidation state for Mn -- this is the state we want to oxidize!
    oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol])
    oxid_new = math.floor(oxid_old + 1)
    # If this is not a valid solution, break out of here and don't add
    # anything to the list.
    if oxid_new > oxid_el.max_oxidation_state:
        return numa
    # Update the spec_amts_oxi map to reflect that the oxidation took place:
    # replace the old species with the oxidized one, keeping its amount.
    spec_old = Specie(oxid_el.symbol, oxid_old)
    spec_new = Specie(oxid_el.symbol, oxid_new)
    specamt = spec_amts_oxi[spec_old]
    spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old}
    spec_amts_oxi[spec_new] = specamt
    spec_amts_oxi = Composition(spec_amts_oxi)
    # Determine the amount of cation A in the structure needed for charge
    # balance and add it to the running set.
    oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if
                   spec.symbol not in self.cation.symbol])
    a = max(0, -oxi_noA / self.cation_charge)
    numa = numa.union({a})
    # Recursively try the other oxidation states.
    if a == 0:
        # No cation left to remove; deeper oxidation cannot add new steps.
        return numa
    else:
        for oxid_el in oxid_els:
            # Copy the composition so each recursive branch is independent.
            numa = numa.union(
                self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa))
        return numa | def function[_get_int_removals_helper, parameter[self, spec_amts_oxi, oxid_el, oxid_els, numa]]:
constant[
This is a helper method for get_removals_int_oxid!
Args:
spec_amts_oxi - a dict of species to their amounts in the structure
oxid_el - the element to oxidize
oxid_els - the full list of elements that might be oxidized
numa - a running set of numbers of A cation at integer oxidation steps
Returns:
a set of numbers A; steps for for oxidizing oxid_el first, then the other oxid_els in this list
]
variable[oxid_old] assign[=] call[name[min], parameter[<ast.ListComp object at 0x7da207f02f50>]]
variable[oxid_new] assign[=] call[name[math].floor, parameter[binary_operation[name[oxid_old] + constant[1]]]]
if compare[name[oxid_new] greater[>] name[oxid_el].max_oxidation_state] begin[:]
return[name[numa]]
variable[spec_old] assign[=] call[name[Specie], parameter[name[oxid_el].symbol, name[oxid_old]]]
variable[spec_new] assign[=] call[name[Specie], parameter[name[oxid_el].symbol, name[oxid_new]]]
variable[specamt] assign[=] call[name[spec_amts_oxi]][name[spec_old]]
variable[spec_amts_oxi] assign[=] <ast.DictComp object at 0x7da207f033a0>
call[name[spec_amts_oxi]][name[spec_new]] assign[=] name[specamt]
variable[spec_amts_oxi] assign[=] call[name[Composition], parameter[name[spec_amts_oxi]]]
variable[oxi_noA] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da207f012a0>]]
variable[a] assign[=] call[name[max], parameter[constant[0], binary_operation[<ast.UnaryOp object at 0x7da207f00370> / name[self].cation_charge]]]
variable[numa] assign[=] call[name[numa].union, parameter[<ast.Set object at 0x7da2047e8880>]]
if compare[name[a] equal[==] constant[0]] begin[:]
return[name[numa]] | keyword[def] identifier[_get_int_removals_helper] ( identifier[self] , identifier[spec_amts_oxi] , identifier[oxid_el] , identifier[oxid_els] , identifier[numa] ):
literal[string]
identifier[oxid_old] = identifier[min] ([ identifier[spec] . identifier[oxi_state] keyword[for] identifier[spec] keyword[in] identifier[spec_amts_oxi] keyword[if] identifier[spec] . identifier[symbol] == identifier[oxid_el] . identifier[symbol] ])
identifier[oxid_new] = identifier[math] . identifier[floor] ( identifier[oxid_old] + literal[int] )
keyword[if] identifier[oxid_new] > identifier[oxid_el] . identifier[max_oxidation_state] :
keyword[return] identifier[numa]
identifier[spec_old] = identifier[Specie] ( identifier[oxid_el] . identifier[symbol] , identifier[oxid_old] )
identifier[spec_new] = identifier[Specie] ( identifier[oxid_el] . identifier[symbol] , identifier[oxid_new] )
identifier[specamt] = identifier[spec_amts_oxi] [ identifier[spec_old] ]
identifier[spec_amts_oxi] ={ identifier[sp] : identifier[amt] keyword[for] identifier[sp] , identifier[amt] keyword[in] identifier[spec_amts_oxi] . identifier[items] () keyword[if] identifier[sp] != identifier[spec_old] }
identifier[spec_amts_oxi] [ identifier[spec_new] ]= identifier[specamt]
identifier[spec_amts_oxi] = identifier[Composition] ( identifier[spec_amts_oxi] )
identifier[oxi_noA] = identifier[sum] ([ identifier[spec] . identifier[oxi_state] * identifier[spec_amts_oxi] [ identifier[spec] ] keyword[for] identifier[spec] keyword[in] identifier[spec_amts_oxi] keyword[if]
identifier[spec] . identifier[symbol] keyword[not] keyword[in] identifier[self] . identifier[cation] . identifier[symbol] ])
identifier[a] = identifier[max] ( literal[int] ,- identifier[oxi_noA] / identifier[self] . identifier[cation_charge] )
identifier[numa] = identifier[numa] . identifier[union] ({ identifier[a] })
keyword[if] identifier[a] == literal[int] :
keyword[return] identifier[numa]
keyword[else] :
keyword[for] identifier[oxid_el] keyword[in] identifier[oxid_els] :
identifier[numa] = identifier[numa] . identifier[union] (
identifier[self] . identifier[_get_int_removals_helper] ( identifier[spec_amts_oxi] . identifier[copy] (), identifier[oxid_el] , identifier[oxid_els] , identifier[numa] ))
keyword[return] identifier[numa] | def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):
"""
This is a helper method for get_removals_int_oxid!
Args:
spec_amts_oxi - a dict of species to their amounts in the structure
oxid_el - the element to oxidize
oxid_els - the full list of elements that might be oxidized
numa - a running set of numbers of A cation at integer oxidation steps
Returns:
a set of numbers A; steps for for oxidizing oxid_el first, then the other oxid_els in this list
"""
# If Mn is the oxid_el, we have a mixture of Mn2+, Mn3+, determine the minimum oxidation state for Mn
#this is the state we want to oxidize!
oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol])
oxid_new = math.floor(oxid_old + 1)
#if this is not a valid solution, break out of here and don't add anything to the list
if oxid_new > oxid_el.max_oxidation_state:
return numa # depends on [control=['if'], data=[]]
#update the spec_amts_oxi map to reflect that the oxidation took place
spec_old = Specie(oxid_el.symbol, oxid_old)
spec_new = Specie(oxid_el.symbol, oxid_new)
specamt = spec_amts_oxi[spec_old]
spec_amts_oxi = {sp: amt for (sp, amt) in spec_amts_oxi.items() if sp != spec_old}
spec_amts_oxi[spec_new] = specamt
spec_amts_oxi = Composition(spec_amts_oxi)
#determine the amount of cation A in the structure needed for charge balance and add it to the list
oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if spec.symbol not in self.cation.symbol])
a = max(0, -oxi_noA / self.cation_charge)
numa = numa.union({a})
#recursively try the other oxidation states
if a == 0:
return numa # depends on [control=['if'], data=[]]
else:
for oxid_el in oxid_els:
numa = numa.union(self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa)) # depends on [control=['for'], data=['oxid_el']]
return numa |
def uniformat(value):
"""Convert a Unicode char."""
if value in GROUP_ESCAPES:
# Escape characters that are (or will be in the future) problematic
c = "\\x%02x\\x%02x" % (0x5c, value)
elif value <= 0xFF:
c = "\\x%02x" % value
elif value <= 0xFFFF:
c = "\\u%04x" % value
else:
c = "\\U%08x" % value
return c | def function[uniformat, parameter[value]]:
constant[Convert a Unicode char.]
if compare[name[value] in name[GROUP_ESCAPES]] begin[:]
variable[c] assign[=] binary_operation[constant[\x%02x\x%02x] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Constant object at 0x7da1b04ef070>, <ast.Name object at 0x7da1b04eca60>]]]
return[name[c]] | keyword[def] identifier[uniformat] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[in] identifier[GROUP_ESCAPES] :
identifier[c] = literal[string] %( literal[int] , identifier[value] )
keyword[elif] identifier[value] <= literal[int] :
identifier[c] = literal[string] % identifier[value]
keyword[elif] identifier[value] <= literal[int] :
identifier[c] = literal[string] % identifier[value]
keyword[else] :
identifier[c] = literal[string] % identifier[value]
keyword[return] identifier[c] | def uniformat(value):
"""Convert a Unicode char."""
if value in GROUP_ESCAPES:
# Escape characters that are (or will be in the future) problematic
c = '\\x%02x\\x%02x' % (92, value) # depends on [control=['if'], data=['value']]
elif value <= 255:
c = '\\x%02x' % value # depends on [control=['if'], data=['value']]
elif value <= 65535:
c = '\\u%04x' % value # depends on [control=['if'], data=['value']]
else:
c = '\\U%08x' % value
return c |
def fetch(self):
"""Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame
"""
self._validate_params()
if self.islast:
data = super(DailySummaryReader, self).fetch()
else:
data = self._fetch_dates()
if self.output_format == 'pandas':
data.set_index('date', inplace=True)
return data
else:
return data | def function[fetch, parameter[self]]:
constant[Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame
]
call[name[self]._validate_params, parameter[]]
if name[self].islast begin[:]
variable[data] assign[=] call[call[name[super], parameter[name[DailySummaryReader], name[self]]].fetch, parameter[]]
if compare[name[self].output_format equal[==] constant[pandas]] begin[:]
call[name[data].set_index, parameter[constant[date]]]
return[name[data]] | keyword[def] identifier[fetch] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_validate_params] ()
keyword[if] identifier[self] . identifier[islast] :
identifier[data] = identifier[super] ( identifier[DailySummaryReader] , identifier[self] ). identifier[fetch] ()
keyword[else] :
identifier[data] = identifier[self] . identifier[_fetch_dates] ()
keyword[if] identifier[self] . identifier[output_format] == literal[string] :
identifier[data] . identifier[set_index] ( literal[string] , identifier[inplace] = keyword[True] )
keyword[return] identifier[data]
keyword[else] :
keyword[return] identifier[data] | def fetch(self):
"""Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame
"""
self._validate_params()
if self.islast:
data = super(DailySummaryReader, self).fetch() # depends on [control=['if'], data=[]]
else:
data = self._fetch_dates()
if self.output_format == 'pandas':
data.set_index('date', inplace=True)
return data # depends on [control=['if'], data=[]]
else:
return data |
def get_event(self, event_name, event_history=None):
"""Get an event from the database.
Gets an event from the named event list removing the event and
adding it to the event history.
Args:
event_name (str): Event list key.
event_history (str, optional): Event history list.
Returns:
str: string representation of the event object
"""
if event_history is None:
event_history = event_name + '_history'
return self._db.rpoplpush(event_name, event_history) | def function[get_event, parameter[self, event_name, event_history]]:
constant[Get an event from the database.
Gets an event from the named event list removing the event and
adding it to the event history.
Args:
event_name (str): Event list key.
event_history (str, optional): Event history list.
Returns:
str: string representation of the event object
]
if compare[name[event_history] is constant[None]] begin[:]
variable[event_history] assign[=] binary_operation[name[event_name] + constant[_history]]
return[call[name[self]._db.rpoplpush, parameter[name[event_name], name[event_history]]]] | keyword[def] identifier[get_event] ( identifier[self] , identifier[event_name] , identifier[event_history] = keyword[None] ):
literal[string]
keyword[if] identifier[event_history] keyword[is] keyword[None] :
identifier[event_history] = identifier[event_name] + literal[string]
keyword[return] identifier[self] . identifier[_db] . identifier[rpoplpush] ( identifier[event_name] , identifier[event_history] ) | def get_event(self, event_name, event_history=None):
"""Get an event from the database.
Gets an event from the named event list removing the event and
adding it to the event history.
Args:
event_name (str): Event list key.
event_history (str, optional): Event history list.
Returns:
str: string representation of the event object
"""
if event_history is None:
event_history = event_name + '_history' # depends on [control=['if'], data=['event_history']]
return self._db.rpoplpush(event_name, event_history) |
def add_injectable(
name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
"""
Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching.
"""
if isinstance(value, Callable):
if autocall:
value = _InjectableFuncWrapper(
name, value, cache=cache, cache_scope=cache_scope)
# clear any cached data from a previously registered value
value.clear_cached()
elif not autocall and memoize:
value = _memoize_function(value, name, cache_scope=cache_scope)
logger.debug('registering injectable {!r}'.format(name))
_INJECTABLES[name] = value | def function[add_injectable, parameter[name, value, autocall, cache, cache_scope, memoize]]:
constant[
Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching.
]
if call[name[isinstance], parameter[name[value], name[Callable]]] begin[:]
if name[autocall] begin[:]
variable[value] assign[=] call[name[_InjectableFuncWrapper], parameter[name[name], name[value]]]
call[name[value].clear_cached, parameter[]]
call[name[logger].debug, parameter[call[constant[registering injectable {!r}].format, parameter[name[name]]]]]
call[name[_INJECTABLES]][name[name]] assign[=] name[value] | keyword[def] identifier[add_injectable] (
identifier[name] , identifier[value] , identifier[autocall] = keyword[True] , identifier[cache] = keyword[False] , identifier[cache_scope] = identifier[_CS_FOREVER] ,
identifier[memoize] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Callable] ):
keyword[if] identifier[autocall] :
identifier[value] = identifier[_InjectableFuncWrapper] (
identifier[name] , identifier[value] , identifier[cache] = identifier[cache] , identifier[cache_scope] = identifier[cache_scope] )
identifier[value] . identifier[clear_cached] ()
keyword[elif] keyword[not] identifier[autocall] keyword[and] identifier[memoize] :
identifier[value] = identifier[_memoize_function] ( identifier[value] , identifier[name] , identifier[cache_scope] = identifier[cache_scope] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[_INJECTABLES] [ identifier[name] ]= identifier[value] | def add_injectable(name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER, memoize=False):
"""
Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching.
"""
if isinstance(value, Callable):
if autocall:
value = _InjectableFuncWrapper(name, value, cache=cache, cache_scope=cache_scope)
# clear any cached data from a previously registered value
value.clear_cached() # depends on [control=['if'], data=[]]
elif not autocall and memoize:
value = _memoize_function(value, name, cache_scope=cache_scope) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
logger.debug('registering injectable {!r}'.format(name))
_INJECTABLES[name] = value |
def load_from_json(file_path):
"""Load the stored data from json, and return as a dict."""
if os.path.exists(file_path):
raw_data = open(file_path, 'rb').read()
return json.loads(base64.decodestring(raw_data).decode('utf-8')) | def function[load_from_json, parameter[file_path]]:
constant[Load the stored data from json, and return as a dict.]
if call[name[os].path.exists, parameter[name[file_path]]] begin[:]
variable[raw_data] assign[=] call[call[name[open], parameter[name[file_path], constant[rb]]].read, parameter[]]
return[call[name[json].loads, parameter[call[call[name[base64].decodestring, parameter[name[raw_data]]].decode, parameter[constant[utf-8]]]]]] | keyword[def] identifier[load_from_json] ( identifier[file_path] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_path] ):
identifier[raw_data] = identifier[open] ( identifier[file_path] , literal[string] ). identifier[read] ()
keyword[return] identifier[json] . identifier[loads] ( identifier[base64] . identifier[decodestring] ( identifier[raw_data] ). identifier[decode] ( literal[string] )) | def load_from_json(file_path):
"""Load the stored data from json, and return as a dict."""
if os.path.exists(file_path):
raw_data = open(file_path, 'rb').read()
return json.loads(base64.decodestring(raw_data).decode('utf-8')) # depends on [control=['if'], data=[]] |
def lincomb(self, a, x1, b=None, x2=None, out=None):
"""Implement ``out[:] = a * x1 + b * x2``.
This function implements
``out[:] = a * x1``
or, if ``b`` and ``x2`` are given,
``out = a * x1 + b * x2``.
Parameters
----------
a : `field` element
Scalar to multiply ``x1`` with.
x1 : `LinearSpaceElement`
First space element in the linear combination.
b : `field` element, optional
Scalar to multiply ``x2`` with. Required if ``x2`` is
provided.
x2 : `LinearSpaceElement`, optional
Second space element in the linear combination.
out : `LinearSpaceElement`, optional
Element to which the result is written.
Returns
-------
out : `LinearSpaceElement`
Result of the linear combination. If ``out`` was provided,
the returned object is a reference to it.
Notes
-----
The elements ``out``, ``x1`` and ``x2`` may be aligned, thus a call
``space.lincomb(2, x, 3.14, x, out=x)``
is (mathematically) equivalent to
``x = x * (2 + 3.14)``.
"""
if out is None:
out = self.element()
elif out not in self:
raise LinearSpaceTypeError('`out` {!r} is not an element of {!r}'
''.format(out, self))
if self.field is not None and a not in self.field:
raise LinearSpaceTypeError('`a` {!r} not an element of the field '
'{!r} of {!r}'
''.format(a, self.field, self))
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of {!r}'
''.format(x1, self))
if b is None: # Single element
if x2 is not None:
raise ValueError('`x2` provided but not `b`')
self._lincomb(a, x1, 0, x1, out)
return out
else: # Two elements
if self.field is not None and b not in self.field:
raise LinearSpaceTypeError('`b` {!r} not an element of the '
'field {!r} of {!r}'
''.format(b, self.field, self))
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of '
'{!r}'.format(x2, self))
self._lincomb(a, x1, b, x2, out)
return out | def function[lincomb, parameter[self, a, x1, b, x2, out]]:
constant[Implement ``out[:] = a * x1 + b * x2``.
This function implements
``out[:] = a * x1``
or, if ``b`` and ``x2`` are given,
``out = a * x1 + b * x2``.
Parameters
----------
a : `field` element
Scalar to multiply ``x1`` with.
x1 : `LinearSpaceElement`
First space element in the linear combination.
b : `field` element, optional
Scalar to multiply ``x2`` with. Required if ``x2`` is
provided.
x2 : `LinearSpaceElement`, optional
Second space element in the linear combination.
out : `LinearSpaceElement`, optional
Element to which the result is written.
Returns
-------
out : `LinearSpaceElement`
Result of the linear combination. If ``out`` was provided,
the returned object is a reference to it.
Notes
-----
The elements ``out``, ``x1`` and ``x2`` may be aligned, thus a call
``space.lincomb(2, x, 3.14, x, out=x)``
is (mathematically) equivalent to
``x = x * (2 + 3.14)``.
]
if compare[name[out] is constant[None]] begin[:]
variable[out] assign[=] call[name[self].element, parameter[]]
if <ast.BoolOp object at 0x7da1b1ea00a0> begin[:]
<ast.Raise object at 0x7da1b1ea3160>
if compare[name[x1] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
<ast.Raise object at 0x7da1b1ea2140>
if compare[name[b] is constant[None]] begin[:]
if compare[name[x2] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1ea27d0>
call[name[self]._lincomb, parameter[name[a], name[x1], constant[0], name[x1], name[out]]]
return[name[out]]
return[name[out]] | keyword[def] identifier[lincomb] ( identifier[self] , identifier[a] , identifier[x1] , identifier[b] = keyword[None] , identifier[x2] = keyword[None] , identifier[out] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[self] . identifier[element] ()
keyword[elif] identifier[out] keyword[not] keyword[in] identifier[self] :
keyword[raise] identifier[LinearSpaceTypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[out] , identifier[self] ))
keyword[if] identifier[self] . identifier[field] keyword[is] keyword[not] keyword[None] keyword[and] identifier[a] keyword[not] keyword[in] identifier[self] . identifier[field] :
keyword[raise] identifier[LinearSpaceTypeError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[a] , identifier[self] . identifier[field] , identifier[self] ))
keyword[if] identifier[x1] keyword[not] keyword[in] identifier[self] :
keyword[raise] identifier[LinearSpaceTypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[x1] , identifier[self] ))
keyword[if] identifier[b] keyword[is] keyword[None] :
keyword[if] identifier[x2] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_lincomb] ( identifier[a] , identifier[x1] , literal[int] , identifier[x1] , identifier[out] )
keyword[return] identifier[out]
keyword[else] :
keyword[if] identifier[self] . identifier[field] keyword[is] keyword[not] keyword[None] keyword[and] identifier[b] keyword[not] keyword[in] identifier[self] . identifier[field] :
keyword[raise] identifier[LinearSpaceTypeError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[b] , identifier[self] . identifier[field] , identifier[self] ))
keyword[if] identifier[x2] keyword[not] keyword[in] identifier[self] :
keyword[raise] identifier[LinearSpaceTypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[x2] , identifier[self] ))
identifier[self] . identifier[_lincomb] ( identifier[a] , identifier[x1] , identifier[b] , identifier[x2] , identifier[out] )
keyword[return] identifier[out] | def lincomb(self, a, x1, b=None, x2=None, out=None):
"""Implement ``out[:] = a * x1 + b * x2``.
This function implements
``out[:] = a * x1``
or, if ``b`` and ``x2`` are given,
``out = a * x1 + b * x2``.
Parameters
----------
a : `field` element
Scalar to multiply ``x1`` with.
x1 : `LinearSpaceElement`
First space element in the linear combination.
b : `field` element, optional
Scalar to multiply ``x2`` with. Required if ``x2`` is
provided.
x2 : `LinearSpaceElement`, optional
Second space element in the linear combination.
out : `LinearSpaceElement`, optional
Element to which the result is written.
Returns
-------
out : `LinearSpaceElement`
Result of the linear combination. If ``out`` was provided,
the returned object is a reference to it.
Notes
-----
The elements ``out``, ``x1`` and ``x2`` may be aligned, thus a call
``space.lincomb(2, x, 3.14, x, out=x)``
is (mathematically) equivalent to
``x = x * (2 + 3.14)``.
"""
if out is None:
out = self.element() # depends on [control=['if'], data=['out']]
elif out not in self:
raise LinearSpaceTypeError('`out` {!r} is not an element of {!r}'.format(out, self)) # depends on [control=['if'], data=['out', 'self']]
if self.field is not None and a not in self.field:
raise LinearSpaceTypeError('`a` {!r} not an element of the field {!r} of {!r}'.format(a, self.field, self)) # depends on [control=['if'], data=[]]
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of {!r}'.format(x1, self)) # depends on [control=['if'], data=['x1', 'self']]
if b is None: # Single element
if x2 is not None:
raise ValueError('`x2` provided but not `b`') # depends on [control=['if'], data=[]]
self._lincomb(a, x1, 0, x1, out)
return out # depends on [control=['if'], data=[]]
else: # Two elements
if self.field is not None and b not in self.field:
raise LinearSpaceTypeError('`b` {!r} not an element of the field {!r} of {!r}'.format(b, self.field, self)) # depends on [control=['if'], data=[]]
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of {!r}'.format(x2, self)) # depends on [control=['if'], data=['x2', 'self']]
self._lincomb(a, x1, b, x2, out)
return out |
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None | def function[set_seq1, parameter[self, a]]:
constant[Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
]
if compare[name[a] is name[self].a] begin[:]
return[None]
name[self].a assign[=] name[a]
name[self].matching_blocks assign[=] constant[None] | keyword[def] identifier[set_seq1] ( identifier[self] , identifier[a] ):
literal[string]
keyword[if] identifier[a] keyword[is] identifier[self] . identifier[a] :
keyword[return]
identifier[self] . identifier[a] = identifier[a]
identifier[self] . identifier[matching_blocks] = identifier[self] . identifier[opcodes] = keyword[None] | def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return # depends on [control=['if'], data=[]]
self.a = a
self.matching_blocks = self.opcodes = None |
def try_get_dn_string(subject, shorten=False):
"""
Returns DN as a string
:param subject:
:param shorten:
:return:
"""
try:
from cryptography.x509.oid import NameOID
from cryptography.x509 import ObjectIdentifier
oid_names = {
getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN",
getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C",
getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L",
getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST",
getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St",
getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O",
getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU",
getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN",
getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID",
getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent",
getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress",
getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP",
}
ret = []
try:
for attribute in subject:
oid = attribute.oid
dot = oid.dotted_string
oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name
val = attribute.value
ret.append('%s: %s' % (oid_name, val))
except:
pass
return ', '.join(ret)
except Exception as e:
logger.warning('Unexpected error: %s' % e)
return 'N/A' | def function[try_get_dn_string, parameter[subject, shorten]]:
constant[
Returns DN as a string
:param subject:
:param shorten:
:return:
]
<ast.Try object at 0x7da2047e98a0> | keyword[def] identifier[try_get_dn_string] ( identifier[subject] , identifier[shorten] = keyword[False] ):
literal[string]
keyword[try] :
keyword[from] identifier[cryptography] . identifier[x509] . identifier[oid] keyword[import] identifier[NameOID]
keyword[from] identifier[cryptography] . identifier[x509] keyword[import] identifier[ObjectIdentifier]
identifier[oid_names] ={
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
identifier[getattr] ( identifier[NameOID] , literal[string] , identifier[ObjectIdentifier] ( literal[string] )): literal[string] ,
}
identifier[ret] =[]
keyword[try] :
keyword[for] identifier[attribute] keyword[in] identifier[subject] :
identifier[oid] = identifier[attribute] . identifier[oid]
identifier[dot] = identifier[oid] . identifier[dotted_string]
identifier[oid_name] = identifier[oid_names] [ identifier[oid] ] keyword[if] identifier[shorten] keyword[and] identifier[oid] keyword[in] identifier[oid_names] keyword[else] identifier[oid] . identifier[_name]
identifier[val] = identifier[attribute] . identifier[value]
identifier[ret] . identifier[append] ( literal[string] %( identifier[oid_name] , identifier[val] ))
keyword[except] :
keyword[pass]
keyword[return] literal[string] . identifier[join] ( identifier[ret] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( literal[string] % identifier[e] )
keyword[return] literal[string] | def try_get_dn_string(subject, shorten=False):
"""
Returns DN as a string
:param subject:
:param shorten:
:return:
"""
try:
from cryptography.x509.oid import NameOID
from cryptography.x509 import ObjectIdentifier
oid_names = {getattr(NameOID, 'COMMON_NAME', ObjectIdentifier('2.5.4.3')): 'CN', getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier('2.5.4.6')): 'C', getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier('2.5.4.7')): 'L', getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier('2.5.4.8')): 'ST', getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier('2.5.4.9')): 'St', getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier('2.5.4.10')): 'O', getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier('2.5.4.11')): 'OU', getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier('2.5.4.5')): 'SN', getattr(NameOID, 'USER_ID', ObjectIdentifier('0.9.2342.19200300.100.1.1')): 'userID', getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier('0.9.2342.19200300.100.1.25')): 'domainComponent', getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier('1.2.840.113549.1.9.1')): 'emailAddress', getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier('2.5.4.17')): 'ZIP'}
ret = []
try:
for attribute in subject:
oid = attribute.oid
dot = oid.dotted_string
oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name
val = attribute.value
ret.append('%s: %s' % (oid_name, val)) # depends on [control=['for'], data=['attribute']] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
return ', '.join(ret) # depends on [control=['try'], data=[]]
except Exception as e:
logger.warning('Unexpected error: %s' % e)
return 'N/A' # depends on [control=['except'], data=['e']] |
def verify(self):
    """
    Verify that the information gathered from the on-the-wire
    representation is of the right type.
    This is supposed to be run before the info is deserialized.
    :return: True/False
    """
    for attr_name in self.longs:
        value = getattr(self, attr_name)
        # Missing values and already-decoded strings pass through untouched.
        if not value or isinstance(value, str):
            continue
        if isinstance(value, bytes):
            value = value.decode('utf-8')
            setattr(self, attr_name, value)
        try:
            base64url_to_long(value)
        except Exception:
            return False
        # base64url must not contain the regular-base64 alphabet extras.
        if any(ch in value for ch in '+/='):
            return False
    if self.kid and not isinstance(self.kid, str):
        raise ValueError("kid of wrong value type")
    return True
constant[
Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
:return: True/False
]
for taget[name[param]] in starred[name[self].longs] begin[:]
variable[item] assign[=] call[name[getattr], parameter[name[self], name[param]]]
if <ast.BoolOp object at 0x7da1b05c48e0> begin[:]
continue
if call[name[isinstance], parameter[name[item], name[bytes]]] begin[:]
variable[item] assign[=] call[name[item].decode, parameter[constant[utf-8]]]
call[name[setattr], parameter[name[self], name[param], name[item]]]
<ast.Try object at 0x7da1b05bcd60>
if name[self].kid begin[:]
if <ast.UnaryOp object at 0x7da1b05beda0> begin[:]
<ast.Raise object at 0x7da1b0432860>
return[constant[True]] | keyword[def] identifier[verify] ( identifier[self] ):
literal[string]
keyword[for] identifier[param] keyword[in] identifier[self] . identifier[longs] :
identifier[item] = identifier[getattr] ( identifier[self] , identifier[param] )
keyword[if] keyword[not] identifier[item] keyword[or] identifier[isinstance] ( identifier[item] , identifier[str] ):
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[bytes] ):
identifier[item] = identifier[item] . identifier[decode] ( literal[string] )
identifier[setattr] ( identifier[self] , identifier[param] , identifier[item] )
keyword[try] :
identifier[_] = identifier[base64url_to_long] ( identifier[item] )
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[else] :
keyword[if] [ identifier[e] keyword[for] identifier[e] keyword[in] [ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[e] keyword[in] identifier[item] ]:
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[kid] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[kid] , identifier[str] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] keyword[True] | def verify(self):
"""
Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
:return: True/False
"""
for param in self.longs:
item = getattr(self, param)
if not item or isinstance(item, str):
continue # depends on [control=['if'], data=[]]
if isinstance(item, bytes):
item = item.decode('utf-8')
setattr(self, param, item) # depends on [control=['if'], data=[]]
try:
_ = base64url_to_long(item) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]]
else:
if [e for e in ['+', '/', '='] if e in item]:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param']]
if self.kid:
if not isinstance(self.kid, str):
raise ValueError('kid of wrong value type') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
def load_remote_db(self):
        """
        Load remote S3 DB

        Downloads the SQLite database object from S3 into ``/tmp`` when the
        local copy is missing or stale, then repoints
        ``settings_dict['NAME']`` at the local path so the SQLite backend
        opens that file.

        Side effects:
          - writes ``/tmp/<NAME>`` with the object's bytes (skipped via HTTP
            304 when the remote ETag matches the md5 of an existing copy)
          - sets ``self.db_hash`` to the md5 hex digest of the local file
          - on first call, saves the original name in
            ``settings_dict['REMOTE_NAME']`` and prefixes ``NAME`` with
            ``/tmp/``
          - creates an empty local file if nothing could be downloaded
        """
        signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
        s3 = boto3.resource(
            's3',
            config=botocore.client.Config(signature_version=signature_version),
        )
        # Only attempt a download while NAME is still the remote key, i.e.
        # before this method has rewritten it to the /tmp path.
        if '/tmp/' not in self.settings_dict['NAME']:
            try:
                etag = ''
                if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
                    m = hashlib.md5()
                    with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                        m.update(f.read())
                    # In general the ETag is the md5 of the file, in some cases it's not,
                    # and in that case we will just need to reload the file, I don't see any other way
                    etag = m.hexdigest()
                obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
                # NOTE(review): S3 ETags are normally quoted; passing a bare hex
                # digest as If-None-Match presumably still yields the 304 path
                # when current -- confirm against the bucket in use.
                obj_bytes = obj.get(IfNoneMatch=etag)["Body"]  # Will throw E on 304 or 404
                with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
                    f.write(obj_bytes.read())
                # Re-hash the freshly written copy so later calls can detect staleness.
                m = hashlib.md5()
                with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                    m.update(f.read())
                self.db_hash = m.hexdigest()
            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == "304":
                    # Not Modified: the existing local copy is already current.
                    logging.debug("ETag matches md5 of local copy, using local copy of DB!")
                    self.db_hash = etag
                else:
                    logging.debug("Couldn't load remote DB object.")
            except Exception as e:
                # Weird one -- swallowed so we still fall through to the
                # empty-file fallback below.
                logging.debug(e)
        # SQLite DatabaseWrapper will treat our tmp as normal now
        # Check because Django likes to call this function a lot more than it should
        if '/tmp/' not in self.settings_dict['NAME']:
            self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
            self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
        # Make sure it exists if it doesn't yet
        if not os.path.isfile(self.settings_dict['NAME']):
            open(self.settings_dict['NAME'], 'a').close()
        logging.debug("Loaded remote DB!")
constant[
Load remote S3 DB
]
variable[signature_version] assign[=] call[name[self].settings_dict.get, parameter[constant[SIGNATURE_VERSION], constant[s3v4]]]
variable[s3] assign[=] call[name[boto3].resource, parameter[constant[s3]]]
if compare[constant[/tmp/] <ast.NotIn object at 0x7da2590d7190> call[name[self].settings_dict][constant[NAME]]] begin[:]
<ast.Try object at 0x7da2049629b0>
if compare[constant[/tmp/] <ast.NotIn object at 0x7da2590d7190> call[name[self].settings_dict][constant[NAME]]] begin[:]
call[name[self].settings_dict][constant[REMOTE_NAME]] assign[=] call[name[self].settings_dict][constant[NAME]]
call[name[self].settings_dict][constant[NAME]] assign[=] binary_operation[constant[/tmp/] + call[name[self].settings_dict][constant[NAME]]]
if <ast.UnaryOp object at 0x7da1b0653b50> begin[:]
call[call[name[open], parameter[call[name[self].settings_dict][constant[NAME]], constant[a]]].close, parameter[]]
call[name[logging].debug, parameter[constant[Loaded remote DB!]]] | keyword[def] identifier[load_remote_db] ( identifier[self] ):
literal[string]
identifier[signature_version] = identifier[self] . identifier[settings_dict] . identifier[get] ( literal[string] , literal[string] )
identifier[s3] = identifier[boto3] . identifier[resource] (
literal[string] ,
identifier[config] = identifier[botocore] . identifier[client] . identifier[Config] ( identifier[signature_version] = identifier[signature_version] ),
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[settings_dict] [ literal[string] ]:
keyword[try] :
identifier[etag] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( literal[string] + identifier[self] . identifier[settings_dict] [ literal[string] ]):
identifier[m] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[open] ( literal[string] + identifier[self] . identifier[settings_dict] [ literal[string] ], literal[string] ) keyword[as] identifier[f] :
identifier[m] . identifier[update] ( identifier[f] . identifier[read] ())
identifier[etag] = identifier[m] . identifier[hexdigest] ()
identifier[obj] = identifier[s3] . identifier[Object] ( identifier[self] . identifier[settings_dict] [ literal[string] ], identifier[self] . identifier[settings_dict] [ literal[string] ])
identifier[obj_bytes] = identifier[obj] . identifier[get] ( identifier[IfNoneMatch] = identifier[etag] )[ literal[string] ]
keyword[with] identifier[open] ( literal[string] + identifier[self] . identifier[settings_dict] [ literal[string] ], literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[obj_bytes] . identifier[read] ())
identifier[m] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[open] ( literal[string] + identifier[self] . identifier[settings_dict] [ literal[string] ], literal[string] ) keyword[as] identifier[f] :
identifier[m] . identifier[update] ( identifier[f] . identifier[read] ())
identifier[self] . identifier[db_hash] = identifier[m] . identifier[hexdigest] ()
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]== literal[string] :
identifier[logging] . identifier[debug] ( literal[string] )
identifier[self] . identifier[db_hash] = identifier[etag]
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[debug] ( identifier[e] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[settings_dict] [ literal[string] ]:
identifier[self] . identifier[settings_dict] [ literal[string] ]= identifier[self] . identifier[settings_dict] [ literal[string] ]
identifier[self] . identifier[settings_dict] [ literal[string] ]= literal[string] + identifier[self] . identifier[settings_dict] [ literal[string] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[settings_dict] [ literal[string] ]):
identifier[open] ( identifier[self] . identifier[settings_dict] [ literal[string] ], literal[string] ). identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] ) | def load_remote_db(self):
"""
Load remote S3 DB
"""
signature_version = self.settings_dict.get('SIGNATURE_VERSION', 's3v4')
s3 = boto3.resource('s3', config=botocore.client.Config(signature_version=signature_version))
if '/tmp/' not in self.settings_dict['NAME']:
try:
etag = ''
if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
m = hashlib.md5()
with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
m.update(f.read()) # depends on [control=['with'], data=['f']]
# In general the ETag is the md5 of the file, in some cases it's not,
# and in that case we will just need to reload the file, I don't see any other way
etag = m.hexdigest() # depends on [control=['if'], data=[]]
obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
obj_bytes = obj.get(IfNoneMatch=etag)['Body'] # Will throw E on 304 or 404
with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
f.write(obj_bytes.read()) # depends on [control=['with'], data=['f']]
m = hashlib.md5()
with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
m.update(f.read()) # depends on [control=['with'], data=['f']]
self.db_hash = m.hexdigest() # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == '304':
logging.debug('ETag matches md5 of local copy, using local copy of DB!')
self.db_hash = etag # depends on [control=['if'], data=[]]
else:
logging.debug("Couldn't load remote DB object.") # depends on [control=['except'], data=['e']]
except Exception as e:
# Weird one
logging.debug(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
# SQLite DatabaseWrapper will treat our tmp as normal now
# Check because Django likes to call this function a lot more than it should
if '/tmp/' not in self.settings_dict['NAME']:
self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME'] # depends on [control=['if'], data=[]]
# Make sure it exists if it doesn't yet
if not os.path.isfile(self.settings_dict['NAME']):
open(self.settings_dict['NAME'], 'a').close() # depends on [control=['if'], data=[]]
logging.debug('Loaded remote DB!') |
def uninstall(self):
    """
    Remove agent's files from remote host

    Asks the remote agent to stop, collects its log and raw data files
    locally, removes the remote folder, and finally kills the process.
    Returns the local (log, data) file names.
    """
    local_log = "agent_{host}.log".format(host=self.host)
    local_data = "agent_{host}.rawdata".format(host=self.host)
    session_ok = True
    try:
        if self.session:
            self.session.send("stop\n")
            self.session.close()
            self.session = None
    except BaseException:
        # Broken session: skip artefact collection, just warn and kill.
        session_ok = False
        logger.warning(
            'Unable to correctly stop monitoring agent - session is broken. Pay attention to agent log (%s).',
            local_log,
            exc_info=True)
    if session_ok:
        remote_folder = self.path['AGENT_REMOTE_FOLDER']
        try:
            self.ssh.get_file(
                os.path.join(remote_folder, "_agent.log"), local_log)
            self.ssh.get_file(
                os.path.join(remote_folder, "monitoring.rawdata"), local_data)
            self.ssh.rm_r(remote_folder)
        except Exception:
            logger.error("Unable to get agent artefacts", exc_info=True)
    self._kill_agent()
    return local_log, local_data
constant[
Remove agent's files from remote host
]
variable[log_filename] assign[=] call[constant[agent_{host}.log].format, parameter[]]
variable[data_filename] assign[=] call[constant[agent_{host}.rawdata].format, parameter[]]
<ast.Try object at 0x7da1b059d150>
call[name[self]._kill_agent, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b03a6080>, <ast.Name object at 0x7da1b03a51b0>]]] | keyword[def] identifier[uninstall] ( identifier[self] ):
literal[string]
identifier[log_filename] = literal[string] . identifier[format] ( identifier[host] = identifier[self] . identifier[host] )
identifier[data_filename] = literal[string] . identifier[format] ( identifier[host] = identifier[self] . identifier[host] )
keyword[try] :
keyword[if] identifier[self] . identifier[session] :
identifier[self] . identifier[session] . identifier[send] ( literal[string] )
identifier[self] . identifier[session] . identifier[close] ()
identifier[self] . identifier[session] = keyword[None]
keyword[except] identifier[BaseException] :
identifier[logger] . identifier[warning] (
literal[string] ,
identifier[log_filename] ,
identifier[exc_info] = keyword[True] )
keyword[else] :
keyword[try] :
identifier[self] . identifier[ssh] . identifier[get_file] (
identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[path] [ literal[string] ],
literal[string] ),
identifier[log_filename] )
identifier[self] . identifier[ssh] . identifier[get_file] (
identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[path] [ literal[string] ],
literal[string] ),
identifier[data_filename] )
identifier[self] . identifier[ssh] . identifier[rm_r] ( identifier[self] . identifier[path] [ literal[string] ])
keyword[except] identifier[Exception] :
identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] )
identifier[self] . identifier[_kill_agent] ()
keyword[return] identifier[log_filename] , identifier[data_filename] | def uninstall(self):
"""
Remove agent's files from remote host
"""
log_filename = 'agent_{host}.log'.format(host=self.host)
data_filename = 'agent_{host}.rawdata'.format(host=self.host)
try:
if self.session:
self.session.send('stop\n')
self.session.close()
self.session = None # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except BaseException:
logger.warning('Unable to correctly stop monitoring agent - session is broken. Pay attention to agent log (%s).', log_filename, exc_info=True) # depends on [control=['except'], data=[]]
else:
try:
self.ssh.get_file(os.path.join(self.path['AGENT_REMOTE_FOLDER'], '_agent.log'), log_filename)
self.ssh.get_file(os.path.join(self.path['AGENT_REMOTE_FOLDER'], 'monitoring.rawdata'), data_filename)
self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER']) # depends on [control=['try'], data=[]]
except Exception:
logger.error('Unable to get agent artefacts', exc_info=True) # depends on [control=['except'], data=[]]
self._kill_agent()
return (log_filename, data_filename) |
def plotres(psr, deleted=False, group=None, **kwargs):
    """Plot residuals, compute unweighted rms residual."""
    res = psr.residuals()
    t = psr.toas()
    errs = psr.toaerrs
    # Unless asked to keep them, drop TOAs flagged as deleted.
    if not deleted and N.any(psr.deleted != 0):
        keep = psr.deleted == 0
        res, t, errs = res[keep], t[keep], errs[keep]
        print("Plotting {0}/{1} nondeleted points.".format(len(res), psr.nobs))
    # rms residual in microseconds.
    meanres = math.sqrt(N.mean(res**2)) / 1e-6
    if group is None:
        order = N.argsort(t)
        P.errorbar(t[order], res[order]/1e-6, yerr=errs[order], fmt='x', **kwargs)
    else:
        # One errorbar series per distinct value of the grouping flag.
        flagmask = psr.flagvals(group)
        if not deleted and N.any(psr.deleted):
            flagmask = flagmask[~psr.deleted]
        unique = list(set(flagmask))
        for flagval in unique:
            sel = flagmask == flagval
            order = N.argsort(t[sel])
            P.errorbar(t[sel][order], res[sel][order]/1e-6,
                       yerr=errs[sel][order], fmt='x', **kwargs)
        P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))
    P.xlabel('MJD')
    P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name, meanres))
constant[Plot residuals, compute unweighted rms residual.]
<ast.Tuple object at 0x7da2047e8c40> assign[=] tuple[[<ast.Call object at 0x7da2047e8340>, <ast.Call object at 0x7da2047e8640>, <ast.Attribute object at 0x7da2047eb820>]]
if <ast.BoolOp object at 0x7da2047ea950> begin[:]
<ast.Tuple object at 0x7da2047e9540> assign[=] tuple[[<ast.Subscript object at 0x7da2047eada0>, <ast.Subscript object at 0x7da2047e8be0>, <ast.Subscript object at 0x7da2047eb520>]]
call[name[print], parameter[call[constant[Plotting {0}/{1} nondeleted points.].format, parameter[call[name[len], parameter[name[res]]], name[psr].nobs]]]]
variable[meanres] assign[=] binary_operation[call[name[math].sqrt, parameter[call[name[N].mean, parameter[binary_operation[name[res] ** constant[2]]]]]] / constant[1e-06]]
if compare[name[group] is constant[None]] begin[:]
variable[i] assign[=] call[name[N].argsort, parameter[name[t]]]
call[name[P].errorbar, parameter[call[name[t]][name[i]], binary_operation[call[name[res]][name[i]] / constant[1e-06]]]]
call[name[P].xlabel, parameter[constant[MJD]]]
call[name[P].ylabel, parameter[constant[res [us]]]]
call[name[P].title, parameter[call[constant[{0} - rms res = {1:.2f} us].format, parameter[name[psr].name, name[meanres]]]]] | keyword[def] identifier[plotres] ( identifier[psr] , identifier[deleted] = keyword[False] , identifier[group] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[res] , identifier[t] , identifier[errs] = identifier[psr] . identifier[residuals] (), identifier[psr] . identifier[toas] (), identifier[psr] . identifier[toaerrs]
keyword[if] ( keyword[not] identifier[deleted] ) keyword[and] identifier[N] . identifier[any] ( identifier[psr] . identifier[deleted] != literal[int] ):
identifier[res] , identifier[t] , identifier[errs] = identifier[res] [ identifier[psr] . identifier[deleted] == literal[int] ], identifier[t] [ identifier[psr] . identifier[deleted] == literal[int] ], identifier[errs] [ identifier[psr] . identifier[deleted] == literal[int] ]
identifier[print] ( literal[string] . identifier[format] ( identifier[len] ( identifier[res] ), identifier[psr] . identifier[nobs] ))
identifier[meanres] = identifier[math] . identifier[sqrt] ( identifier[N] . identifier[mean] ( identifier[res] ** literal[int] ))/ literal[int]
keyword[if] identifier[group] keyword[is] keyword[None] :
identifier[i] = identifier[N] . identifier[argsort] ( identifier[t] )
identifier[P] . identifier[errorbar] ( identifier[t] [ identifier[i] ], identifier[res] [ identifier[i] ]/ literal[int] , identifier[yerr] = identifier[errs] [ identifier[i] ], identifier[fmt] = literal[string] ,** identifier[kwargs] )
keyword[else] :
keyword[if] ( keyword[not] identifier[deleted] ) keyword[and] identifier[N] . identifier[any] ( identifier[psr] . identifier[deleted] ):
identifier[flagmask] = identifier[psr] . identifier[flagvals] ( identifier[group] )[~ identifier[psr] . identifier[deleted] ]
keyword[else] :
identifier[flagmask] = identifier[psr] . identifier[flagvals] ( identifier[group] )
identifier[unique] = identifier[list] ( identifier[set] ( identifier[flagmask] ))
keyword[for] identifier[flagval] keyword[in] identifier[unique] :
identifier[f] =( identifier[flagmask] == identifier[flagval] )
identifier[flagres] , identifier[flagt] , identifier[flagerrs] = identifier[res] [ identifier[f] ], identifier[t] [ identifier[f] ], identifier[errs] [ identifier[f] ]
identifier[i] = identifier[N] . identifier[argsort] ( identifier[flagt] )
identifier[P] . identifier[errorbar] ( identifier[flagt] [ identifier[i] ], identifier[flagres] [ identifier[i] ]/ literal[int] , identifier[yerr] = identifier[flagerrs] [ identifier[i] ], identifier[fmt] = literal[string] ,** identifier[kwargs] )
identifier[P] . identifier[legend] ( identifier[unique] , identifier[numpoints] = literal[int] , identifier[bbox_to_anchor] =( literal[int] , literal[int] ))
identifier[P] . identifier[xlabel] ( literal[string] ); identifier[P] . identifier[ylabel] ( literal[string] )
identifier[P] . identifier[title] ( literal[string] . identifier[format] ( identifier[psr] . identifier[name] , identifier[meanres] )) | def plotres(psr, deleted=False, group=None, **kwargs):
"""Plot residuals, compute unweighted rms residual."""
(res, t, errs) = (psr.residuals(), psr.toas(), psr.toaerrs)
if not deleted and N.any(psr.deleted != 0):
(res, t, errs) = (res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0])
print('Plotting {0}/{1} nondeleted points.'.format(len(res), psr.nobs)) # depends on [control=['if'], data=[]]
meanres = math.sqrt(N.mean(res ** 2)) / 1e-06
if group is None:
i = N.argsort(t)
P.errorbar(t[i], res[i] / 1e-06, yerr=errs[i], fmt='x', **kwargs) # depends on [control=['if'], data=[]]
else:
if not deleted and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted] # depends on [control=['if'], data=[]]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = flagmask == flagval
(flagres, flagt, flagerrs) = (res[f], t[f], errs[f])
i = N.argsort(flagt)
P.errorbar(flagt[i], flagres[i] / 1e-06, yerr=flagerrs[i], fmt='x', **kwargs) # depends on [control=['for'], data=['flagval']]
P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))
P.xlabel('MJD')
P.ylabel('res [us]')
P.title('{0} - rms res = {1:.2f} us'.format(psr.name, meanres)) |
def proc_atom(self):
    """Processes an "ATOM" or "HETATM" record."""
    record = self.proc_line_coordinate(self.current_line)
    (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
     i_code, x, y, z, occupancy, temp_factor, element, charge) = record
    # Branch of the parse tree for the currently active state.
    state_branch = self.pdb_parse_tree['data'][self.state]
    res_id = (res_seq, i_code)
    # Each tree level is a (labels-set, children-OrderedDict) pair.
    chain_labels, chain_residues = state_branch.setdefault(
        chain_id, (set(), OrderedDict()))
    res_labels, res_atoms = chain_residues.setdefault(
        res_id, (set(), OrderedDict()))
    # Classify the record: heteroatom, polymeric (standard residue) or not.
    if at_type != 'ATOM':
        poly = 'H'
    elif res_name in standard_amino_acids.values():
        poly = 'P'
    else:
        poly = 'N'
    chain_labels.add((chain_id, at_type, poly))
    res_labels.add((at_type, res_seq, res_name, i_code))
    # Alt-loc records share a serial number, hence a list per at_ser.
    res_atoms.setdefault(at_ser, []).append(record)
    return
constant[Processes an "ATOM" or "HETATM" record.]
variable[atom_data] assign[=] call[name[self].proc_line_coordinate, parameter[name[self].current_line]]
<ast.Tuple object at 0x7da1b26486a0> assign[=] name[atom_data]
variable[a_state] assign[=] call[call[name[self].pdb_parse_tree][constant[data]]][name[self].state]
variable[res_id] assign[=] tuple[[<ast.Name object at 0x7da1b26483d0>, <ast.Name object at 0x7da1b2649300>]]
if compare[name[chain_id] <ast.NotIn object at 0x7da2590d7190> name[a_state]] begin[:]
call[name[a_state]][name[chain_id]] assign[=] tuple[[<ast.Call object at 0x7da1b264ab30>, <ast.Call object at 0x7da1b2649600>]]
if compare[name[res_id] <ast.NotIn object at 0x7da2590d7190> call[call[name[a_state]][name[chain_id]]][constant[1]]] begin[:]
call[call[call[name[a_state]][name[chain_id]]][constant[1]]][name[res_id]] assign[=] tuple[[<ast.Call object at 0x7da1b2649990>, <ast.Call object at 0x7da1b2649030>]]
if compare[name[at_type] equal[==] constant[ATOM]] begin[:]
if compare[name[res_name] in call[name[standard_amino_acids].values, parameter[]]] begin[:]
variable[poly] assign[=] constant[P]
call[call[call[name[a_state]][name[chain_id]]][constant[0]].add, parameter[tuple[[<ast.Name object at 0x7da1b2648d30>, <ast.Name object at 0x7da1b2648820>, <ast.Name object at 0x7da1b264aad0>]]]]
call[call[call[call[call[name[a_state]][name[chain_id]]][constant[1]]][name[res_id]]][constant[0]].add, parameter[tuple[[<ast.Name object at 0x7da1b2648d90>, <ast.Name object at 0x7da1b2649330>, <ast.Name object at 0x7da1b26486d0>, <ast.Name object at 0x7da1b264a740>]]]]
if compare[name[at_ser] <ast.NotIn object at 0x7da2590d7190> call[call[call[call[name[a_state]][name[chain_id]]][constant[1]]][name[res_id]]][constant[1]]] begin[:]
call[call[call[call[call[name[a_state]][name[chain_id]]][constant[1]]][name[res_id]]][constant[1]]][name[at_ser]] assign[=] list[[<ast.Name object at 0x7da1b2648cd0>]]
return[None] | keyword[def] identifier[proc_atom] ( identifier[self] ):
literal[string]
identifier[atom_data] = identifier[self] . identifier[proc_line_coordinate] ( identifier[self] . identifier[current_line] )
( identifier[at_type] , identifier[at_ser] , identifier[at_name] , identifier[alt_loc] , identifier[res_name] , identifier[chain_id] , identifier[res_seq] ,
identifier[i_code] , identifier[x] , identifier[y] , identifier[z] , identifier[occupancy] , identifier[temp_factor] , identifier[element] , identifier[charge] )= identifier[atom_data]
identifier[a_state] = identifier[self] . identifier[pdb_parse_tree] [ literal[string] ][ identifier[self] . identifier[state] ]
identifier[res_id] =( identifier[res_seq] , identifier[i_code] )
keyword[if] identifier[chain_id] keyword[not] keyword[in] identifier[a_state] :
identifier[a_state] [ identifier[chain_id] ]=( identifier[set] (), identifier[OrderedDict] ())
keyword[if] identifier[res_id] keyword[not] keyword[in] identifier[a_state] [ identifier[chain_id] ][ literal[int] ]:
identifier[a_state] [ identifier[chain_id] ][ literal[int] ][ identifier[res_id] ]=( identifier[set] (), identifier[OrderedDict] ())
keyword[if] identifier[at_type] == literal[string] :
keyword[if] identifier[res_name] keyword[in] identifier[standard_amino_acids] . identifier[values] ():
identifier[poly] = literal[string]
keyword[else] :
identifier[poly] = literal[string]
keyword[else] :
identifier[poly] = literal[string]
identifier[a_state] [ identifier[chain_id] ][ literal[int] ]. identifier[add] (( identifier[chain_id] , identifier[at_type] , identifier[poly] ))
identifier[a_state] [ identifier[chain_id] ][ literal[int] ][ identifier[res_id] ][ literal[int] ]. identifier[add] (
( identifier[at_type] , identifier[res_seq] , identifier[res_name] , identifier[i_code] ))
keyword[if] identifier[at_ser] keyword[not] keyword[in] identifier[a_state] [ identifier[chain_id] ][ literal[int] ][ identifier[res_id] ][ literal[int] ]:
identifier[a_state] [ identifier[chain_id] ][ literal[int] ][ identifier[res_id] ][ literal[int] ][ identifier[at_ser] ]=[ identifier[atom_data] ]
keyword[else] :
identifier[a_state] [ identifier[chain_id] ][ literal[int] ][ identifier[res_id] ][ literal[int] ][ identifier[at_ser] ]. identifier[append] ( identifier[atom_data] )
keyword[return] | def proc_atom(self):
"""Processes an "ATOM" or "HETATM" record."""
atom_data = self.proc_line_coordinate(self.current_line)
(at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq, i_code, x, y, z, occupancy, temp_factor, element, charge) = atom_data
# currently active state
a_state = self.pdb_parse_tree['data'][self.state]
res_id = (res_seq, i_code)
if chain_id not in a_state:
a_state[chain_id] = (set(), OrderedDict()) # depends on [control=['if'], data=['chain_id', 'a_state']]
if res_id not in a_state[chain_id][1]:
a_state[chain_id][1][res_id] = (set(), OrderedDict()) # depends on [control=['if'], data=['res_id']]
if at_type == 'ATOM':
if res_name in standard_amino_acids.values():
poly = 'P' # depends on [control=['if'], data=[]]
else:
poly = 'N' # depends on [control=['if'], data=[]]
else:
poly = 'H'
a_state[chain_id][0].add((chain_id, at_type, poly))
a_state[chain_id][1][res_id][0].add((at_type, res_seq, res_name, i_code))
if at_ser not in a_state[chain_id][1][res_id][1]:
a_state[chain_id][1][res_id][1][at_ser] = [atom_data] # depends on [control=['if'], data=['at_ser']]
else:
a_state[chain_id][1][res_id][1][at_ser].append(atom_data)
return |
def expr_code(self, expr):
    """Generate a Python expression for `expr`."""
    if "|" in expr:
        # Filter pipeline: wrap the head expression in each filter call.
        segments = expr.split("|")
        compiled = self.expr_code(segments[0])
        for filter_name in segments[1:]:
            self.all_vars.add(filter_name)
            compiled = "c_%s(%s)" % (filter_name, compiled)
        return compiled
    if "." in expr:
        # Attribute access: defer lookup to the runtime `dot` helper.
        parts = expr.split(".")
        compiled = self.expr_code(parts[0])
        attr_args = ", ".join(repr(p) for p in parts[1:])
        return "dot(%s, %s)" % (compiled, attr_args)
    # Plain variable reference.
    self.all_vars.add(expr)
    return "c_%s" % expr
constant[Generate a Python expression for `expr`.]
if compare[constant[|] in name[expr]] begin[:]
variable[pipes] assign[=] call[name[expr].split, parameter[constant[|]]]
variable[code] assign[=] call[name[self].expr_code, parameter[call[name[pipes]][constant[0]]]]
for taget[name[func]] in starred[call[name[pipes]][<ast.Slice object at 0x7da20e9b3ca0>]] begin[:]
call[name[self].all_vars.add, parameter[name[func]]]
variable[code] assign[=] binary_operation[constant[c_%s(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9b0a30>, <ast.Name object at 0x7da20e9b0670>]]]
return[name[code]] | keyword[def] identifier[expr_code] ( identifier[self] , identifier[expr] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[expr] :
identifier[pipes] = identifier[expr] . identifier[split] ( literal[string] )
identifier[code] = identifier[self] . identifier[expr_code] ( identifier[pipes] [ literal[int] ])
keyword[for] identifier[func] keyword[in] identifier[pipes] [ literal[int] :]:
identifier[self] . identifier[all_vars] . identifier[add] ( identifier[func] )
identifier[code] = literal[string] %( identifier[func] , identifier[code] )
keyword[elif] literal[string] keyword[in] identifier[expr] :
identifier[dots] = identifier[expr] . identifier[split] ( literal[string] )
identifier[code] = identifier[self] . identifier[expr_code] ( identifier[dots] [ literal[int] ])
identifier[args] =[ identifier[repr] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[dots] [ literal[int] :]]
identifier[code] = literal[string] %( identifier[code] , literal[string] . identifier[join] ( identifier[args] ))
keyword[else] :
identifier[self] . identifier[all_vars] . identifier[add] ( identifier[expr] )
identifier[code] = literal[string] % identifier[expr]
keyword[return] identifier[code] | def expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if '|' in expr:
pipes = expr.split('|')
code = self.expr_code(pipes[0])
for func in pipes[1:]:
self.all_vars.add(func)
code = 'c_%s(%s)' % (func, code) # depends on [control=['for'], data=['func']] # depends on [control=['if'], data=['expr']]
elif '.' in expr:
dots = expr.split('.')
code = self.expr_code(dots[0])
args = [repr(d) for d in dots[1:]]
code = 'dot(%s, %s)' % (code, ', '.join(args)) # depends on [control=['if'], data=['expr']]
else:
self.all_vars.add(expr)
code = 'c_%s' % expr
return code |
async def stop(self):
    """Safely shut down this interface.

    Shutdown order matters here: advertising is disabled first so no
    new centrals can connect, remaining connections are cleaned up,
    then the background command/stream workers are stopped before the
    serial port they use is closed. Finally the base class is stopped.
    """
    # Disable advertising (mode 0, 0) so no new connections arrive
    # while we tear down.
    await self._command_task.future_command(['_set_mode', 0, 0])
    # Drop any connections that are still open.
    await self._cleanup_old_connections()
    # Stop the workers before closing the port they read/write.
    self._command_task.stop()
    self._stream.stop()
    self._serial_port.close()
    await super(BLED112Server, self).stop()
literal[string]
keyword[await] identifier[self] . identifier[_command_task] . identifier[future_command] ([ literal[string] , literal[int] , literal[int] ])
keyword[await] identifier[self] . identifier[_cleanup_old_connections] ()
identifier[self] . identifier[_command_task] . identifier[stop] ()
identifier[self] . identifier[_stream] . identifier[stop] ()
identifier[self] . identifier[_serial_port] . identifier[close] ()
keyword[await] identifier[super] ( identifier[BLED112Server] , identifier[self] ). identifier[stop] () | async def stop(self):
"""Safely shut down this interface"""
await self._command_task.future_command(['_set_mode', 0, 0]) # Disable advertising
await self._cleanup_old_connections()
self._command_task.stop()
self._stream.stop()
self._serial_port.close()
await super(BLED112Server, self).stop() |
def __configure_canvas(self, *args):
    """
    Private function to configure the internal Canvas.

    Resizes the canvas so it is exactly as wide as the interior Frame
    requests.

    :param args: Tkinter event (unused; accepted so this method can be
        bound directly to a ``<Configure>`` event callback)
    """
    # Use != rather than `is not`: winfo_reqwidth()/winfo_width() return
    # ints, and identity comparison of ints is unreliable (only small
    # ints are interned by CPython), so `is not` fired on every event
    # for widths outside the interned range and could in principle skip
    # a needed reconfigure.
    if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
        self._canvas.configure(width=self.interior.winfo_reqwidth())
constant[
Private function to configure the internal Canvas.
Changes the width of the canvas to fit the interior Frame
:param args: Tkinter event
]
if compare[call[name[self].interior.winfo_reqwidth, parameter[]] is_not call[name[self]._canvas.winfo_width, parameter[]]] begin[:]
call[name[self]._canvas.configure, parameter[]] | keyword[def] identifier[__configure_canvas] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[self] . identifier[interior] . identifier[winfo_reqwidth] () keyword[is] keyword[not] identifier[self] . identifier[_canvas] . identifier[winfo_width] ():
identifier[self] . identifier[_canvas] . identifier[configure] ( identifier[width] = identifier[self] . identifier[interior] . identifier[winfo_reqwidth] ()) | def __configure_canvas(self, *args):
"""
Private function to configure the internal Canvas.
Changes the width of the canvas to fit the interior Frame
:param args: Tkinter event
"""
if self.interior.winfo_reqwidth() is not self._canvas.winfo_width():
self._canvas.configure(width=self.interior.winfo_reqwidth()) # depends on [control=['if'], data=[]] |
def _update_file_frontiers(
        self,
        frontier_list,
        revision,
        max_csets_proc=30,
        going_forward=False
    ):
    '''
    Update the frontier for all given files, up to the given revision.

    Built for quick continuous _forward_ updating of large sets
    of files of TUIDs. Backward updating should be done through
    get_tuids(files, revision). If we cannot find a frontier, we will
    stop looking after max_csets_proc changelog pages and update all
    files at the given revision.

    :param frontier_list: list of (file, frontier_revision) pairs to update
    :param revision: revision to update files to
    :param max_csets_proc: maximum number of changeset logs to look through
                           to find past frontiers.
    :param going_forward: If we know the requested revision is in front
                          of the latest revision use this flag. Used when
                          the frontier is too far away. If this is not set and
                          a frontier is too far, the latest revision will not
                          be updated.
    :return: list of (file, list(tuids)) tuples
    '''
    # --- Phase 1: walk the changelog backwards from `revision` until every
    # requested frontier revision has been seen, collecting the changesets
    # that lie between each frontier and `revision`.

    # Holds the files modified up to the last frontiers
    # (file -> list of changesets whose diffs must be applied).
    files_to_process = {}

    # Holds all distinct frontier revisions we still need to find.
    remaining_frontiers = {cset for cset in list(set([frontier for _, frontier in frontier_list]))}

    if len(remaining_frontiers) <= 1 and frontier_list[0][1] == revision:
        # If the latest revision is the requested revision,
        # and there is only one frontier requested,
        # skip the changelog walk and continue to the tuid queries.
        remaining_frontiers = {}

    # Revision we are currently searching from (moves backwards page by page).
    final_rev = revision

    # If this exceeds max_csets_proc,
    # all frontiers not found are considered lost.
    csets_proced = 0

    # Maps each known frontier to the changesets that must be applied on
    # top of it to reach `revision`.
    diffs_to_frontier = {cset: [] for cset in remaining_frontiers}

    Log.note("Searching for frontier(s): {{frontier}} ", frontier=str(list(remaining_frontiers)))
    Log.note(
        "Running on revision with HG URL: {{url}}",
        url=self.hg_url / self.config.hg.branch / 'rev' / revision
    )

    while remaining_frontiers:
        # Get one changelog page from hg.mozilla.org.
        clog_url = self.hg_url / self.config.hg.branch / 'json-log' / final_rev
        try:
            Log.note("Searching through changelog {{url}}", url=clog_url)
            clog_obj = http.get_json(clog_url, retry=RETRY)
            if isinstance(clog_obj, (text_type, str)):
                # hg returns an error string (not JSON) for unknown revisions.
                Log.error(
                    "Revision {{cset}} does not exist in the {{branch}} branch",
                    cset=final_rev, branch=self.config.hg.branch
                )
        except Exception as e:
            # NOTE: Log.error raises, so this aborts the update.
            Log.error(
                "Unexpected error getting changset-log for {{url}}: {{error}}",
                url=clog_url,
                error=e
            )

        # For each changeset in the log (except the last one,
        # which is duplicated as the first entry of the next log page).
        clog_obj_list = list(clog_obj['changesets'])
        for clog_cset in clog_obj_list[:-1]:
            # hg changeset ids are compared by their 12-char short form.
            nodes_cset = clog_cset['node'][:12]

            if remaining_frontiers:
                if nodes_cset in remaining_frontiers:
                    # Found a frontier, remove it from the search list.
                    remaining_frontiers.remove(nodes_cset)
                    if not remaining_frontiers:
                        # Found all frontiers, get out of the loop before
                        # we add the diff to a frontier update list.
                        break

            # Add this changeset to the pending-diff list
            # of every frontier that is still unfound (those frontiers
            # lie further back, so this diff is between them and `revision`).
            for cset in diffs_to_frontier:
                if cset in remaining_frontiers:
                    diffs_to_frontier[cset].append(nodes_cset)

            csets_proced += 1

        if not remaining_frontiers:
            # End searching
            break
        elif csets_proced >= max_csets_proc:
            # Gave up: all files need to be updated to this revision to ensure
            # line ordering consistency (between past, and future) when a revision
            # that is in the past is asked for.
            files_to_process = {file: [revision] for file, _ in frontier_list}
            break
        else:
            # Go to the next (older) log page.
            last_entry = clog_obj_list[-1]
            final_rev = last_entry['node'][:12]

    if not remaining_frontiers:
        Log.note("Found all frontiers: {{frontiers_list}}", frontiers_list=str(list(diffs_to_frontier.keys())))
    else:
        found_frontiers = [
            frontier for frontier in diffs_to_frontier if frontier not in remaining_frontiers
        ]
        Log.note("Found frontiers: {{found}}", found=str(found_frontiers))
        Log.note("Did not find frontiers: {{not_found}}", not_found=str(list(remaining_frontiers)))

    # --- Phase 2: parse the collected diffs to classify each requested file
    # as modified / added / removed relative to its frontier.

    added_files = {}
    removed_files = {}
    parsed_diffs = {}

    # Maps each requested file to the frontier revision we still need to
    # pass while scanning diffs; set to '' once that frontier is reached.
    file_to_frontier = {file: frontier for file, frontier in frontier_list}

    if len(remaining_frontiers) != len(diffs_to_frontier.keys()):
        # At least one frontier was found.
        # Only get diffs that are needed (skip diffs that belong solely
        # to frontiers we never located).
        diffs_cache = []
        for cset in diffs_to_frontier:
            if cset not in remaining_frontiers:
                diffs_cache.extend(diffs_to_frontier[cset])

        Log.note("Gathering diffs for: {{csets}}", csets=str(diffs_cache))
        all_diffs = self.get_diffs(diffs_cache)

        # Build a dict for faster access to the diffs,
        # to be used later when applying them.
        parsed_diffs = {diff_entry['cset']: diff_entry['diff'] for diff_entry in all_diffs}

        # In case the file name changes, this maps
        # the new file name back to the previously requested name so
        # diffs can all be gathered under the original name.
        filenames_to_seek = {}

        # Monotonic counter per file used to order add/remove events;
        # the larger of removed_files[f]/added_files[f] is the later event.
        added_and_removed_counts = {file: 1 for file in file_to_frontier}
        for csets_diff in all_diffs:
            cset_len12 = csets_diff['cset']
            parsed_diff = csets_diff['diff']['diffs']

            for f_added in parsed_diff:
                # Normalize both sides of the rename/modify entry.
                new_name = f_added['new'].name.lstrip('/')
                old_name = f_added['old'].name.lstrip('/')

                # If we don't need this file, skip it.
                if new_name not in file_to_frontier and \
                        new_name not in filenames_to_seek:
                    if old_name not in file_to_frontier and \
                            old_name not in filenames_to_seek:
                        # File not requested
                        continue

                # 'dev/null' as the new side means the file was removed.
                if new_name == 'dev/null':
                    # Follow rename chain back to the originally requested name.
                    frontier_filename = old_name
                    while frontier_filename in filenames_to_seek:
                        frontier_filename = filenames_to_seek[frontier_filename]

                    if frontier_filename not in removed_files:
                        removed_files[frontier_filename] = 0
                    removed_files[frontier_filename] = added_and_removed_counts[frontier_filename]
                    added_and_removed_counts[frontier_filename] += 1
                    continue

                # 'dev/null' as the old side means the file was added.
                if old_name == 'dev/null':
                    frontier_filename = new_name
                    while frontier_filename in filenames_to_seek:
                        frontier_filename = filenames_to_seek[frontier_filename]

                    if frontier_filename not in added_files:
                        added_files[frontier_filename] = 0
                    added_files[frontier_filename] = added_and_removed_counts[frontier_filename]
                    added_and_removed_counts[frontier_filename] += 1
                    continue

                if new_name != old_name:
                    # File name was changed, keep the diff anyway
                    # to add any changes it makes.
                    filenames_to_seek[new_name] = old_name

                # Get the originally requested file name
                # by following filenames_to_seek entries.
                frontier_filename = new_name
                while frontier_filename in filenames_to_seek:
                    frontier_filename = filenames_to_seek[frontier_filename]

                # If we are past the frontier for this file,
                # or if we are at the frontier, skip it.
                if file_to_frontier[frontier_filename] == '':
                    # Previously found frontier, skip
                    continue

                # At this point, file is in the database, is
                # asked to be processed, and we are still
                # searching for the last frontier.
                if file_to_frontier[frontier_filename] == cset_len12:
                    file_to_frontier[frontier_filename] = ''
                    # Found the frontier, skip
                    continue

                if old_name != new_name:
                    # NOTE(review): `cset` here is the stale loop variable
                    # left over from the `for cset in diffs_to_frontier`
                    # loop above; this log line most likely intends
                    # `cset_len12` — confirm before relying on it.
                    Log.note(
                        "{{cset}} changes a requested file's name: {{file}} from {{oldfile}}. ",
                        file=new_name,
                        oldfile=old_name,
                        cset=cset
                    )

                # Store the diff as it needs to be applied.
                if frontier_filename in files_to_process:
                    files_to_process[frontier_filename].append(cset_len12)
                else:
                    files_to_process[frontier_filename] = [cset_len12]

    # --- Phase 3: per-file annotation update inside one DB transaction.
    # Process each file based on the files_to_process list.
    result = []
    ann_inserts = []
    latestFileMod_inserts = {}
    anns_to_get = []
    total = len(frontier_list)

    tmp_results = {}
    with self.conn.transaction() as transaction:
        for count, (file, old_frontier) in enumerate(frontier_list):
            if old_frontier in remaining_frontiers:
                # We were still looking for this frontier by the end of the
                # changelog walk: get a brand new annotation for this file.
                anns_to_get.append(file)

                if going_forward:
                    # If we are always going forward, update the frontier.
                    latestFileMod_inserts[file] = (file, revision)
                Log.note(
                    "Frontier update - can't find frontier {{lost_frontier}}: "
                    "{{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ",
                    count=count,
                    total=total,
                    file=file,
                    rev=revision,
                    percent=count / total,
                    lost_frontier=old_frontier
                )
                continue
            elif file in removed_files or file in added_files:
                if file not in removed_files:
                    removed_files[file] = 0
                if file not in added_files:
                    added_files[file] = 0

                if removed_files[file] <= added_files[file]:
                    # For the file to still exist its add event must be
                    # the later one (i.e. have the larger counter value).
                    anns_to_get.append(file)
                    Log.note(
                        "Frontier update - adding: "
                        "{{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ",
                        count=count,
                        total=total,
                        file=file,
                        rev=revision,
                        percent=count / total,
                        lost_frontier=old_frontier
                    )
                else:
                    # File was removed last: record an empty TUID list.
                    Log.note(
                        "Frontier update - deleting: "
                        "{{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ",
                        count=count,
                        total=total,
                        file=file,
                        rev=revision,
                        percent=count / total,
                        lost_frontier=old_frontier
                    )
                    tmp_results[file] = []
                if going_forward:
                    # If we are always going forward, update the frontier.
                    latestFileMod_inserts[file] = (file, revision)
                continue

            # If the file was modified, get its newest
            # annotation and update the file.
            tmp_res = None
            if file in files_to_process:
                # Process this file using the diffs found.
                tmp_ann = self._get_annotation(old_frontier, file, transaction)
                if tmp_ann is None or tmp_ann == '' or self.destringify_tuids(tmp_ann) is None:
                    Log.warning(
                        "{{file}} has frontier but can't find old annotation for it in {{rev}}, "
                        "restarting it's frontier.",
                        rev=old_frontier,
                        file=file
                    )
                    anns_to_get.append(file)
                else:
                    # File was modified: apply its diffs.
                    # Reverse the diff list — we always find the newest diff first
                    # during the changelog walk, but must apply oldest first.
                    csets_to_proc = files_to_process[file][::-1]
                    tmp_res = self.destringify_tuids(tmp_ann)

                    new_fname = file
                    for i in csets_to_proc:
                        tmp_res, new_fname = self._apply_diff(transaction, tmp_res, parsed_diffs[i], i, new_fname)

                    ann_inserts.append((revision, file, self.stringify_tuids(tmp_res)))
                    Log.note(
                        "Frontier update - modified: {{count}}/{{total}} - {{percent|percent(decimal=0)}} "
                        "| {{rev}}|{{file}} ",
                        count=count,
                        total=total,
                        file=file,
                        rev=revision,
                        percent=count / total
                    )
            else:
                old_ann = self._get_annotation(old_frontier, file, transaction)
                if old_ann is None or (old_ann == '' and file in added_files):
                    # File is new (likely from an error), or re-added — we need to create
                    # a new initial entry for this file.
                    anns_to_get.append(file)
                    Log.note(
                        "Frontier update - readded: {{count}}/{{total}} - {{percent|percent(decimal=0)}} "
                        "| {{rev}}|{{file}} ",
                        count=count,
                        total=total,
                        file=file,
                        rev=revision,
                        percent=count / total
                    )
                else:
                    # File was not modified since the last
                    # known revision: carry the old annotation forward.
                    tmp_res = self.destringify_tuids(old_ann) if old_ann != '' else []
                    ann_inserts.append((revision, file, old_ann))
                    Log.note(
                        "Frontier update - not modified: {{count}}/{{total}} - {{percent|percent(decimal=0)}} "
                        "| {{rev}}|{{file}} ",
                        count=count,
                        total=total,
                        file=file,
                        rev=revision,
                        percent=count / total
                    )

            if tmp_res:
                tmp_results[file] = tmp_res
            else:
                # NOTE(review): this also fires for legitimately empty
                # files (tmp_res == []), not only errors.
                Log.note(
                    "Error occured for file {{file}} in revision {{revision}}",
                    file=file,
                    revision=revision
                )
                tmp_results[file] = []

            # If we have found all frontiers, update to the
            # latest revision. Otherwise, the requested
            # revision is too far away (can't be sure
            # if it's past). Unless we are told that we are
            # going forward.
            if going_forward or not remaining_frontiers:
                latest_rev = revision
            else:
                latest_rev = old_frontier
            latestFileMod_inserts[file] = (file, latest_rev)

        Log.note("Updating DB tables `latestFileMod` and `annotations`...")
        # No need to double-check if latestFileMod has been updated before:
        # we perform an insert-or-replace either way.
        if len(latestFileMod_inserts) > 0:
            for _, inserts_list in jx.groupby(latestFileMod_inserts.values(), size=SQL_BATCH_SIZE):
                transaction.execute(
                    "INSERT OR REPLACE INTO latestFileMod (file, revision) VALUES " +
                    sql_list(quote_list(i) for i in inserts_list)
                )

        anns_added_by_other_thread = {}
        if len(ann_inserts) > 0:
            ann_inserts = list(set(ann_inserts))
            for _, tmp_inserts in jx.groupby(ann_inserts, size=SQL_ANN_BATCH_SIZE):
                # Check if any were added in the meantime by another thread;
                # keep those results instead of re-inserting.
                recomputed_inserts = []
                for rev, filename, string_tuids in tmp_inserts:
                    tmp_ann = self._get_annotation(rev, filename, transaction)
                    if not tmp_ann or tmp_ann == '':
                        recomputed_inserts.append((rev, filename, string_tuids))
                    else:
                        anns_added_by_other_thread[filename] = self.destringify_tuids(tmp_ann)

                if len(recomputed_inserts) <= 0:
                    continue

                try:
                    # Sanity-check the TUID maps before insertion; a None
                    # tuid/line indicates corrupt annotation data.
                    for rev, filename, tuids_ann in recomputed_inserts:
                        tmp_ann = self.destringify_tuids(tuids_ann)
                        for tuid_map in tmp_ann:
                            if tuid_map is None or tuid_map.tuid is None or tuid_map.line is None:
                                Log.warning(
                                    "None value encountered in annotation insertion in {{rev}} for {{file}}: {{tuids}}",
                                    rev=rev, file=filename, tuids=str(tuid_map)
                                )
                    self.insert_annotations(transaction, recomputed_inserts)
                except Exception as e:
                    Log.error("Error inserting into annotations table: {{inserting}}", inserting=recomputed_inserts, cause=e)

    # --- Phase 4: compute fresh annotations for files that need them and
    # assemble the (file, tuids) result list.
    if len(anns_to_get) > 0:
        result.extend(self.get_tuids(anns_to_get, revision, commit=False))

    for f in tmp_results:
        tuids = tmp_results[f]
        if f in anns_added_by_other_thread:
            # Prefer the annotation another thread inserted concurrently.
            tuids = anns_added_by_other_thread[f]
        result.append((f, tuids))
    return result
return result | def function[_update_file_frontiers, parameter[self, frontier_list, revision, max_csets_proc, going_forward]]:
constant[
Update the frontier for all given files, up to the given revision.
Built for quick continuous _forward_ updating of large sets
of files of TUIDs. Backward updating should be done through
get_tuids(files, revision). If we cannot find a frontier, we will
stop looking after max_csets_proc and update all files at the given
revision.
:param frontier_list: list of files to update
:param revision: revision to update files to
:param max_csets_proc: maximum number of changeset logs to look through
to find past frontiers.
:param going_forward: If we know the requested revision is in front
of the latest revision use this flag. Used when
the frontier is too far away. If this is not set and
a frontier is too far, the latest revision will not
be updated.
:return: list of (file, list(tuids)) tuples
]
variable[files_to_process] assign[=] dictionary[[], []]
variable[remaining_frontiers] assign[=] <ast.SetComp object at 0x7da1b0a824a0>
if <ast.BoolOp object at 0x7da1b0a82500> begin[:]
variable[remaining_frontiers] assign[=] dictionary[[], []]
variable[final_rev] assign[=] name[revision]
variable[csets_proced] assign[=] constant[0]
variable[diffs_to_frontier] assign[=] <ast.DictComp object at 0x7da1b0a83400>
call[name[Log].note, parameter[constant[Searching for frontier(s): {{frontier}} ]]]
call[name[Log].note, parameter[constant[Running on revision with HG URL: {{url}}]]]
while name[remaining_frontiers] begin[:]
variable[clog_url] assign[=] binary_operation[binary_operation[binary_operation[name[self].hg_url / name[self].config.hg.branch] / constant[json-log]] / name[final_rev]]
<ast.Try object at 0x7da1b0a82920>
variable[clog_obj_list] assign[=] call[name[list], parameter[call[name[clog_obj]][constant[changesets]]]]
for taget[name[clog_cset]] in starred[call[name[clog_obj_list]][<ast.Slice object at 0x7da1b0ab61a0>]] begin[:]
variable[nodes_cset] assign[=] call[call[name[clog_cset]][constant[node]]][<ast.Slice object at 0x7da1b0ab7160>]
if name[remaining_frontiers] begin[:]
if compare[name[nodes_cset] in name[remaining_frontiers]] begin[:]
call[name[remaining_frontiers].remove, parameter[name[nodes_cset]]]
if <ast.UnaryOp object at 0x7da1b0ab5c90> begin[:]
break
for taget[name[cset]] in starred[name[diffs_to_frontier]] begin[:]
if compare[name[cset] in name[remaining_frontiers]] begin[:]
call[call[name[diffs_to_frontier]][name[cset]].append, parameter[name[nodes_cset]]]
<ast.AugAssign object at 0x7da1b0ab73d0>
if <ast.UnaryOp object at 0x7da1b0ab5e10> begin[:]
break
if <ast.UnaryOp object at 0x7da1b0ab7bb0> begin[:]
call[name[Log].note, parameter[constant[Found all frontiers: {{frontiers_list}}]]]
variable[added_files] assign[=] dictionary[[], []]
variable[removed_files] assign[=] dictionary[[], []]
variable[parsed_diffs] assign[=] dictionary[[], []]
variable[file_to_frontier] assign[=] <ast.DictComp object at 0x7da1b0ab4e20>
if compare[call[name[len], parameter[name[remaining_frontiers]]] not_equal[!=] call[name[len], parameter[call[name[diffs_to_frontier].keys, parameter[]]]]] begin[:]
variable[diffs_cache] assign[=] list[[]]
for taget[name[cset]] in starred[name[diffs_to_frontier]] begin[:]
if compare[name[cset] <ast.NotIn object at 0x7da2590d7190> name[remaining_frontiers]] begin[:]
call[name[diffs_cache].extend, parameter[call[name[diffs_to_frontier]][name[cset]]]]
call[name[Log].note, parameter[constant[Gathering diffs for: {{csets}}]]]
variable[all_diffs] assign[=] call[name[self].get_diffs, parameter[name[diffs_cache]]]
variable[parsed_diffs] assign[=] <ast.DictComp object at 0x7da1b0ab4100>
variable[filenames_to_seek] assign[=] dictionary[[], []]
variable[added_and_removed_counts] assign[=] <ast.DictComp object at 0x7da1b0ab60b0>
for taget[name[csets_diff]] in starred[name[all_diffs]] begin[:]
variable[cset_len12] assign[=] call[name[csets_diff]][constant[cset]]
variable[parsed_diff] assign[=] call[call[name[csets_diff]][constant[diff]]][constant[diffs]]
for taget[name[f_added]] in starred[name[parsed_diff]] begin[:]
variable[new_name] assign[=] call[call[name[f_added]][constant[new]].name.lstrip, parameter[constant[/]]]
variable[old_name] assign[=] call[call[name[f_added]][constant[old]].name.lstrip, parameter[constant[/]]]
if <ast.BoolOp object at 0x7da1b0ab6740> begin[:]
if <ast.BoolOp object at 0x7da1b0ab6500> begin[:]
continue
if compare[name[new_name] equal[==] constant[dev/null]] begin[:]
variable[frontier_filename] assign[=] name[old_name]
while compare[name[frontier_filename] in name[filenames_to_seek]] begin[:]
variable[frontier_filename] assign[=] call[name[filenames_to_seek]][name[frontier_filename]]
if compare[name[frontier_filename] <ast.NotIn object at 0x7da2590d7190> name[removed_files]] begin[:]
call[name[removed_files]][name[frontier_filename]] assign[=] constant[0]
call[name[removed_files]][name[frontier_filename]] assign[=] call[name[added_and_removed_counts]][name[frontier_filename]]
<ast.AugAssign object at 0x7da1b0b739a0>
continue
if compare[name[old_name] equal[==] constant[dev/null]] begin[:]
variable[frontier_filename] assign[=] name[new_name]
while compare[name[frontier_filename] in name[filenames_to_seek]] begin[:]
variable[frontier_filename] assign[=] call[name[filenames_to_seek]][name[frontier_filename]]
if compare[name[frontier_filename] <ast.NotIn object at 0x7da2590d7190> name[added_files]] begin[:]
call[name[added_files]][name[frontier_filename]] assign[=] constant[0]
call[name[added_files]][name[frontier_filename]] assign[=] call[name[added_and_removed_counts]][name[frontier_filename]]
<ast.AugAssign object at 0x7da1b0b71c00>
continue
if compare[name[new_name] not_equal[!=] name[old_name]] begin[:]
call[name[filenames_to_seek]][name[new_name]] assign[=] name[old_name]
variable[frontier_filename] assign[=] name[new_name]
while compare[name[frontier_filename] in name[filenames_to_seek]] begin[:]
variable[frontier_filename] assign[=] call[name[filenames_to_seek]][name[frontier_filename]]
if compare[call[name[file_to_frontier]][name[frontier_filename]] equal[==] constant[]] begin[:]
continue
if compare[call[name[file_to_frontier]][name[frontier_filename]] equal[==] name[cset_len12]] begin[:]
call[name[file_to_frontier]][name[frontier_filename]] assign[=] constant[]
continue
if compare[name[old_name] not_equal[!=] name[new_name]] begin[:]
call[name[Log].note, parameter[constant[{{cset}} changes a requested file's name: {{file}} from {{oldfile}}. ]]]
if compare[name[frontier_filename] in name[files_to_process]] begin[:]
call[call[name[files_to_process]][name[frontier_filename]].append, parameter[name[cset_len12]]]
variable[result] assign[=] list[[]]
variable[ann_inserts] assign[=] list[[]]
variable[latestFileMod_inserts] assign[=] dictionary[[], []]
variable[anns_to_get] assign[=] list[[]]
variable[total] assign[=] call[name[len], parameter[name[frontier_list]]]
variable[tmp_results] assign[=] dictionary[[], []]
with call[name[self].conn.transaction, parameter[]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0b71f00>, <ast.Tuple object at 0x7da1b0b70400>]]] in starred[call[name[enumerate], parameter[name[frontier_list]]]] begin[:]
if compare[name[old_frontier] in name[remaining_frontiers]] begin[:]
call[name[anns_to_get].append, parameter[name[file]]]
if name[going_forward] begin[:]
call[name[latestFileMod_inserts]][name[file]] assign[=] tuple[[<ast.Name object at 0x7da1b0b70580>, <ast.Name object at 0x7da1b0b738e0>]]
call[name[Log].note, parameter[constant[Frontier update - can't find frontier {{lost_frontier}}: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ]]]
continue
variable[tmp_res] assign[=] constant[None]
if compare[name[file] in name[files_to_process]] begin[:]
variable[tmp_ann] assign[=] call[name[self]._get_annotation, parameter[name[old_frontier], name[file], name[transaction]]]
if <ast.BoolOp object at 0x7da1b0aecdc0> begin[:]
call[name[Log].warning, parameter[constant[{{file}} has frontier but can't find old annotation for it in {{rev}}, restarting it's frontier.]]]
call[name[anns_to_get].append, parameter[name[file]]]
if name[tmp_res] begin[:]
call[name[tmp_results]][name[file]] assign[=] name[tmp_res]
if <ast.BoolOp object at 0x7da1b0b6d1e0> begin[:]
variable[latest_rev] assign[=] name[revision]
call[name[latestFileMod_inserts]][name[file]] assign[=] tuple[[<ast.Name object at 0x7da1b0b6c940>, <ast.Name object at 0x7da1b0b6d000>]]
call[name[Log].note, parameter[constant[Updating DB tables `latestFileMod` and `annotations`...]]]
if compare[call[name[len], parameter[name[latestFileMod_inserts]]] greater[>] constant[0]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0b6d0f0>, <ast.Name object at 0x7da1b0b6e8f0>]]] in starred[call[name[jx].groupby, parameter[call[name[latestFileMod_inserts].values, parameter[]]]]] begin[:]
call[name[transaction].execute, parameter[binary_operation[constant[INSERT OR REPLACE INTO latestFileMod (file, revision) VALUES ] + call[name[sql_list], parameter[<ast.GeneratorExp object at 0x7da1b0b6dc90>]]]]]
variable[anns_added_by_other_thread] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[ann_inserts]]] greater[>] constant[0]] begin[:]
variable[ann_inserts] assign[=] call[name[list], parameter[call[name[set], parameter[name[ann_inserts]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0b6ef20>, <ast.Name object at 0x7da1b0b6d570>]]] in starred[call[name[jx].groupby, parameter[name[ann_inserts]]]] begin[:]
variable[recomputed_inserts] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0b6f550>, <ast.Name object at 0x7da1b0b6d3c0>, <ast.Name object at 0x7da1b0b6e470>]]] in starred[name[tmp_inserts]] begin[:]
variable[tmp_ann] assign[=] call[name[self]._get_annotation, parameter[name[rev], name[filename], name[transaction]]]
if <ast.BoolOp object at 0x7da1b0b6c2b0> begin[:]
call[name[recomputed_inserts].append, parameter[tuple[[<ast.Name object at 0x7da1b0b6fd90>, <ast.Name object at 0x7da1b0b6fa60>, <ast.Name object at 0x7da1b0b6f430>]]]]
if compare[call[name[len], parameter[name[recomputed_inserts]]] less_or_equal[<=] constant[0]] begin[:]
continue
<ast.Try object at 0x7da1b0b6d270>
if compare[call[name[len], parameter[name[anns_to_get]]] greater[>] constant[0]] begin[:]
call[name[result].extend, parameter[call[name[self].get_tuids, parameter[name[anns_to_get], name[revision]]]]]
for taget[name[f]] in starred[name[tmp_results]] begin[:]
variable[tuids] assign[=] call[name[tmp_results]][name[f]]
if compare[name[f] in name[anns_added_by_other_thread]] begin[:]
variable[tuids] assign[=] call[name[anns_added_by_other_thread]][name[f]]
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da1b0ba9120>, <ast.Name object at 0x7da1b0ba9ba0>]]]]
return[name[result]] | keyword[def] identifier[_update_file_frontiers] (
identifier[self] ,
identifier[frontier_list] ,
identifier[revision] ,
identifier[max_csets_proc] = literal[int] ,
identifier[going_forward] = keyword[False]
):
literal[string]
identifier[files_to_process] ={}
identifier[remaining_frontiers] ={ identifier[cset] keyword[for] identifier[cset] keyword[in] identifier[list] ( identifier[set] ([ identifier[frontier] keyword[for] identifier[_] , identifier[frontier] keyword[in] identifier[frontier_list] ]))}
keyword[if] identifier[len] ( identifier[remaining_frontiers] )<= literal[int] keyword[and] identifier[frontier_list] [ literal[int] ][ literal[int] ]== identifier[revision] :
identifier[remaining_frontiers] ={}
identifier[final_rev] = identifier[revision]
identifier[csets_proced] = literal[int]
identifier[diffs_to_frontier] ={ identifier[cset] :[] keyword[for] identifier[cset] keyword[in] identifier[remaining_frontiers] }
identifier[Log] . identifier[note] ( literal[string] , identifier[frontier] = identifier[str] ( identifier[list] ( identifier[remaining_frontiers] )))
identifier[Log] . identifier[note] (
literal[string] ,
identifier[url] = identifier[self] . identifier[hg_url] / identifier[self] . identifier[config] . identifier[hg] . identifier[branch] / literal[string] / identifier[revision]
)
keyword[while] identifier[remaining_frontiers] :
identifier[clog_url] = identifier[self] . identifier[hg_url] / identifier[self] . identifier[config] . identifier[hg] . identifier[branch] / literal[string] / identifier[final_rev]
keyword[try] :
identifier[Log] . identifier[note] ( literal[string] , identifier[url] = identifier[clog_url] )
identifier[clog_obj] = identifier[http] . identifier[get_json] ( identifier[clog_url] , identifier[retry] = identifier[RETRY] )
keyword[if] identifier[isinstance] ( identifier[clog_obj] ,( identifier[text_type] , identifier[str] )):
identifier[Log] . identifier[error] (
literal[string] ,
identifier[cset] = identifier[final_rev] , identifier[branch] = identifier[self] . identifier[config] . identifier[hg] . identifier[branch]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[Log] . identifier[error] (
literal[string] ,
identifier[url] = identifier[clog_url] ,
identifier[error] = identifier[e]
)
identifier[clog_obj_list] = identifier[list] ( identifier[clog_obj] [ literal[string] ])
keyword[for] identifier[clog_cset] keyword[in] identifier[clog_obj_list] [:- literal[int] ]:
identifier[nodes_cset] = identifier[clog_cset] [ literal[string] ][: literal[int] ]
keyword[if] identifier[remaining_frontiers] :
keyword[if] identifier[nodes_cset] keyword[in] identifier[remaining_frontiers] :
identifier[remaining_frontiers] . identifier[remove] ( identifier[nodes_cset] )
keyword[if] keyword[not] identifier[remaining_frontiers] :
keyword[break]
keyword[for] identifier[cset] keyword[in] identifier[diffs_to_frontier] :
keyword[if] identifier[cset] keyword[in] identifier[remaining_frontiers] :
identifier[diffs_to_frontier] [ identifier[cset] ]. identifier[append] ( identifier[nodes_cset] )
identifier[csets_proced] += literal[int]
keyword[if] keyword[not] identifier[remaining_frontiers] :
keyword[break]
keyword[elif] identifier[csets_proced] >= identifier[max_csets_proc] :
identifier[files_to_process] ={ identifier[file] :[ identifier[revision] ] keyword[for] identifier[file] , identifier[_] keyword[in] identifier[frontier_list] }
keyword[break]
keyword[else] :
identifier[last_entry] = identifier[clog_obj_list] [- literal[int] ]
identifier[final_rev] = identifier[last_entry] [ literal[string] ][: literal[int] ]
keyword[if] keyword[not] identifier[remaining_frontiers] :
identifier[Log] . identifier[note] ( literal[string] , identifier[frontiers_list] = identifier[str] ( identifier[list] ( identifier[diffs_to_frontier] . identifier[keys] ())))
keyword[else] :
identifier[found_frontiers] =[
identifier[frontier] keyword[for] identifier[frontier] keyword[in] identifier[diffs_to_frontier] keyword[if] identifier[frontier] keyword[not] keyword[in] identifier[remaining_frontiers]
]
identifier[Log] . identifier[note] ( literal[string] , identifier[found] = identifier[str] ( identifier[found_frontiers] ))
identifier[Log] . identifier[note] ( literal[string] , identifier[not_found] = identifier[str] ( identifier[list] ( identifier[remaining_frontiers] )))
identifier[added_files] ={}
identifier[removed_files] ={}
identifier[parsed_diffs] ={}
identifier[file_to_frontier] ={ identifier[file] : identifier[frontier] keyword[for] identifier[file] , identifier[frontier] keyword[in] identifier[frontier_list] }
keyword[if] identifier[len] ( identifier[remaining_frontiers] )!= identifier[len] ( identifier[diffs_to_frontier] . identifier[keys] ()):
identifier[diffs_cache] =[]
keyword[for] identifier[cset] keyword[in] identifier[diffs_to_frontier] :
keyword[if] identifier[cset] keyword[not] keyword[in] identifier[remaining_frontiers] :
identifier[diffs_cache] . identifier[extend] ( identifier[diffs_to_frontier] [ identifier[cset] ])
identifier[Log] . identifier[note] ( literal[string] , identifier[csets] = identifier[str] ( identifier[diffs_cache] ))
identifier[all_diffs] = identifier[self] . identifier[get_diffs] ( identifier[diffs_cache] )
identifier[parsed_diffs] ={ identifier[diff_entry] [ literal[string] ]: identifier[diff_entry] [ literal[string] ] keyword[for] identifier[diff_entry] keyword[in] identifier[all_diffs] }
identifier[filenames_to_seek] ={}
identifier[added_and_removed_counts] ={ identifier[file] : literal[int] keyword[for] identifier[file] keyword[in] identifier[file_to_frontier] }
keyword[for] identifier[csets_diff] keyword[in] identifier[all_diffs] :
identifier[cset_len12] = identifier[csets_diff] [ literal[string] ]
identifier[parsed_diff] = identifier[csets_diff] [ literal[string] ][ literal[string] ]
keyword[for] identifier[f_added] keyword[in] identifier[parsed_diff] :
identifier[new_name] = identifier[f_added] [ literal[string] ]. identifier[name] . identifier[lstrip] ( literal[string] )
identifier[old_name] = identifier[f_added] [ literal[string] ]. identifier[name] . identifier[lstrip] ( literal[string] )
keyword[if] identifier[new_name] keyword[not] keyword[in] identifier[file_to_frontier] keyword[and] identifier[new_name] keyword[not] keyword[in] identifier[filenames_to_seek] :
keyword[if] identifier[old_name] keyword[not] keyword[in] identifier[file_to_frontier] keyword[and] identifier[old_name] keyword[not] keyword[in] identifier[filenames_to_seek] :
keyword[continue]
keyword[if] identifier[new_name] == literal[string] :
identifier[frontier_filename] = identifier[old_name]
keyword[while] identifier[frontier_filename] keyword[in] identifier[filenames_to_seek] :
identifier[frontier_filename] = identifier[filenames_to_seek] [ identifier[frontier_filename] ]
keyword[if] identifier[frontier_filename] keyword[not] keyword[in] identifier[removed_files] :
identifier[removed_files] [ identifier[frontier_filename] ]= literal[int]
identifier[removed_files] [ identifier[frontier_filename] ]= identifier[added_and_removed_counts] [ identifier[frontier_filename] ]
identifier[added_and_removed_counts] [ identifier[frontier_filename] ]+= literal[int]
keyword[continue]
keyword[if] identifier[old_name] == literal[string] :
identifier[frontier_filename] = identifier[new_name]
keyword[while] identifier[frontier_filename] keyword[in] identifier[filenames_to_seek] :
identifier[frontier_filename] = identifier[filenames_to_seek] [ identifier[frontier_filename] ]
keyword[if] identifier[frontier_filename] keyword[not] keyword[in] identifier[added_files] :
identifier[added_files] [ identifier[frontier_filename] ]= literal[int]
identifier[added_files] [ identifier[frontier_filename] ]= identifier[added_and_removed_counts] [ identifier[frontier_filename] ]
identifier[added_and_removed_counts] [ identifier[frontier_filename] ]+= literal[int]
keyword[continue]
keyword[if] identifier[new_name] != identifier[old_name] :
identifier[filenames_to_seek] [ identifier[new_name] ]= identifier[old_name]
identifier[frontier_filename] = identifier[new_name]
keyword[while] identifier[frontier_filename] keyword[in] identifier[filenames_to_seek] :
identifier[frontier_filename] = identifier[filenames_to_seek] [ identifier[frontier_filename] ]
keyword[if] identifier[file_to_frontier] [ identifier[frontier_filename] ]== literal[string] :
keyword[continue]
keyword[if] identifier[file_to_frontier] [ identifier[frontier_filename] ]== identifier[cset_len12] :
identifier[file_to_frontier] [ identifier[frontier_filename] ]= literal[string]
keyword[continue]
keyword[if] identifier[old_name] != identifier[new_name] :
identifier[Log] . identifier[note] (
literal[string] ,
identifier[file] = identifier[new_name] ,
identifier[oldfile] = identifier[old_name] ,
identifier[cset] = identifier[cset]
)
keyword[if] identifier[frontier_filename] keyword[in] identifier[files_to_process] :
identifier[files_to_process] [ identifier[frontier_filename] ]. identifier[append] ( identifier[cset_len12] )
keyword[else] :
identifier[files_to_process] [ identifier[frontier_filename] ]=[ identifier[cset_len12] ]
identifier[result] =[]
identifier[ann_inserts] =[]
identifier[latestFileMod_inserts] ={}
identifier[anns_to_get] =[]
identifier[total] = identifier[len] ( identifier[frontier_list] )
identifier[tmp_results] ={}
keyword[with] identifier[self] . identifier[conn] . identifier[transaction] () keyword[as] identifier[transaction] :
keyword[for] identifier[count] ,( identifier[file] , identifier[old_frontier] ) keyword[in] identifier[enumerate] ( identifier[frontier_list] ):
keyword[if] identifier[old_frontier] keyword[in] identifier[remaining_frontiers] :
identifier[anns_to_get] . identifier[append] ( identifier[file] )
keyword[if] identifier[going_forward] :
identifier[latestFileMod_inserts] [ identifier[file] ]=( identifier[file] , identifier[revision] )
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total] ,
identifier[lost_frontier] = identifier[old_frontier]
)
keyword[continue]
keyword[elif] identifier[file] keyword[in] identifier[removed_files] keyword[or] identifier[file] keyword[in] identifier[added_files] :
keyword[if] identifier[file] keyword[not] keyword[in] identifier[removed_files] :
identifier[removed_files] [ identifier[file] ]= literal[int]
keyword[if] identifier[file] keyword[not] keyword[in] identifier[added_files] :
identifier[added_files] [ identifier[file] ]= literal[int]
keyword[if] identifier[removed_files] [ identifier[file] ]<= identifier[added_files] [ identifier[file] ]:
identifier[anns_to_get] . identifier[append] ( identifier[file] )
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total] ,
identifier[lost_frontier] = identifier[old_frontier]
)
keyword[else] :
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total] ,
identifier[lost_frontier] = identifier[old_frontier]
)
identifier[tmp_results] [ identifier[file] ]=[]
keyword[if] identifier[going_forward] :
identifier[latestFileMod_inserts] [ identifier[file] ]=( identifier[file] , identifier[revision] )
keyword[continue]
identifier[tmp_res] = keyword[None]
keyword[if] identifier[file] keyword[in] identifier[files_to_process] :
identifier[tmp_ann] = identifier[self] . identifier[_get_annotation] ( identifier[old_frontier] , identifier[file] , identifier[transaction] )
keyword[if] identifier[tmp_ann] keyword[is] keyword[None] keyword[or] identifier[tmp_ann] == literal[string] keyword[or] identifier[self] . identifier[destringify_tuids] ( identifier[tmp_ann] ) keyword[is] keyword[None] :
identifier[Log] . identifier[warning] (
literal[string]
literal[string] ,
identifier[rev] = identifier[old_frontier] ,
identifier[file] = identifier[file]
)
identifier[anns_to_get] . identifier[append] ( identifier[file] )
keyword[else] :
identifier[csets_to_proc] = identifier[files_to_process] [ identifier[file] ][::- literal[int] ]
identifier[tmp_res] = identifier[self] . identifier[destringify_tuids] ( identifier[tmp_ann] )
identifier[new_fname] = identifier[file]
keyword[for] identifier[i] keyword[in] identifier[csets_to_proc] :
identifier[tmp_res] , identifier[new_fname] = identifier[self] . identifier[_apply_diff] ( identifier[transaction] , identifier[tmp_res] , identifier[parsed_diffs] [ identifier[i] ], identifier[i] , identifier[new_fname] )
identifier[ann_inserts] . identifier[append] (( identifier[revision] , identifier[file] , identifier[self] . identifier[stringify_tuids] ( identifier[tmp_res] )))
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total]
)
keyword[else] :
identifier[old_ann] = identifier[self] . identifier[_get_annotation] ( identifier[old_frontier] , identifier[file] , identifier[transaction] )
keyword[if] identifier[old_ann] keyword[is] keyword[None] keyword[or] ( identifier[old_ann] == literal[string] keyword[and] identifier[file] keyword[in] identifier[added_files] ):
identifier[anns_to_get] . identifier[append] ( identifier[file] )
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total]
)
keyword[else] :
identifier[tmp_res] = identifier[self] . identifier[destringify_tuids] ( identifier[old_ann] ) keyword[if] identifier[old_ann] != literal[string] keyword[else] []
identifier[ann_inserts] . identifier[append] (( identifier[revision] , identifier[file] , identifier[old_ann] ))
identifier[Log] . identifier[note] (
literal[string]
literal[string] ,
identifier[count] = identifier[count] ,
identifier[total] = identifier[total] ,
identifier[file] = identifier[file] ,
identifier[rev] = identifier[revision] ,
identifier[percent] = identifier[count] / identifier[total]
)
keyword[if] identifier[tmp_res] :
identifier[tmp_results] [ identifier[file] ]= identifier[tmp_res]
keyword[else] :
identifier[Log] . identifier[note] (
literal[string] ,
identifier[file] = identifier[file] ,
identifier[revision] = identifier[revision]
)
identifier[tmp_results] [ identifier[file] ]=[]
keyword[if] identifier[going_forward] keyword[or] keyword[not] identifier[remaining_frontiers] :
identifier[latest_rev] = identifier[revision]
keyword[else] :
identifier[latest_rev] = identifier[old_frontier]
identifier[latestFileMod_inserts] [ identifier[file] ]=( identifier[file] , identifier[latest_rev] )
identifier[Log] . identifier[note] ( literal[string] )
keyword[if] identifier[len] ( identifier[latestFileMod_inserts] )> literal[int] :
keyword[for] identifier[_] , identifier[inserts_list] keyword[in] identifier[jx] . identifier[groupby] ( identifier[latestFileMod_inserts] . identifier[values] (), identifier[size] = identifier[SQL_BATCH_SIZE] ):
identifier[transaction] . identifier[execute] (
literal[string] +
identifier[sql_list] ( identifier[quote_list] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[inserts_list] )
)
identifier[anns_added_by_other_thread] ={}
keyword[if] identifier[len] ( identifier[ann_inserts] )> literal[int] :
identifier[ann_inserts] = identifier[list] ( identifier[set] ( identifier[ann_inserts] ))
keyword[for] identifier[_] , identifier[tmp_inserts] keyword[in] identifier[jx] . identifier[groupby] ( identifier[ann_inserts] , identifier[size] = identifier[SQL_ANN_BATCH_SIZE] ):
identifier[recomputed_inserts] =[]
keyword[for] identifier[rev] , identifier[filename] , identifier[string_tuids] keyword[in] identifier[tmp_inserts] :
identifier[tmp_ann] = identifier[self] . identifier[_get_annotation] ( identifier[rev] , identifier[filename] , identifier[transaction] )
keyword[if] keyword[not] identifier[tmp_ann] keyword[or] identifier[tmp_ann] == literal[string] :
identifier[recomputed_inserts] . identifier[append] (( identifier[rev] , identifier[filename] , identifier[string_tuids] ))
keyword[else] :
identifier[anns_added_by_other_thread] [ identifier[filename] ]= identifier[self] . identifier[destringify_tuids] ( identifier[tmp_ann] )
keyword[if] identifier[len] ( identifier[recomputed_inserts] )<= literal[int] :
keyword[continue]
keyword[try] :
keyword[for] identifier[rev] , identifier[filename] , identifier[tuids_ann] keyword[in] identifier[recomputed_inserts] :
identifier[tmp_ann] = identifier[self] . identifier[destringify_tuids] ( identifier[tuids_ann] )
keyword[for] identifier[tuid_map] keyword[in] identifier[tmp_ann] :
keyword[if] identifier[tuid_map] keyword[is] keyword[None] keyword[or] identifier[tuid_map] . identifier[tuid] keyword[is] keyword[None] keyword[or] identifier[tuid_map] . identifier[line] keyword[is] keyword[None] :
identifier[Log] . identifier[warning] (
literal[string] ,
identifier[rev] = identifier[rev] , identifier[file] = identifier[filename] , identifier[tuids] = identifier[str] ( identifier[tuid_map] )
)
identifier[self] . identifier[insert_annotations] ( identifier[transaction] , identifier[recomputed_inserts] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[Log] . identifier[error] ( literal[string] , identifier[inserting] = identifier[recomputed_inserts] , identifier[cause] = identifier[e] )
keyword[if] identifier[len] ( identifier[anns_to_get] )> literal[int] :
identifier[result] . identifier[extend] ( identifier[self] . identifier[get_tuids] ( identifier[anns_to_get] , identifier[revision] , identifier[commit] = keyword[False] ))
keyword[for] identifier[f] keyword[in] identifier[tmp_results] :
identifier[tuids] = identifier[tmp_results] [ identifier[f] ]
keyword[if] identifier[f] keyword[in] identifier[anns_added_by_other_thread] :
identifier[tuids] = identifier[anns_added_by_other_thread] [ identifier[f] ]
identifier[result] . identifier[append] (( identifier[f] , identifier[tuids] ))
keyword[return] identifier[result] | def _update_file_frontiers(self, frontier_list, revision, max_csets_proc=30, going_forward=False):
"""
Update the frontier for all given files, up to the given revision.
Built for quick continuous _forward_ updating of large sets
of files of TUIDs. Backward updating should be done through
get_tuids(files, revision). If we cannot find a frontier, we will
stop looking after max_csets_proc and update all files at the given
revision.
:param frontier_list: list of files to update
:param revision: revision to update files to
:param max_csets_proc: maximum number of changeset logs to look through
to find past frontiers.
:param going_forward: If we know the requested revision is in front
of the latest revision use this flag. Used when
the frontier is too far away. If this is not set and
a frontier is too far, the latest revision will not
be updated.
:return: list of (file, list(tuids)) tuples
"""
# Get the changelogs and revisions until we find the
# last one we've seen, and get the modified files in
# each one.
# Holds the files modified up to the last frontiers.
files_to_process = {}
# Holds all frontiers to find
remaining_frontiers = {cset for cset in list(set([frontier for (_, frontier) in frontier_list]))}
if len(remaining_frontiers) <= 1 and frontier_list[0][1] == revision:
# If the latest revision is the requested revision,
# and there is only one frontier requested
# continue to the tuid querys.
remaining_frontiers = {} # depends on [control=['if'], data=[]]
# Revision we are searching from
final_rev = revision
# If this exceeds max_csets_proc,
# all frontiers not found are considered lost
csets_proced = 0
# Holds info on how to apply the diffs onto each frontier,
# and all known frontiers.
diffs_to_frontier = {cset: [] for cset in remaining_frontiers}
Log.note('Searching for frontier(s): {{frontier}} ', frontier=str(list(remaining_frontiers)))
Log.note('Running on revision with HG URL: {{url}}', url=self.hg_url / self.config.hg.branch / 'rev' / revision)
while remaining_frontiers:
# Get a changelog
clog_url = self.hg_url / self.config.hg.branch / 'json-log' / final_rev
try:
Log.note('Searching through changelog {{url}}', url=clog_url)
clog_obj = http.get_json(clog_url, retry=RETRY)
if isinstance(clog_obj, (text_type, str)):
Log.error('Revision {{cset}} does not exist in the {{branch}} branch', cset=final_rev, branch=self.config.hg.branch) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
Log.error('Unexpected error getting changset-log for {{url}}: {{error}}', url=clog_url, error=e) # depends on [control=['except'], data=['e']]
# For each changeset in the log (except the last one
# which is duplicated on the next log page requested.
clog_obj_list = list(clog_obj['changesets'])
for clog_cset in clog_obj_list[:-1]:
nodes_cset = clog_cset['node'][:12]
if remaining_frontiers:
if nodes_cset in remaining_frontiers:
# Found a frontier, remove it from search list.
remaining_frontiers.remove(nodes_cset)
if not remaining_frontiers:
# Found all frontiers, get out of the loop before
# we add the diff to a frontier update list.
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['nodes_cset', 'remaining_frontiers']]
# Add this diff to the processing list
# for each remaining frontier
for cset in diffs_to_frontier:
if cset in remaining_frontiers:
diffs_to_frontier[cset].append(nodes_cset) # depends on [control=['if'], data=['cset']] # depends on [control=['for'], data=['cset']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['clog_cset']]
csets_proced += 1
if not remaining_frontiers:
# End searching
break # depends on [control=['if'], data=[]]
elif csets_proced >= max_csets_proc:
# In this case, all files need to be updated to this revision to ensure
# line ordering consistency (between past, and future) when a revision
# that is in the past is asked for.
files_to_process = {file: [revision] for (file, _) in frontier_list}
break # depends on [control=['if'], data=[]]
else:
# Go to the next log page
last_entry = clog_obj_list[-1]
final_rev = last_entry['node'][:12] # depends on [control=['while'], data=[]]
if not remaining_frontiers:
Log.note('Found all frontiers: {{frontiers_list}}', frontiers_list=str(list(diffs_to_frontier.keys()))) # depends on [control=['if'], data=[]]
else:
found_frontiers = [frontier for frontier in diffs_to_frontier if frontier not in remaining_frontiers]
Log.note('Found frontiers: {{found}}', found=str(found_frontiers))
Log.note('Did not find frontiers: {{not_found}}', not_found=str(list(remaining_frontiers)))
added_files = {}
removed_files = {}
parsed_diffs = {}
# This list is used to determine what files
file_to_frontier = {file: frontier for (file, frontier) in frontier_list}
if len(remaining_frontiers) != len(diffs_to_frontier.keys()):
# If there is at least one frontier that was found
# Only get diffs that are needed (if any frontiers were not found)
diffs_cache = []
for cset in diffs_to_frontier:
if cset not in remaining_frontiers:
diffs_cache.extend(diffs_to_frontier[cset]) # depends on [control=['if'], data=['cset']] # depends on [control=['for'], data=['cset']]
Log.note('Gathering diffs for: {{csets}}', csets=str(diffs_cache))
all_diffs = self.get_diffs(diffs_cache)
# Build a dict for faster access to the diffs,
# to be used later when applying them.
parsed_diffs = {diff_entry['cset']: diff_entry['diff'] for diff_entry in all_diffs}
# In case the file name changes, this will map
# the requested file to the new file name so
# diffs can all be gathered.
filenames_to_seek = {}
# Parse diffs for files to process and store diffs to
# apply for each file in files_to_process.
added_and_removed_counts = {file: 1 for file in file_to_frontier}
for csets_diff in all_diffs:
cset_len12 = csets_diff['cset']
parsed_diff = csets_diff['diff']['diffs']
for f_added in parsed_diff:
# Get new entries for removed files.
new_name = f_added['new'].name.lstrip('/')
old_name = f_added['old'].name.lstrip('/')
# If we don't need this file, skip it
if new_name not in file_to_frontier and new_name not in filenames_to_seek:
if old_name not in file_to_frontier and old_name not in filenames_to_seek:
# File not requested
continue # depends on [control=['if'], data=[]]
if new_name == 'dev/null':
frontier_filename = old_name
while frontier_filename in filenames_to_seek:
frontier_filename = filenames_to_seek[frontier_filename] # depends on [control=['while'], data=['frontier_filename', 'filenames_to_seek']]
if frontier_filename not in removed_files:
removed_files[frontier_filename] = 0 # depends on [control=['if'], data=['frontier_filename', 'removed_files']]
removed_files[frontier_filename] = added_and_removed_counts[frontier_filename]
added_and_removed_counts[frontier_filename] += 1
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if old_name == 'dev/null':
frontier_filename = new_name
while frontier_filename in filenames_to_seek:
frontier_filename = filenames_to_seek[frontier_filename] # depends on [control=['while'], data=['frontier_filename', 'filenames_to_seek']]
if frontier_filename not in added_files:
added_files[frontier_filename] = 0 # depends on [control=['if'], data=['frontier_filename', 'added_files']]
added_files[frontier_filename] = added_and_removed_counts[frontier_filename]
added_and_removed_counts[frontier_filename] += 1
continue # depends on [control=['if'], data=[]]
if new_name != old_name:
# File name was changed, keep the diff anyway
# to add any changes it makes.
filenames_to_seek[new_name] = old_name # depends on [control=['if'], data=['new_name', 'old_name']]
# Get the originally requested file name
# by following filenames_to_seek entries
frontier_filename = new_name
while frontier_filename in filenames_to_seek:
frontier_filename = filenames_to_seek[frontier_filename] # depends on [control=['while'], data=['frontier_filename', 'filenames_to_seek']]
# If we are past the frontier for this file,
# or if we are at the frontier skip it.
if file_to_frontier[frontier_filename] == '':
# Previously found frontier, skip
continue # depends on [control=['if'], data=[]]
# At this point, file is in the database, is
# asked to be processed, and we are still
# searching for the last frontier.
if file_to_frontier[frontier_filename] == cset_len12:
file_to_frontier[frontier_filename] = ''
# Found the frontier, skip
continue # depends on [control=['if'], data=[]]
if old_name != new_name:
Log.note("{{cset}} changes a requested file's name: {{file}} from {{oldfile}}. ", file=new_name, oldfile=old_name, cset=cset) # depends on [control=['if'], data=['old_name', 'new_name']]
# Store the diff as it needs to be applied
if frontier_filename in files_to_process:
files_to_process[frontier_filename].append(cset_len12) # depends on [control=['if'], data=['frontier_filename', 'files_to_process']]
else:
files_to_process[frontier_filename] = [cset_len12] # depends on [control=['for'], data=['f_added']] # depends on [control=['for'], data=['csets_diff']] # depends on [control=['if'], data=[]]
# Process each file that needs it based on the
# files_to_process list.
result = []
ann_inserts = []
latestFileMod_inserts = {}
anns_to_get = []
total = len(frontier_list)
tmp_results = {}
with self.conn.transaction() as transaction:
for (count, (file, old_frontier)) in enumerate(frontier_list):
if old_frontier in remaining_frontiers:
# If we were still looking for the frontier by the end, get a new
# annotation for this file.
anns_to_get.append(file)
if going_forward:
# If we are always going forward, update the frontier
latestFileMod_inserts[file] = (file, revision) # depends on [control=['if'], data=[]]
Log.note("Frontier update - can't find frontier {{lost_frontier}}: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ", count=count, total=total, file=file, rev=revision, percent=count / total, lost_frontier=old_frontier)
continue # depends on [control=['if'], data=['old_frontier']]
elif file in removed_files or file in added_files:
if file not in removed_files:
removed_files[file] = 0 # depends on [control=['if'], data=['file', 'removed_files']]
if file not in added_files:
added_files[file] = 0 # depends on [control=['if'], data=['file', 'added_files']]
if removed_files[file] <= added_files[file]:
# For it to still exist it has to be
# added last (to give it a larger count)
anns_to_get.append(file)
Log.note('Frontier update - adding: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ', count=count, total=total, file=file, rev=revision, percent=count / total, lost_frontier=old_frontier) # depends on [control=['if'], data=[]]
else:
Log.note('Frontier update - deleting: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ', count=count, total=total, file=file, rev=revision, percent=count / total, lost_frontier=old_frontier)
tmp_results[file] = []
if going_forward:
# If we are always going forward, update the frontier
latestFileMod_inserts[file] = (file, revision) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
# If the file was modified, get it's newest
# annotation and update the file.
tmp_res = None
if file in files_to_process:
# Process this file using the diffs found
tmp_ann = self._get_annotation(old_frontier, file, transaction)
if tmp_ann is None or tmp_ann == '' or self.destringify_tuids(tmp_ann) is None:
Log.warning("{{file}} has frontier but can't find old annotation for it in {{rev}}, restarting it's frontier.", rev=old_frontier, file=file)
anns_to_get.append(file) # depends on [control=['if'], data=[]]
else:
# File was modified, apply it's diffs
# Reverse the diff list, we always find the newest diff first
csets_to_proc = files_to_process[file][::-1]
tmp_res = self.destringify_tuids(tmp_ann)
new_fname = file
for i in csets_to_proc:
(tmp_res, new_fname) = self._apply_diff(transaction, tmp_res, parsed_diffs[i], i, new_fname) # depends on [control=['for'], data=['i']]
ann_inserts.append((revision, file, self.stringify_tuids(tmp_res)))
Log.note('Frontier update - modified: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ', count=count, total=total, file=file, rev=revision, percent=count / total) # depends on [control=['if'], data=['file', 'files_to_process']]
else:
old_ann = self._get_annotation(old_frontier, file, transaction)
if old_ann is None or (old_ann == '' and file in added_files):
# File is new (likely from an error), or re-added - we need to create
# a new initial entry for this file.
anns_to_get.append(file)
Log.note('Frontier update - readded: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ', count=count, total=total, file=file, rev=revision, percent=count / total) # depends on [control=['if'], data=[]]
else:
# File was not modified since last
# known revision
tmp_res = self.destringify_tuids(old_ann) if old_ann != '' else []
ann_inserts.append((revision, file, old_ann))
Log.note('Frontier update - not modified: {{count}}/{{total}} - {{percent|percent(decimal=0)}} | {{rev}}|{{file}} ', count=count, total=total, file=file, rev=revision, percent=count / total)
if tmp_res:
tmp_results[file] = tmp_res # depends on [control=['if'], data=[]]
else:
Log.note('Error occured for file {{file}} in revision {{revision}}', file=file, revision=revision)
tmp_results[file] = []
# If we have found all frontiers, update to the
# latest revision. Otherwise, the requested
# revision is too far away (can't be sure
# if it's past). Unless we are told that we are
# going forward.
if going_forward or not remaining_frontiers:
latest_rev = revision # depends on [control=['if'], data=[]]
else:
latest_rev = old_frontier
latestFileMod_inserts[file] = (file, latest_rev) # depends on [control=['for'], data=[]]
Log.note('Updating DB tables `latestFileMod` and `annotations`...')
# No need to double-check if latesteFileMods has been updated before,
# we perform an insert or replace any way.
if len(latestFileMod_inserts) > 0:
for (_, inserts_list) in jx.groupby(latestFileMod_inserts.values(), size=SQL_BATCH_SIZE):
transaction.execute('INSERT OR REPLACE INTO latestFileMod (file, revision) VALUES ' + sql_list((quote_list(i) for i in inserts_list))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
anns_added_by_other_thread = {}
if len(ann_inserts) > 0:
ann_inserts = list(set(ann_inserts))
for (_, tmp_inserts) in jx.groupby(ann_inserts, size=SQL_ANN_BATCH_SIZE):
# Check if any were added in the mean time by another thread
recomputed_inserts = []
for (rev, filename, string_tuids) in tmp_inserts:
tmp_ann = self._get_annotation(rev, filename, transaction)
if not tmp_ann or tmp_ann == '':
recomputed_inserts.append((rev, filename, string_tuids)) # depends on [control=['if'], data=[]]
else:
anns_added_by_other_thread[filename] = self.destringify_tuids(tmp_ann) # depends on [control=['for'], data=[]]
if len(recomputed_inserts) <= 0:
continue # depends on [control=['if'], data=[]]
try:
for (rev, filename, tuids_ann) in recomputed_inserts:
tmp_ann = self.destringify_tuids(tuids_ann)
for tuid_map in tmp_ann:
if tuid_map is None or tuid_map.tuid is None or tuid_map.line is None:
Log.warning('None value encountered in annotation insertion in {{rev}} for {{file}}: {{tuids}}', rev=rev, file=filename, tuids=str(tuid_map)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tuid_map']] # depends on [control=['for'], data=[]]
self.insert_annotations(transaction, recomputed_inserts) # depends on [control=['try'], data=[]]
except Exception as e:
Log.error('Error inserting into annotations table: {{inserting}}', inserting=recomputed_inserts, cause=e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['transaction']]
if len(anns_to_get) > 0:
result.extend(self.get_tuids(anns_to_get, revision, commit=False)) # depends on [control=['if'], data=[]]
for f in tmp_results:
tuids = tmp_results[f]
if f in anns_added_by_other_thread:
tuids = anns_added_by_other_thread[f] # depends on [control=['if'], data=['f', 'anns_added_by_other_thread']]
result.append((f, tuids)) # depends on [control=['for'], data=['f']]
return result |
def find_packages_parents_requirements_dists(pkg_names, working_set=None):
"""
Leverages the `find_packages_requirements_dists` but strip out the
distributions that matches pkg_names.
"""
dists = []
# opting for a naive implementation
targets = set(pkg_names)
for dist in find_packages_requirements_dists(pkg_names, working_set):
if dist.project_name in targets:
continue
dists.append(dist)
return dists | def function[find_packages_parents_requirements_dists, parameter[pkg_names, working_set]]:
constant[
Leverages the `find_packages_requirements_dists` but strip out the
distributions that matches pkg_names.
]
variable[dists] assign[=] list[[]]
variable[targets] assign[=] call[name[set], parameter[name[pkg_names]]]
for taget[name[dist]] in starred[call[name[find_packages_requirements_dists], parameter[name[pkg_names], name[working_set]]]] begin[:]
if compare[name[dist].project_name in name[targets]] begin[:]
continue
call[name[dists].append, parameter[name[dist]]]
return[name[dists]] | keyword[def] identifier[find_packages_parents_requirements_dists] ( identifier[pkg_names] , identifier[working_set] = keyword[None] ):
literal[string]
identifier[dists] =[]
identifier[targets] = identifier[set] ( identifier[pkg_names] )
keyword[for] identifier[dist] keyword[in] identifier[find_packages_requirements_dists] ( identifier[pkg_names] , identifier[working_set] ):
keyword[if] identifier[dist] . identifier[project_name] keyword[in] identifier[targets] :
keyword[continue]
identifier[dists] . identifier[append] ( identifier[dist] )
keyword[return] identifier[dists] | def find_packages_parents_requirements_dists(pkg_names, working_set=None):
"""
Leverages the `find_packages_requirements_dists` but strip out the
distributions that matches pkg_names.
"""
dists = []
# opting for a naive implementation
targets = set(pkg_names)
for dist in find_packages_requirements_dists(pkg_names, working_set):
if dist.project_name in targets:
continue # depends on [control=['if'], data=[]]
dists.append(dist) # depends on [control=['for'], data=['dist']]
return dists |
def provide(self, name):
    """Gets the value registered with ``name`` and determines whether the
    value is a provider or a configuration setting. The ``KeyError`` is
    raised when the ``name`` is not found.
    The registered value is interpreted as a provider if it's callable. The
    provider is called with a single argument, the current
    :class:`Container` object. Returns the return value of a provider or
    the value itself in case the value is not callable.
    :param name:
        The name of the provider or configuration setting.
    """
    registered = self[name]  # raises KeyError for unknown names
    if callable(registered):
        # Callable values are providers: invoke with the container itself.
        return registered(self)
    return registered
constant[Gets the value registered with ``name`` and determines whether the
value is a provider or a configuration setting. The ``KeyError`` is
raised when the ``name`` is not found.
The registered value is interpreted as a provider if it's callable. The
provider is called with a single argument, the current
:class:`Container` object. Returns the return value of a provider or
the value itself in case the value is not callable.
:param name:
The name of the provider or configuration setting.
]
variable[rv] assign[=] call[name[self]][name[name]]
return[<ast.IfExp object at 0x7da1b26af8e0>] | keyword[def] identifier[provide] ( identifier[self] , identifier[name] ):
literal[string]
identifier[rv] = identifier[self] [ identifier[name] ]
keyword[return] identifier[rv] ( identifier[self] ) keyword[if] identifier[callable] ( identifier[rv] ) keyword[else] identifier[rv] | def provide(self, name):
"""Gets the value registered with ``name`` and determines whether the
value is a provider or a configuration setting. The ``KeyError`` is
raised when the ``name`` is not found.
The registered value is interpreted as a provider if it's callable. The
provider is called with a single argument, the current
:class:`Container` object. Returns the return value of a provider or
the value itself in case the value is not callable.
:param name:
The name of the provider or configuration setting.
"""
rv = self[name]
return rv(self) if callable(rv) else rv |
def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
    """Calculate mean squared generalization error and its gradient for
    overdetermined equation system.
    """
    n_samples = data.shape[0]
    n_folds = np.ceil(n_samples / skipstep)
    mse_acc = 0
    grad_acc = 0
    d = None  # holds the last fold's test targets for the normalization below
    for trainset, testset in xvschema(n_samples, skipstep):
        a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
        c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)
        # Regularized normal-equation inverse for the training fold.
        e = sp.linalg.inv(np.eye(a.shape[1]) * delta ** 2 + a.T.dot(a))
        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))
        # Accumulate squared test-set error and its derivative w.r.t. delta.
        mse_acc += np.sum(baecc * bae - 2 * bae * dc) + np.sum(d ** 2)
        grad_acc += np.sum(baee * dc - baecc * baee) * 4 * delta
    return mse_acc / (n_folds * d.size), grad_acc / (n_folds * d.size)
constant[Calculate mean squared generalization error and its gradient for
overdetermined equation system.
]
<ast.Tuple object at 0x7da1b2630a30> assign[=] name[data].shape
variable[d] assign[=] constant[None]
<ast.Tuple object at 0x7da1b2632410> assign[=] tuple[[<ast.Constant object at 0x7da1b26322f0>, <ast.Constant object at 0x7da1b2630ee0>]]
variable[nt] assign[=] call[name[np].ceil, parameter[binary_operation[name[t] / name[skipstep]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2631c00>, <ast.Name object at 0x7da1b2631e10>]]] in starred[call[name[xvschema], parameter[name[t], name[skipstep]]]] begin[:]
<ast.Tuple object at 0x7da1b2631390> assign[=] call[name[_construct_var_eqns], parameter[call[name[atleast_3d], parameter[call[name[data]][tuple[[<ast.Name object at 0x7da1b2630040>, <ast.Slice object at 0x7da1b2630160>, <ast.Slice object at 0x7da1b2630100>]]]]], name[p]]]
<ast.Tuple object at 0x7da1b2631690> assign[=] call[name[_construct_var_eqns], parameter[call[name[atleast_3d], parameter[call[name[data]][tuple[[<ast.Name object at 0x7da1b2632080>, <ast.Slice object at 0x7da1b2632f50>, <ast.Slice object at 0x7da1b2630880>]]]]], name[p]]]
variable[e] assign[=] call[name[sp].linalg.inv, parameter[binary_operation[binary_operation[call[name[np].eye, parameter[call[name[a].shape][constant[1]]]] * binary_operation[name[delta] ** constant[2]]] + call[name[a].T.dot, parameter[name[a]]]]]]
variable[ba] assign[=] call[call[name[b].transpose, parameter[]].dot, parameter[name[a]]]
variable[dc] assign[=] call[call[name[d].transpose, parameter[]].dot, parameter[name[c]]]
variable[bae] assign[=] call[name[ba].dot, parameter[name[e]]]
variable[baee] assign[=] call[name[bae].dot, parameter[name[e]]]
variable[baecc] assign[=] call[name[bae].dot, parameter[call[call[name[c].transpose, parameter[]].dot, parameter[name[c]]]]]
<ast.AugAssign object at 0x7da1b2632e00>
<ast.AugAssign object at 0x7da1b2630a90>
return[tuple[[<ast.BinOp object at 0x7da1b26325c0>, <ast.BinOp object at 0x7da1b2631b70>]]] | keyword[def] identifier[_msge_with_gradient_overdetermined] ( identifier[data] , identifier[delta] , identifier[xvschema] , identifier[skipstep] , identifier[p] ):
literal[string]
identifier[t] , identifier[m] , identifier[l] = identifier[data] . identifier[shape]
identifier[d] = keyword[None]
identifier[l] , identifier[k] = literal[int] , literal[int]
identifier[nt] = identifier[np] . identifier[ceil] ( identifier[t] / identifier[skipstep] )
keyword[for] identifier[trainset] , identifier[testset] keyword[in] identifier[xvschema] ( identifier[t] , identifier[skipstep] ):
identifier[a] , identifier[b] = identifier[_construct_var_eqns] ( identifier[atleast_3d] ( identifier[data] [ identifier[trainset] ,:,:]), identifier[p] )
identifier[c] , identifier[d] = identifier[_construct_var_eqns] ( identifier[atleast_3d] ( identifier[data] [ identifier[testset] ,:,:]), identifier[p] )
identifier[e] = identifier[sp] . identifier[linalg] . identifier[inv] ( identifier[np] . identifier[eye] ( identifier[a] . identifier[shape] [ literal[int] ])* identifier[delta] ** literal[int] + identifier[a] . identifier[T] . identifier[dot] ( identifier[a] ))
identifier[ba] = identifier[b] . identifier[transpose] (). identifier[dot] ( identifier[a] )
identifier[dc] = identifier[d] . identifier[transpose] (). identifier[dot] ( identifier[c] )
identifier[bae] = identifier[ba] . identifier[dot] ( identifier[e] )
identifier[baee] = identifier[bae] . identifier[dot] ( identifier[e] )
identifier[baecc] = identifier[bae] . identifier[dot] ( identifier[c] . identifier[transpose] (). identifier[dot] ( identifier[c] ))
identifier[l] += identifier[np] . identifier[sum] ( identifier[baecc] * identifier[bae] - literal[int] * identifier[bae] * identifier[dc] )+ identifier[np] . identifier[sum] ( identifier[d] ** literal[int] )
identifier[k] += identifier[np] . identifier[sum] ( identifier[baee] * identifier[dc] - identifier[baecc] * identifier[baee] )* literal[int] * identifier[delta]
keyword[return] identifier[l] /( identifier[nt] * identifier[d] . identifier[size] ), identifier[k] /( identifier[nt] * identifier[d] . identifier[size] ) | def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
"""Calculate mean squared generalization error and its gradient for
overdetermined equation system.
"""
(t, m, l) = data.shape
d = None
(l, k) = (0, 0)
nt = np.ceil(t / skipstep)
for (trainset, testset) in xvschema(t, skipstep):
(a, b) = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
(c, d) = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)
e = sp.linalg.inv(np.eye(a.shape[1]) * delta ** 2 + a.T.dot(a))
ba = b.transpose().dot(a)
dc = d.transpose().dot(c)
bae = ba.dot(e)
baee = bae.dot(e)
baecc = bae.dot(c.transpose().dot(c))
l += np.sum(baecc * bae - 2 * bae * dc) + np.sum(d ** 2)
k += np.sum(baee * dc - baecc * baee) * 4 * delta # depends on [control=['for'], data=[]]
return (l / (nt * d.size), k / (nt * d.size)) |
def save_instances(self, path, binary=False, mode=SaveMode.LOCAL_SAVE):
    """Save the instances in the system to the specified file.
    If binary is True, the instances will be saved in binary format.
    The Python equivalent of the CLIPS save-instances command.
    """
    # Select the binary or textual CLIPS saver; both return the number of
    # instances written, with 0 signalling failure.
    saver = lib.EnvBinarySaveInstances if binary else lib.EnvSaveInstances
    saved = saver(self._env, path.encode(), mode)
    if saved == 0:
        raise CLIPSError(self._env)
    return saved
constant[Save the instances in the system to the specified file.
If binary is True, the instances will be saved in binary format.
The Python equivalent of the CLIPS save-instances command.
]
if name[binary] begin[:]
variable[ret] assign[=] call[name[lib].EnvBinarySaveInstances, parameter[name[self]._env, call[name[path].encode, parameter[]], name[mode]]]
if compare[name[ret] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18bcc8f10>
return[name[ret]] | keyword[def] identifier[save_instances] ( identifier[self] , identifier[path] , identifier[binary] = keyword[False] , identifier[mode] = identifier[SaveMode] . identifier[LOCAL_SAVE] ):
literal[string]
keyword[if] identifier[binary] :
identifier[ret] = identifier[lib] . identifier[EnvBinarySaveInstances] ( identifier[self] . identifier[_env] , identifier[path] . identifier[encode] (), identifier[mode] )
keyword[else] :
identifier[ret] = identifier[lib] . identifier[EnvSaveInstances] ( identifier[self] . identifier[_env] , identifier[path] . identifier[encode] (), identifier[mode] )
keyword[if] identifier[ret] == literal[int] :
keyword[raise] identifier[CLIPSError] ( identifier[self] . identifier[_env] )
keyword[return] identifier[ret] | def save_instances(self, path, binary=False, mode=SaveMode.LOCAL_SAVE):
"""Save the instances in the system to the specified file.
If binary is True, the instances will be saved in binary format.
The Python equivalent of the CLIPS save-instances command.
"""
if binary:
ret = lib.EnvBinarySaveInstances(self._env, path.encode(), mode) # depends on [control=['if'], data=[]]
else:
ret = lib.EnvSaveInstances(self._env, path.encode(), mode)
if ret == 0:
raise CLIPSError(self._env) # depends on [control=['if'], data=[]]
return ret |
def md5sum(fname, block_size=1048576): # 2 ** 20
"""Calculate the md5sum for a file.
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest() | def function[md5sum, parameter[fname, block_size]]:
constant[Calculate the md5sum for a file.
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
]
variable[md5] assign[=] call[name[hashlib].md5, parameter[]]
with call[name[open], parameter[name[fname], constant[rb]]] begin[:]
while constant[True] begin[:]
variable[data] assign[=] call[name[fid].read, parameter[name[block_size]]]
if <ast.UnaryOp object at 0x7da1b26af220> begin[:]
break
call[name[md5].update, parameter[name[data]]]
return[call[name[md5].hexdigest, parameter[]]] | keyword[def] identifier[md5sum] ( identifier[fname] , identifier[block_size] = literal[int] ):
literal[string]
identifier[md5] = identifier[hashlib] . identifier[md5] ()
keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[fid] :
keyword[while] keyword[True] :
identifier[data] = identifier[fid] . identifier[read] ( identifier[block_size] )
keyword[if] keyword[not] identifier[data] :
keyword[break]
identifier[md5] . identifier[update] ( identifier[data] )
keyword[return] identifier[md5] . identifier[hexdigest] () | def md5sum(fname, block_size=1048576): # 2 ** 20
'Calculate the md5sum for a file.\n\n Parameters\n ----------\n fname : str\n Filename.\n block_size : int\n Block size to use when reading.\n\n Returns\n -------\n hash_ : str\n The hexadecimal digest of the hash.\n '
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break # depends on [control=['if'], data=[]]
md5.update(data) # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['fid']]
return md5.hexdigest() |
def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers):
    """Fixes files used as input when pre-processing MPL-containers in their numbered form."""
    # Refresh the header comment of every matching input file, one
    # MPL-container type at a time.
    for container in containers:
        pattern = os.path.join(sourceDir, container, container + '*' + suffix)
        for current_file in sorted(glob.glob(pattern)):
            fix_header_comment(current_file, timestamp)
constant[Fixes files used as input when pre-processing MPL-containers in their numbered form.]
for taget[name[container]] in starred[name[containers]] begin[:]
variable[files] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[sourceDir], name[container], binary_operation[binary_operation[name[container] + constant[*]] + name[suffix]]]]]]
for taget[name[currentFile]] in starred[call[name[sorted], parameter[name[files]]]] begin[:]
call[name[fix_header_comment], parameter[name[currentFile], name[timestamp]]] | keyword[def] identifier[fix_input_files_for_numbered_seq] ( identifier[sourceDir] , identifier[suffix] , identifier[timestamp] , identifier[containers] ):
literal[string]
keyword[for] identifier[container] keyword[in] identifier[containers] :
identifier[files] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[sourceDir] , identifier[container] , identifier[container] + literal[string] + identifier[suffix] ))
keyword[for] identifier[currentFile] keyword[in] identifier[sorted] ( identifier[files] ):
identifier[fix_header_comment] ( identifier[currentFile] , identifier[timestamp] ) | def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers):
"""Fixes files used as input when pre-processing MPL-containers in their numbered form."""
# Fix input files for each MPL-container type.
for container in containers:
files = glob.glob(os.path.join(sourceDir, container, container + '*' + suffix))
for currentFile in sorted(files):
fix_header_comment(currentFile, timestamp) # depends on [control=['for'], data=['currentFile']] # depends on [control=['for'], data=['container']] |
def off(self):
    """!
    \~english
    Close Audio output. set pin mode to output
    @return a boolean value. if True means close audio output is OK otherwise failed to close.
    \~chinese
    关闭音频输出。 将引脚模式设置为输出
    @return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。
    """
    isOK = True
    try:
        if self.channelR is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelR), self.PIN_MODE_OUTPUT])
    except Exception:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # only runtime failures of the external gpio call should be tolerated.
        isOK = False
        print("Close audio right channel failed.")
    try:
        if self.channelL is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelL), self.PIN_MODE_OUTPUT])
    except Exception:
        isOK = False
        print("Close audio left channel failed.")
    return isOK
return isOK | def function[off, parameter[self]]:
constant[!
\~english
Close Audio output. set pin mode to output
@return a boolean value. if True means close audio output is OK otherwise failed to close.
\~chinese
关闭音频输出。 将引脚模式设置为输出
@return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。
]
variable[isOK] assign[=] constant[True]
<ast.Try object at 0x7da1b27830a0>
<ast.Try object at 0x7da1b2782b60>
return[name[isOK]] | keyword[def] identifier[off] ( identifier[self] ):
literal[string]
identifier[isOK] = keyword[True]
keyword[try] :
keyword[if] identifier[self] . identifier[channelR] != keyword[None] :
identifier[sub] . identifier[call] ([ literal[string] , literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[channelR] ), identifier[self] . identifier[PIN_MODE_OUTPUT] ])
keyword[except] :
identifier[isOK] = keyword[False]
identifier[print] ( literal[string] )
keyword[try] :
keyword[if] identifier[self] . identifier[channelL] != keyword[None] :
identifier[sub] . identifier[call] ([ literal[string] , literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[channelL] ), identifier[self] . identifier[PIN_MODE_OUTPUT] ])
keyword[except] :
identifier[isOK] = keyword[False]
identifier[print] ( literal[string] )
keyword[return] identifier[isOK] | def off(self):
"""!
\\~english
Close Audio output. set pin mode to output
@return a boolean value. if True means close audio output is OK otherwise failed to close.
\\~chinese
关闭音频输出。 将引脚模式设置为输出
@return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。
"""
isOK = True
try:
if self.channelR != None:
sub.call(['gpio', '-g', 'mode', '{}'.format(self.channelR), self.PIN_MODE_OUTPUT]) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
isOK = False
print('Close audio right channel failed.') # depends on [control=['except'], data=[]]
try:
if self.channelL != None:
sub.call(['gpio', '-g', 'mode', '{}'.format(self.channelL), self.PIN_MODE_OUTPUT]) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
isOK = False
print('Close audio left channel failed.') # depends on [control=['except'], data=[]]
return isOK |
def stick_perm(presenter, egg, dist_dict, strategy):
    """Computes weights for one reordering using stick-breaking method"""
    # Re-seed the RNG so each permutation/worker draws independently.
    np.random.seed()
    # Only the presentation order of the original egg is needed here.
    orig_pres, _, _, _ = parse_egg(egg)
    # Produce one stick-breaking reordering of the egg.
    reordered = order_stick(presenter, egg, dist_dict, strategy)
    re_pres, _, re_features, _ = parse_egg(reordered)
    re_pres = list(re_pres)
    orig_pres = list(orig_pres)
    # Map each reordered item back to its index in the original order.
    orders = [orig_pres.index(item) for item in re_pres]
    # Feature weights are computed on the reordered presentation.
    weights = compute_feature_weights_dict(
        list(re_pres), list(re_pres), list(re_features), dist_dict)
    return weights, orders
constant[Computes weights for one reordering using stick-breaking method]
call[name[np].random.seed, parameter[]]
<ast.Tuple object at 0x7da1b1026b60> assign[=] call[name[parse_egg], parameter[name[egg]]]
variable[regg] assign[=] call[name[order_stick], parameter[name[presenter], name[egg], name[dist_dict], name[strategy]]]
<ast.Tuple object at 0x7da1b1025480> assign[=] call[name[parse_egg], parameter[name[regg]]]
variable[regg_pres] assign[=] call[name[list], parameter[name[regg_pres]]]
variable[egg_pres] assign[=] call[name[list], parameter[name[egg_pres]]]
variable[idx] assign[=] <ast.ListComp object at 0x7da1b1025090>
variable[weights] assign[=] call[name[compute_feature_weights_dict], parameter[call[name[list], parameter[name[regg_pres]]], call[name[list], parameter[name[regg_pres]]], call[name[list], parameter[name[regg_features]]], name[dist_dict]]]
variable[orders] assign[=] name[idx]
return[tuple[[<ast.Name object at 0x7da1b10260b0>, <ast.Name object at 0x7da1b1024670>]]] | keyword[def] identifier[stick_perm] ( identifier[presenter] , identifier[egg] , identifier[dist_dict] , identifier[strategy] ):
literal[string]
identifier[np] . identifier[random] . identifier[seed] ()
identifier[egg_pres] , identifier[egg_rec] , identifier[egg_features] , identifier[egg_dist_funcs] = identifier[parse_egg] ( identifier[egg] )
identifier[regg] = identifier[order_stick] ( identifier[presenter] , identifier[egg] , identifier[dist_dict] , identifier[strategy] )
identifier[regg_pres] , identifier[regg_rec] , identifier[regg_features] , identifier[regg_dist_funcs] = identifier[parse_egg] ( identifier[regg] )
identifier[regg_pres] = identifier[list] ( identifier[regg_pres] )
identifier[egg_pres] = identifier[list] ( identifier[egg_pres] )
identifier[idx] =[ identifier[egg_pres] . identifier[index] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[regg_pres] ]
identifier[weights] = identifier[compute_feature_weights_dict] ( identifier[list] ( identifier[regg_pres] ), identifier[list] ( identifier[regg_pres] ), identifier[list] ( identifier[regg_features] ), identifier[dist_dict] )
identifier[orders] = identifier[idx]
keyword[return] identifier[weights] , identifier[orders] | def stick_perm(presenter, egg, dist_dict, strategy):
"""Computes weights for one reordering using stick-breaking method"""
# seed RNG
np.random.seed()
# unpack egg
(egg_pres, egg_rec, egg_features, egg_dist_funcs) = parse_egg(egg)
# reorder
regg = order_stick(presenter, egg, dist_dict, strategy)
# unpack regg
(regg_pres, regg_rec, regg_features, regg_dist_funcs) = parse_egg(regg)
# # get the order
regg_pres = list(regg_pres)
egg_pres = list(egg_pres)
idx = [egg_pres.index(r) for r in regg_pres]
# compute weights
weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict)
# save out the order
orders = idx
return (weights, orders) |
def get_response_message(self, resp_msg_signature=None, delay=COMMAND_DELAY):
    """ Receive data from connected gateway and if required seach and return a stream that starts at the required
    response message signature. The reason we couple the search for the response signature here is that given the
    RNET protocol and TCP comms, we dont have an easy way of knowign that we have received the response. We want to
    minimise the time spent reading the socket (to reduce user lag), hence we use the message response signature
    at this point to determine when to stop reading.

    :param resp_msg_signature: byte pattern identifying the wanted response; None means just drain the
        receive buffer with a single read and return None.
    :param delay: seconds slept before the first read and between retries (defaults to COMMAND_DELAY).
    :return: the stream starting at the matching signature as returned by ``find_signature``, or None
        if no signature was requested or none was found within the retry budget.
    """
    matching_message = None  # Set intial value to none (assume no response found)
    if resp_msg_signature is None:
        no_of_socket_reads = 1  # If we are not looking for a specific response do a single read to clear the buffer
    else:
        no_of_socket_reads = 10  # Try 10x (= approx 1s at default)if we are looking for a specific response
    time.sleep(delay)  # Insert recommended delay to ensure command is processed correctly
    self.sock.setblocking(0)  # Needed to prevent request for waiting indefinitely
    data = B''
    for i in range(0, no_of_socket_reads):
        try:
            # Receive what has been sent; accumulate across reads since a
            # response may arrive split over several TCP segments.
            data += self.sock.recv(4096)
            _LOGGER.debug('i= %s; len= %s data= %s', i, len(data), '[{}]'.format(', '.join(hex(x) for x in data)))
        except BlockingIOError:  # Expected outcome if there is not data
            _LOGGER.debug('Passed=%s', i)
            pass
        except ConnectionResetError as msg:
            # NOTE(review): after a reset the loop keeps retrying on the same
            # dead socket — presumably intentional best-effort; confirm.
            _LOGGER.error("Error trying to connect to Russound controller. Check that no other device or system "
                          "is using the port that you are trying to connect to. "
                          "Try resetting the bridge you are using to connect.")
            _LOGGER.error(msg)
        # Check if we have our message. If so break out else keep looping.
        if resp_msg_signature is not None:  # If we are looking for a specific response
            # find_signature also returns the (possibly trimmed) buffer to
            # keep scanning on the next iteration.
            matching_message, data = self.find_signature(data, resp_msg_signature)
            if matching_message is not None:  # Required response found
                _LOGGER.debug("Number of reads=%s", i + 1)
                break
        time.sleep(delay)  # Wait before reading again - default of 100ms
    return matching_message
constant[ Receive data from connected gateway and if required seach and return a stream that starts at the required
response message signature. The reason we couple the search for the response signature here is that given the
RNET protocol and TCP comms, we dont have an easy way of knowign that we have received the response. We want to
minimise the time spent reading the socket (to reduce user lag), hence we use the message response signature
at this point to determine when to stop reading.]
variable[matching_message] assign[=] constant[None]
if compare[name[resp_msg_signature] is constant[None]] begin[:]
variable[no_of_socket_reads] assign[=] constant[1]
call[name[time].sleep, parameter[name[delay]]]
call[name[self].sock.setblocking, parameter[constant[0]]]
variable[data] assign[=] constant[b'']
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[no_of_socket_reads]]]] begin[:]
<ast.Try object at 0x7da1b0910610>
if compare[name[resp_msg_signature] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b0911900> assign[=] call[name[self].find_signature, parameter[name[data], name[resp_msg_signature]]]
if compare[name[matching_message] is_not constant[None]] begin[:]
call[name[_LOGGER].debug, parameter[constant[Number of reads=%s], binary_operation[name[i] + constant[1]]]]
break
call[name[time].sleep, parameter[name[delay]]]
return[name[matching_message]] | keyword[def] identifier[get_response_message] ( identifier[self] , identifier[resp_msg_signature] = keyword[None] , identifier[delay] = identifier[COMMAND_DELAY] ):
literal[string]
identifier[matching_message] = keyword[None]
keyword[if] identifier[resp_msg_signature] keyword[is] keyword[None] :
identifier[no_of_socket_reads] = literal[int]
keyword[else] :
identifier[no_of_socket_reads] = literal[int]
identifier[time] . identifier[sleep] ( identifier[delay] )
identifier[self] . identifier[sock] . identifier[setblocking] ( literal[int] )
identifier[data] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[no_of_socket_reads] ):
keyword[try] :
identifier[data] += identifier[self] . identifier[sock] . identifier[recv] ( literal[int] )
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[i] , identifier[len] ( identifier[data] ), literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[hex] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[data] )))
keyword[except] identifier[BlockingIOError] :
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[i] )
keyword[pass]
keyword[except] identifier[ConnectionResetError] keyword[as] identifier[msg] :
identifier[_LOGGER] . identifier[error] ( literal[string]
literal[string]
literal[string] )
identifier[_LOGGER] . identifier[error] ( identifier[msg] )
keyword[if] identifier[resp_msg_signature] keyword[is] keyword[not] keyword[None] :
identifier[matching_message] , identifier[data] = identifier[self] . identifier[find_signature] ( identifier[data] , identifier[resp_msg_signature] )
keyword[if] identifier[matching_message] keyword[is] keyword[not] keyword[None] :
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[i] + literal[int] )
keyword[break]
identifier[time] . identifier[sleep] ( identifier[delay] )
keyword[return] identifier[matching_message] | def get_response_message(self, resp_msg_signature=None, delay=COMMAND_DELAY):
""" Receive data from connected gateway and if required seach and return a stream that starts at the required
response message signature. The reason we couple the search for the response signature here is that given the
RNET protocol and TCP comms, we dont have an easy way of knowign that we have received the response. We want to
minimise the time spent reading the socket (to reduce user lag), hence we use the message response signature
at this point to determine when to stop reading."""
matching_message = None # Set intial value to none (assume no response found)
if resp_msg_signature is None:
no_of_socket_reads = 1 # If we are not looking for a specific response do a single read to clear the buffer # depends on [control=['if'], data=[]]
else:
no_of_socket_reads = 10 # Try 10x (= approx 1s at default)if we are looking for a specific response
time.sleep(delay) # Insert recommended delay to ensure command is processed correctly
self.sock.setblocking(0) # Needed to prevent request for waiting indefinitely
data = b''
for i in range(0, no_of_socket_reads):
try:
# Receive what has been sent
data += self.sock.recv(4096)
_LOGGER.debug('i= %s; len= %s data= %s', i, len(data), '[{}]'.format(', '.join((hex(x) for x in data)))) # depends on [control=['try'], data=[]]
except BlockingIOError: # Expected outcome if there is not data
_LOGGER.debug('Passed=%s', i)
pass # depends on [control=['except'], data=[]]
except ConnectionResetError as msg:
_LOGGER.error('Error trying to connect to Russound controller. Check that no other device or system is using the port that you are trying to connect to. Try resetting the bridge you are using to connect.')
_LOGGER.error(msg) # depends on [control=['except'], data=['msg']]
# Check if we have our message. If so break out else keep looping.
if resp_msg_signature is not None: # If we are looking for a specific response
(matching_message, data) = self.find_signature(data, resp_msg_signature) # depends on [control=['if'], data=['resp_msg_signature']]
if matching_message is not None: # Required response found
_LOGGER.debug('Number of reads=%s', i + 1)
break # depends on [control=['if'], data=[]]
time.sleep(delay) # Wait before reading again - default of 100ms # depends on [control=['for'], data=['i']]
return matching_message |
def _release_waiter(self) -> None:
"""
Iterates over all waiters till found one that is not finsihed and
belongs to a host that has available connections.
"""
if not self._waiters:
return
# Having the dict keys ordered this avoids to iterate
# at the same order at each call.
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return | def function[_release_waiter, parameter[self]]:
constant[
Iterates over all waiters till found one that is not finsihed and
belongs to a host that has available connections.
]
if <ast.UnaryOp object at 0x7da1b1fd7340> begin[:]
return[None]
variable[queues] assign[=] call[name[list], parameter[call[name[self]._waiters.keys, parameter[]]]]
call[name[random].shuffle, parameter[name[queues]]]
for taget[name[key]] in starred[name[queues]] begin[:]
if compare[call[name[self]._available_connections, parameter[name[key]]] less[<] constant[1]] begin[:]
continue
variable[waiters] assign[=] call[name[self]._waiters][name[key]]
while name[waiters] begin[:]
variable[waiter] assign[=] call[name[waiters].popleft, parameter[]]
if <ast.UnaryOp object at 0x7da1b1fd7610> begin[:]
call[name[waiter].set_result, parameter[constant[None]]]
return[None] | keyword[def] identifier[_release_waiter] ( identifier[self] )-> keyword[None] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_waiters] :
keyword[return]
identifier[queues] = identifier[list] ( identifier[self] . identifier[_waiters] . identifier[keys] ())
identifier[random] . identifier[shuffle] ( identifier[queues] )
keyword[for] identifier[key] keyword[in] identifier[queues] :
keyword[if] identifier[self] . identifier[_available_connections] ( identifier[key] )< literal[int] :
keyword[continue]
identifier[waiters] = identifier[self] . identifier[_waiters] [ identifier[key] ]
keyword[while] identifier[waiters] :
identifier[waiter] = identifier[waiters] . identifier[popleft] ()
keyword[if] keyword[not] identifier[waiter] . identifier[done] ():
identifier[waiter] . identifier[set_result] ( keyword[None] )
keyword[return] | def _release_waiter(self) -> None:
"""
Iterates over all waiters till found one that is not finsihed and
belongs to a host that has available connections.
"""
if not self._waiters:
return # depends on [control=['if'], data=[]]
# Having the dict keys ordered this avoids to iterate
# at the same order at each call.
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue # depends on [control=['if'], data=[]]
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['key']] |
def from_dict(cls, d):
    """
    Rebuild a Molecule from the dict produced by as_dict().
    Args:
        d (dict): dict representation of Molecule.
    Returns:
        Molecule object
    """
    # Rehydrate each serialized site, then delegate to the canonical
    # from_sites() constructor with the stored charge/spin settings.
    rebuilt_sites = [Site.from_dict(site_dict) for site_dict in d["sites"]]
    return cls.from_sites(
        rebuilt_sites,
        charge=d.get("charge", 0),
        spin_multiplicity=d.get("spin_multiplicity"),
    )
constant[
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
]
variable[sites] assign[=] <ast.ListComp object at 0x7da20c991420>
variable[charge] assign[=] call[name[d].get, parameter[constant[charge], constant[0]]]
variable[spin_multiplicity] assign[=] call[name[d].get, parameter[constant[spin_multiplicity]]]
return[call[name[cls].from_sites, parameter[name[sites]]]] | keyword[def] identifier[from_dict] ( identifier[cls] , identifier[d] ):
literal[string]
identifier[sites] =[ identifier[Site] . identifier[from_dict] ( identifier[sd] ) keyword[for] identifier[sd] keyword[in] identifier[d] [ literal[string] ]]
identifier[charge] = identifier[d] . identifier[get] ( literal[string] , literal[int] )
identifier[spin_multiplicity] = identifier[d] . identifier[get] ( literal[string] )
keyword[return] identifier[cls] . identifier[from_sites] ( identifier[sites] , identifier[charge] = identifier[charge] , identifier[spin_multiplicity] = identifier[spin_multiplicity] ) | def from_dict(cls, d):
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
sites = [Site.from_dict(sd) for sd in d['sites']]
charge = d.get('charge', 0)
spin_multiplicity = d.get('spin_multiplicity')
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity) |
def do_DBKEY(self, key):
    """Print raw content of a DB key.
    DBKEY g|u09tyzfe"""
    # Resolve the stored value type, then pick the matching reader.
    kind = DB.type(key).decode()
    readers = {'set': DB.smembers, 'string': DB.get}
    reader = readers.get(kind)
    if reader is not None:
        value = reader(key)
    else:
        value = 'Unsupported type {}'.format(kind)
    print('type:', magenta(kind))
    print('value:', white(value))
constant[Print raw content of a DB key.
DBKEY g|u09tyzfe]
variable[type_] assign[=] call[call[name[DB].type, parameter[name[key]]].decode, parameter[]]
if compare[name[type_] equal[==] constant[set]] begin[:]
variable[out] assign[=] call[name[DB].smembers, parameter[name[key]]]
call[name[print], parameter[constant[type:], call[name[magenta], parameter[name[type_]]]]]
call[name[print], parameter[constant[value:], call[name[white], parameter[name[out]]]]] | keyword[def] identifier[do_DBKEY] ( identifier[self] , identifier[key] ):
literal[string]
identifier[type_] = identifier[DB] . identifier[type] ( identifier[key] ). identifier[decode] ()
keyword[if] identifier[type_] == literal[string] :
identifier[out] = identifier[DB] . identifier[smembers] ( identifier[key] )
keyword[elif] identifier[type_] == literal[string] :
identifier[out] = identifier[DB] . identifier[get] ( identifier[key] )
keyword[else] :
identifier[out] = literal[string] . identifier[format] ( identifier[type_] )
identifier[print] ( literal[string] , identifier[magenta] ( identifier[type_] ))
identifier[print] ( literal[string] , identifier[white] ( identifier[out] )) | def do_DBKEY(self, key):
"""Print raw content of a DB key.
DBKEY g|u09tyzfe"""
type_ = DB.type(key).decode()
if type_ == 'set':
out = DB.smembers(key) # depends on [control=['if'], data=[]]
elif type_ == 'string':
out = DB.get(key) # depends on [control=['if'], data=[]]
else:
out = 'Unsupported type {}'.format(type_)
print('type:', magenta(type_))
print('value:', white(out)) |
def FormatNameToPython(i):
    """
    Transform a (method) name into a form which can be used as a python
    attribute
    example::
        >>> FormatNameToPython('<clinit>')
        'clinit'
    :param i: name to transform
    :rtype: str
    """
    # Drop the JVM's angle brackets and map '$' to '_' in one
    # translation pass instead of three chained replace() calls.
    return i.translate(str.maketrans("$", "_", "<>"))
constant[
Transform a (method) name into a form which can be used as a python
attribute
example::
>>> FormatNameToPython('<clinit>')
'clinit'
:param i: name to transform
:rtype: str
]
variable[i] assign[=] call[name[i].replace, parameter[constant[<], constant[]]]
variable[i] assign[=] call[name[i].replace, parameter[constant[>], constant[]]]
variable[i] assign[=] call[name[i].replace, parameter[constant[$], constant[_]]]
return[name[i]] | keyword[def] identifier[FormatNameToPython] ( identifier[i] ):
literal[string]
identifier[i] = identifier[i] . identifier[replace] ( literal[string] , literal[string] )
identifier[i] = identifier[i] . identifier[replace] ( literal[string] , literal[string] )
identifier[i] = identifier[i] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[i] | def FormatNameToPython(i):
"""
Transform a (method) name into a form which can be used as a python
attribute
example::
>>> FormatNameToPython('<clinit>')
'clinit'
:param i: name to transform
:rtype: str
"""
i = i.replace('<', '')
i = i.replace('>', '')
i = i.replace('$', '_')
return i |
def ChiSquared(k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of k to
    a matching shaped ChiSquared.
    :param k: the number of degrees of freedom
    """
    # Look up the JVM vertex class first, then coerce k to an integer
    # vertex, matching the original evaluation order.
    jvm_vertex_class = context.jvm_view().ChiSquaredVertex
    degrees_of_freedom = cast_to_integer_vertex(k)
    return Double(jvm_vertex_class, label, degrees_of_freedom)
constant[
One to one constructor for mapping some shape of k to
a matching shaped ChiSquared.
:param k: the number of degrees of freedom
]
return[call[name[Double], parameter[call[name[context].jvm_view, parameter[]].ChiSquaredVertex, name[label], call[name[cast_to_integer_vertex], parameter[name[k]]]]]] | keyword[def] identifier[ChiSquared] ( identifier[k] : identifier[vertex_constructor_param_types] , identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] :
literal[string]
keyword[return] identifier[Double] ( identifier[context] . identifier[jvm_view] (). identifier[ChiSquaredVertex] , identifier[label] , identifier[cast_to_integer_vertex] ( identifier[k] )) | def ChiSquared(k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of k to
a matching shaped ChiSquared.
:param k: the number of degrees of freedom
"""
return Double(context.jvm_view().ChiSquaredVertex, label, cast_to_integer_vertex(k)) |
def _run_post_configure_callbacks(self, configure_args):
    """Invoke every stored post-configure callback.
    Each callback receives two positional arguments: the resulting
    configuration wrapped in an immutable view, and the argument list
    that was originally passed to :meth:`configure`. Return values of
    callbacks are discarded. Single-run callbacks are removed from the
    registry before being invoked so they only ever fire once.
    Args:
        configure_args (list[object]):
            The full list of arguments passed to :meth:`configure`.
    Returns:
        None:
            Does not return anything.
    """
    frozen_config = ImmutableDict(self.config)
    # Snapshot both registries so callbacks that register/unregister
    # other callbacks cannot disturb this iteration.
    repeating = list(self._post_configure_callbacks['multiple'])
    one_shot = list(self._post_configure_callbacks['single'])
    # Single-run callbacks must not fire again on the next configure().
    self._post_configure_callbacks['single'] = []
    # Run the repeating callbacks first, then the one-shot ones,
    # preserving the original invocation order.
    for callback in repeating + one_shot:
        callback(frozen_config, configure_args)
constant[Run all post configure callbacks we have stored.
Functions are passed the configuration that resulted from the call to
:meth:`configure` as the first argument, in an immutable form; and are
given the arguments passed to :meth:`configure` for the second
argument.
Returns from callbacks are ignored in all fashion.
Args:
configure_args (list[object]):
The full list of arguments passed to :meth:`configure`.
Returns:
None:
Does not return anything.
]
variable[resulting_configuration] assign[=] call[name[ImmutableDict], parameter[name[self].config]]
variable[multiple_callbacks] assign[=] call[name[copy].copy, parameter[call[name[self]._post_configure_callbacks][constant[multiple]]]]
variable[single_callbacks] assign[=] call[name[copy].copy, parameter[call[name[self]._post_configure_callbacks][constant[single]]]]
call[name[self]._post_configure_callbacks][constant[single]] assign[=] list[[]]
for taget[name[callback]] in starred[name[multiple_callbacks]] begin[:]
call[name[callback], parameter[name[resulting_configuration], name[configure_args]]]
for taget[name[callback]] in starred[name[single_callbacks]] begin[:]
call[name[callback], parameter[name[resulting_configuration], name[configure_args]]] | keyword[def] identifier[_run_post_configure_callbacks] ( identifier[self] , identifier[configure_args] ):
literal[string]
identifier[resulting_configuration] = identifier[ImmutableDict] ( identifier[self] . identifier[config] )
identifier[multiple_callbacks] = identifier[copy] . identifier[copy] (
identifier[self] . identifier[_post_configure_callbacks] [ literal[string] ]
)
identifier[single_callbacks] = identifier[copy] . identifier[copy] ( identifier[self] . identifier[_post_configure_callbacks] [ literal[string] ])
identifier[self] . identifier[_post_configure_callbacks] [ literal[string] ]=[]
keyword[for] identifier[callback] keyword[in] identifier[multiple_callbacks] :
identifier[callback] ( identifier[resulting_configuration] , identifier[configure_args] )
keyword[for] identifier[callback] keyword[in] identifier[single_callbacks] :
identifier[callback] ( identifier[resulting_configuration] , identifier[configure_args] ) | def _run_post_configure_callbacks(self, configure_args):
"""Run all post configure callbacks we have stored.
Functions are passed the configuration that resulted from the call to
:meth:`configure` as the first argument, in an immutable form; and are
given the arguments passed to :meth:`configure` for the second
argument.
Returns from callbacks are ignored in all fashion.
Args:
configure_args (list[object]):
The full list of arguments passed to :meth:`configure`.
Returns:
None:
Does not return anything.
"""
resulting_configuration = ImmutableDict(self.config)
# copy callbacks in case people edit them while running
multiple_callbacks = copy.copy(self._post_configure_callbacks['multiple'])
single_callbacks = copy.copy(self._post_configure_callbacks['single'])
# clear out the singles
self._post_configure_callbacks['single'] = []
for callback in multiple_callbacks:
callback(resulting_configuration, configure_args) # depends on [control=['for'], data=['callback']]
# now do the single run callbacks
for callback in single_callbacks:
callback(resulting_configuration, configure_args) # depends on [control=['for'], data=['callback']] |
def get(self, addresses):
    """Returns the value in this context, or None, for each address in
    addresses. Useful for gets on the context manager.
    Args:
        addresses (list of str): The addresses to return values for, if
            within this context.
    Returns:
        results (list of bytes): The values in state for these addresses.
    """
    def _checked_read(address):
        # Enforce read permissions before touching the value.
        self.validate_read(address)
        return self._get(address)

    # Hold the lock for the whole batch so reads are consistent.
    with self._lock:
        return [_checked_read(address) for address in addresses]
constant[Returns the value in this context, or None, for each address in
addresses. Useful for gets on the context manager.
Args:
addresses (list of str): The addresses to return values for, if
within this context.
Returns:
results (list of bytes): The values in state for these addresses.
]
with name[self]._lock begin[:]
variable[results] assign[=] list[[]]
for taget[name[add]] in starred[name[addresses]] begin[:]
call[name[self].validate_read, parameter[name[add]]]
call[name[results].append, parameter[call[name[self]._get, parameter[name[add]]]]]
return[name[results]] | keyword[def] identifier[get] ( identifier[self] , identifier[addresses] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
identifier[results] =[]
keyword[for] identifier[add] keyword[in] identifier[addresses] :
identifier[self] . identifier[validate_read] ( identifier[add] )
identifier[results] . identifier[append] ( identifier[self] . identifier[_get] ( identifier[add] ))
keyword[return] identifier[results] | def get(self, addresses):
"""Returns the value in this context, or None, for each address in
addresses. Useful for gets on the context manager.
Args:
addresses (list of str): The addresses to return values for, if
within this context.
Returns:
results (list of bytes): The values in state for these addresses.
"""
with self._lock:
results = []
for add in addresses:
self.validate_read(add)
results.append(self._get(add)) # depends on [control=['for'], data=['add']]
return results # depends on [control=['with'], data=[]] |
def dumps(obj):
    """
    Serializes a dictionary into ACF data.
    :param obj: A dictionary to serialize.
    :return: ACF data.
    """
    if isinstance(obj, dict):
        # _dumps yields the lines; join them and terminate with a newline.
        rendered_lines = _dumps(obj, level=0)
        return '\n'.join(rendered_lines) + '\n'
    raise TypeError('can only dump a dictionary as an ACF but got ' + type(obj).__name__)
constant[
Serializes a dictionary into ACF data.
:param obj: A dictionary to serialize.
:return: ACF data.
]
if <ast.UnaryOp object at 0x7da18fe90100> begin[:]
<ast.Raise object at 0x7da18fe92aa0>
return[binary_operation[call[constant[
].join, parameter[call[name[_dumps], parameter[name[obj]]]]] + constant[
]]] | keyword[def] identifier[dumps] ( identifier[obj] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
keyword[raise] identifier[TypeError] ( literal[string] + identifier[type] ( identifier[obj] ). identifier[__name__] )
keyword[return] literal[string] . identifier[join] ( identifier[_dumps] ( identifier[obj] , identifier[level] = literal[int] ))+ literal[string] | def dumps(obj):
"""
Serializes a dictionary into ACF data.
:param obj: A dictionary to serialize.
:return: ACF data.
"""
if not isinstance(obj, dict):
raise TypeError('can only dump a dictionary as an ACF but got ' + type(obj).__name__) # depends on [control=['if'], data=[]]
return '\n'.join(_dumps(obj, level=0)) + '\n' |
def _save_obj_without_attr(obj, attr_list, path, values_to_save=None):
    """
    Save object with attributes from attr_list.
    Temporarily swaps each attribute in ``attr_list`` for a placeholder
    from ``values_to_save``, serializes the object with dill, then
    restores the original attributes.
    Parameters
    ----------
    obj: obj
        Object of class with __dict__ attribute.
    attr_list: list
        List with attributes to exclude from saving to dill object. If empty
        list all attributes will be saved.
    path: str
        Where to save dill object.
    values_to_save: list, optional
        Placeholders for original attributes for saving object. If None will be
        extended to attr_list length like [None] * len(attr_list)
    """
    if values_to_save is None:
        values_to_save = [None] * len(attr_list)
    saved_attr_dict = {}
    try:
        for attr, val_save in zip(attr_list, values_to_save):
            # Only swap attributes the instance actually carries.
            if attr in obj.__dict__:
                saved_attr_dict[attr] = obj.__dict__.pop(attr)
                setattr(obj, attr, val_save)
        with open(path, "wb") as out_file:
            dill.dump(obj, out_file)
    finally:
        # Restore the original attributes even if dill.dump (or open)
        # raises, so the caller's object is never left holding the
        # placeholder values.
        for attr, item in saved_attr_dict.items():
            setattr(obj, attr, item)
constant[
Save object with attributes from attr_list.
Parameters
----------
obj: obj
Object of class with __dict__ attribute.
attr_list: list
List with attributes to exclude from saving to dill object. If empty
list all attributes will be saved.
path: str
Where to save dill object.
values_to_save: list, optional
Placeholders for original attributes for saving object. If None will be
extended to attr_list length like [None] * len(attr_list)
]
if compare[name[values_to_save] is constant[None]] begin[:]
variable[values_to_save] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b22bb520>]] * call[name[len], parameter[name[attr_list]]]]
variable[saved_attr_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b22b9360>, <ast.Name object at 0x7da1b22b9660>]]] in starred[call[name[zip], parameter[name[attr_list], name[values_to_save]]]] begin[:]
if compare[name[attr] in name[obj].__dict__] begin[:]
variable[item] assign[=] call[name[obj].__dict__.pop, parameter[name[attr]]]
call[name[saved_attr_dict]][name[attr]] assign[=] name[item]
call[name[setattr], parameter[name[obj], name[attr], name[val_save]]]
with call[name[open], parameter[name[path], constant[wb]]] begin[:]
call[name[dill].dump, parameter[name[obj], name[out_file]]]
for taget[tuple[[<ast.Name object at 0x7da1b22b9750>, <ast.Name object at 0x7da1b22b9000>]]] in starred[call[name[saved_attr_dict].items, parameter[]]] begin[:]
call[name[setattr], parameter[name[obj], name[attr], name[item]]] | keyword[def] identifier[_save_obj_without_attr] ( identifier[obj] , identifier[attr_list] , identifier[path] , identifier[values_to_save] = keyword[None] ):
literal[string]
keyword[if] identifier[values_to_save] keyword[is] keyword[None] :
identifier[values_to_save] =[ keyword[None] ]* identifier[len] ( identifier[attr_list] )
identifier[saved_attr_dict] ={}
keyword[for] identifier[attr] , identifier[val_save] keyword[in] identifier[zip] ( identifier[attr_list] , identifier[values_to_save] ):
keyword[if] identifier[attr] keyword[in] identifier[obj] . identifier[__dict__] :
identifier[item] = identifier[obj] . identifier[__dict__] . identifier[pop] ( identifier[attr] )
identifier[saved_attr_dict] [ identifier[attr] ]= identifier[item]
identifier[setattr] ( identifier[obj] , identifier[attr] , identifier[val_save] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[out_file] :
identifier[dill] . identifier[dump] ( identifier[obj] , identifier[out_file] )
keyword[for] identifier[attr] , identifier[item] keyword[in] identifier[saved_attr_dict] . identifier[items] ():
identifier[setattr] ( identifier[obj] , identifier[attr] , identifier[item] ) | def _save_obj_without_attr(obj, attr_list, path, values_to_save=None):
"""
Save object with attributes from attr_list.
Parameters
----------
obj: obj
Object of class with __dict__ attribute.
attr_list: list
List with attributes to exclude from saving to dill object. If empty
list all attributes will be saved.
path: str
Where to save dill object.
values_to_save: list, optional
Placeholders for original attributes for saving object. If None will be
extended to attr_list length like [None] * len(attr_list)
"""
if values_to_save is None:
values_to_save = [None] * len(attr_list) # depends on [control=['if'], data=['values_to_save']]
saved_attr_dict = {}
for (attr, val_save) in zip(attr_list, values_to_save):
if attr in obj.__dict__:
item = obj.__dict__.pop(attr)
saved_attr_dict[attr] = item
setattr(obj, attr, val_save) # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=[]]
with open(path, 'wb') as out_file:
dill.dump(obj, out_file) # depends on [control=['with'], data=['out_file']]
for (attr, item) in saved_attr_dict.items():
setattr(obj, attr, item) # depends on [control=['for'], data=[]] |
def run_main():
    """Command-line entry point for searching Splunk.
    Builds an argparse CLI, resolves settings from CLI flags layered
    over module-level SPLUNK_* defaults, validates credentials (a
    pre-existing token may replace user/password), then submits the
    query through ``sp.search`` and prints any matching log records.
    Exits the process with status 1 on invalid settings or an
    unparseable host address.
    """
    # --- build the CLI argument parser ---
    parser = argparse.ArgumentParser(
        description=(
            'Search Splunk'))
    parser.add_argument(
        '-u',
        help='username',
        required=False,
        dest='user')
    parser.add_argument(
        '-p',
        help='user password',
        required=False,
        dest='password')
    parser.add_argument(
        '-f',
        help='splunk-ready request in a json file',
        required=False,
        dest='datafile')
    parser.add_argument(
        '-i',
        help='index to search',
        required=False,
        dest='index_name')
    parser.add_argument(
        '-a',
        help='host address: <fqdn:port>',
        required=False,
        dest='address')
    parser.add_argument(
        '-e',
        help='(Optional) earliest_time minutes back',
        required=False,
        dest='earliest_time_minutes')
    parser.add_argument(
        '-l',
        help='(Optional) latest_time minutes back',
        required=False,
        dest='latest_time_minutes')
    parser.add_argument(
        '-q',
        '--queryargs',
        nargs='*',
        help=(
            'query string for searching splunk: '
            'search index="antinex" AND levelname="ERROR"'),
        required=False,
        dest='query_args')
    parser.add_argument(
        '-j',
        help='(Optional) view as json dictionary logs',
        required=False,
        dest='json_view',
        action='store_true')
    parser.add_argument(
        '-t',
        help=(
            '(Optional) pre-existing Splunk token '
            'which can be set using export '
            'SPLUNK_TOKEN=<token> - if provided '
            'the user (-u) and password (-p) '
            'arguments are not required'),
        required=False,
        dest='token')
    parser.add_argument(
        '-m',
        help='(Optional) verbose message when getting logs',
        required=False,
        dest='message_details',
        action='store_true')
    parser.add_argument(
        '-v',
        help='(Optional) verify certs - disabled by default',
        required=False,
        dest='verify',
        action='store_true')
    parser.add_argument(
        '-b',
        help='verbose',
        required=False,
        dest='verbose',
        action='store_true')
    args = parser.parse_args()
    # --- defaults come from module-level SPLUNK_* settings ---
    user = SPLUNK_USER
    password = SPLUNK_PASSWORD
    token = SPLUNK_TOKEN
    address = SPLUNK_API_ADDRESS
    index_name = SPLUNK_INDEX
    verbose = SPLUNK_VERBOSE
    # ev() presumably reads an environment variable with a default
    # — TODO confirm against its definition elsewhere in the package.
    show_message_details = bool(str(ev(
        'MESSAGE_DETAILS',
        '0')).lower() == '1')
    earliest_time_minutes = None
    latest_time_minutes = None
    verify = False
    code_view = True
    json_view = False
    datafile = None
    # --- CLI flags override the environment defaults ---
    if args.user:
        user = args.user
    if args.password:
        password = args.password
    if args.address:
        address = args.address
    if args.datafile:
        datafile = args.datafile
    if args.index_name:
        index_name = args.index_name
    if args.verify:
        verify = args.verify
    if args.earliest_time_minutes:
        earliest_time_minutes = int(args.earliest_time_minutes)
    if args.latest_time_minutes:
        latest_time_minutes = int(args.latest_time_minutes)
    if args.verbose:
        verbose = True
    if args.message_details:
        show_message_details = args.message_details
    if args.token:
        token = args.token
    if args.json_view:
        # json view and code view are mutually exclusive presentations
        json_view = True
        code_view = False
    default_search_query = 'index="{}" | head 10 | reverse'.format(
        index_name)
    search_query = ev(
        'SPLUNK_QUERY',
        default_search_query)
    # -q arguments, when given, win over SPLUNK_QUERY / the default
    if args.query_args:
        search_query = ' '.join(
            args.query_args)
    # --- validate required settings before contacting Splunk ---
    valid = True
    if not user or user == 'user-not-set':
        log.critical('missing user')
        valid = False
    if not password or password == 'password-not-set':
        log.critical('missing password')
        valid = False
    if not index_name:
        log.critical('missing splunk index')
        valid = False
    if token:
        # if the token is present,
        # then the user and the password are not required
        if not valid and index_name:
            valid = True
    if not valid:
        log.critical(
            'Please run with the following arguments:\n')
        log.error(
            '-u <username> -p <password> '
            '-i <index> -t <token if user and password not set> '
            '-a <host address as: fqdn:port>')
        log.critical(
            '\n'
            'Or you can export the following '
            'environment variables and retry the command: '
            '\n')
        log.error(
            'export SPLUNK_ADDRESS="splunkenterprise:8088"\n'
            'export SPLUNK_API_ADDRESS="splunkenterprise:8089"\n'
            'export SPLUNK_PASSWORD="123321"\n'
            'export SPLUNK_USER="trex"\n'
            'export SPLUNK_INDEX="antinex"\n'
            'export SPLUNK_TOKEN="<Optional pre-existing Splunk token>"\n')
        sys.exit(1)
    if verbose:
        log.info((
            'creating client user={} address={}').format(
                user,
                address))
    # --- split "<fqdn>:<port>" into host and integer port; last_msg
    # tracks which parsing step failed for the error report below ---
    last_msg = ''
    host = ''
    port = -1
    try:
        last_msg = (
            'Invalid address={}').format(
                address)
        address_split = address.split(':')
        last_msg = (
            'Failed finding host in address={} '
            '- please use: -a <fqdn:port>').format(
                address)
        host = address_split[0]
        last_msg = (
            'Failed finding integer port in address={} '
            '- please use: -a <fqdn:port>').format(
                address)
        port = int(address_split[1])
    except Exception as e:
        log.error((
            'Failed to parse -a {} for the '
            'splunk host address: {} which threw an '
            'ex={}').format(
                address,
                last_msg,
                e))
        sys.exit(1)
    # end of try ex
    if verbose:
        log.info((
            'connecting {}@{}:{}').format(
                user,
                host,
                port))
    # --- optionally load a pre-built request body from a json file ---
    req_body = None
    if datafile:
        if verbose:
            log.info((
                'loading request in datafile={}').format(
                    datafile))
        with open(datafile, 'r') as f:
            req_body = json.loads(f.read())
    # --- build the optional search time window (fixed -00:00 offset) ---
    earliest_time = None
    latest_time = None
    now = datetime.datetime.now()
    if earliest_time_minutes:
        min_15_ago = now - datetime.timedelta(
            minutes=earliest_time_minutes)
        earliest_time = min_15_ago.strftime(
            '%Y-%m-%dT%H:%M:%S.000-00:00')
    if latest_time_minutes:
        latest_time = (now - datetime.timedelta(
            minutes=latest_time_minutes)).strftime(
                '%Y-%m-%dT%H:%M:%S.000-00:00')
    # Step 2: Create a search job
    # Splunk requires queries to start with the 'search' command
    if not search_query.startswith('search'):
        search_query = 'search {}'.format(
            search_query)
    # A datafile body, when present, is used verbatim; otherwise build
    # the request from the query string and time window.
    search_data = req_body
    if not search_data:
        search_data = {
            'search': search_query
        }
        if earliest_time:
            search_data['earliest_time'] = earliest_time
        if latest_time:
            search_data['latest_time'] = latest_time
    res = sp.search(
        user=user,
        password=password,
        address=address,
        token=token,
        query_dict=search_data,
        verify=verify)
    if res['status'] == SUCCESS:
        result_list = []
        try:
            result_list = res['record'].get(
                'results',
                result_list)
            if len(result_list) == 0:
                log.info((
                    'No matches for search={} '
                    'response={}').format(
                        ppj(search_data),
                        ppj(res['record'])))
        except Exception as e:
            result_list = []
            log.error((
                'Failed to find results for the query={} '
                'with ex={}').format(
                    ppj(search_data),
                    e))
        # Records with a '_raw' payload render as raw search hits;
        # anything else goes through the non-search presenter.
        for ridx, log_record in enumerate(result_list):
            log_raw = log_record.get(
                '_raw',
                None)
            if log_raw:
                show_search_results(
                    log_rec=log_raw,
                    code_view=code_view,
                    json_view=json_view,
                    show_message_details=show_message_details)
            else:
                show_non_search_results(
                    log_rec=log_record,
                    code_view=code_view,
                    json_view=json_view,
                    show_message_details=show_message_details)
            # end of handling log record presentation as a view
        # end for all log records
    else:
        log.error((
            'Failed searching splunk with status={} and '
            'error: {}').format(
                res['status'],
                res['err']))
    # end of if job_id
    if verbose:
        log.info('done')
constant[run_main
Search Splunk
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-u]]]
call[name[parser].add_argument, parameter[constant[-p]]]
call[name[parser].add_argument, parameter[constant[-f]]]
call[name[parser].add_argument, parameter[constant[-i]]]
call[name[parser].add_argument, parameter[constant[-a]]]
call[name[parser].add_argument, parameter[constant[-e]]]
call[name[parser].add_argument, parameter[constant[-l]]]
call[name[parser].add_argument, parameter[constant[-q], constant[--queryargs]]]
call[name[parser].add_argument, parameter[constant[-j]]]
call[name[parser].add_argument, parameter[constant[-t]]]
call[name[parser].add_argument, parameter[constant[-m]]]
call[name[parser].add_argument, parameter[constant[-v]]]
call[name[parser].add_argument, parameter[constant[-b]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
variable[user] assign[=] name[SPLUNK_USER]
variable[password] assign[=] name[SPLUNK_PASSWORD]
variable[token] assign[=] name[SPLUNK_TOKEN]
variable[address] assign[=] name[SPLUNK_API_ADDRESS]
variable[index_name] assign[=] name[SPLUNK_INDEX]
variable[verbose] assign[=] name[SPLUNK_VERBOSE]
variable[show_message_details] assign[=] call[name[bool], parameter[compare[call[call[name[str], parameter[call[name[ev], parameter[constant[MESSAGE_DETAILS], constant[0]]]]].lower, parameter[]] equal[==] constant[1]]]]
variable[earliest_time_minutes] assign[=] constant[None]
variable[latest_time_minutes] assign[=] constant[None]
variable[verify] assign[=] constant[False]
variable[code_view] assign[=] constant[True]
variable[json_view] assign[=] constant[False]
variable[datafile] assign[=] constant[None]
if name[args].user begin[:]
variable[user] assign[=] name[args].user
if name[args].password begin[:]
variable[password] assign[=] name[args].password
if name[args].address begin[:]
variable[address] assign[=] name[args].address
if name[args].datafile begin[:]
variable[datafile] assign[=] name[args].datafile
if name[args].index_name begin[:]
variable[index_name] assign[=] name[args].index_name
if name[args].verify begin[:]
variable[verify] assign[=] name[args].verify
if name[args].earliest_time_minutes begin[:]
variable[earliest_time_minutes] assign[=] call[name[int], parameter[name[args].earliest_time_minutes]]
if name[args].latest_time_minutes begin[:]
variable[latest_time_minutes] assign[=] call[name[int], parameter[name[args].latest_time_minutes]]
if name[args].verbose begin[:]
variable[verbose] assign[=] constant[True]
if name[args].message_details begin[:]
variable[show_message_details] assign[=] name[args].message_details
if name[args].token begin[:]
variable[token] assign[=] name[args].token
if name[args].json_view begin[:]
variable[json_view] assign[=] constant[True]
variable[code_view] assign[=] constant[False]
variable[default_search_query] assign[=] call[constant[index="{}" | head 10 | reverse].format, parameter[name[index_name]]]
variable[search_query] assign[=] call[name[ev], parameter[constant[SPLUNK_QUERY], name[default_search_query]]]
if name[args].query_args begin[:]
variable[search_query] assign[=] call[constant[ ].join, parameter[name[args].query_args]]
variable[valid] assign[=] constant[True]
if <ast.BoolOp object at 0x7da20c9927d0> begin[:]
call[name[log].critical, parameter[constant[missing user]]]
variable[valid] assign[=] constant[False]
if <ast.BoolOp object at 0x7da20c993af0> begin[:]
call[name[log].critical, parameter[constant[missing password]]]
variable[valid] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da207f9be50> begin[:]
call[name[log].critical, parameter[constant[missing splunk index]]]
variable[valid] assign[=] constant[False]
if name[token] begin[:]
if <ast.BoolOp object at 0x7da207f9b820> begin[:]
variable[valid] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da207f9bc10> begin[:]
call[name[log].critical, parameter[constant[Please run with the following arguments:
]]]
call[name[log].error, parameter[constant[-u <username> -p <password> -i <index> -t <token if user and password not set> -a <host address as: fqdn:port>]]]
call[name[log].critical, parameter[constant[
Or you can export the following environment variables and retry the command:
]]]
call[name[log].error, parameter[constant[export SPLUNK_ADDRESS="splunkenterprise:8088"
export SPLUNK_API_ADDRESS="splunkenterprise:8089"
export SPLUNK_PASSWORD="123321"
export SPLUNK_USER="trex"
export SPLUNK_INDEX="antinex"
export SPLUNK_TOKEN="<Optional pre-existing Splunk token>"
]]]
call[name[sys].exit, parameter[constant[1]]]
if name[verbose] begin[:]
call[name[log].info, parameter[call[constant[creating client user={} address={}].format, parameter[name[user], name[address]]]]]
variable[last_msg] assign[=] constant[]
variable[host] assign[=] constant[]
variable[port] assign[=] <ast.UnaryOp object at 0x7da207f98730>
<ast.Try object at 0x7da207f997b0>
if name[verbose] begin[:]
call[name[log].info, parameter[call[constant[connecting {}@{}:{}].format, parameter[name[user], name[host], name[port]]]]]
variable[req_body] assign[=] constant[None]
if name[datafile] begin[:]
if name[verbose] begin[:]
call[name[log].info, parameter[call[constant[loading request in datafile={}].format, parameter[name[datafile]]]]]
with call[name[open], parameter[name[datafile], constant[r]]] begin[:]
variable[req_body] assign[=] call[name[json].loads, parameter[call[name[f].read, parameter[]]]]
variable[earliest_time] assign[=] constant[None]
variable[latest_time] assign[=] constant[None]
variable[now] assign[=] call[name[datetime].datetime.now, parameter[]]
if name[earliest_time_minutes] begin[:]
variable[min_15_ago] assign[=] binary_operation[name[now] - call[name[datetime].timedelta, parameter[]]]
variable[earliest_time] assign[=] call[name[min_15_ago].strftime, parameter[constant[%Y-%m-%dT%H:%M:%S.000-00:00]]]
if name[latest_time_minutes] begin[:]
variable[latest_time] assign[=] call[binary_operation[name[now] - call[name[datetime].timedelta, parameter[]]].strftime, parameter[constant[%Y-%m-%dT%H:%M:%S.000-00:00]]]
if <ast.UnaryOp object at 0x7da204623040> begin[:]
variable[search_query] assign[=] call[constant[search {}].format, parameter[name[search_query]]]
variable[search_data] assign[=] name[req_body]
if <ast.UnaryOp object at 0x7da204623e20> begin[:]
variable[search_data] assign[=] dictionary[[<ast.Constant object at 0x7da204621db0>], [<ast.Name object at 0x7da204620d60>]]
if name[earliest_time] begin[:]
call[name[search_data]][constant[earliest_time]] assign[=] name[earliest_time]
if name[latest_time] begin[:]
call[name[search_data]][constant[latest_time]] assign[=] name[latest_time]
variable[res] assign[=] call[name[sp].search, parameter[]]
if compare[call[name[res]][constant[status]] equal[==] name[SUCCESS]] begin[:]
variable[result_list] assign[=] list[[]]
<ast.Try object at 0x7da204621f30>
for taget[tuple[[<ast.Name object at 0x7da18eb56fe0>, <ast.Name object at 0x7da18eb55690>]]] in starred[call[name[enumerate], parameter[name[result_list]]]] begin[:]
variable[log_raw] assign[=] call[name[log_record].get, parameter[constant[_raw], constant[None]]]
if name[log_raw] begin[:]
call[name[show_search_results], parameter[]]
if name[verbose] begin[:]
call[name[log].info, parameter[constant[done]]] | keyword[def] identifier[run_main] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] =(
literal[string] ))
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
literal[string] ,
identifier[nargs] = literal[string] ,
identifier[help] =(
literal[string]
literal[string] ),
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] ,
identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ),
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] ,
identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] ,
identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[required] = keyword[False] ,
identifier[dest] = literal[string] ,
identifier[action] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[user] = identifier[SPLUNK_USER]
identifier[password] = identifier[SPLUNK_PASSWORD]
identifier[token] = identifier[SPLUNK_TOKEN]
identifier[address] = identifier[SPLUNK_API_ADDRESS]
identifier[index_name] = identifier[SPLUNK_INDEX]
identifier[verbose] = identifier[SPLUNK_VERBOSE]
identifier[show_message_details] = identifier[bool] ( identifier[str] ( identifier[ev] (
literal[string] ,
literal[string] )). identifier[lower] ()== literal[string] )
identifier[earliest_time_minutes] = keyword[None]
identifier[latest_time_minutes] = keyword[None]
identifier[verify] = keyword[False]
identifier[code_view] = keyword[True]
identifier[json_view] = keyword[False]
identifier[datafile] = keyword[None]
keyword[if] identifier[args] . identifier[user] :
identifier[user] = identifier[args] . identifier[user]
keyword[if] identifier[args] . identifier[password] :
identifier[password] = identifier[args] . identifier[password]
keyword[if] identifier[args] . identifier[address] :
identifier[address] = identifier[args] . identifier[address]
keyword[if] identifier[args] . identifier[datafile] :
identifier[datafile] = identifier[args] . identifier[datafile]
keyword[if] identifier[args] . identifier[index_name] :
identifier[index_name] = identifier[args] . identifier[index_name]
keyword[if] identifier[args] . identifier[verify] :
identifier[verify] = identifier[args] . identifier[verify]
keyword[if] identifier[args] . identifier[earliest_time_minutes] :
identifier[earliest_time_minutes] = identifier[int] ( identifier[args] . identifier[earliest_time_minutes] )
keyword[if] identifier[args] . identifier[latest_time_minutes] :
identifier[latest_time_minutes] = identifier[int] ( identifier[args] . identifier[latest_time_minutes] )
keyword[if] identifier[args] . identifier[verbose] :
identifier[verbose] = keyword[True]
keyword[if] identifier[args] . identifier[message_details] :
identifier[show_message_details] = identifier[args] . identifier[message_details]
keyword[if] identifier[args] . identifier[token] :
identifier[token] = identifier[args] . identifier[token]
keyword[if] identifier[args] . identifier[json_view] :
identifier[json_view] = keyword[True]
identifier[code_view] = keyword[False]
identifier[default_search_query] = literal[string] . identifier[format] (
identifier[index_name] )
identifier[search_query] = identifier[ev] (
literal[string] ,
identifier[default_search_query] )
keyword[if] identifier[args] . identifier[query_args] :
identifier[search_query] = literal[string] . identifier[join] (
identifier[args] . identifier[query_args] )
identifier[valid] = keyword[True]
keyword[if] keyword[not] identifier[user] keyword[or] identifier[user] == literal[string] :
identifier[log] . identifier[critical] ( literal[string] )
identifier[valid] = keyword[False]
keyword[if] keyword[not] identifier[password] keyword[or] identifier[password] == literal[string] :
identifier[log] . identifier[critical] ( literal[string] )
identifier[valid] = keyword[False]
keyword[if] keyword[not] identifier[index_name] :
identifier[log] . identifier[critical] ( literal[string] )
identifier[valid] = keyword[False]
keyword[if] identifier[token] :
keyword[if] keyword[not] identifier[valid] keyword[and] identifier[index_name] :
identifier[valid] = keyword[True]
keyword[if] keyword[not] identifier[valid] :
identifier[log] . identifier[critical] (
literal[string] )
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string] )
identifier[log] . identifier[critical] (
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[verbose] :
identifier[log] . identifier[info] ((
literal[string] ). identifier[format] (
identifier[user] ,
identifier[address] ))
identifier[last_msg] = literal[string]
identifier[host] = literal[string]
identifier[port] =- literal[int]
keyword[try] :
identifier[last_msg] =(
literal[string] ). identifier[format] (
identifier[address] )
identifier[address_split] = identifier[address] . identifier[split] ( literal[string] )
identifier[last_msg] =(
literal[string]
literal[string] ). identifier[format] (
identifier[address] )
identifier[host] = identifier[address_split] [ literal[int] ]
identifier[last_msg] =(
literal[string]
literal[string] ). identifier[format] (
identifier[address] )
identifier[port] = identifier[int] ( identifier[address_split] [ literal[int] ])
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[error] ((
literal[string]
literal[string]
literal[string] ). identifier[format] (
identifier[address] ,
identifier[last_msg] ,
identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[verbose] :
identifier[log] . identifier[info] ((
literal[string] ). identifier[format] (
identifier[user] ,
identifier[host] ,
identifier[port] ))
identifier[req_body] = keyword[None]
keyword[if] identifier[datafile] :
keyword[if] identifier[verbose] :
identifier[log] . identifier[info] ((
literal[string] ). identifier[format] (
identifier[datafile] ))
keyword[with] identifier[open] ( identifier[datafile] , literal[string] ) keyword[as] identifier[f] :
identifier[req_body] = identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ())
identifier[earliest_time] = keyword[None]
identifier[latest_time] = keyword[None]
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[if] identifier[earliest_time_minutes] :
identifier[min_15_ago] = identifier[now] - identifier[datetime] . identifier[timedelta] (
identifier[minutes] = identifier[earliest_time_minutes] )
identifier[earliest_time] = identifier[min_15_ago] . identifier[strftime] (
literal[string] )
keyword[if] identifier[latest_time_minutes] :
identifier[latest_time] =( identifier[now] - identifier[datetime] . identifier[timedelta] (
identifier[minutes] = identifier[latest_time_minutes] )). identifier[strftime] (
literal[string] )
keyword[if] keyword[not] identifier[search_query] . identifier[startswith] ( literal[string] ):
identifier[search_query] = literal[string] . identifier[format] (
identifier[search_query] )
identifier[search_data] = identifier[req_body]
keyword[if] keyword[not] identifier[search_data] :
identifier[search_data] ={
literal[string] : identifier[search_query]
}
keyword[if] identifier[earliest_time] :
identifier[search_data] [ literal[string] ]= identifier[earliest_time]
keyword[if] identifier[latest_time] :
identifier[search_data] [ literal[string] ]= identifier[latest_time]
identifier[res] = identifier[sp] . identifier[search] (
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[address] = identifier[address] ,
identifier[token] = identifier[token] ,
identifier[query_dict] = identifier[search_data] ,
identifier[verify] = identifier[verify] )
keyword[if] identifier[res] [ literal[string] ]== identifier[SUCCESS] :
identifier[result_list] =[]
keyword[try] :
identifier[result_list] = identifier[res] [ literal[string] ]. identifier[get] (
literal[string] ,
identifier[result_list] )
keyword[if] identifier[len] ( identifier[result_list] )== literal[int] :
identifier[log] . identifier[info] ((
literal[string]
literal[string] ). identifier[format] (
identifier[ppj] ( identifier[search_data] ),
identifier[ppj] ( identifier[res] [ literal[string] ])))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[result_list] =[]
identifier[log] . identifier[error] ((
literal[string]
literal[string] ). identifier[format] (
identifier[ppj] ( identifier[search_data] ),
identifier[e] ))
keyword[for] identifier[ridx] , identifier[log_record] keyword[in] identifier[enumerate] ( identifier[result_list] ):
identifier[log_raw] = identifier[log_record] . identifier[get] (
literal[string] ,
keyword[None] )
keyword[if] identifier[log_raw] :
identifier[show_search_results] (
identifier[log_rec] = identifier[log_raw] ,
identifier[code_view] = identifier[code_view] ,
identifier[json_view] = identifier[json_view] ,
identifier[show_message_details] = identifier[show_message_details] )
keyword[else] :
identifier[show_non_search_results] (
identifier[log_rec] = identifier[log_record] ,
identifier[code_view] = identifier[code_view] ,
identifier[json_view] = identifier[json_view] ,
identifier[show_message_details] = identifier[show_message_details] )
keyword[else] :
identifier[log] . identifier[error] ((
literal[string]
literal[string] ). identifier[format] (
identifier[res] [ literal[string] ],
identifier[res] [ literal[string] ]))
keyword[if] identifier[verbose] :
identifier[log] . identifier[info] ( literal[string] ) | def run_main():
"""run_main
Search Splunk
"""
parser = argparse.ArgumentParser(description='Search Splunk')
parser.add_argument('-u', help='username', required=False, dest='user')
parser.add_argument('-p', help='user password', required=False, dest='password')
parser.add_argument('-f', help='splunk-ready request in a json file', required=False, dest='datafile')
parser.add_argument('-i', help='index to search', required=False, dest='index_name')
parser.add_argument('-a', help='host address: <fqdn:port>', required=False, dest='address')
parser.add_argument('-e', help='(Optional) earliest_time minutes back', required=False, dest='earliest_time_minutes')
parser.add_argument('-l', help='(Optional) latest_time minutes back', required=False, dest='latest_time_minutes')
parser.add_argument('-q', '--queryargs', nargs='*', help='query string for searching splunk: search index="antinex" AND levelname="ERROR"', required=False, dest='query_args')
parser.add_argument('-j', help='(Optional) view as json dictionary logs', required=False, dest='json_view', action='store_true')
parser.add_argument('-t', help='(Optional) pre-existing Splunk token which can be set using export SPLUNK_TOKEN=<token> - if provided the user (-u) and password (-p) arguments are not required', required=False, dest='token')
parser.add_argument('-m', help='(Optional) verbose message when getting logs', required=False, dest='message_details', action='store_true')
parser.add_argument('-v', help='(Optional) verify certs - disabled by default', required=False, dest='verify', action='store_true')
parser.add_argument('-b', help='verbose', required=False, dest='verbose', action='store_true')
args = parser.parse_args()
user = SPLUNK_USER
password = SPLUNK_PASSWORD
token = SPLUNK_TOKEN
address = SPLUNK_API_ADDRESS
index_name = SPLUNK_INDEX
verbose = SPLUNK_VERBOSE
show_message_details = bool(str(ev('MESSAGE_DETAILS', '0')).lower() == '1')
earliest_time_minutes = None
latest_time_minutes = None
verify = False
code_view = True
json_view = False
datafile = None
if args.user:
user = args.user # depends on [control=['if'], data=[]]
if args.password:
password = args.password # depends on [control=['if'], data=[]]
if args.address:
address = args.address # depends on [control=['if'], data=[]]
if args.datafile:
datafile = args.datafile # depends on [control=['if'], data=[]]
if args.index_name:
index_name = args.index_name # depends on [control=['if'], data=[]]
if args.verify:
verify = args.verify # depends on [control=['if'], data=[]]
if args.earliest_time_minutes:
earliest_time_minutes = int(args.earliest_time_minutes) # depends on [control=['if'], data=[]]
if args.latest_time_minutes:
latest_time_minutes = int(args.latest_time_minutes) # depends on [control=['if'], data=[]]
if args.verbose:
verbose = True # depends on [control=['if'], data=[]]
if args.message_details:
show_message_details = args.message_details # depends on [control=['if'], data=[]]
if args.token:
token = args.token # depends on [control=['if'], data=[]]
if args.json_view:
json_view = True
code_view = False # depends on [control=['if'], data=[]]
default_search_query = 'index="{}" | head 10 | reverse'.format(index_name)
search_query = ev('SPLUNK_QUERY', default_search_query)
if args.query_args:
search_query = ' '.join(args.query_args) # depends on [control=['if'], data=[]]
valid = True
if not user or user == 'user-not-set':
log.critical('missing user')
valid = False # depends on [control=['if'], data=[]]
if not password or password == 'password-not-set':
log.critical('missing password')
valid = False # depends on [control=['if'], data=[]]
if not index_name:
log.critical('missing splunk index')
valid = False # depends on [control=['if'], data=[]]
if token:
# if the token is present,
# then the user and the password are not required
if not valid and index_name:
valid = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not valid:
log.critical('Please run with the following arguments:\n')
log.error('-u <username> -p <password> -i <index> -t <token if user and password not set> -a <host address as: fqdn:port>')
log.critical('\nOr you can export the following environment variables and retry the command: \n')
log.error('export SPLUNK_ADDRESS="splunkenterprise:8088"\nexport SPLUNK_API_ADDRESS="splunkenterprise:8089"\nexport SPLUNK_PASSWORD="123321"\nexport SPLUNK_USER="trex"\nexport SPLUNK_INDEX="antinex"\nexport SPLUNK_TOKEN="<Optional pre-existing Splunk token>"\n')
sys.exit(1) # depends on [control=['if'], data=[]]
if verbose:
log.info('creating client user={} address={}'.format(user, address)) # depends on [control=['if'], data=[]]
last_msg = ''
host = ''
port = -1
try:
last_msg = 'Invalid address={}'.format(address)
address_split = address.split(':')
last_msg = 'Failed finding host in address={} - please use: -a <fqdn:port>'.format(address)
host = address_split[0]
last_msg = 'Failed finding integer port in address={} - please use: -a <fqdn:port>'.format(address)
port = int(address_split[1]) # depends on [control=['try'], data=[]]
except Exception as e:
log.error('Failed to parse -a {} for the splunk host address: {} which threw an ex={}'.format(address, last_msg, e))
sys.exit(1) # depends on [control=['except'], data=['e']]
# end of try ex
if verbose:
log.info('connecting {}@{}:{}'.format(user, host, port)) # depends on [control=['if'], data=[]]
req_body = None
if datafile:
if verbose:
log.info('loading request in datafile={}'.format(datafile)) # depends on [control=['if'], data=[]]
with open(datafile, 'r') as f:
req_body = json.loads(f.read()) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
earliest_time = None
latest_time = None
now = datetime.datetime.now()
if earliest_time_minutes:
min_15_ago = now - datetime.timedelta(minutes=earliest_time_minutes)
earliest_time = min_15_ago.strftime('%Y-%m-%dT%H:%M:%S.000-00:00') # depends on [control=['if'], data=[]]
if latest_time_minutes:
latest_time = (now - datetime.timedelta(minutes=latest_time_minutes)).strftime('%Y-%m-%dT%H:%M:%S.000-00:00') # depends on [control=['if'], data=[]]
# Step 2: Create a search job
if not search_query.startswith('search'):
search_query = 'search {}'.format(search_query) # depends on [control=['if'], data=[]]
search_data = req_body
if not search_data:
search_data = {'search': search_query}
if earliest_time:
search_data['earliest_time'] = earliest_time # depends on [control=['if'], data=[]]
if latest_time:
search_data['latest_time'] = latest_time # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
res = sp.search(user=user, password=password, address=address, token=token, query_dict=search_data, verify=verify)
if res['status'] == SUCCESS:
result_list = []
try:
result_list = res['record'].get('results', result_list)
if len(result_list) == 0:
log.info('No matches for search={} response={}'.format(ppj(search_data), ppj(res['record']))) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
result_list = []
log.error('Failed to find results for the query={} with ex={}'.format(ppj(search_data), e)) # depends on [control=['except'], data=['e']]
for (ridx, log_record) in enumerate(result_list):
log_raw = log_record.get('_raw', None)
if log_raw:
show_search_results(log_rec=log_raw, code_view=code_view, json_view=json_view, show_message_details=show_message_details) # depends on [control=['if'], data=[]]
else:
show_non_search_results(log_rec=log_record, code_view=code_view, json_view=json_view, show_message_details=show_message_details) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# end of handling log record presentation as a view
# end for all log records
log.error('Failed searching splunk with status={} and error: {}'.format(res['status'], res['err']))
# end of if job_id
if verbose:
log.info('done') # depends on [control=['if'], data=[]] |
def pairwise_cos_distance(A, B):
"""Pairwise cosine distance between two matrices.
:param A: a matrix.
:param B: a matrix.
:returns: A tensor for the pairwise cosine between A and B.
"""
normalized_A = tf.nn.l2_normalize(A, dim=1)
normalized_B = tf.nn.l2_normalize(B, dim=1)
prod = tf.matmul(normalized_A, normalized_B, adjoint_b=True)
return 1 - prod | def function[pairwise_cos_distance, parameter[A, B]]:
constant[Pairwise cosine distance between two matrices.
:param A: a matrix.
:param B: a matrix.
:returns: A tensor for the pairwise cosine between A and B.
]
variable[normalized_A] assign[=] call[name[tf].nn.l2_normalize, parameter[name[A]]]
variable[normalized_B] assign[=] call[name[tf].nn.l2_normalize, parameter[name[B]]]
variable[prod] assign[=] call[name[tf].matmul, parameter[name[normalized_A], name[normalized_B]]]
return[binary_operation[constant[1] - name[prod]]] | keyword[def] identifier[pairwise_cos_distance] ( identifier[A] , identifier[B] ):
literal[string]
identifier[normalized_A] = identifier[tf] . identifier[nn] . identifier[l2_normalize] ( identifier[A] , identifier[dim] = literal[int] )
identifier[normalized_B] = identifier[tf] . identifier[nn] . identifier[l2_normalize] ( identifier[B] , identifier[dim] = literal[int] )
identifier[prod] = identifier[tf] . identifier[matmul] ( identifier[normalized_A] , identifier[normalized_B] , identifier[adjoint_b] = keyword[True] )
keyword[return] literal[int] - identifier[prod] | def pairwise_cos_distance(A, B):
"""Pairwise cosine distance between two matrices.
:param A: a matrix.
:param B: a matrix.
:returns: A tensor for the pairwise cosine between A and B.
"""
normalized_A = tf.nn.l2_normalize(A, dim=1)
normalized_B = tf.nn.l2_normalize(B, dim=1)
prod = tf.matmul(normalized_A, normalized_B, adjoint_b=True)
return 1 - prod |
def with_ascendants_for_slug(self, slug, **kwargs):
"""
Given a slug, returns a list of pages from ascendants to
descendants, that form the parent/child page relationships
for that slug. The main concern is to do this in a single
database query rather than querying the database for parents
of a given page.
Primarily used in ``PageMiddleware`` to provide the current
page, which in the case of non-page views, won't match the
slug exactly, but will likely match a page that has been
created for linking to the entry point for the app, eg the
blog page when viewing blog posts.
Also used within ``Page.get_ascendants``, which gets called
in the ``pages.views`` view, for building a list of possible
templates that can be used for the page.
If a valid chain of pages is found, we also assign the pages
to the ``page._ascendants`` attr of the main/first/deepest
page, so that when its ``get_ascendants`` method is called,
the ascendants chain can be re-used without querying the
database again. This occurs at least once, given the second
use-case described above.
"""
if slug == "/":
slugs = [home_slug()]
else:
# Create a list of slugs within this slug,
# eg: ['about', 'about/team', 'about/team/mike']
parts = slug.split("/")
slugs = ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]
# Find the deepest page that matches one of our slugs.
# Sorting by "-slug" should ensure that the pages are in
# descendant -> ascendant order.
pages_for_user = self.published(**kwargs)
pages = list(pages_for_user.filter(slug__in=slugs).order_by("-slug"))
if not pages:
return []
# Check to see if the other pages retrieved form a valid path
# in the page tree, i.e. pages[0].parent == pages[1],
# pages[1].parent == pages[2], and so on. If they do, assign
# the ascendants to the main/first/deepest page, so that it
# can be re-used on calls to its get_ascendants method.
pages[0]._ascendants = []
for i, page in enumerate(pages):
try:
parent = pages[i + 1]
except IndexError:
# IndexError indicates that this is the last page in
# the list, so it should have no parent.
if page.parent_id:
break # Invalid parent
else:
if page.parent_id != parent.id:
break # Invalid parent
else:
# Valid parents
pages[0]._ascendants = pages[1:]
return pages | def function[with_ascendants_for_slug, parameter[self, slug]]:
constant[
Given a slug, returns a list of pages from ascendants to
descendants, that form the parent/child page relationships
for that slug. The main concern is to do this in a single
database query rather than querying the database for parents
of a given page.
Primarily used in ``PageMiddleware`` to provide the current
page, which in the case of non-page views, won't match the
slug exactly, but will likely match a page that has been
created for linking to the entry point for the app, eg the
blog page when viewing blog posts.
Also used within ``Page.get_ascendants``, which gets called
in the ``pages.views`` view, for building a list of possible
templates that can be used for the page.
If a valid chain of pages is found, we also assign the pages
to the ``page._ascendants`` attr of the main/first/deepest
page, so that when its ``get_ascendants`` method is called,
the ascendants chain can be re-used without querying the
database again. This occurs at least once, given the second
use-case described above.
]
if compare[name[slug] equal[==] constant[/]] begin[:]
variable[slugs] assign[=] list[[<ast.Call object at 0x7da1b15f33d0>]]
variable[pages_for_user] assign[=] call[name[self].published, parameter[]]
variable[pages] assign[=] call[name[list], parameter[call[call[name[pages_for_user].filter, parameter[]].order_by, parameter[constant[-slug]]]]]
if <ast.UnaryOp object at 0x7da1b15f3400> begin[:]
return[list[[]]]
call[name[pages]][constant[0]]._ascendants assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b15f16c0>, <ast.Name object at 0x7da1b15f1c90>]]] in starred[call[name[enumerate], parameter[name[pages]]]] begin[:]
<ast.Try object at 0x7da1b15f12a0>
return[name[pages]] | keyword[def] identifier[with_ascendants_for_slug] ( identifier[self] , identifier[slug] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[slug] == literal[string] :
identifier[slugs] =[ identifier[home_slug] ()]
keyword[else] :
identifier[parts] = identifier[slug] . identifier[split] ( literal[string] )
identifier[slugs] =[ literal[string] . identifier[join] ( identifier[parts] [: identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[parts] )+ literal[int] )]
identifier[pages_for_user] = identifier[self] . identifier[published] (** identifier[kwargs] )
identifier[pages] = identifier[list] ( identifier[pages_for_user] . identifier[filter] ( identifier[slug__in] = identifier[slugs] ). identifier[order_by] ( literal[string] ))
keyword[if] keyword[not] identifier[pages] :
keyword[return] []
identifier[pages] [ literal[int] ]. identifier[_ascendants] =[]
keyword[for] identifier[i] , identifier[page] keyword[in] identifier[enumerate] ( identifier[pages] ):
keyword[try] :
identifier[parent] = identifier[pages] [ identifier[i] + literal[int] ]
keyword[except] identifier[IndexError] :
keyword[if] identifier[page] . identifier[parent_id] :
keyword[break]
keyword[else] :
keyword[if] identifier[page] . identifier[parent_id] != identifier[parent] . identifier[id] :
keyword[break]
keyword[else] :
identifier[pages] [ literal[int] ]. identifier[_ascendants] = identifier[pages] [ literal[int] :]
keyword[return] identifier[pages] | def with_ascendants_for_slug(self, slug, **kwargs):
"""
Given a slug, returns a list of pages from ascendants to
descendants, that form the parent/child page relationships
for that slug. The main concern is to do this in a single
database query rather than querying the database for parents
of a given page.
Primarily used in ``PageMiddleware`` to provide the current
page, which in the case of non-page views, won't match the
slug exactly, but will likely match a page that has been
created for linking to the entry point for the app, eg the
blog page when viewing blog posts.
Also used within ``Page.get_ascendants``, which gets called
in the ``pages.views`` view, for building a list of possible
templates that can be used for the page.
If a valid chain of pages is found, we also assign the pages
to the ``page._ascendants`` attr of the main/first/deepest
page, so that when its ``get_ascendants`` method is called,
the ascendants chain can be re-used without querying the
database again. This occurs at least once, given the second
use-case described above.
"""
if slug == '/':
slugs = [home_slug()] # depends on [control=['if'], data=[]]
else:
# Create a list of slugs within this slug,
# eg: ['about', 'about/team', 'about/team/mike']
parts = slug.split('/')
slugs = ['/'.join(parts[:i]) for i in range(1, len(parts) + 1)]
# Find the deepest page that matches one of our slugs.
# Sorting by "-slug" should ensure that the pages are in
# descendant -> ascendant order.
pages_for_user = self.published(**kwargs)
pages = list(pages_for_user.filter(slug__in=slugs).order_by('-slug'))
if not pages:
return [] # depends on [control=['if'], data=[]]
# Check to see if the other pages retrieved form a valid path
# in the page tree, i.e. pages[0].parent == pages[1],
# pages[1].parent == pages[2], and so on. If they do, assign
# the ascendants to the main/first/deepest page, so that it
# can be re-used on calls to its get_ascendants method.
pages[0]._ascendants = []
for (i, page) in enumerate(pages):
try:
parent = pages[i + 1] # depends on [control=['try'], data=[]]
except IndexError:
# IndexError indicates that this is the last page in
# the list, so it should have no parent.
if page.parent_id:
break # Invalid parent # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
else:
if page.parent_id != parent.id:
break # Invalid parent # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
# Valid parents
pages[0]._ascendants = pages[1:]
return pages |
def namespace_to_dict(obj):
    """Convert parsed-argument containers to a plain dict.

    If *obj* is an ``argparse.Namespace`` or ``optparse.Values`` instance,
    its attribute dictionary is returned; any other value is passed through
    unchanged.  Redefine this helper when using other parsers.

    :param obj: *
    :return:
    :rtype: dict or *
    """
    parser_result_types = (argparse.Namespace, optparse.Values)
    return vars(obj) if isinstance(obj, parser_result_types) else obj
constant[If obj is argparse.Namespace or optparse.Values we'll return
a dict representation of it, else return the original object.
Redefine this method if using other parsers.
:param obj: *
:return:
:rtype: dict or *
]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Attribute object at 0x7da18fe93dc0>, <ast.Attribute object at 0x7da18fe90b50>]]]] begin[:]
return[call[name[vars], parameter[name[obj]]]]
return[name[obj]] | keyword[def] identifier[namespace_to_dict] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[argparse] . identifier[Namespace] , identifier[optparse] . identifier[Values] )):
keyword[return] identifier[vars] ( identifier[obj] )
keyword[return] identifier[obj] | def namespace_to_dict(obj):
"""If obj is argparse.Namespace or optparse.Values we'll return
a dict representation of it, else return the original object.
Redefine this method if using other parsers.
:param obj: *
:return:
:rtype: dict or *
"""
if isinstance(obj, (argparse.Namespace, optparse.Values)):
return vars(obj) # depends on [control=['if'], data=[]]
return obj |
def add_activity_type(self, activity_type):
    """Register an activity type on an Agent, ignoring duplicates.

    Parameters
    ----------
    activity_type : str
        The type of activity to add such as 'activity', 'kinase',
        'gtpbound'
    """
    already_known = activity_type in self.activity_types
    if not already_known:
        self.activity_types.append(activity_type)
constant[Adds an activity type to an Agent.
Parameters
----------
activity_type : str
The type of activity to add such as 'activity', 'kinase',
'gtpbound'
]
if compare[name[activity_type] <ast.NotIn object at 0x7da2590d7190> name[self].activity_types] begin[:]
call[name[self].activity_types.append, parameter[name[activity_type]]] | keyword[def] identifier[add_activity_type] ( identifier[self] , identifier[activity_type] ):
literal[string]
keyword[if] identifier[activity_type] keyword[not] keyword[in] identifier[self] . identifier[activity_types] :
identifier[self] . identifier[activity_types] . identifier[append] ( identifier[activity_type] ) | def add_activity_type(self, activity_type):
"""Adds an activity type to an Agent.
Parameters
----------
activity_type : str
The type of activity to add such as 'activity', 'kinase',
'gtpbound'
"""
if activity_type not in self.activity_types:
self.activity_types.append(activity_type) # depends on [control=['if'], data=['activity_type']] |
def get_default_config(gpu_memory_usage=0.75, allow_growth=False):
    """Build a TensorFlow session configuration with GPU memory settings.

    :param gpu_memory_usage: Fraction of the GPU memory the process may claim.
    :param allow_growth: When True the allocation grows on demand instead of
        being reserved up front at a fixed size.
    :return: A configuration you can pass to your session when creating it.
    """
    session_config = tf.ConfigProto()
    gpu_opts = session_config.gpu_options
    gpu_opts.per_process_gpu_memory_fraction = gpu_memory_usage
    gpu_opts.allow_growth = allow_growth
    return session_config
return config | def function[get_default_config, parameter[gpu_memory_usage, allow_growth]]:
constant[
A helper to create sessions easily.
:param gpu_memory_usage: How much of the gpu should be used for your project.
:param allow_growth: If you want to have a fixed gpus size or if it should grow and use just as much as it needs.
:return: A configuration you can pass to your session when creating it.
]
variable[config] assign[=] call[name[tf].ConfigProto, parameter[]]
name[config].gpu_options.per_process_gpu_memory_fraction assign[=] name[gpu_memory_usage]
name[config].gpu_options.allow_growth assign[=] name[allow_growth]
return[name[config]] | keyword[def] identifier[get_default_config] ( identifier[gpu_memory_usage] = literal[int] , identifier[allow_growth] = keyword[False] ):
literal[string]
identifier[config] = identifier[tf] . identifier[ConfigProto] ()
identifier[config] . identifier[gpu_options] . identifier[per_process_gpu_memory_fraction] = identifier[gpu_memory_usage]
identifier[config] . identifier[gpu_options] . identifier[allow_growth] = identifier[allow_growth]
keyword[return] identifier[config] | def get_default_config(gpu_memory_usage=0.75, allow_growth=False):
"""
A helper to create sessions easily.
:param gpu_memory_usage: How much of the gpu should be used for your project.
:param allow_growth: If you want to have a fixed gpus size or if it should grow and use just as much as it needs.
:return: A configuration you can pass to your session when creating it.
"""
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_usage
config.gpu_options.allow_growth = allow_growth
return config |
def to_pixel(self, wcs, mode='all'):
    """
    Return this aperture as a `RectangularAnnulus` defined in pixel
    coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        Whether to apply the full transformation including distortions
        (``'all'``; default) or only the core WCS transformation
        (``'wcs'``).

    Returns
    -------
    aperture : `RectangularAnnulus` object
        A `RectangularAnnulus` object.
    """
    params = self._to_pixel_params(wcs, mode=mode)
    return RectangularAnnulus(**params)
constant[
Convert the aperture to a `RectangularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAnnulus` object
A `RectangularAnnulus` object.
]
variable[pixel_params] assign[=] call[name[self]._to_pixel_params, parameter[name[wcs]]]
return[call[name[RectangularAnnulus], parameter[]]] | keyword[def] identifier[to_pixel] ( identifier[self] , identifier[wcs] , identifier[mode] = literal[string] ):
literal[string]
identifier[pixel_params] = identifier[self] . identifier[_to_pixel_params] ( identifier[wcs] , identifier[mode] = identifier[mode] )
keyword[return] identifier[RectangularAnnulus] (** identifier[pixel_params] ) | def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `RectangularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAnnulus` object
A `RectangularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return RectangularAnnulus(**pixel_params) |
def arcsIn(G: Graph, n: Node) -> RDFGraph:
    """ arcsIn(G, n) is the set of triples in a graph G with object n. """
    # Wildcard subject and predicate: match every triple whose object is n.
    incoming = G.triples((None, None, n))
    return RDFGraph(incoming)
constant[ arcsIn(G, n) is the set of triples in a graph G with object n. ]
return[call[name[RDFGraph], parameter[call[name[G].triples, parameter[tuple[[<ast.Constant object at 0x7da1b10c66e0>, <ast.Constant object at 0x7da1b10c7bb0>, <ast.Name object at 0x7da1b10c6380>]]]]]]] | keyword[def] identifier[arcsIn] ( identifier[G] : identifier[Graph] , identifier[n] : identifier[Node] )-> identifier[RDFGraph] :
literal[string]
keyword[return] identifier[RDFGraph] ( identifier[G] . identifier[triples] (( keyword[None] , keyword[None] , identifier[n] ))) | def arcsIn(G: Graph, n: Node) -> RDFGraph:
""" arcsIn(G, n) is the set of triples in a graph G with object n. """
return RDFGraph(G.triples((None, None, n))) |
def total(self, where=None, xbin1=1, xbin2=-2):
    """
    Compute the total yield together with its statistical and
    systematic uncertainties.

    Returns a tuple ``(integral, stat_error, (up, down))`` where the last
    element holds the upward and downward systematic errors obtained by
    adding each systematic's positive/negative shifts in quadrature.
    """
    nominal_hist, _ = self.sys_hist(None, where=where)
    yield_value, stat_error = nominal_hist.integral(
        xbin1=xbin1, xbin2=xbin2, error=True)
    # Seed with 0 so sqrt(sum(...)) is well defined with no systematics.
    up_terms = [0]
    dn_terms = [0]
    for sys_name in self.sys_names():
        low_hist, high_hist = self.sys_hist(sys_name, where=where)
        high_shift = high_hist.integral(xbin1=xbin1, xbin2=xbin2) - yield_value
        low_shift = low_hist.integral(xbin1=xbin1, xbin2=xbin2) - yield_value
        for shift in (high_shift, low_shift):
            # Positive shifts contribute to the upward error, the rest
            # (including zero) to the downward error.
            bucket = up_terms if shift > 0 else dn_terms
            bucket.append(shift ** 2)
    syst_error = (sqrt(sum(up_terms)), sqrt(sum(dn_terms)))
    return yield_value, stat_error, syst_error
constant[
Return the total yield and its associated statistical and
systematic uncertainties.
]
<ast.Tuple object at 0x7da1b110b910> assign[=] call[name[self].sys_hist, parameter[constant[None]]]
<ast.Tuple object at 0x7da1b110b640> assign[=] call[name[nominal].integral, parameter[]]
variable[ups] assign[=] list[[<ast.Constant object at 0x7da1b110a500>]]
variable[dns] assign[=] list[[<ast.Constant object at 0x7da1b1108340>]]
for taget[name[sys_name]] in starred[call[name[self].sys_names, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b1108b80> assign[=] call[name[self].sys_hist, parameter[name[sys_name]]]
variable[up] assign[=] binary_operation[call[name[high].integral, parameter[]] - name[integral]]
variable[dn] assign[=] binary_operation[call[name[low].integral, parameter[]] - name[integral]]
if compare[name[up] greater[>] constant[0]] begin[:]
call[name[ups].append, parameter[binary_operation[name[up] ** constant[2]]]]
if compare[name[dn] greater[>] constant[0]] begin[:]
call[name[ups].append, parameter[binary_operation[name[dn] ** constant[2]]]]
variable[syst_error] assign[=] tuple[[<ast.Call object at 0x7da1b11faf80>, <ast.Call object at 0x7da1b11f98a0>]]
return[tuple[[<ast.Name object at 0x7da1b11f8070>, <ast.Name object at 0x7da1b11fa380>, <ast.Name object at 0x7da1b11f8580>]]] | keyword[def] identifier[total] ( identifier[self] , identifier[where] = keyword[None] , identifier[xbin1] = literal[int] , identifier[xbin2] =- literal[int] ):
literal[string]
identifier[nominal] , identifier[_] = identifier[self] . identifier[sys_hist] ( keyword[None] , identifier[where] = identifier[where] )
identifier[integral] , identifier[stat_error] = identifier[nominal] . identifier[integral] (
identifier[xbin1] = identifier[xbin1] , identifier[xbin2] = identifier[xbin2] , identifier[error] = keyword[True] )
identifier[ups] =[ literal[int] ]
identifier[dns] =[ literal[int] ]
keyword[for] identifier[sys_name] keyword[in] identifier[self] . identifier[sys_names] ():
identifier[low] , identifier[high] = identifier[self] . identifier[sys_hist] ( identifier[sys_name] , identifier[where] = identifier[where] )
identifier[up] = identifier[high] . identifier[integral] ( identifier[xbin1] = identifier[xbin1] , identifier[xbin2] = identifier[xbin2] )- identifier[integral]
identifier[dn] = identifier[low] . identifier[integral] ( identifier[xbin1] = identifier[xbin1] , identifier[xbin2] = identifier[xbin2] )- identifier[integral]
keyword[if] identifier[up] > literal[int] :
identifier[ups] . identifier[append] ( identifier[up] ** literal[int] )
keyword[else] :
identifier[dns] . identifier[append] ( identifier[up] ** literal[int] )
keyword[if] identifier[dn] > literal[int] :
identifier[ups] . identifier[append] ( identifier[dn] ** literal[int] )
keyword[else] :
identifier[dns] . identifier[append] ( identifier[dn] ** literal[int] )
identifier[syst_error] =( identifier[sqrt] ( identifier[sum] ( identifier[ups] )), identifier[sqrt] ( identifier[sum] ( identifier[dns] )))
keyword[return] identifier[integral] , identifier[stat_error] , identifier[syst_error] | def total(self, where=None, xbin1=1, xbin2=-2):
"""
Return the total yield and its associated statistical and
systematic uncertainties.
"""
(nominal, _) = self.sys_hist(None, where=where)
(integral, stat_error) = nominal.integral(xbin1=xbin1, xbin2=xbin2, error=True)
ups = [0]
dns = [0]
for sys_name in self.sys_names():
(low, high) = self.sys_hist(sys_name, where=where)
up = high.integral(xbin1=xbin1, xbin2=xbin2) - integral
dn = low.integral(xbin1=xbin1, xbin2=xbin2) - integral
if up > 0:
ups.append(up ** 2) # depends on [control=['if'], data=['up']]
else:
dns.append(up ** 2)
if dn > 0:
ups.append(dn ** 2) # depends on [control=['if'], data=['dn']]
else:
dns.append(dn ** 2) # depends on [control=['for'], data=['sys_name']]
syst_error = (sqrt(sum(ups)), sqrt(sum(dns)))
return (integral, stat_error, syst_error) |
async def xdel(self, name: str, stream_id: str) -> int:
    """
    [NOTICE] Not officially released yet
    [NOTICE] In the current implementation, memory is not
    really reclaimed until a macro node is completely empty,
    so you should not abuse this feature.
    Remove an entry from the middle of a stream by its ID.
    :param name: name of the stream
    :param stream_id: id of the options appended to the stream.
    """
    reply = await self.execute_command('XDEL', name, stream_id)
    return reply
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[stream_id] ) | async def xdel(self, name: str, stream_id: str) -> int:
"""
[NOTICE] Not officially released yet
[NOTICE] In the current implementation, memory is not
really reclaimed until a macro node is completely empty,
so you should not abuse this feature.
remove items from the middle of a stream, just by ID.
:param name: name of the stream
:param stream_id: id of the options appended to the stream.
"""
return await self.execute_command('XDEL', name, stream_id) |
def get_bulk(self, create=False):
    """Return the newline-delimited bulk payload (action line + document)."""
    meta = self._meta
    action = "create" if create else "index"
    header = {action: {"_index": meta.index, "_type": meta.type}}
    # Optional routing/versioning metadata, only emitted when set.
    if meta.parent:
        header[action]['_parent'] = meta.parent
    if meta.version:
        header[action]['_version'] = meta.version
    if meta.id:
        header[action]['_id'] = meta.id
    encoder = meta.connection.encoder
    lines = [
        json.dumps(header, cls=encoder),
        "\n",
        json.dumps(self, cls=encoder),
        "\n",
    ]
    return ''.join(lines)
constant[Return bulk code]
variable[result] assign[=] list[[]]
variable[op_type] assign[=] constant[index]
if name[create] begin[:]
variable[op_type] assign[=] constant[create]
variable[meta] assign[=] name[self]._meta
variable[cmd] assign[=] dictionary[[<ast.Name object at 0x7da1b0e46d70>], [<ast.Dict object at 0x7da1b0e45600>]]
if name[meta].parent begin[:]
call[call[name[cmd]][name[op_type]]][constant[_parent]] assign[=] name[meta].parent
if name[meta].version begin[:]
call[call[name[cmd]][name[op_type]]][constant[_version]] assign[=] name[meta].version
if name[meta].id begin[:]
call[call[name[cmd]][name[op_type]]][constant[_id]] assign[=] name[meta].id
call[name[result].append, parameter[call[name[json].dumps, parameter[name[cmd]]]]]
call[name[result].append, parameter[constant[
]]]
call[name[result].append, parameter[call[name[json].dumps, parameter[name[self]]]]]
call[name[result].append, parameter[constant[
]]]
return[call[constant[].join, parameter[name[result]]]] | keyword[def] identifier[get_bulk] ( identifier[self] , identifier[create] = keyword[False] ):
literal[string]
identifier[result] =[]
identifier[op_type] = literal[string]
keyword[if] identifier[create] :
identifier[op_type] = literal[string]
identifier[meta] = identifier[self] . identifier[_meta]
identifier[cmd] ={ identifier[op_type] :{ literal[string] : identifier[meta] . identifier[index] , literal[string] : identifier[meta] . identifier[type] }}
keyword[if] identifier[meta] . identifier[parent] :
identifier[cmd] [ identifier[op_type] ][ literal[string] ]= identifier[meta] . identifier[parent]
keyword[if] identifier[meta] . identifier[version] :
identifier[cmd] [ identifier[op_type] ][ literal[string] ]= identifier[meta] . identifier[version]
keyword[if] identifier[meta] . identifier[id] :
identifier[cmd] [ identifier[op_type] ][ literal[string] ]= identifier[meta] . identifier[id]
identifier[result] . identifier[append] ( identifier[json] . identifier[dumps] ( identifier[cmd] , identifier[cls] = identifier[self] . identifier[_meta] . identifier[connection] . identifier[encoder] ))
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[append] ( identifier[json] . identifier[dumps] ( identifier[self] , identifier[cls] = identifier[self] . identifier[_meta] . identifier[connection] . identifier[encoder] ))
identifier[result] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def get_bulk(self, create=False):
"""Return bulk code"""
result = []
op_type = 'index'
if create:
op_type = 'create' # depends on [control=['if'], data=[]]
meta = self._meta
cmd = {op_type: {'_index': meta.index, '_type': meta.type}}
if meta.parent:
cmd[op_type]['_parent'] = meta.parent # depends on [control=['if'], data=[]]
if meta.version:
cmd[op_type]['_version'] = meta.version # depends on [control=['if'], data=[]]
if meta.id:
cmd[op_type]['_id'] = meta.id # depends on [control=['if'], data=[]]
result.append(json.dumps(cmd, cls=self._meta.connection.encoder))
result.append('\n')
result.append(json.dumps(self, cls=self._meta.connection.encoder))
result.append('\n')
return ''.join(result) |
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
    """Deprovision an instance.

    See the openbrokerapi documentation for the broker contract.

    Raises:
        ErrInstanceDoesNotExist: Instance does not exist.
    """
    instance = self._backend.find(instance_id)
    if instance.isProvisioned():
        return self._backend.delete(instance)
    # Nothing was ever provisioned under this id.
    raise ErrInstanceDoesNotExist()
constant[Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
]
variable[instance] assign[=] call[name[self]._backend.find, parameter[name[instance_id]]]
if <ast.UnaryOp object at 0x7da18f00f9d0> begin[:]
<ast.Raise object at 0x7da18f00fc40>
return[call[name[self]._backend.delete, parameter[name[instance]]]] | keyword[def] identifier[deprovision] ( identifier[self] , identifier[instance_id] : identifier[str] , identifier[details] : identifier[DeprovisionDetails] , identifier[async_allowed] : identifier[bool] )-> identifier[DeprovisionServiceSpec] :
literal[string]
identifier[instance] = identifier[self] . identifier[_backend] . identifier[find] ( identifier[instance_id] )
keyword[if] keyword[not] identifier[instance] . identifier[isProvisioned] ():
keyword[raise] identifier[ErrInstanceDoesNotExist] ()
keyword[return] identifier[self] . identifier[_backend] . identifier[delete] ( identifier[instance] ) | def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
"""Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist() # depends on [control=['if'], data=[]]
return self._backend.delete(instance) |
def get_literals(self, c, i, depth):
    """
    Get a string literal.
    Gather all the literal chars up to opening curly or closing brace.
    Also gather chars between braces and commas within a group (is_expanding).
    ``c`` is the current character, ``i`` is a rewindable character iterator,
    and ``depth`` tracks brace-nesting depth for recursive group parsing.
    Returns a generator over the accumulated literal strings, or ``None``
    when expanding and the input ended before the group closed.
    """
    result = ['']
    is_dollar = False
    try:
        while c:
            # A '{' directly preceded by '$' is treated as a literal brace
            # rather than the start of a group (``${`` is not expanded).
            ignore_brace = is_dollar
            is_dollar = False
            if c == '$':
                is_dollar = True
            elif c == '\\':
                # Resolve the escape sequence into its literal character.
                c = [self.get_escape(c, i)]
            elif not ignore_brace and c == '{':
                # Try and get the group
                index = i.index
                try:
                    seq = self.get_sequence(next(i), i, depth + 1)
                    if seq:
                        c = seq
                except StopIteration:
                    # Searched to end of string
                    # and still didn't find it.
                    i.rewind(i.index - index)
            elif self.is_expanding() and c in (',', '}'):
                # We are Expanding within a group and found a group delimiter
                # Return what we gathered before the group delimiters.
                i.rewind(1)
                return (x for x in result)
            # Squash the current set of literals.
            result = self.squash(result, [c] if isinstance(c, str) else c)
            c = next(i)
    except StopIteration:
        # Ran off the end of the input.  While inside a group this means
        # the group never closed, so signal failure with ``None``.
        if self.is_expanding():
            return None
    return (x for x in result)
constant[
Get a string literal.
Gather all the literal chars up to opening curly or closing brace.
Also gather chars between braces and commas within a group (is_expanding).
]
variable[result] assign[=] list[[<ast.Constant object at 0x7da1b1175510>]]
variable[is_dollar] assign[=] constant[False]
<ast.Try object at 0x7da1b11744f0>
return[<ast.GeneratorExp object at 0x7da1b1020310>] | keyword[def] identifier[get_literals] ( identifier[self] , identifier[c] , identifier[i] , identifier[depth] ):
literal[string]
identifier[result] =[ literal[string] ]
identifier[is_dollar] = keyword[False]
keyword[try] :
keyword[while] identifier[c] :
identifier[ignore_brace] = identifier[is_dollar]
identifier[is_dollar] = keyword[False]
keyword[if] identifier[c] == literal[string] :
identifier[is_dollar] = keyword[True]
keyword[elif] identifier[c] == literal[string] :
identifier[c] =[ identifier[self] . identifier[get_escape] ( identifier[c] , identifier[i] )]
keyword[elif] keyword[not] identifier[ignore_brace] keyword[and] identifier[c] == literal[string] :
identifier[index] = identifier[i] . identifier[index]
keyword[try] :
identifier[seq] = identifier[self] . identifier[get_sequence] ( identifier[next] ( identifier[i] ), identifier[i] , identifier[depth] + literal[int] )
keyword[if] identifier[seq] :
identifier[c] = identifier[seq]
keyword[except] identifier[StopIteration] :
identifier[i] . identifier[rewind] ( identifier[i] . identifier[index] - identifier[index] )
keyword[elif] identifier[self] . identifier[is_expanding] () keyword[and] identifier[c] keyword[in] ( literal[string] , literal[string] ):
identifier[i] . identifier[rewind] ( literal[int] )
keyword[return] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] )
identifier[result] = identifier[self] . identifier[squash] ( identifier[result] ,[ identifier[c] ] keyword[if] identifier[isinstance] ( identifier[c] , identifier[str] ) keyword[else] identifier[c] )
identifier[c] = identifier[next] ( identifier[i] )
keyword[except] identifier[StopIteration] :
keyword[if] identifier[self] . identifier[is_expanding] ():
keyword[return] keyword[None]
keyword[return] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] ) | def get_literals(self, c, i, depth):
"""
Get a string literal.
Gather all the literal chars up to opening curly or closing brace.
Also gather chars between braces and commas within a group (is_expanding).
"""
result = ['']
is_dollar = False
try:
while c:
ignore_brace = is_dollar
is_dollar = False
if c == '$':
is_dollar = True # depends on [control=['if'], data=[]]
elif c == '\\':
c = [self.get_escape(c, i)] # depends on [control=['if'], data=['c']]
elif not ignore_brace and c == '{':
# Try and get the group
index = i.index
try:
seq = self.get_sequence(next(i), i, depth + 1)
if seq:
c = seq # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
# Searched to end of string
# and still didn't find it.
i.rewind(i.index - index) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif self.is_expanding() and c in (',', '}'):
# We are Expanding within a group and found a group delimiter
# Return what we gathered before the group delimiters.
i.rewind(1)
return (x for x in result) # depends on [control=['if'], data=[]]
# Squash the current set of literals.
result = self.squash(result, [c] if isinstance(c, str) else c)
c = next(i) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
if self.is_expanding():
return None # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
return (x for x in result) |
def search(self, query, search_term, search_field, catalog):
    """Run *query* against *catalog*, refining it with *search_term*.

    The term is matched against the index behind *search_field*; returns
    the catalog brains, or an empty list when no usable index exists.
    """
    logger.info("Reference Widget Catalog: {}".format(catalog.id))
    if not search_term:
        # No refinement requested: run the base query as-is.
        return catalog(query)
    index = self.get_index(search_field, catalog)
    if not index:
        logger.warn("*** Index not found: '{}'".format(search_field))
        return []
    meta = index.meta_type
    wildcard_term = "{}*".format(search_term)
    if meta == "TextIndexNG3":
        query[index.id] = wildcard_term
    elif meta == "ZCTextIndex":
        logger.warn("*** Field '{}' ({}). Better use TextIndexNG3"
                    .format(meta, search_field))
        query[index.id] = wildcard_term
    elif meta in ("FieldIndex", "KeywordIndex"):
        # These indexes only support exact matches, no wildcards.
        logger.warn("*** Field '{}' ({}). Better use TextIndexNG3"
                    .format(meta, search_field))
        query[index.id] = search_term
    else:
        logger.warn("*** Index '{}' ({}) not supported"
                    .format(search_field, meta))
        return []
    logger.info("Reference Widget Query: {}".format(repr(query)))
    return catalog(query)
constant[Performs a search against the catalog and returns the brains
]
call[name[logger].info, parameter[call[constant[Reference Widget Catalog: {}].format, parameter[name[catalog].id]]]]
if <ast.UnaryOp object at 0x7da1b1d66920> begin[:]
return[call[name[catalog], parameter[name[query]]]]
variable[index] assign[=] call[name[self].get_index, parameter[name[search_field], name[catalog]]]
if <ast.UnaryOp object at 0x7da1b1d65150> begin[:]
call[name[logger].warn, parameter[call[constant[*** Index not found: '{}'].format, parameter[name[search_field]]]]]
return[list[[]]]
variable[meta] assign[=] name[index].meta_type
if compare[name[meta] equal[==] constant[TextIndexNG3]] begin[:]
call[name[query]][name[index].id] assign[=] call[constant[{}*].format, parameter[name[search_term]]]
call[name[logger].info, parameter[call[constant[Reference Widget Query: {}].format, parameter[call[name[repr], parameter[name[query]]]]]]]
return[call[name[catalog], parameter[name[query]]]] | keyword[def] identifier[search] ( identifier[self] , identifier[query] , identifier[search_term] , identifier[search_field] , identifier[catalog] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[catalog] . identifier[id] ))
keyword[if] keyword[not] identifier[search_term] :
keyword[return] identifier[catalog] ( identifier[query] )
identifier[index] = identifier[self] . identifier[get_index] ( identifier[search_field] , identifier[catalog] )
keyword[if] keyword[not] identifier[index] :
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[search_field] ))
keyword[return] []
identifier[meta] = identifier[index] . identifier[meta_type]
keyword[if] identifier[meta] == literal[string] :
identifier[query] [ identifier[index] . identifier[id] ]= literal[string] . identifier[format] ( identifier[search_term] )
keyword[elif] identifier[meta] == literal[string] :
identifier[logger] . identifier[warn] ( literal[string]
. identifier[format] ( identifier[meta] , identifier[search_field] ))
identifier[query] [ identifier[index] . identifier[id] ]= literal[string] . identifier[format] ( identifier[search_term] )
keyword[elif] identifier[meta] keyword[in] [ literal[string] , literal[string] ]:
identifier[logger] . identifier[warn] ( literal[string]
. identifier[format] ( identifier[meta] , identifier[search_field] ))
identifier[query] [ identifier[index] . identifier[id] ]= identifier[search_term]
keyword[else] :
identifier[logger] . identifier[warn] ( literal[string]
. identifier[format] ( identifier[search_field] , identifier[meta] ))
keyword[return] []
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[query] )))
keyword[return] identifier[catalog] ( identifier[query] ) | def search(self, query, search_term, search_field, catalog):
"""Performs a search against the catalog and returns the brains
"""
logger.info('Reference Widget Catalog: {}'.format(catalog.id))
if not search_term:
return catalog(query) # depends on [control=['if'], data=[]]
index = self.get_index(search_field, catalog)
if not index:
logger.warn("*** Index not found: '{}'".format(search_field))
return [] # depends on [control=['if'], data=[]]
meta = index.meta_type
if meta == 'TextIndexNG3':
query[index.id] = '{}*'.format(search_term) # depends on [control=['if'], data=[]]
elif meta == 'ZCTextIndex':
logger.warn("*** Field '{}' ({}). Better use TextIndexNG3".format(meta, search_field))
query[index.id] = '{}*'.format(search_term) # depends on [control=['if'], data=['meta']]
elif meta in ['FieldIndex', 'KeywordIndex']:
logger.warn("*** Field '{}' ({}). Better use TextIndexNG3".format(meta, search_field))
query[index.id] = search_term # depends on [control=['if'], data=['meta']]
else:
logger.warn("*** Index '{}' ({}) not supported".format(search_field, meta))
return []
logger.info('Reference Widget Query: {}'.format(repr(query)))
return catalog(query) |
def get_context(name, doc):
    """Generate a command with given name.
    The command can be run immediately after generation.
    For example:
        dj generate command bar
        dj run manage.py bar
    """
    # Normalise the command name to snake_case; fall back to it for the
    # doc string when none was supplied.
    command_name = inflection.underscore(name)
    return dict(name=command_name, doc=doc or command_name)
constant[Generate a command with given name.
The command can be run immediately after generation.
For example:
dj generate command bar
dj run manage.py bar
]
variable[name] assign[=] call[name[inflection].underscore, parameter[name[name]]]
return[dictionary[[<ast.Constant object at 0x7da1b0949870>, <ast.Constant object at 0x7da1b094a320>], [<ast.Name object at 0x7da1b0949d50>, <ast.BoolOp object at 0x7da1b09482b0>]]] | keyword[def] identifier[get_context] ( identifier[name] , identifier[doc] ):
literal[string]
identifier[name] = identifier[inflection] . identifier[underscore] ( identifier[name] )
keyword[return] {
literal[string] : identifier[name] ,
literal[string] : identifier[doc] keyword[or] identifier[name]
} | def get_context(name, doc):
"""Generate a command with given name.
The command can be run immediately after generation.
For example:
dj generate command bar
dj run manage.py bar
"""
name = inflection.underscore(name)
return {'name': name, 'doc': doc or name} |
def simplify(self):
    """Simplify the Term.

    Collapses repeated Pauli operators acting on the same qubit into a
    single operator times a phase, drops identities, and folds every
    accumulated phase into the coefficient of the returned ``Term``.
    """
    def mul(op1, op2):
        # Multiply two single-qubit Pauli labels, returning (phase, label).
        # NOTE(review): the phases follow X*Y -> -i Z cyclically, i.e. the
        # reverse of the textbook sigma_x sigma_y = i sigma_z convention --
        # presumably deliberate given how ops are ordered when applied;
        # confirm against Term's operator-ordering convention.
        if op1 == "I":
            return 1.0, op2
        if op2 == "I":
            return 1.0, op1
        if op1 == op2:
            return 1.0, "I"
        if op1 == "X":
            return (-1j, "Z") if op2 == "Y" else (1j, "Y")
        if op1 == "Y":
            return (-1j, "X") if op2 == "Z" else (1j, "Z")
        if op1 == "Z":
            return (-1j, "Y") if op2 == "X" else (1j, "X")
    # Group the non-identity operator labels by the qubit index they act on.
    before = defaultdict(list)
    for op in self.ops:
        if op.op == "I":
            continue
        before[op.n].append(op.op)
    new_coeff = self.coeff
    new_ops = []
    for n in sorted(before.keys()):
        ops = before[n]
        assert ops
        # Fold this qubit's operators left-to-right, accumulating the phase.
        k = 1.0
        op = ops[0]
        for _op in ops[1:]:
            _k, op = mul(op, _op)
            k *= _k
        new_coeff *= k
        if new_coeff.imag == 0:
            # cast to float
            new_coeff = new_coeff.real
        if op != "I":
            new_ops.append(pauli_from_char(op, n))
    return Term(tuple(new_ops), new_coeff)
constant[Simplify the Term.]
def function[mul, parameter[op1, op2]]:
if compare[name[op1] equal[==] constant[I]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe91120>, <ast.Name object at 0x7da18fe90880>]]]
if compare[name[op2] equal[==] constant[I]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe932b0>, <ast.Name object at 0x7da18fe91510>]]]
if compare[name[op1] equal[==] name[op2]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe93c10>, <ast.Constant object at 0x7da18fe931c0>]]]
if compare[name[op1] equal[==] constant[X]] begin[:]
return[<ast.IfExp object at 0x7da18fe92ef0>]
if compare[name[op1] equal[==] constant[Y]] begin[:]
return[<ast.IfExp object at 0x7da18fe92b00>]
if compare[name[op1] equal[==] constant[Z]] begin[:]
return[<ast.IfExp object at 0x7da18fe92bc0>]
variable[before] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[op]] in starred[name[self].ops] begin[:]
if compare[name[op].op equal[==] constant[I]] begin[:]
continue
call[call[name[before]][name[op].n].append, parameter[name[op].op]]
variable[new_coeff] assign[=] name[self].coeff
variable[new_ops] assign[=] list[[]]
for taget[name[n]] in starred[call[name[sorted], parameter[call[name[before].keys, parameter[]]]]] begin[:]
variable[ops] assign[=] call[name[before]][name[n]]
assert[name[ops]]
variable[k] assign[=] constant[1.0]
variable[op] assign[=] call[name[ops]][constant[0]]
for taget[name[_op]] in starred[call[name[ops]][<ast.Slice object at 0x7da18fe92b60>]] begin[:]
<ast.Tuple object at 0x7da18fe91690> assign[=] call[name[mul], parameter[name[op], name[_op]]]
<ast.AugAssign object at 0x7da18fe90a60>
<ast.AugAssign object at 0x7da18fe93e20>
if compare[name[new_coeff].imag equal[==] constant[0]] begin[:]
variable[new_coeff] assign[=] name[new_coeff].real
if compare[name[op] not_equal[!=] constant[I]] begin[:]
call[name[new_ops].append, parameter[call[name[pauli_from_char], parameter[name[op], name[n]]]]]
return[call[name[Term], parameter[call[name[tuple], parameter[name[new_ops]]], name[new_coeff]]]] | keyword[def] identifier[simplify] ( identifier[self] ):
literal[string]
keyword[def] identifier[mul] ( identifier[op1] , identifier[op2] ):
keyword[if] identifier[op1] == literal[string] :
keyword[return] literal[int] , identifier[op2]
keyword[if] identifier[op2] == literal[string] :
keyword[return] literal[int] , identifier[op1]
keyword[if] identifier[op1] == identifier[op2] :
keyword[return] literal[int] , literal[string]
keyword[if] identifier[op1] == literal[string] :
keyword[return] (- literal[int] , literal[string] ) keyword[if] identifier[op2] == literal[string] keyword[else] ( literal[int] , literal[string] )
keyword[if] identifier[op1] == literal[string] :
keyword[return] (- literal[int] , literal[string] ) keyword[if] identifier[op2] == literal[string] keyword[else] ( literal[int] , literal[string] )
keyword[if] identifier[op1] == literal[string] :
keyword[return] (- literal[int] , literal[string] ) keyword[if] identifier[op2] == literal[string] keyword[else] ( literal[int] , literal[string] )
identifier[before] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[op] keyword[in] identifier[self] . identifier[ops] :
keyword[if] identifier[op] . identifier[op] == literal[string] :
keyword[continue]
identifier[before] [ identifier[op] . identifier[n] ]. identifier[append] ( identifier[op] . identifier[op] )
identifier[new_coeff] = identifier[self] . identifier[coeff]
identifier[new_ops] =[]
keyword[for] identifier[n] keyword[in] identifier[sorted] ( identifier[before] . identifier[keys] ()):
identifier[ops] = identifier[before] [ identifier[n] ]
keyword[assert] identifier[ops]
identifier[k] = literal[int]
identifier[op] = identifier[ops] [ literal[int] ]
keyword[for] identifier[_op] keyword[in] identifier[ops] [ literal[int] :]:
identifier[_k] , identifier[op] = identifier[mul] ( identifier[op] , identifier[_op] )
identifier[k] *= identifier[_k]
identifier[new_coeff] *= identifier[k]
keyword[if] identifier[new_coeff] . identifier[imag] == literal[int] :
identifier[new_coeff] = identifier[new_coeff] . identifier[real]
keyword[if] identifier[op] != literal[string] :
identifier[new_ops] . identifier[append] ( identifier[pauli_from_char] ( identifier[op] , identifier[n] ))
keyword[return] identifier[Term] ( identifier[tuple] ( identifier[new_ops] ), identifier[new_coeff] ) | def simplify(self):
"""Simplify the Term."""
def mul(op1, op2):
if op1 == 'I':
return (1.0, op2) # depends on [control=['if'], data=[]]
if op2 == 'I':
return (1.0, op1) # depends on [control=['if'], data=[]]
if op1 == op2:
return (1.0, 'I') # depends on [control=['if'], data=[]]
if op1 == 'X':
return (-1j, 'Z') if op2 == 'Y' else (1j, 'Y') # depends on [control=['if'], data=[]]
if op1 == 'Y':
return (-1j, 'X') if op2 == 'Z' else (1j, 'Z') # depends on [control=['if'], data=[]]
if op1 == 'Z':
return (-1j, 'Y') if op2 == 'X' else (1j, 'X') # depends on [control=['if'], data=[]]
before = defaultdict(list)
for op in self.ops:
if op.op == 'I':
continue # depends on [control=['if'], data=[]]
before[op.n].append(op.op) # depends on [control=['for'], data=['op']]
new_coeff = self.coeff
new_ops = []
for n in sorted(before.keys()):
ops = before[n]
assert ops
k = 1.0
op = ops[0]
for _op in ops[1:]:
(_k, op) = mul(op, _op)
k *= _k # depends on [control=['for'], data=['_op']]
new_coeff *= k
if new_coeff.imag == 0:
# cast to float
new_coeff = new_coeff.real # depends on [control=['if'], data=[]]
if op != 'I':
new_ops.append(pauli_from_char(op, n)) # depends on [control=['if'], data=['op']] # depends on [control=['for'], data=['n']]
return Term(tuple(new_ops), new_coeff) |
def intersect(self, other):  # type: (Term) -> Union[Term, None]
        """
        Returns a Term that represents the packages
        allowed by both this term and another
        """
        if self.dependency.name != other.dependency.name:
            raise ValueError(
                "{} should refer to {}".format(other, self.dependency.name)
            )

        if not self._compatible_dependency(other.dependency):
            # Incompatible dependencies: with mixed polarity the positive
            # term wins, otherwise there is no intersection.
            if self.is_positive() == other.is_positive():
                return
            return self if self.is_positive() else other

        if self.is_positive() == other.is_positive():
            if self.is_positive():
                # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
                return self._non_empty_term(
                    self.constraint.intersect(other.constraint), True
                )
            # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
            return self._non_empty_term(
                self.constraint.union(other.constraint), False
            )

        # Mixed polarity, e.g. foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0
        positive, negative = (self, other) if self.is_positive() else (other, self)
        return self._non_empty_term(
            positive.constraint.difference(negative.constraint), True
        )
constant[
Returns a Term that represents the packages
allowed by both this term and another
]
if compare[name[self].dependency.name not_equal[!=] name[other].dependency.name] begin[:]
<ast.Raise object at 0x7da1b21ebb80>
if call[name[self]._compatible_dependency, parameter[name[other].dependency]] begin[:]
if compare[call[name[self].is_positive, parameter[]] not_equal[!=] call[name[other].is_positive, parameter[]]] begin[:]
variable[positive] assign[=] <ast.IfExp object at 0x7da1b21e9270>
variable[negative] assign[=] <ast.IfExp object at 0x7da1b21e8970>
return[call[name[self]._non_empty_term, parameter[call[name[positive].constraint.difference, parameter[name[negative].constraint]], constant[True]]]] | keyword[def] identifier[intersect] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[self] . identifier[dependency] . identifier[name] != identifier[other] . identifier[dependency] . identifier[name] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[other] , identifier[self] . identifier[dependency] . identifier[name] )
)
keyword[if] identifier[self] . identifier[_compatible_dependency] ( identifier[other] . identifier[dependency] ):
keyword[if] identifier[self] . identifier[is_positive] ()!= identifier[other] . identifier[is_positive] ():
identifier[positive] = identifier[self] keyword[if] identifier[self] . identifier[is_positive] () keyword[else] identifier[other]
identifier[negative] = identifier[other] keyword[if] identifier[self] . identifier[is_positive] () keyword[else] identifier[self]
keyword[return] identifier[self] . identifier[_non_empty_term] (
identifier[positive] . identifier[constraint] . identifier[difference] ( identifier[negative] . identifier[constraint] ), keyword[True]
)
keyword[elif] identifier[self] . identifier[is_positive] ():
keyword[return] identifier[self] . identifier[_non_empty_term] (
identifier[self] . identifier[constraint] . identifier[intersect] ( identifier[other] . identifier[constraint] ), keyword[True]
)
keyword[else] :
keyword[return] identifier[self] . identifier[_non_empty_term] (
identifier[self] . identifier[constraint] . identifier[union] ( identifier[other] . identifier[constraint] ), keyword[False]
)
keyword[elif] identifier[self] . identifier[is_positive] ()!= identifier[other] . identifier[is_positive] ():
keyword[return] identifier[self] keyword[if] identifier[self] . identifier[is_positive] () keyword[else] identifier[other]
keyword[else] :
keyword[return] | def intersect(self, other): # type: (Term) -> Union[Term, None]
'\n Returns a Term that represents the packages\n allowed by both this term and another\n '
if self.dependency.name != other.dependency.name:
raise ValueError('{} should refer to {}'.format(other, self.dependency.name)) # depends on [control=['if'], data=[]]
if self._compatible_dependency(other.dependency):
if self.is_positive() != other.is_positive():
# foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0
positive = self if self.is_positive() else other
negative = other if self.is_positive() else self
return self._non_empty_term(positive.constraint.difference(negative.constraint), True) # depends on [control=['if'], data=[]]
elif self.is_positive():
# foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
return self._non_empty_term(self.constraint.intersect(other.constraint), True) # depends on [control=['if'], data=[]]
else:
# not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
return self._non_empty_term(self.constraint.union(other.constraint), False) # depends on [control=['if'], data=[]]
elif self.is_positive() != other.is_positive():
return self if self.is_positive() else other # depends on [control=['if'], data=[]]
else:
return |
def say(self, text):
        '''
        Speaks the given text. Generates the following notifications during
        output:
        started-utterance: When speech output has started
        started-word: When a word is about to be spoken. Includes the character
        "location" of the start of the word in the original utterance text
        and the "length" of the word in characters.
        finished-utterance: When speech output has finished. Includes a flag
        indicating if the entire utterance was "completed" or not.
        The proxy automatically adds any "name" associated with the utterance
        to the notifications on behalf of the driver.
        When starting to output an utterance, the driver must inform its proxy
        that it is busy by invoking L{driver.DriverProxy.setBusy} with a flag
        of True. When the utterance completes or is interrupted, the driver
        inform the proxy that it is no longer busy by invoking
        L{driver.DriverProxy.setBusy} with a flag of False.
        @param text: Unicode text to speak
        @type text: unicode
        '''
        self._proxy.setBusy(True)
        self._proxy.notify('started-utterance')
        i = 0
        for word in text.split(' '):
            self._proxy.notify('started-word', location=i, length=len(word))
            try:
                # Advance i past the next space, i.e. to the start of the
                # following word.
                i = text.index(' ', i + 1) + 1
            except ValueError:
                # str.index raises only ValueError (no further space: the
                # current word is the last one).  The previous broad
                # ``except Exception: pass`` could have masked unrelated bugs.
                pass
        # NOTE(review): runs of consecutive spaces make the reported word
        # locations drift slightly; acceptable for a dummy driver, confirm
        # if exact offsets ever matter.
        self._proxy.notify('finished-utterance', completed=True)
        self._proxy.setBusy(False)
constant[
Speaks the given text. Generates the following notifications during
output:
started-utterance: When speech output has started
started-word: When a word is about to be spoken. Includes the character
"location" of the start of the word in the original utterance text
and the "length" of the word in characters.
finished-utterance: When speech output has finished. Includes a flag
indicating if the entire utterance was "completed" or not.
The proxy automatically adds any "name" associated with the utterance
to the notifications on behalf of the driver.
When starting to output an utterance, the driver must inform its proxy
that it is busy by invoking L{driver.DriverProxy.setBusy} with a flag
of True. When the utterance completes or is interrupted, the driver
inform the proxy that it is no longer busy by invoking
L{driver.DriverProxy.setBusy} with a flag of False.
@param text: Unicode text to speak
@type text: unicode
]
call[name[self]._proxy.setBusy, parameter[constant[True]]]
call[name[self]._proxy.notify, parameter[constant[started-utterance]]]
variable[i] assign[=] constant[0]
for taget[name[word]] in starred[call[name[text].split, parameter[constant[ ]]]] begin[:]
call[name[self]._proxy.notify, parameter[constant[started-word]]]
<ast.Try object at 0x7da204963850>
call[name[self]._proxy.notify, parameter[constant[finished-utterance]]]
call[name[self]._proxy.setBusy, parameter[constant[False]]] | keyword[def] identifier[say] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[_proxy] . identifier[setBusy] ( keyword[True] )
identifier[self] . identifier[_proxy] . identifier[notify] ( literal[string] )
identifier[i] = literal[int]
keyword[for] identifier[word] keyword[in] identifier[text] . identifier[split] ( literal[string] ):
identifier[self] . identifier[_proxy] . identifier[notify] ( literal[string] , identifier[location] = identifier[i] , identifier[length] = identifier[len] ( identifier[word] ))
keyword[try] :
identifier[i] = identifier[text] . identifier[index] ( literal[string] , identifier[i] + literal[int] )+ literal[int]
keyword[except] identifier[Exception] :
keyword[pass]
identifier[self] . identifier[_proxy] . identifier[notify] ( literal[string] , identifier[completed] = keyword[True] )
identifier[self] . identifier[_proxy] . identifier[setBusy] ( keyword[False] ) | def say(self, text):
"""
Speaks the given text. Generates the following notifications during
output:
started-utterance: When speech output has started
started-word: When a word is about to be spoken. Includes the character
"location" of the start of the word in the original utterance text
and the "length" of the word in characters.
finished-utterance: When speech output has finished. Includes a flag
indicating if the entire utterance was "completed" or not.
The proxy automatically adds any "name" associated with the utterance
to the notifications on behalf of the driver.
When starting to output an utterance, the driver must inform its proxy
that it is busy by invoking L{driver.DriverProxy.setBusy} with a flag
of True. When the utterance completes or is interrupted, the driver
inform the proxy that it is no longer busy by invoking
L{driver.DriverProxy.setBusy} with a flag of False.
@param text: Unicode text to speak
@type text: unicode
"""
self._proxy.setBusy(True)
self._proxy.notify('started-utterance')
i = 0
for word in text.split(' '):
self._proxy.notify('started-word', location=i, length=len(word))
try:
i = text.index(' ', i + 1) + 1 # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['word']]
self._proxy.notify('finished-utterance', completed=True)
self._proxy.setBusy(False) |
def open_writer(self, partition=None, blocks=None, **kw):
        """
        Open the writer to write records into this table or its partition.
        :param partition: partition of this table
        :param blocks: block ids to open
        :param reopen: the reader will reuse last one, reopen is true means open a new reader.
        :type reopen: bool
        :param create_partition: if true, the partition will be created if not exist
        :type create_partition: bool
        :param endpoint: the tunnel service URL
        :param upload_id: use existing upload_id to upload data
        :param compress_option: compression algorithm, level and strategy
        :type compress_option: :class:`odps.tunnel.CompressOption`
        :param compress_algo: compression algorithm, work when ``compress_option`` is not provided,
                              can be ``zlib``, ``snappy``
        :param compress_level: used for ``zlib``, work when ``compress_option`` is not provided
        :param compress_strategy: used for ``zlib``, work when ``compress_option`` is not provided
        :return: writer, status means the tunnel writer status
        :Example:
        >>> with table.open_writer() as writer:
        >>>     writer.write(records)
        >>> with table.open_writer(partition='pt=test', blocks=[0, 1]):
        >>>     writer.write(0, gen_records(block=0))
        >>>     writer.write(1, gen_records(block=1))  # we can do this parallel
        """
        from ..tunnel.tabletunnel import TableUploadSession
        # Keep a reference to the table for the nested RecordWriter closures.
        table_object = self
        # Pop writer-specific options so the remaining ``kw`` can be forwarded
        # verbatim to the tunnel's create_upload_session().
        reopen = kw.pop('reopen', False)
        commit = kw.pop('commit', True)
        create_partition = kw.pop('create_partition', False)
        endpoint = kw.pop('endpoint', None)
        upload_id = kw.pop('upload_id', None)
        # Normalize the partition argument into a PartitionSpec instance.
        if partition and not isinstance(partition, odps_types.PartitionSpec):
            partition = odps_types.PartitionSpec(partition)
        # NOTE(review): ``exist_partition`` is called with ``create_partition``
        # (which may be a plain bool) rather than ``partition`` — looks
        # suspicious; confirm whether ``partition`` was intended here.
        if create_partition and not self.exist_partition(create_partition):
            self.create_partition(partition, if_not_exists=True)
        tunnel = self._create_table_tunnel(endpoint=endpoint)
        if upload_id is None:
            # No explicit upload id: try to resume the cached session for this
            # partition, unless the caller asked to reopen a fresh one.
            upload_ids = self._upload_ids
            upload_id = upload_ids.get(partition) if not reopen else None
        # NOTE(review): when ``upload_id`` is supplied by the caller,
        # ``upload_ids`` is never bound, so the assignment below (and the one
        # in close()) would raise NameError — verify this code path.
        upload_session = tunnel.create_upload_session(table=self, partition_spec=partition,
                                                      upload_id=upload_id, **kw)
        if upload_id and upload_session.status.value != TableUploadSession.Status.Normal.value:
            # check upload session status: a resumed session that is no longer
            # in Normal state cannot be reused, so start a brand-new one.
            upload_session = tunnel.create_upload_session(table=self, partition_spec=partition, **kw)
            upload_id = None
        upload_ids[partition] = upload_session.id
        # Default to a single block 0 when neither the caller nor the resumed
        # session specifies block ids.
        blocks = blocks or upload_session.blocks or [0, ]
        blocks_writes = [False] * len(blocks)  # which blocks received records
        blocks_writers = [None] * len(blocks)  # lazily opened per-block writers
        if upload_id:
            # Blocks already uploaded in the resumed session count as written.
            for block in upload_session.blocks:
                blocks_writes[blocks.index(block)] = True
        class RecordWriter(object):
            # Closure-based writer bound to the upload session created above.
            def __init__(self, table):
                self._table = table
                self._closed = False
            @property
            def upload_id(self):
                # Id of the underlying tunnel upload session.
                return upload_session.id
            @property
            def status(self):
                # Status of the underlying tunnel upload session.
                return upload_session.status
            def write(self, *args, **kwargs):
                from types import GeneratorType
                from itertools import chain
                if self._closed:
                    raise IOError('Cannot write to a closed writer.')
                # The block id may come as a keyword or as a leading
                # positional integer; otherwise block 0 is assumed.
                block_id = kwargs.get('block_id')
                if block_id is None:
                    if isinstance(args[0], six.integer_types):
                        block_id = args[0]
                        args = args[1:]
                    else:
                        block_id = 0
                # Accept a single Record, a list/tuple of Records or of value
                # sequences, a generator of either, or multiple Record args.
                if len(args) == 1:
                    arg = args[0]
                    if isinstance(arg, Record):
                        records = [arg, ]
                    elif isinstance(arg, (list, tuple)):
                        if isinstance(arg[0], Record):
                            records = arg
                        elif isinstance(arg[0], (list, tuple)):
                            records = (table_object.new_record(vals) for vals in arg)
                        else:
                            # A flat list/tuple is one record's values.
                            records = [table_object.new_record(arg), ]
                    elif isinstance(arg, GeneratorType):
                        try:
                            # peek the first element and then put back
                            next_arg = six.next(arg)
                            chained = chain((next_arg, ), arg)
                            if isinstance(next_arg, Record):
                                records = chained
                            else:
                                records = (table_object.new_record(vals) for vals in chained)
                        except StopIteration:
                            records = ()
                    else:
                        raise ValueError('Unsupported record type.')
                elif len(args) > 1:
                    records = args
                else:
                    raise ValueError('Cannot write no records to table.')
                compress = kwargs.get('compress', False)
                idx = blocks.index(block_id)
                writer = blocks_writers[idx]
                if writer is None:
                    # Open the per-block tunnel writer lazily on first write.
                    writer = blocks_writers[idx] = \
                        upload_session.open_record_writer(block_id, compress=compress)
                for record in records:
                    writer.write(record)
                blocks_writes[idx] = True
            def close(self):
                # Close every opened block writer, then commit the blocks
                # that actually received data.
                [writer.close() for writer in blocks_writers if writer is not None]
                if commit:
                    written_blocks = [block for block, block_write in zip(blocks, blocks_writes) if block_write]
                    upload_session.commit(written_blocks)
                    # Drop the cached upload id: the session is committed.
                    upload_ids[partition] = None
                self._closed = True
            def __enter__(self):
                return self
            def __exit__(self, exc_type, exc_val, exc_tb):
                # if an error occurs inside the with block, we do not commit,
                # leaving the upload session resumable by a retry.
                if exc_val is not None:
                    return
                self.close()
        return RecordWriter(self)
constant[
Open the writer to write records into this table or its partition.
:param partition: partition of this table
:param blocks: block ids to open
:param reopen: the reader will reuse last one, reopen is true means open a new reader.
:type reopen: bool
:param create_partition: if true, the partition will be created if not exist
:type create_partition: bool
:param endpoint: the tunnel service URL
:param upload_id: use existing upload_id to upload data
:param compress_option: compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:param compress_algo: compression algorithm, work when ``compress_option`` is not provided,
can be ``zlib``, ``snappy``
:param compress_level: used for ``zlib``, work when ``compress_option`` is not provided
:param compress_strategy: used for ``zlib``, work when ``compress_option`` is not provided
:return: writer, status means the tunnel writer status
:Example:
>>> with table.open_writer() as writer:
>>> writer.write(records)
>>> with table.open_writer(partition='pt=test', blocks=[0, 1]):
>>> writer.write(0, gen_records(block=0))
>>> writer.write(1, gen_records(block=1)) # we can do this parallel
]
from relative_module[tunnel.tabletunnel] import module[TableUploadSession]
variable[table_object] assign[=] name[self]
variable[reopen] assign[=] call[name[kw].pop, parameter[constant[reopen], constant[False]]]
variable[commit] assign[=] call[name[kw].pop, parameter[constant[commit], constant[True]]]
variable[create_partition] assign[=] call[name[kw].pop, parameter[constant[create_partition], constant[False]]]
variable[endpoint] assign[=] call[name[kw].pop, parameter[constant[endpoint], constant[None]]]
variable[upload_id] assign[=] call[name[kw].pop, parameter[constant[upload_id], constant[None]]]
if <ast.BoolOp object at 0x7da20cabc880> begin[:]
variable[partition] assign[=] call[name[odps_types].PartitionSpec, parameter[name[partition]]]
if <ast.BoolOp object at 0x7da20cabf430> begin[:]
call[name[self].create_partition, parameter[name[partition]]]
variable[tunnel] assign[=] call[name[self]._create_table_tunnel, parameter[]]
if compare[name[upload_id] is constant[None]] begin[:]
variable[upload_ids] assign[=] name[self]._upload_ids
variable[upload_id] assign[=] <ast.IfExp object at 0x7da20cabd750>
variable[upload_session] assign[=] call[name[tunnel].create_upload_session, parameter[]]
if <ast.BoolOp object at 0x7da18eb574f0> begin[:]
variable[upload_session] assign[=] call[name[tunnel].create_upload_session, parameter[]]
variable[upload_id] assign[=] constant[None]
call[name[upload_ids]][name[partition]] assign[=] name[upload_session].id
variable[blocks] assign[=] <ast.BoolOp object at 0x7da18eb557b0>
variable[blocks_writes] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18eb546d0>]] * call[name[len], parameter[name[blocks]]]]
variable[blocks_writers] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18eb57c10>]] * call[name[len], parameter[name[blocks]]]]
if name[upload_id] begin[:]
for taget[name[block]] in starred[name[upload_session].blocks] begin[:]
call[name[blocks_writes]][call[name[blocks].index, parameter[name[block]]]] assign[=] constant[True]
class class[RecordWriter, parameter[]] begin[:]
def function[__init__, parameter[self, table]]:
name[self]._table assign[=] name[table]
name[self]._closed assign[=] constant[False]
def function[upload_id, parameter[self]]:
return[name[upload_session].id]
def function[status, parameter[self]]:
return[name[upload_session].status]
def function[write, parameter[self]]:
from relative_module[types] import module[GeneratorType]
from relative_module[itertools] import module[chain]
if name[self]._closed begin[:]
<ast.Raise object at 0x7da18eb57850>
variable[block_id] assign[=] call[name[kwargs].get, parameter[constant[block_id]]]
if compare[name[block_id] is constant[None]] begin[:]
if call[name[isinstance], parameter[call[name[args]][constant[0]], name[six].integer_types]] begin[:]
variable[block_id] assign[=] call[name[args]][constant[0]]
variable[args] assign[=] call[name[args]][<ast.Slice object at 0x7da18eb541f0>]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
variable[arg] assign[=] call[name[args]][constant[0]]
if call[name[isinstance], parameter[name[arg], name[Record]]] begin[:]
variable[records] assign[=] list[[<ast.Name object at 0x7da18eb57c70>]]
variable[compress] assign[=] call[name[kwargs].get, parameter[constant[compress], constant[False]]]
variable[idx] assign[=] call[name[blocks].index, parameter[name[block_id]]]
variable[writer] assign[=] call[name[blocks_writers]][name[idx]]
if compare[name[writer] is constant[None]] begin[:]
variable[writer] assign[=] call[name[upload_session].open_record_writer, parameter[name[block_id]]]
for taget[name[record]] in starred[name[records]] begin[:]
call[name[writer].write, parameter[name[record]]]
call[name[blocks_writes]][name[idx]] assign[=] constant[True]
def function[close, parameter[self]]:
<ast.ListComp object at 0x7da18ede6fb0>
if name[commit] begin[:]
variable[written_blocks] assign[=] <ast.ListComp object at 0x7da18ede68c0>
call[name[upload_session].commit, parameter[name[written_blocks]]]
call[name[upload_ids]][name[partition]] assign[=] constant[None]
name[self]._closed assign[=] constant[True]
def function[__enter__, parameter[self]]:
return[name[self]]
def function[__exit__, parameter[self, exc_type, exc_val, exc_tb]]:
if compare[name[exc_val] is_not constant[None]] begin[:]
return[None]
call[name[self].close, parameter[]]
return[call[name[RecordWriter], parameter[name[self]]]] | keyword[def] identifier[open_writer] ( identifier[self] , identifier[partition] = keyword[None] , identifier[blocks] = keyword[None] ,** identifier[kw] ):
literal[string]
keyword[from] .. identifier[tunnel] . identifier[tabletunnel] keyword[import] identifier[TableUploadSession]
identifier[table_object] = identifier[self]
identifier[reopen] = identifier[kw] . identifier[pop] ( literal[string] , keyword[False] )
identifier[commit] = identifier[kw] . identifier[pop] ( literal[string] , keyword[True] )
identifier[create_partition] = identifier[kw] . identifier[pop] ( literal[string] , keyword[False] )
identifier[endpoint] = identifier[kw] . identifier[pop] ( literal[string] , keyword[None] )
identifier[upload_id] = identifier[kw] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[partition] keyword[and] keyword[not] identifier[isinstance] ( identifier[partition] , identifier[odps_types] . identifier[PartitionSpec] ):
identifier[partition] = identifier[odps_types] . identifier[PartitionSpec] ( identifier[partition] )
keyword[if] identifier[create_partition] keyword[and] keyword[not] identifier[self] . identifier[exist_partition] ( identifier[create_partition] ):
identifier[self] . identifier[create_partition] ( identifier[partition] , identifier[if_not_exists] = keyword[True] )
identifier[tunnel] = identifier[self] . identifier[_create_table_tunnel] ( identifier[endpoint] = identifier[endpoint] )
keyword[if] identifier[upload_id] keyword[is] keyword[None] :
identifier[upload_ids] = identifier[self] . identifier[_upload_ids]
identifier[upload_id] = identifier[upload_ids] . identifier[get] ( identifier[partition] ) keyword[if] keyword[not] identifier[reopen] keyword[else] keyword[None]
identifier[upload_session] = identifier[tunnel] . identifier[create_upload_session] ( identifier[table] = identifier[self] , identifier[partition_spec] = identifier[partition] ,
identifier[upload_id] = identifier[upload_id] ,** identifier[kw] )
keyword[if] identifier[upload_id] keyword[and] identifier[upload_session] . identifier[status] . identifier[value] != identifier[TableUploadSession] . identifier[Status] . identifier[Normal] . identifier[value] :
identifier[upload_session] = identifier[tunnel] . identifier[create_upload_session] ( identifier[table] = identifier[self] , identifier[partition_spec] = identifier[partition] ,** identifier[kw] )
identifier[upload_id] = keyword[None]
identifier[upload_ids] [ identifier[partition] ]= identifier[upload_session] . identifier[id]
identifier[blocks] = identifier[blocks] keyword[or] identifier[upload_session] . identifier[blocks] keyword[or] [ literal[int] ,]
identifier[blocks_writes] =[ keyword[False] ]* identifier[len] ( identifier[blocks] )
identifier[blocks_writers] =[ keyword[None] ]* identifier[len] ( identifier[blocks] )
keyword[if] identifier[upload_id] :
keyword[for] identifier[block] keyword[in] identifier[upload_session] . identifier[blocks] :
identifier[blocks_writes] [ identifier[blocks] . identifier[index] ( identifier[block] )]= keyword[True]
keyword[class] identifier[RecordWriter] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[table] ):
identifier[self] . identifier[_table] = identifier[table]
identifier[self] . identifier[_closed] = keyword[False]
@ identifier[property]
keyword[def] identifier[upload_id] ( identifier[self] ):
keyword[return] identifier[upload_session] . identifier[id]
@ identifier[property]
keyword[def] identifier[status] ( identifier[self] ):
keyword[return] identifier[upload_session] . identifier[status]
keyword[def] identifier[write] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[from] identifier[types] keyword[import] identifier[GeneratorType]
keyword[from] identifier[itertools] keyword[import] identifier[chain]
keyword[if] identifier[self] . identifier[_closed] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[block_id] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[block_id] keyword[is] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[args] [ literal[int] ], identifier[six] . identifier[integer_types] ):
identifier[block_id] = identifier[args] [ literal[int] ]
identifier[args] = identifier[args] [ literal[int] :]
keyword[else] :
identifier[block_id] = literal[int]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[arg] = identifier[args] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[Record] ):
identifier[records] =[ identifier[arg] ,]
keyword[elif] identifier[isinstance] ( identifier[arg] ,( identifier[list] , identifier[tuple] )):
keyword[if] identifier[isinstance] ( identifier[arg] [ literal[int] ], identifier[Record] ):
identifier[records] = identifier[arg]
keyword[elif] identifier[isinstance] ( identifier[arg] [ literal[int] ],( identifier[list] , identifier[tuple] )):
identifier[records] =( identifier[table_object] . identifier[new_record] ( identifier[vals] ) keyword[for] identifier[vals] keyword[in] identifier[arg] )
keyword[else] :
identifier[records] =[ identifier[table_object] . identifier[new_record] ( identifier[arg] ),]
keyword[elif] identifier[isinstance] ( identifier[arg] , identifier[GeneratorType] ):
keyword[try] :
identifier[next_arg] = identifier[six] . identifier[next] ( identifier[arg] )
identifier[chained] = identifier[chain] (( identifier[next_arg] ,), identifier[arg] )
keyword[if] identifier[isinstance] ( identifier[next_arg] , identifier[Record] ):
identifier[records] = identifier[chained]
keyword[else] :
identifier[records] =( identifier[table_object] . identifier[new_record] ( identifier[vals] ) keyword[for] identifier[vals] keyword[in] identifier[chained] )
keyword[except] identifier[StopIteration] :
identifier[records] =()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[len] ( identifier[args] )> literal[int] :
identifier[records] = identifier[args]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[compress] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[idx] = identifier[blocks] . identifier[index] ( identifier[block_id] )
identifier[writer] = identifier[blocks_writers] [ identifier[idx] ]
keyword[if] identifier[writer] keyword[is] keyword[None] :
identifier[writer] = identifier[blocks_writers] [ identifier[idx] ]= identifier[upload_session] . identifier[open_record_writer] ( identifier[block_id] , identifier[compress] = identifier[compress] )
keyword[for] identifier[record] keyword[in] identifier[records] :
identifier[writer] . identifier[write] ( identifier[record] )
identifier[blocks_writes] [ identifier[idx] ]= keyword[True]
keyword[def] identifier[close] ( identifier[self] ):
[ identifier[writer] . identifier[close] () keyword[for] identifier[writer] keyword[in] identifier[blocks_writers] keyword[if] identifier[writer] keyword[is] keyword[not] keyword[None] ]
keyword[if] identifier[commit] :
identifier[written_blocks] =[ identifier[block] keyword[for] identifier[block] , identifier[block_write] keyword[in] identifier[zip] ( identifier[blocks] , identifier[blocks_writes] ) keyword[if] identifier[block_write] ]
identifier[upload_session] . identifier[commit] ( identifier[written_blocks] )
identifier[upload_ids] [ identifier[partition] ]= keyword[None]
identifier[self] . identifier[_closed] = keyword[True]
keyword[def] identifier[__enter__] ( identifier[self] ):
keyword[return] identifier[self]
keyword[def] identifier[__exit__] ( identifier[self] , identifier[exc_type] , identifier[exc_val] , identifier[exc_tb] ):
keyword[if] identifier[exc_val] keyword[is] keyword[not] keyword[None] :
keyword[return]
identifier[self] . identifier[close] ()
keyword[return] identifier[RecordWriter] ( identifier[self] ) | def open_writer(self, partition=None, blocks=None, **kw):
"""
Open the writer to write records into this table or its partition.
:param partition: partition of this table
:param blocks: block ids to open
:param reopen: the reader will reuse last one, reopen is true means open a new reader.
:type reopen: bool
:param create_partition: if true, the partition will be created if not exist
:type create_partition: bool
:param endpoint: the tunnel service URL
:param upload_id: use existing upload_id to upload data
:param compress_option: compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:param compress_algo: compression algorithm, work when ``compress_option`` is not provided,
can be ``zlib``, ``snappy``
:param compress_level: used for ``zlib``, work when ``compress_option`` is not provided
:param compress_strategy: used for ``zlib``, work when ``compress_option`` is not provided
:return: writer, status means the tunnel writer status
:Example:
>>> with table.open_writer() as writer:
>>> writer.write(records)
>>> with table.open_writer(partition='pt=test', blocks=[0, 1]):
>>> writer.write(0, gen_records(block=0))
>>> writer.write(1, gen_records(block=1)) # we can do this parallel
"""
from ..tunnel.tabletunnel import TableUploadSession
table_object = self
reopen = kw.pop('reopen', False)
commit = kw.pop('commit', True)
create_partition = kw.pop('create_partition', False)
endpoint = kw.pop('endpoint', None)
upload_id = kw.pop('upload_id', None)
if partition and (not isinstance(partition, odps_types.PartitionSpec)):
partition = odps_types.PartitionSpec(partition) # depends on [control=['if'], data=[]]
if create_partition and (not self.exist_partition(create_partition)):
self.create_partition(partition, if_not_exists=True) # depends on [control=['if'], data=[]]
tunnel = self._create_table_tunnel(endpoint=endpoint)
if upload_id is None:
upload_ids = self._upload_ids
upload_id = upload_ids.get(partition) if not reopen else None # depends on [control=['if'], data=['upload_id']]
upload_session = tunnel.create_upload_session(table=self, partition_spec=partition, upload_id=upload_id, **kw)
if upload_id and upload_session.status.value != TableUploadSession.Status.Normal.value:
# check upload session status
upload_session = tunnel.create_upload_session(table=self, partition_spec=partition, **kw)
upload_id = None # depends on [control=['if'], data=[]]
upload_ids[partition] = upload_session.id
blocks = blocks or upload_session.blocks or [0]
blocks_writes = [False] * len(blocks)
blocks_writers = [None] * len(blocks)
if upload_id:
for block in upload_session.blocks:
blocks_writes[blocks.index(block)] = True # depends on [control=['for'], data=['block']] # depends on [control=['if'], data=[]]
class RecordWriter(object):
def __init__(self, table):
self._table = table
self._closed = False
@property
def upload_id(self):
return upload_session.id
@property
def status(self):
return upload_session.status
def write(self, *args, **kwargs):
from types import GeneratorType
from itertools import chain
if self._closed:
raise IOError('Cannot write to a closed writer.') # depends on [control=['if'], data=[]]
block_id = kwargs.get('block_id')
if block_id is None:
if isinstance(args[0], six.integer_types):
block_id = args[0]
args = args[1:] # depends on [control=['if'], data=[]]
else:
block_id = 0 # depends on [control=['if'], data=['block_id']]
if len(args) == 1:
arg = args[0]
if isinstance(arg, Record):
records = [arg] # depends on [control=['if'], data=[]]
elif isinstance(arg, (list, tuple)):
if isinstance(arg[0], Record):
records = arg # depends on [control=['if'], data=[]]
elif isinstance(arg[0], (list, tuple)):
records = (table_object.new_record(vals) for vals in arg) # depends on [control=['if'], data=[]]
else:
records = [table_object.new_record(arg)] # depends on [control=['if'], data=[]]
elif isinstance(arg, GeneratorType):
try:
# peek the first element and then put back
next_arg = six.next(arg)
chained = chain((next_arg,), arg)
if isinstance(next_arg, Record):
records = chained # depends on [control=['if'], data=[]]
else:
records = (table_object.new_record(vals) for vals in chained) # depends on [control=['try'], data=[]]
except StopIteration:
records = () # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Unsupported record type.') # depends on [control=['if'], data=[]]
elif len(args) > 1:
records = args # depends on [control=['if'], data=[]]
else:
raise ValueError('Cannot write no records to table.')
compress = kwargs.get('compress', False)
idx = blocks.index(block_id)
writer = blocks_writers[idx]
if writer is None:
writer = blocks_writers[idx] = upload_session.open_record_writer(block_id, compress=compress) # depends on [control=['if'], data=['writer']]
for record in records:
writer.write(record) # depends on [control=['for'], data=['record']]
blocks_writes[idx] = True
def close(self):
[writer.close() for writer in blocks_writers if writer is not None]
if commit:
written_blocks = [block for (block, block_write) in zip(blocks, blocks_writes) if block_write]
upload_session.commit(written_blocks)
upload_ids[partition] = None # depends on [control=['if'], data=[]]
self._closed = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# if an error occurs inside the with block, we do not commit
if exc_val is not None:
return # depends on [control=['if'], data=[]]
self.close()
return RecordWriter(self) |
def parse(binary, **params):
    """Turn a ZIP archive given as raw bytes into a frozen sample.

    Each archive member is decoded according to its guessed content
    encoding, then parsed by the handler registered for its guessed
    content type.

    Args:
        binary: The ZIP file contents as a ``bytes`` object.
        **params: Extra keyword arguments forwarded to each
            content-type parser.

    Returns:
        list: ``(filename, parsed_content)`` tuples, one per member.
    """
    binary = io.BytesIO(binary)
    collection = list()
    with zipfile.ZipFile(binary, 'r') as zip_:
        for zip_info in zip_.infolist():
            content_type, encoding = mimetypes.guess_type(zip_info.filename)
            content = zip_.read(zip_info)
            # NOTE(review): assumes the `content_encodings` and
            # `content_types` registries have entries for every guessed
            # value (including None) -- confirm where they are defined.
            content = content_encodings.get(encoding).decode(content)
            content = content_types.get(content_type).parse(content, **params)
            # Bug fix: was `collection.apppend(...)`, which raised
            # AttributeError as soon as any member was processed.
            collection.append((zip_info.filename, content))
    return collection
constant[Turns a ZIP file into a frozen sample.]
variable[binary] assign[=] call[name[io].BytesIO, parameter[name[binary]]]
variable[collection] assign[=] call[name[list], parameter[]]
with call[name[zipfile].ZipFile, parameter[name[binary], constant[r]]] begin[:]
for taget[name[zip_info]] in starred[call[name[zip_].infolist, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18ede5d20> assign[=] call[name[mimetypes].guess_type, parameter[name[zip_info].filename]]
variable[content] assign[=] call[name[zip_].read, parameter[name[zip_info]]]
variable[content] assign[=] call[call[name[content_encodings].get, parameter[name[encoding]]].decode, parameter[name[content]]]
variable[content] assign[=] call[call[name[content_types].get, parameter[name[content_type]]].parse, parameter[name[content]]]
call[name[collection].apppend, parameter[tuple[[<ast.Attribute object at 0x7da1b1451930>, <ast.Name object at 0x7da1b14521d0>]]]]
return[name[collection]] | keyword[def] identifier[parse] ( identifier[binary] ,** identifier[params] ):
literal[string]
identifier[binary] = identifier[io] . identifier[BytesIO] ( identifier[binary] )
identifier[collection] = identifier[list] ()
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[binary] , literal[string] ) keyword[as] identifier[zip_] :
keyword[for] identifier[zip_info] keyword[in] identifier[zip_] . identifier[infolist] ():
identifier[content_type] , identifier[encoding] = identifier[mimetypes] . identifier[guess_type] ( identifier[zip_info] . identifier[filename] )
identifier[content] = identifier[zip_] . identifier[read] ( identifier[zip_info] )
identifier[content] = identifier[content_encodings] . identifier[get] ( identifier[encoding] ). identifier[decode] ( identifier[content] )
identifier[content] = identifier[content_types] . identifier[get] ( identifier[content_type] ). identifier[parse] ( identifier[content] ,** identifier[params] )
identifier[collection] . identifier[apppend] (( identifier[zip_info] . identifier[filename] , identifier[content] ))
keyword[return] identifier[collection] | def parse(binary, **params):
"""Turns a ZIP file into a frozen sample."""
binary = io.BytesIO(binary)
collection = list()
with zipfile.ZipFile(binary, 'r') as zip_:
for zip_info in zip_.infolist():
(content_type, encoding) = mimetypes.guess_type(zip_info.filename)
content = zip_.read(zip_info)
content = content_encodings.get(encoding).decode(content)
content = content_types.get(content_type).parse(content, **params)
collection.apppend((zip_info.filename, content)) # depends on [control=['for'], data=['zip_info']] # depends on [control=['with'], data=['zip_']]
return collection |
def set(self, index, value=None, dir=False, ttl=None, expiration=None):
    """Update the node data.

    Exactly one of ``value`` (leaf node) or ``dir`` (directory node)
    must be supplied, and ``ttl``/``expiration`` must be given together
    or both omitted.
    """
    has_value = value is not None
    # XOR check: supplying both or neither makes the node kind ambiguous.
    if bool(dir) == has_value:
        raise TypeError('Choose one of value or directory')
    # ttl and expiration are a pair; one without the other is an error.
    if (ttl is None) != (expiration is None):
        raise TypeError('Both of ttl and expiration required')
    self.value = value
    if dir != self.dir:
        # The node kind changed: directories carry a child map, leaves
        # carry none.
        self.dir = dir
        self.nodes = {} if dir else None
    self.ttl = ttl
    self.expiration = expiration
    self.modified_index = index
constant[Updates the node data.]
if compare[call[name[bool], parameter[name[dir]]] is compare[name[value] is_not constant[None]]] begin[:]
<ast.Raise object at 0x7da18ede6080>
if compare[compare[name[ttl] is_not constant[None]] is compare[name[expiration] is constant[None]]] begin[:]
<ast.Raise object at 0x7da18ede5390>
name[self].value assign[=] name[value]
if compare[name[self].dir not_equal[!=] name[dir]] begin[:]
name[self].dir assign[=] name[dir]
name[self].nodes assign[=] <ast.IfExp object at 0x7da18ede56c0>
name[self].ttl assign[=] name[ttl]
name[self].expiration assign[=] name[expiration]
name[self].modified_index assign[=] name[index] | keyword[def] identifier[set] ( identifier[self] , identifier[index] , identifier[value] = keyword[None] , identifier[dir] = keyword[False] , identifier[ttl] = keyword[None] , identifier[expiration] = keyword[None] ):
literal[string]
keyword[if] identifier[bool] ( identifier[dir] ) keyword[is] ( identifier[value] keyword[is] keyword[not] keyword[None] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] ( identifier[ttl] keyword[is] keyword[not] keyword[None] ) keyword[is] ( identifier[expiration] keyword[is] keyword[None] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[value] = identifier[value]
keyword[if] identifier[self] . identifier[dir] != identifier[dir] :
identifier[self] . identifier[dir] = identifier[dir]
identifier[self] . identifier[nodes] ={} keyword[if] identifier[dir] keyword[else] keyword[None]
identifier[self] . identifier[ttl] = identifier[ttl]
identifier[self] . identifier[expiration] = identifier[expiration]
identifier[self] . identifier[modified_index] = identifier[index] | def set(self, index, value=None, dir=False, ttl=None, expiration=None):
"""Updates the node data."""
if bool(dir) is (value is not None):
raise TypeError('Choose one of value or directory') # depends on [control=['if'], data=[]]
if (ttl is not None) is (expiration is None):
raise TypeError('Both of ttl and expiration required') # depends on [control=['if'], data=[]]
self.value = value
if self.dir != dir:
self.dir = dir
self.nodes = {} if dir else None # depends on [control=['if'], data=['dir']]
self.ttl = ttl
self.expiration = expiration
self.modified_index = index |
async def issueAccumulator(self, schemaId: ID, iA,
                           L) -> AccumulatorPublicKey:
    """Issue an accumulator used for non-revocation proofs and submit it.

    :param schemaId: the schema ID (reference to the claim definition
        schema)
    :param iA: the accumulator ID
    :param L: the maximum number of claims within the accumulator
    :return: the submitted accumulator public key
    """
    issued = await self._nonRevocationIssuer.issueAccumulator(
        schemaId, iA, L)
    accum, tails, public_key, secret_key = issued
    # Submit the public material (key, accumulator value and tails)
    # first, then store the secret key in the wallet.
    public_key = await self.wallet.submitAccumPublic(
        schemaId=schemaId, accumPK=public_key, accum=accum, tails=tails)
    await self.wallet.submitAccumSecret(schemaId=schemaId,
                                        accumSK=secret_key)
    return public_key
identifier[L] )-> identifier[AccumulatorPublicKey] :
literal[string]
identifier[accum] , identifier[tails] , identifier[accPK] , identifier[accSK] = keyword[await] identifier[self] . identifier[_nonRevocationIssuer] . identifier[issueAccumulator] (
identifier[schemaId] , identifier[iA] , identifier[L] )
identifier[accPK] = keyword[await] identifier[self] . identifier[wallet] . identifier[submitAccumPublic] ( identifier[schemaId] = identifier[schemaId] ,
identifier[accumPK] = identifier[accPK] ,
identifier[accum] = identifier[accum] , identifier[tails] = identifier[tails] )
keyword[await] identifier[self] . identifier[wallet] . identifier[submitAccumSecret] ( identifier[schemaId] = identifier[schemaId] ,
identifier[accumSK] = identifier[accSK] )
keyword[return] identifier[accPK] | async def issueAccumulator(self, schemaId: ID, iA, L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
(accum, tails, accPK, accSK) = await self._nonRevocationIssuer.issueAccumulator(schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId, accumPK=accPK, accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId, accumSK=accSK)
return accPK |
def parse_model_file(path):
    """Parse a file as a list of model reactions.

    The file format is detected from the path and the file is parsed
    accordingly. ``path`` may be a string or a file path context; it is
    opened for reading. Reaction IDs are yielded one at a time.
    """
    context = FilePathContext(path)
    fmt = resolve_format(None, context.filepath)
    if fmt == 'tsv':
        logger.debug('Parsing model file {} as TSV'.format(context.filepath))
        with context.open('r') as file_handle:
            for rid in parse_model_table_file(context, file_handle):
                yield rid
    elif fmt == 'yaml':
        logger.debug('Parsing model file {} as YAML'.format(context.filepath))
        with context.open('r') as file_handle:
            for rid in parse_model_yaml_file(context, file_handle):
                yield rid
constant[Parse a file as a list of model reactions
The file format is detected and the file is parsed accordinly. The file is
specified as a file path that will be opened for reading. Path can be given
as a string or a context.
]
variable[context] assign[=] call[name[FilePathContext], parameter[name[path]]]
variable[format] assign[=] call[name[resolve_format], parameter[constant[None], name[context].filepath]]
if compare[name[format] equal[==] constant[tsv]] begin[:]
call[name[logger].debug, parameter[call[constant[Parsing model file {} as TSV].format, parameter[name[context].filepath]]]]
with call[name[context].open, parameter[constant[r]]] begin[:]
for taget[name[reaction_id]] in starred[call[name[parse_model_table_file], parameter[name[context], name[f]]]] begin[:]
<ast.Yield object at 0x7da20c76d000> | keyword[def] identifier[parse_model_file] ( identifier[path] ):
literal[string]
identifier[context] = identifier[FilePathContext] ( identifier[path] )
identifier[format] = identifier[resolve_format] ( keyword[None] , identifier[context] . identifier[filepath] )
keyword[if] identifier[format] == literal[string] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[context] . identifier[filepath] ))
keyword[with] identifier[context] . identifier[open] ( literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[reaction_id] keyword[in] identifier[parse_model_table_file] ( identifier[context] , identifier[f] ):
keyword[yield] identifier[reaction_id]
keyword[elif] identifier[format] == literal[string] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[context] . identifier[filepath] ))
keyword[with] identifier[context] . identifier[open] ( literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[reaction_id] keyword[in] identifier[parse_model_yaml_file] ( identifier[context] , identifier[f] ):
keyword[yield] identifier[reaction_id] | def parse_model_file(path):
"""Parse a file as a list of model reactions
The file format is detected and the file is parsed accordinly. The file is
specified as a file path that will be opened for reading. Path can be given
as a string or a context.
"""
context = FilePathContext(path)
format = resolve_format(None, context.filepath)
if format == 'tsv':
logger.debug('Parsing model file {} as TSV'.format(context.filepath))
with context.open('r') as f:
for reaction_id in parse_model_table_file(context, f):
yield reaction_id # depends on [control=['for'], data=['reaction_id']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
elif format == 'yaml':
logger.debug('Parsing model file {} as YAML'.format(context.filepath))
with context.open('r') as f:
for reaction_id in parse_model_yaml_file(context, f):
yield reaction_id # depends on [control=['for'], data=['reaction_id']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] |
def validate(self, data):
    """Validate the provided data.

    Returns:
        dict:
            The validated data, with the confirmed email address added
            under the ``email`` key.

    Raises:
        serializers.ValidationError:
            If the provided password is invalid.
    """
    user = self._confirmation.email.user
    # Password is only checked when verification requires it; the
    # check_password call (and the data["password"] lookup) is skipped
    # otherwise, exactly as before.
    password_ok = (
        not app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED
        or user.check_password(data["password"])
    )
    if not password_ok:
        raise serializers.ValidationError(
            _("The provided password is invalid.")
        )
    data["email"] = self._confirmation.email.email
    return data
constant[
Validate the provided data.
Returns:
dict:
The validated data.
Raises:
serializers.ValidationError:
If the provided password is invalid.
]
variable[user] assign[=] name[self]._confirmation.email.user
if <ast.BoolOp object at 0x7da1afe6d450> begin[:]
<ast.Raise object at 0x7da1afe6d870>
call[name[data]][constant[email]] assign[=] name[self]._confirmation.email.email
return[name[data]] | keyword[def] identifier[validate] ( identifier[self] , identifier[data] ):
literal[string]
identifier[user] = identifier[self] . identifier[_confirmation] . identifier[email] . identifier[user]
keyword[if] (
identifier[app_settings] . identifier[EMAIL_VERIFICATION_PASSWORD_REQUIRED]
keyword[and] keyword[not] identifier[user] . identifier[check_password] ( identifier[data] [ literal[string] ])
):
keyword[raise] identifier[serializers] . identifier[ValidationError] (
identifier[_] ( literal[string] )
)
identifier[data] [ literal[string] ]= identifier[self] . identifier[_confirmation] . identifier[email] . identifier[email]
keyword[return] identifier[data] | def validate(self, data):
"""
Validate the provided data.
Returns:
dict:
The validated data.
Raises:
serializers.ValidationError:
If the provided password is invalid.
"""
user = self._confirmation.email.user
if app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED and (not user.check_password(data['password'])):
raise serializers.ValidationError(_('The provided password is invalid.')) # depends on [control=['if'], data=[]]
# Add email to returned data
data['email'] = self._confirmation.email.email
return data |
def render(self, h, w):
    """Reset the plotter, cancel any in-flight render threads and spawn
    a fresh asynchronous render at the given canvas size."""
    self.needsRefresh = False
    stale = [t for t in self.currentThreads if t.name == 'plotAll_async']
    cancelThread(*stale)
    self.labels.clear()
    self.resetCanvasDimensions(h, w)
    self.render_async()
constant[resets plotter, cancels previous render threads, spawns a new render]
name[self].needsRefresh assign[=] constant[False]
call[name[cancelThread], parameter[<ast.Starred object at 0x7da18bccab90>]]
call[name[self].labels.clear, parameter[]]
call[name[self].resetCanvasDimensions, parameter[name[h], name[w]]]
call[name[self].render_async, parameter[]] | keyword[def] identifier[render] ( identifier[self] , identifier[h] , identifier[w] ):
literal[string]
identifier[self] . identifier[needsRefresh] = keyword[False]
identifier[cancelThread] (*( identifier[t] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[currentThreads] keyword[if] identifier[t] . identifier[name] == literal[string] ))
identifier[self] . identifier[labels] . identifier[clear] ()
identifier[self] . identifier[resetCanvasDimensions] ( identifier[h] , identifier[w] )
identifier[self] . identifier[render_async] () | def render(self, h, w):
"""resets plotter, cancels previous render threads, spawns a new render"""
self.needsRefresh = False
cancelThread(*(t for t in self.currentThreads if t.name == 'plotAll_async'))
self.labels.clear()
self.resetCanvasDimensions(h, w)
self.render_async() |
def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):
    """
    Send pings to the destination address and print each result.

    Args:
        dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
        count: How many pings should be sent. Default is 4, same as
            Windows CMD. (default 4)
        *args and **kwargs: All the other arguments available in ping()
            except `seq`.

    Returns:
        Formatted ping results printed.
    """
    timeout = kwargs.get("timeout")
    src = kwargs.get("src")
    unit = kwargs.setdefault("unit", "ms")
    for seq in range(count):
        prefix = "ping '{}'".format(dest_addr)
        if src:
            prefix += " from '{}'".format(src)
        prefix += " ... "
        print(prefix, end="")
        delay = ping(dest_addr, seq=seq, *args, **kwargs)
        if delay is None:
            if timeout:
                print("Timeout > {}s".format(timeout))
            else:
                print("Timeout")
        else:
            print("{value}{unit}".format(value=int(delay), unit=unit))
constant[
Send pings to destination address with the given timeout and display the result.
Args:
dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)
*args and **kwargs: And all the other arguments available in ping() except `seq`.
Returns:
Formatted ping results printed.
]
variable[timeout] assign[=] call[name[kwargs].get, parameter[constant[timeout]]]
variable[src] assign[=] call[name[kwargs].get, parameter[constant[src]]]
variable[unit] assign[=] call[name[kwargs].setdefault, parameter[constant[unit], constant[ms]]]
for taget[name[i]] in starred[call[name[range], parameter[name[count]]]] begin[:]
variable[output_text] assign[=] call[constant[ping '{}'].format, parameter[name[dest_addr]]]
<ast.AugAssign object at 0x7da20e9b1990>
<ast.AugAssign object at 0x7da20e9b1870>
call[name[print], parameter[name[output_text]]]
variable[delay] assign[=] call[name[ping], parameter[name[dest_addr], <ast.Starred object at 0x7da20c6e7df0>]]
if compare[name[delay] is constant[None]] begin[:]
call[name[print], parameter[<ast.IfExp object at 0x7da20c6e75b0>]] | keyword[def] identifier[verbose_ping] ( identifier[dest_addr] : identifier[str] , identifier[count] : identifier[int] = literal[int] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[timeout] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[src] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[unit] = identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[count] ):
identifier[output_text] = literal[string] . identifier[format] ( identifier[dest_addr] )
identifier[output_text] += literal[string] . identifier[format] ( identifier[src] ) keyword[if] identifier[src] keyword[else] literal[string]
identifier[output_text] += literal[string]
identifier[print] ( identifier[output_text] , identifier[end] = literal[string] )
identifier[delay] = identifier[ping] ( identifier[dest_addr] , identifier[seq] = identifier[i] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[delay] keyword[is] keyword[None] :
identifier[print] ( literal[string] . identifier[format] ( identifier[timeout] ) keyword[if] identifier[timeout] keyword[else] literal[string] )
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[value] = identifier[int] ( identifier[delay] ), identifier[unit] = identifier[unit] )) | def verbose_ping(dest_addr: str, count: int=4, *args, **kwargs):
"""
Send pings to destination address with the given timeout and display the result.
Args:
dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)
*args and **kwargs: And all the other arguments available in ping() except `seq`.
Returns:
Formatted ping results printed.
"""
timeout = kwargs.get('timeout')
src = kwargs.get('src')
unit = kwargs.setdefault('unit', 'ms')
for i in range(count):
output_text = "ping '{}'".format(dest_addr)
output_text += " from '{}'".format(src) if src else ''
output_text += ' ... '
print(output_text, end='')
delay = ping(dest_addr, *args, seq=i, **kwargs)
if delay is None:
print('Timeout > {}s'.format(timeout) if timeout else 'Timeout') # depends on [control=['if'], data=[]]
else:
print('{value}{unit}'.format(value=int(delay), unit=unit)) # depends on [control=['for'], data=['i']] |
def reverb(self, reverberance=50, high_freq_damping=50, room_scale=100,
           stereo_depth=100, pre_delay=0, wet_gain=0, wet_only=False):
    '''Add reverberation to the audio using the 'freeverb' algorithm.

    A reverberation effect is sometimes desirable for concert halls that
    are too small or contain so many people that the hall's natural
    reverberance is diminished. Applying a small amount of stereo reverb
    to a (dry) mono signal will usually make it sound more natural.

    Parameters
    ----------
    reverberance : float, default=50
        Percentage of reverberance
    high_freq_damping : float, default=50
        Percentage of high-frequency damping.
    room_scale : float, default=100
        Scale of the room as a percentage.
    stereo_depth : float, default=100
        Stereo depth as a percentage.
    pre_delay : float, default=0
        Pre-delay in milliseconds.
    wet_gain : float, default=0
        Amount of wet gain in dB
    wet_only : bool, default=False
        If True, only outputs the wet signal.

    Raises
    ------
    ValueError
        If any argument is of the wrong type or outside its valid range.

    See Also
    --------
    echo
    '''
    def _require_percentage(name, value):
        # The four "percentage" parameters share identical 0..100
        # validation; the error text matches the original per-parameter
        # messages exactly.
        if not is_number(value) or value < 0 or value > 100:
            raise ValueError("{} must be between 0 and 100".format(name))

    _require_percentage("reverberance", reverberance)
    _require_percentage("high_freq_damping", high_freq_damping)
    _require_percentage("room_scale", room_scale)
    _require_percentage("stereo_depth", stereo_depth)
    if not is_number(pre_delay) or pre_delay < 0:
        raise ValueError("pre_delay must be a positive number")
    if not is_number(wet_gain):
        raise ValueError("wet_gain must be a number")
    if not isinstance(wet_only, bool):
        raise ValueError("wet_only must be a boolean.")

    effect_args = ['reverb']
    # '-w' must precede the numeric arguments for sox's reverb effect.
    if wet_only:
        effect_args.append('-w')
    # Argument order is fixed by the sox CLI; all values are rendered in
    # the same fixed-point format as before.
    effect_args.extend(
        '{:f}'.format(value)
        for value in (reverberance, high_freq_damping, room_scale,
                      stereo_depth, pre_delay, wet_gain)
    )

    self.effects.extend(effect_args)
    self.effects_log.append('reverb')
    return self
constant[Add reverberation to the audio using the ‘freeverb’ algorithm.
A reverberation effect is sometimes desirable for concert halls that
are too small or contain so many people that the hall’s natural
reverberance is diminished. Applying a small amount of stereo reverb
to a (dry) mono signal will usually make it sound more natural.
Parameters
----------
reverberance : float, default=50
Percentage of reverberance
high_freq_damping : float, default=50
Percentage of high-frequency damping.
room_scale : float, default=100
Scale of the room as a percentage.
stereo_depth : float, default=100
Stereo depth as a percentage.
pre_delay : float, default=0
Pre-delay in milliseconds.
wet_gain : float, default=0
Amount of wet gain in dB
wet_only : bool, default=False
If True, only outputs the wet signal.
See Also
--------
echo
]
if <ast.BoolOp object at 0x7da1b013dde0> begin[:]
<ast.Raise object at 0x7da1b013ccd0>
if <ast.BoolOp object at 0x7da1b013d750> begin[:]
<ast.Raise object at 0x7da1b013dcf0>
if <ast.BoolOp object at 0x7da1b013ca00> begin[:]
<ast.Raise object at 0x7da1b013c910>
if <ast.BoolOp object at 0x7da1b013ddb0> begin[:]
<ast.Raise object at 0x7da1b013d6f0>
if <ast.BoolOp object at 0x7da1b013ee00> begin[:]
<ast.Raise object at 0x7da1b016fe50>
if <ast.UnaryOp object at 0x7da1b016e5f0> begin[:]
<ast.Raise object at 0x7da1b016ffa0>
if <ast.UnaryOp object at 0x7da1b016de40> begin[:]
<ast.Raise object at 0x7da1b016c160>
variable[effect_args] assign[=] list[[<ast.Constant object at 0x7da1b016d5d0>]]
if name[wet_only] begin[:]
call[name[effect_args].append, parameter[constant[-w]]]
call[name[effect_args].extend, parameter[list[[<ast.Call object at 0x7da1b016cfa0>, <ast.Call object at 0x7da1b016f8b0>, <ast.Call object at 0x7da1b016eef0>, <ast.Call object at 0x7da1b016cd30>, <ast.Call object at 0x7da1b016dba0>, <ast.Call object at 0x7da1b016d3f0>]]]]
call[name[self].effects.extend, parameter[name[effect_args]]]
call[name[self].effects_log.append, parameter[constant[reverb]]]
return[name[self]] | keyword[def] identifier[reverb] ( identifier[self] , identifier[reverberance] = literal[int] , identifier[high_freq_damping] = literal[int] , identifier[room_scale] = literal[int] ,
identifier[stereo_depth] = literal[int] , identifier[pre_delay] = literal[int] , identifier[wet_gain] = literal[int] , identifier[wet_only] = keyword[False] ):
literal[string]
keyword[if] ( keyword[not] identifier[is_number] ( identifier[reverberance] ) keyword[or] identifier[reverberance] < literal[int] keyword[or]
identifier[reverberance] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( keyword[not] identifier[is_number] ( identifier[high_freq_damping] ) keyword[or] identifier[high_freq_damping] < literal[int] keyword[or]
identifier[high_freq_damping] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( keyword[not] identifier[is_number] ( identifier[room_scale] ) keyword[or] identifier[room_scale] < literal[int] keyword[or]
identifier[room_scale] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( keyword[not] identifier[is_number] ( identifier[stereo_depth] ) keyword[or] identifier[stereo_depth] < literal[int] keyword[or]
identifier[stereo_depth] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[is_number] ( identifier[pre_delay] ) keyword[or] identifier[pre_delay] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[is_number] ( identifier[wet_gain] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[wet_only] , identifier[bool] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[effect_args] =[ literal[string] ]
keyword[if] identifier[wet_only] :
identifier[effect_args] . identifier[append] ( literal[string] )
identifier[effect_args] . identifier[extend] ([
literal[string] . identifier[format] ( identifier[reverberance] ),
literal[string] . identifier[format] ( identifier[high_freq_damping] ),
literal[string] . identifier[format] ( identifier[room_scale] ),
literal[string] . identifier[format] ( identifier[stereo_depth] ),
literal[string] . identifier[format] ( identifier[pre_delay] ),
literal[string] . identifier[format] ( identifier[wet_gain] )
])
identifier[self] . identifier[effects] . identifier[extend] ( identifier[effect_args] )
identifier[self] . identifier[effects_log] . identifier[append] ( literal[string] )
keyword[return] identifier[self] | def reverb(self, reverberance=50, high_freq_damping=50, room_scale=100, stereo_depth=100, pre_delay=0, wet_gain=0, wet_only=False):
"""Add reverberation to the audio using the ‘freeverb’ algorithm.
A reverberation effect is sometimes desirable for concert halls that
are too small or contain so many people that the hall’s natural
reverberance is diminished. Applying a small amount of stereo reverb
to a (dry) mono signal will usually make it sound more natural.
Parameters
----------
reverberance : float, default=50
Percentage of reverberance
high_freq_damping : float, default=50
Percentage of high-frequency damping.
room_scale : float, default=100
Scale of the room as a percentage.
stereo_depth : float, default=100
Stereo depth as a percentage.
pre_delay : float, default=0
Pre-delay in milliseconds.
wet_gain : float, default=0
Amount of wet gain in dB
wet_only : bool, default=False
If True, only outputs the wet signal.
See Also
--------
echo
"""
if not is_number(reverberance) or reverberance < 0 or reverberance > 100:
raise ValueError('reverberance must be between 0 and 100') # depends on [control=['if'], data=[]]
if not is_number(high_freq_damping) or high_freq_damping < 0 or high_freq_damping > 100:
raise ValueError('high_freq_damping must be between 0 and 100') # depends on [control=['if'], data=[]]
if not is_number(room_scale) or room_scale < 0 or room_scale > 100:
raise ValueError('room_scale must be between 0 and 100') # depends on [control=['if'], data=[]]
if not is_number(stereo_depth) or stereo_depth < 0 or stereo_depth > 100:
raise ValueError('stereo_depth must be between 0 and 100') # depends on [control=['if'], data=[]]
if not is_number(pre_delay) or pre_delay < 0:
raise ValueError('pre_delay must be a positive number') # depends on [control=['if'], data=[]]
if not is_number(wet_gain):
raise ValueError('wet_gain must be a number') # depends on [control=['if'], data=[]]
if not isinstance(wet_only, bool):
raise ValueError('wet_only must be a boolean.') # depends on [control=['if'], data=[]]
effect_args = ['reverb']
if wet_only:
effect_args.append('-w') # depends on [control=['if'], data=[]]
effect_args.extend(['{:f}'.format(reverberance), '{:f}'.format(high_freq_damping), '{:f}'.format(room_scale), '{:f}'.format(stereo_depth), '{:f}'.format(pre_delay), '{:f}'.format(wet_gain)])
self.effects.extend(effect_args)
self.effects_log.append('reverb')
return self |
def parse_export_directory(self, rva, size):
    """Parse the export directory.

    Given the RVA of the export directory, it will process all
    its entries.
    The exports will be made available through a list "exports"
    containing a tuple with the following elements:
        (ordinal, symbol_address, symbol_name)
    And also through a dictionary "exports_by_ordinal" whose keys
    will be the ordinals and the values tuples of the form:
        (symbol_address, symbol_name)
    The symbol addresses are relative, not absolute.
    """
    try:
        export_dir = self.__unpack_data__(
            self.__IMAGE_EXPORT_DIRECTORY_format__,
            self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ),
            file_offset = self.get_offset_from_rva(rva) )
    except PEFormatError:
        # The directory header itself is unreadable (e.g. RVA outside the
        # mapped data): record a warning and give up on exports entirely.
        self.__warnings.append(
            'Error parsing export directory at RVA: 0x%x' % ( rva ) )
        return
    if not export_dir:
        return
    # We keep track of the bytes left in the file and use it to set a upper
    # bound in the number of items that can be read from the different
    # arrays
    #
    def length_until_eof(rva):
        # Bytes available from `rva` to the end of the raw file data; used
        # to clamp array reads so a corrupt count field cannot run past EOF.
        return len(self.__data__) - self.get_offset_from_rva(rva)
    try:
        address_of_names = self.get_data(
            export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames*4))
        address_of_name_ordinals = self.get_data(
            export_dir.AddressOfNameOrdinals, min( length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames*4) )
        address_of_functions = self.get_data(
            export_dir.AddressOfFunctions, min( length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions*4) )
    except PEFormatError:
        self.__warnings.append(
            'Error parsing export directory at RVA: 0x%x' % ( rva ) )
        return
    exports = []
    # Tolerate a handful of corrupt entries before concluding the whole
    # table is bogus and bailing out of the loop.
    max_failed_entries_before_giving_up = 10
    # First pass: exports that have a name. Each name-table index maps to
    # an ordinal, which in turn indexes the function-address table.
    for i in xrange( min( export_dir.NumberOfNames, length_until_eof(export_dir.AddressOfNames)/4) ):
        symbol_name_address = self.get_dword_from_data(address_of_names, i)
        symbol_name = self.get_string_at_rva( symbol_name_address )
        try:
            symbol_name_offset = self.get_offset_from_rva( symbol_name_address )
        except PEFormatError:
            max_failed_entries_before_giving_up -= 1
            if max_failed_entries_before_giving_up <= 0:
                break
            continue
        symbol_ordinal = self.get_word_from_data(
            address_of_name_ordinals, i)
        # symbol_ordinal*4 is the byte offset into the DWORD address table.
        if symbol_ordinal*4 < len(address_of_functions):
            symbol_address = self.get_dword_from_data(
                address_of_functions, symbol_ordinal)
        else:
            # Corrupt? a bad pointer... we assume it's all
            # useless, no exports
            return None
        if symbol_address is None or symbol_address == 0:
            continue
        # If the funcion's RVA points within the export directory
        # it will point to a string with the forwarded symbol's string
        # instead of pointing the the function start address.
        if symbol_address >= rva and symbol_address < rva+size:
            forwarder_str = self.get_string_at_rva(symbol_address)
            try:
                forwarder_offset = self.get_offset_from_rva( symbol_address )
            except PEFormatError:
                # Forwarder string lies outside the mapped data: drop entry.
                continue
        else:
            forwarder_str = None
            forwarder_offset = None
        exports.append(
            ExportData(
                pe = self,
                ordinal = export_dir.Base+symbol_ordinal,
                ordinal_offset = self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2*i ),
                address = symbol_address,
                address_offset = self.get_offset_from_rva( export_dir.AddressOfFunctions + 4*symbol_ordinal ),
                name = symbol_name,
                name_offset = symbol_name_offset,
                forwarder = forwarder_str,
                forwarder_offset = forwarder_offset ))
    # Ordinals already covered by the named pass; the second pass only adds
    # the remaining ordinal-only (nameless) exports.
    ordinals = [exp.ordinal for exp in exports]
    max_failed_entries_before_giving_up = 10
    for idx in xrange( min(export_dir.NumberOfFunctions, length_until_eof(export_dir.AddressOfFunctions)/4) ):
        if not idx+export_dir.Base in ordinals:
            try:
                symbol_address = self.get_dword_from_data(
                    address_of_functions, idx)
            except PEFormatError:
                symbol_address = None
            if symbol_address is None:
                max_failed_entries_before_giving_up -= 1
                if max_failed_entries_before_giving_up <= 0:
                    break
                # NOTE(review): when symbol_address is None and the give-up
                # budget is not exhausted, execution falls through to the
                # comparisons below with symbol_address == None; this relies
                # on Python 2 permitting None in ordering comparisons (this
                # code uses xrange, so it targets Python 2) — confirm
                # intended.
            if symbol_address == 0:
                continue
            #
            # Checking for forwarder again.
            #
            if symbol_address >= rva and symbol_address < rva+size:
                forwarder_str = self.get_string_at_rva(symbol_address)
            else:
                forwarder_str = None
            # NOTE(review): unlike the named pass above, this ExportData is
            # built without pe=self and without the *_offset fields —
            # confirm that downstream consumers tolerate the asymmetry.
            exports.append(
                ExportData(
                    ordinal = export_dir.Base+idx,
                    address = symbol_address,
                    name = None,
                    forwarder = forwarder_str))
    return ExportDirData(
        struct = export_dir,
        symbols = exports)
constant[Parse the export directory.
Given the RVA of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dictionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the from:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute.
]
<ast.Try object at 0x7da1b0c53790>
if <ast.UnaryOp object at 0x7da1b0c508e0> begin[:]
return[None]
def function[length_until_eof, parameter[rva]]:
return[binary_operation[call[name[len], parameter[name[self].__data__]] - call[name[self].get_offset_from_rva, parameter[name[rva]]]]]
<ast.Try object at 0x7da1b0c51060>
variable[exports] assign[=] list[[]]
variable[max_failed_entries_before_giving_up] assign[=] constant[10]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[min], parameter[name[export_dir].NumberOfNames, binary_operation[call[name[length_until_eof], parameter[name[export_dir].AddressOfNames]] / constant[4]]]]]]] begin[:]
variable[symbol_name_address] assign[=] call[name[self].get_dword_from_data, parameter[name[address_of_names], name[i]]]
variable[symbol_name] assign[=] call[name[self].get_string_at_rva, parameter[name[symbol_name_address]]]
<ast.Try object at 0x7da1b0ed5060>
variable[symbol_ordinal] assign[=] call[name[self].get_word_from_data, parameter[name[address_of_name_ordinals], name[i]]]
if compare[binary_operation[name[symbol_ordinal] * constant[4]] less[<] call[name[len], parameter[name[address_of_functions]]]] begin[:]
variable[symbol_address] assign[=] call[name[self].get_dword_from_data, parameter[name[address_of_functions], name[symbol_ordinal]]]
if <ast.BoolOp object at 0x7da1b0ed5390> begin[:]
continue
if <ast.BoolOp object at 0x7da1b0e47fd0> begin[:]
variable[forwarder_str] assign[=] call[name[self].get_string_at_rva, parameter[name[symbol_address]]]
<ast.Try object at 0x7da1b0e47d60>
call[name[exports].append, parameter[call[name[ExportData], parameter[]]]]
variable[ordinals] assign[=] <ast.ListComp object at 0x7da20c76d1e0>
variable[max_failed_entries_before_giving_up] assign[=] constant[10]
for taget[name[idx]] in starred[call[name[xrange], parameter[call[name[min], parameter[name[export_dir].NumberOfFunctions, binary_operation[call[name[length_until_eof], parameter[name[export_dir].AddressOfFunctions]] / constant[4]]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ed1180> begin[:]
<ast.Try object at 0x7da1b0ed26b0>
if compare[name[symbol_address] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0ed3160>
if compare[name[max_failed_entries_before_giving_up] less_or_equal[<=] constant[0]] begin[:]
break
if compare[name[symbol_address] equal[==] constant[0]] begin[:]
continue
if <ast.BoolOp object at 0x7da1b0ed25c0> begin[:]
variable[forwarder_str] assign[=] call[name[self].get_string_at_rva, parameter[name[symbol_address]]]
call[name[exports].append, parameter[call[name[ExportData], parameter[]]]]
return[call[name[ExportDirData], parameter[]]] | keyword[def] identifier[parse_export_directory] ( identifier[self] , identifier[rva] , identifier[size] ):
literal[string]
keyword[try] :
identifier[export_dir] = identifier[self] . identifier[__unpack_data__] (
identifier[self] . identifier[__IMAGE_EXPORT_DIRECTORY_format__] ,
identifier[self] . identifier[get_data] ( identifier[rva] , identifier[Structure] ( identifier[self] . identifier[__IMAGE_EXPORT_DIRECTORY_format__] ). identifier[sizeof] ()),
identifier[file_offset] = identifier[self] . identifier[get_offset_from_rva] ( identifier[rva] ))
keyword[except] identifier[PEFormatError] :
identifier[self] . identifier[__warnings] . identifier[append] (
literal[string] %( identifier[rva] ))
keyword[return]
keyword[if] keyword[not] identifier[export_dir] :
keyword[return]
keyword[def] identifier[length_until_eof] ( identifier[rva] ):
keyword[return] identifier[len] ( identifier[self] . identifier[__data__] )- identifier[self] . identifier[get_offset_from_rva] ( identifier[rva] )
keyword[try] :
identifier[address_of_names] = identifier[self] . identifier[get_data] (
identifier[export_dir] . identifier[AddressOfNames] , identifier[min] ( identifier[length_until_eof] ( identifier[export_dir] . identifier[AddressOfNames] ), identifier[export_dir] . identifier[NumberOfNames] * literal[int] ))
identifier[address_of_name_ordinals] = identifier[self] . identifier[get_data] (
identifier[export_dir] . identifier[AddressOfNameOrdinals] , identifier[min] ( identifier[length_until_eof] ( identifier[export_dir] . identifier[AddressOfNameOrdinals] ), identifier[export_dir] . identifier[NumberOfNames] * literal[int] ))
identifier[address_of_functions] = identifier[self] . identifier[get_data] (
identifier[export_dir] . identifier[AddressOfFunctions] , identifier[min] ( identifier[length_until_eof] ( identifier[export_dir] . identifier[AddressOfFunctions] ), identifier[export_dir] . identifier[NumberOfFunctions] * literal[int] ))
keyword[except] identifier[PEFormatError] :
identifier[self] . identifier[__warnings] . identifier[append] (
literal[string] %( identifier[rva] ))
keyword[return]
identifier[exports] =[]
identifier[max_failed_entries_before_giving_up] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[min] ( identifier[export_dir] . identifier[NumberOfNames] , identifier[length_until_eof] ( identifier[export_dir] . identifier[AddressOfNames] )/ literal[int] )):
identifier[symbol_name_address] = identifier[self] . identifier[get_dword_from_data] ( identifier[address_of_names] , identifier[i] )
identifier[symbol_name] = identifier[self] . identifier[get_string_at_rva] ( identifier[symbol_name_address] )
keyword[try] :
identifier[symbol_name_offset] = identifier[self] . identifier[get_offset_from_rva] ( identifier[symbol_name_address] )
keyword[except] identifier[PEFormatError] :
identifier[max_failed_entries_before_giving_up] -= literal[int]
keyword[if] identifier[max_failed_entries_before_giving_up] <= literal[int] :
keyword[break]
keyword[continue]
identifier[symbol_ordinal] = identifier[self] . identifier[get_word_from_data] (
identifier[address_of_name_ordinals] , identifier[i] )
keyword[if] identifier[symbol_ordinal] * literal[int] < identifier[len] ( identifier[address_of_functions] ):
identifier[symbol_address] = identifier[self] . identifier[get_dword_from_data] (
identifier[address_of_functions] , identifier[symbol_ordinal] )
keyword[else] :
keyword[return] keyword[None]
keyword[if] identifier[symbol_address] keyword[is] keyword[None] keyword[or] identifier[symbol_address] == literal[int] :
keyword[continue]
keyword[if] identifier[symbol_address] >= identifier[rva] keyword[and] identifier[symbol_address] < identifier[rva] + identifier[size] :
identifier[forwarder_str] = identifier[self] . identifier[get_string_at_rva] ( identifier[symbol_address] )
keyword[try] :
identifier[forwarder_offset] = identifier[self] . identifier[get_offset_from_rva] ( identifier[symbol_address] )
keyword[except] identifier[PEFormatError] :
keyword[continue]
keyword[else] :
identifier[forwarder_str] = keyword[None]
identifier[forwarder_offset] = keyword[None]
identifier[exports] . identifier[append] (
identifier[ExportData] (
identifier[pe] = identifier[self] ,
identifier[ordinal] = identifier[export_dir] . identifier[Base] + identifier[symbol_ordinal] ,
identifier[ordinal_offset] = identifier[self] . identifier[get_offset_from_rva] ( identifier[export_dir] . identifier[AddressOfNameOrdinals] + literal[int] * identifier[i] ),
identifier[address] = identifier[symbol_address] ,
identifier[address_offset] = identifier[self] . identifier[get_offset_from_rva] ( identifier[export_dir] . identifier[AddressOfFunctions] + literal[int] * identifier[symbol_ordinal] ),
identifier[name] = identifier[symbol_name] ,
identifier[name_offset] = identifier[symbol_name_offset] ,
identifier[forwarder] = identifier[forwarder_str] ,
identifier[forwarder_offset] = identifier[forwarder_offset] ))
identifier[ordinals] =[ identifier[exp] . identifier[ordinal] keyword[for] identifier[exp] keyword[in] identifier[exports] ]
identifier[max_failed_entries_before_giving_up] = literal[int]
keyword[for] identifier[idx] keyword[in] identifier[xrange] ( identifier[min] ( identifier[export_dir] . identifier[NumberOfFunctions] , identifier[length_until_eof] ( identifier[export_dir] . identifier[AddressOfFunctions] )/ literal[int] )):
keyword[if] keyword[not] identifier[idx] + identifier[export_dir] . identifier[Base] keyword[in] identifier[ordinals] :
keyword[try] :
identifier[symbol_address] = identifier[self] . identifier[get_dword_from_data] (
identifier[address_of_functions] , identifier[idx] )
keyword[except] identifier[PEFormatError] :
identifier[symbol_address] = keyword[None]
keyword[if] identifier[symbol_address] keyword[is] keyword[None] :
identifier[max_failed_entries_before_giving_up] -= literal[int]
keyword[if] identifier[max_failed_entries_before_giving_up] <= literal[int] :
keyword[break]
keyword[if] identifier[symbol_address] == literal[int] :
keyword[continue]
keyword[if] identifier[symbol_address] >= identifier[rva] keyword[and] identifier[symbol_address] < identifier[rva] + identifier[size] :
identifier[forwarder_str] = identifier[self] . identifier[get_string_at_rva] ( identifier[symbol_address] )
keyword[else] :
identifier[forwarder_str] = keyword[None]
identifier[exports] . identifier[append] (
identifier[ExportData] (
identifier[ordinal] = identifier[export_dir] . identifier[Base] + identifier[idx] ,
identifier[address] = identifier[symbol_address] ,
identifier[name] = keyword[None] ,
identifier[forwarder] = identifier[forwarder_str] ))
keyword[return] identifier[ExportDirData] (
identifier[struct] = identifier[export_dir] ,
identifier[symbols] = identifier[exports] ) | def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the RVA of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dictionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the from:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute.
"""
try:
export_dir = self.__unpack_data__(self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof()), file_offset=self.get_offset_from_rva(rva)) # depends on [control=['try'], data=[]]
except PEFormatError:
self.__warnings.append('Error parsing export directory at RVA: 0x%x' % rva)
return # depends on [control=['except'], data=[]]
if not export_dir:
return # depends on [control=['if'], data=[]]
# We keep track of the bytes left in the file and use it to set a upper
# bound in the number of items that can be read from the different
# arrays
#
def length_until_eof(rva):
return len(self.__data__) - self.get_offset_from_rva(rva)
try:
address_of_names = self.get_data(export_dir.AddressOfNames, min(length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames * 4))
address_of_name_ordinals = self.get_data(export_dir.AddressOfNameOrdinals, min(length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames * 4))
address_of_functions = self.get_data(export_dir.AddressOfFunctions, min(length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions * 4)) # depends on [control=['try'], data=[]]
except PEFormatError:
self.__warnings.append('Error parsing export directory at RVA: 0x%x' % rva)
return # depends on [control=['except'], data=[]]
exports = []
max_failed_entries_before_giving_up = 10
for i in xrange(min(export_dir.NumberOfNames, length_until_eof(export_dir.AddressOfNames) / 4)):
symbol_name_address = self.get_dword_from_data(address_of_names, i)
symbol_name = self.get_string_at_rva(symbol_name_address)
try:
symbol_name_offset = self.get_offset_from_rva(symbol_name_address) # depends on [control=['try'], data=[]]
except PEFormatError:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
break # depends on [control=['if'], data=[]]
continue # depends on [control=['except'], data=[]]
symbol_ordinal = self.get_word_from_data(address_of_name_ordinals, i)
if symbol_ordinal * 4 < len(address_of_functions):
symbol_address = self.get_dword_from_data(address_of_functions, symbol_ordinal) # depends on [control=['if'], data=[]]
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
if symbol_address is None or symbol_address == 0:
continue # depends on [control=['if'], data=[]]
# If the funcion's RVA points within the export directory
# it will point to a string with the forwarded symbol's string
# instead of pointing the the function start address.
if symbol_address >= rva and symbol_address < rva + size:
forwarder_str = self.get_string_at_rva(symbol_address)
try:
forwarder_offset = self.get_offset_from_rva(symbol_address) # depends on [control=['try'], data=[]]
except PEFormatError:
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
forwarder_str = None
forwarder_offset = None
exports.append(ExportData(pe=self, ordinal=export_dir.Base + symbol_ordinal, ordinal_offset=self.get_offset_from_rva(export_dir.AddressOfNameOrdinals + 2 * i), address=symbol_address, address_offset=self.get_offset_from_rva(export_dir.AddressOfFunctions + 4 * symbol_ordinal), name=symbol_name, name_offset=symbol_name_offset, forwarder=forwarder_str, forwarder_offset=forwarder_offset)) # depends on [control=['for'], data=['i']]
ordinals = [exp.ordinal for exp in exports]
max_failed_entries_before_giving_up = 10
for idx in xrange(min(export_dir.NumberOfFunctions, length_until_eof(export_dir.AddressOfFunctions) / 4)):
if not idx + export_dir.Base in ordinals:
try:
symbol_address = self.get_dword_from_data(address_of_functions, idx) # depends on [control=['try'], data=[]]
except PEFormatError:
symbol_address = None # depends on [control=['except'], data=[]]
if symbol_address is None:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if symbol_address == 0:
continue # depends on [control=['if'], data=[]]
#
# Checking for forwarder again.
#
if symbol_address >= rva and symbol_address < rva + size:
forwarder_str = self.get_string_at_rva(symbol_address) # depends on [control=['if'], data=[]]
else:
forwarder_str = None
exports.append(ExportData(ordinal=export_dir.Base + idx, address=symbol_address, name=None, forwarder=forwarder_str)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['idx']]
return ExportDirData(struct=export_dir, symbols=exports) |
def separate_camel_case(word, first_cap_re, all_cap_re):
    """
    Split camelCase words by inserting spaces at case boundaries.

    Input:  - word: A string that may be in camelCase.
            - first_cap_re: Compiled regex with two groups matching a
              character followed by a capitalised word part,
              e.g. ``(.)([A-Z][a-z]+)``.
            - all_cap_re: Compiled regex with two groups matching a
              lower-case letter or digit followed by an upper-case letter,
              e.g. ``([a-z0-9])([A-Z])``.
    Output: - separated_word: The input string with camel-case boundaries
              separated by single spaces. Note: this is a string, not a
              list (the original docstring's "list of strings" claim was
              wrong).
    """
    # Two passes: the first splits "...xWord" boundaries; the second
    # handles remaining lower/digit -> upper transitions (e.g. acronym
    # runs like "HTTPWord").
    s1 = first_cap_re.sub(r'\1 \2', word)
    return all_cap_re.sub(r'\1 \2', s1)
constant[
What it says on the tin.
Input: - word: A string that may be in camelCase.
Output: - separated_word: A list of strings with camel case separated.
]
variable[s1] assign[=] call[name[first_cap_re].sub, parameter[constant[\1 \2], name[word]]]
variable[separated_word] assign[=] call[name[all_cap_re].sub, parameter[constant[\1 \2], name[s1]]]
return[name[separated_word]] | keyword[def] identifier[separate_camel_case] ( identifier[word] , identifier[first_cap_re] , identifier[all_cap_re] ):
literal[string]
identifier[s1] = identifier[first_cap_re] . identifier[sub] ( literal[string] , identifier[word] )
identifier[separated_word] = identifier[all_cap_re] . identifier[sub] ( literal[string] , identifier[s1] )
keyword[return] identifier[separated_word] | def separate_camel_case(word, first_cap_re, all_cap_re):
"""
What it says on the tin.
Input: - word: A string that may be in camelCase.
Output: - separated_word: A list of strings with camel case separated.
"""
s1 = first_cap_re.sub('\\1 \\2', word)
separated_word = all_cap_re.sub('\\1 \\2', s1)
return separated_word |
def _submit_part(session, url, part, timeout):
    """
    Used by the worker to submit the part data to the storage service URL.

    :param session: Storage service session.
    :param url: Part url.
    :param part: Part data in bytes.
    :param timeout: Timeout for storage session.
    :return: ETag for the submitted part, with surrounding quotes stripped.
    :raises SbgError: If the upload fails or the response carries no ETag
        header.
    """
    try:
        response = session.put(url, data=part, timeout=timeout)
    except Exception as e:
        raise SbgError(
            'Failed to submit the part. Reason: {}'.format(
                six.text_type(e)
            )
        )
    # The original called .strip() on headers.get('etag') directly, so a
    # missing header surfaced as an SbgError wrapping an unrelated
    # AttributeError; report the real cause instead.
    etag = response.headers.get('etag')
    if etag is None:
        raise SbgError(
            'Failed to submit the part. Reason: response is missing the '
            'ETag header.'
        )
    return etag.strip('"')
constant[
Used by the worker to submit the part data to the storage service URL.
:param session: Storage service session.
:param url: Part url.
:param part: Part data in bytes.
:param timeout: Timeout for storage session.
:return: ETag for the submitted part.
]
<ast.Try object at 0x7da18f812620> | keyword[def] identifier[_submit_part] ( identifier[session] , identifier[url] , identifier[part] , identifier[timeout] ):
literal[string]
keyword[try] :
identifier[response] = identifier[session] . identifier[put] ( identifier[url] , identifier[data] = identifier[part] , identifier[timeout] = identifier[timeout] )
keyword[return] identifier[response] . identifier[headers] . identifier[get] ( literal[string] ). identifier[strip] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[SbgError] (
literal[string] . identifier[format] (
identifier[six] . identifier[text_type] ( identifier[e] )
)
) | def _submit_part(session, url, part, timeout):
"""
Used by the worker to submit the part data to the storage service URL.
:param session: Storage service session.
:param url: Part url.
:param part: Part data in bytes.
:param timeout: Timeout for storage session.
:return: ETag for the submitted part.
"""
try:
response = session.put(url, data=part, timeout=timeout)
return response.headers.get('etag').strip('"') # depends on [control=['try'], data=[]]
except Exception as e:
raise SbgError('Failed to submit the part. Reason: {}'.format(six.text_type(e))) # depends on [control=['except'], data=['e']] |
def run_migrations_online():
"""
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = context.config.attributes.get("connection", None)
if connectable is None:
options = context.config.get_section(context.config.config_ini_section)
url = options.pop("url")
connectable = create_engine(url, poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=db.metadata,
compare_server_default=True,
)
with context.begin_transaction():
context.run_migrations() | def function[run_migrations_online, parameter[]]:
constant[
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
]
variable[connectable] assign[=] call[name[context].config.attributes.get, parameter[constant[connection], constant[None]]]
if compare[name[connectable] is constant[None]] begin[:]
variable[options] assign[=] call[name[context].config.get_section, parameter[name[context].config.config_ini_section]]
variable[url] assign[=] call[name[options].pop, parameter[constant[url]]]
variable[connectable] assign[=] call[name[create_engine], parameter[name[url]]]
with call[name[connectable].connect, parameter[]] begin[:]
call[name[context].configure, parameter[]]
with call[name[context].begin_transaction, parameter[]] begin[:]
call[name[context].run_migrations, parameter[]] | keyword[def] identifier[run_migrations_online] ():
literal[string]
identifier[connectable] = identifier[context] . identifier[config] . identifier[attributes] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[connectable] keyword[is] keyword[None] :
identifier[options] = identifier[context] . identifier[config] . identifier[get_section] ( identifier[context] . identifier[config] . identifier[config_ini_section] )
identifier[url] = identifier[options] . identifier[pop] ( literal[string] )
identifier[connectable] = identifier[create_engine] ( identifier[url] , identifier[poolclass] = identifier[pool] . identifier[NullPool] )
keyword[with] identifier[connectable] . identifier[connect] () keyword[as] identifier[connection] :
identifier[context] . identifier[configure] (
identifier[connection] = identifier[connection] ,
identifier[target_metadata] = identifier[db] . identifier[metadata] ,
identifier[compare_server_default] = keyword[True] ,
)
keyword[with] identifier[context] . identifier[begin_transaction] ():
identifier[context] . identifier[run_migrations] () | def run_migrations_online():
"""
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = context.config.attributes.get('connection', None)
if connectable is None:
options = context.config.get_section(context.config.config_ini_section)
url = options.pop('url')
connectable = create_engine(url, poolclass=pool.NullPool) # depends on [control=['if'], data=['connectable']]
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=db.metadata, compare_server_default=True)
with context.begin_transaction():
context.run_migrations() # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['connection']] |
def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
"""
used when a value in d_vals doesn't match anything in the other file.
:return: a dictionary, indexed by key value, with the correct missing values
populated for the other file.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
if f_f_header is not None:
f_f_flds = [dict(zip(f_f_header, [missing_val] * len(f_f_header)))]
else:
assert(len(d_vals) > 0)
f_f_num_cols = len(d_vals[d_vals.keys()[0]][0])
f_f_flds = [[missing_val] * f_f_num_cols]
return f_f_flds | def function[populate_unpaired_line, parameter[d_vals, f_f_header, missing_val]]:
constant[
used when a value in d_vals doesn't match anything in the other file.
:return: a dictionary, indexed by key value, with the correct missing values
populated for the other file.
]
if compare[name[missing_val] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f09c340>
if compare[name[f_f_header] is_not constant[None]] begin[:]
variable[f_f_flds] assign[=] list[[<ast.Call object at 0x7da1b1367bb0>]]
return[name[f_f_flds]] | keyword[def] identifier[populate_unpaired_line] ( identifier[d_vals] , identifier[f_f_header] , identifier[missing_val] = keyword[None] ):
literal[string]
keyword[if] identifier[missing_val] keyword[is] keyword[None] :
keyword[raise] identifier[MissingValueError] ( literal[string] +
literal[string] )
keyword[if] identifier[f_f_header] keyword[is] keyword[not] keyword[None] :
identifier[f_f_flds] =[ identifier[dict] ( identifier[zip] ( identifier[f_f_header] ,[ identifier[missing_val] ]* identifier[len] ( identifier[f_f_header] )))]
keyword[else] :
keyword[assert] ( identifier[len] ( identifier[d_vals] )> literal[int] )
identifier[f_f_num_cols] = identifier[len] ( identifier[d_vals] [ identifier[d_vals] . identifier[keys] ()[ literal[int] ]][ literal[int] ])
identifier[f_f_flds] =[[ identifier[missing_val] ]* identifier[f_f_num_cols] ]
keyword[return] identifier[f_f_flds] | def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
"""
used when a value in d_vals doesn't match anything in the other file.
:return: a dictionary, indexed by key value, with the correct missing values
populated for the other file.
"""
if missing_val is None:
raise MissingValueError('Need missing value to output ' + ' unpaired lines') # depends on [control=['if'], data=[]]
if f_f_header is not None:
f_f_flds = [dict(zip(f_f_header, [missing_val] * len(f_f_header)))] # depends on [control=['if'], data=['f_f_header']]
else:
assert len(d_vals) > 0
f_f_num_cols = len(d_vals[d_vals.keys()[0]][0])
f_f_flds = [[missing_val] * f_f_num_cols]
return f_f_flds |
def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode')
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone | def function[importNode, parameter[self, document, node, deep]]:
constant[Implements (well enough for our purposes) DOM node import.]
variable[nodetype] assign[=] name[node].nodeType
if compare[name[nodetype] in tuple[[<ast.Attribute object at 0x7da1b1596950>, <ast.Attribute object at 0x7da1b1597df0>]]] begin[:]
<ast.Raise object at 0x7da1b15962c0>
if compare[name[nodetype] equal[==] name[node].ENTITY_REFERENCE_NODE] begin[:]
variable[deep] assign[=] constant[0]
variable[clone] assign[=] call[name[node].cloneNode, parameter[name[deep]]]
call[name[self]._setOwnerDoc, parameter[name[document], name[clone]]]
name[clone].__imported__ assign[=] constant[1]
return[name[clone]] | keyword[def] identifier[importNode] ( identifier[self] , identifier[document] , identifier[node] , identifier[deep] = literal[int] ):
literal[string]
identifier[nodetype] = identifier[node] . identifier[nodeType]
keyword[if] identifier[nodetype] keyword[in] ( identifier[node] . identifier[DOCUMENT_NODE] , identifier[node] . identifier[DOCUMENT_TYPE_NODE] ):
keyword[raise] identifier[DOMException] ( literal[string] )
keyword[if] identifier[nodetype] == identifier[node] . identifier[ENTITY_REFERENCE_NODE] :
identifier[deep] = literal[int]
identifier[clone] = identifier[node] . identifier[cloneNode] ( identifier[deep] )
identifier[self] . identifier[_setOwnerDoc] ( identifier[document] , identifier[clone] )
identifier[clone] . identifier[__imported__] = literal[int]
keyword[return] identifier[clone] | def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode') # depends on [control=['if'], data=[]]
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0 # depends on [control=['if'], data=[]]
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone |
def run(self):
"""Main loop of KIRA thread."""
while not self.stopped.isSet():
try:
# if the current state is idle, just block and wait forever
# if the current state is any other state, then a timeout of 200ms should
# be reasonable in all cases.
timeout = (self._state != 'idle') and 0.2 or None
rdlist, _, _ = select.select([self._socket.fileno()], [], [], timeout)
if not rdlist:
if self._state != 'idle':
self._state = 'idle'
continue
data = self._socket.recv(1024)
if not data:
# check if the socket is still valid
try:
os.fstat(recv._socket.fileno())
except socket.error:
break
continue
code = utils.mangleIR(data, ignore_errors=True)
codeName = self.codeMap.get(code)
# some manufacturers repeat their IR codes several times in rapid
# succession. by tracking the last code, we can eliminate redundant
# state changes
if codeName and (self._state != codeName):
self._state = codeName
for callback in self._callbacks:
callback(codeName)
except:
time.sleep(0.1) | def function[run, parameter[self]]:
constant[Main loop of KIRA thread.]
while <ast.UnaryOp object at 0x7da207f99120> begin[:]
<ast.Try object at 0x7da207f9add0> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[stopped] . identifier[isSet] ():
keyword[try] :
identifier[timeout] =( identifier[self] . identifier[_state] != literal[string] ) keyword[and] literal[int] keyword[or] keyword[None]
identifier[rdlist] , identifier[_] , identifier[_] = identifier[select] . identifier[select] ([ identifier[self] . identifier[_socket] . identifier[fileno] ()],[],[], identifier[timeout] )
keyword[if] keyword[not] identifier[rdlist] :
keyword[if] identifier[self] . identifier[_state] != literal[string] :
identifier[self] . identifier[_state] = literal[string]
keyword[continue]
identifier[data] = identifier[self] . identifier[_socket] . identifier[recv] ( literal[int] )
keyword[if] keyword[not] identifier[data] :
keyword[try] :
identifier[os] . identifier[fstat] ( identifier[recv] . identifier[_socket] . identifier[fileno] ())
keyword[except] identifier[socket] . identifier[error] :
keyword[break]
keyword[continue]
identifier[code] = identifier[utils] . identifier[mangleIR] ( identifier[data] , identifier[ignore_errors] = keyword[True] )
identifier[codeName] = identifier[self] . identifier[codeMap] . identifier[get] ( identifier[code] )
keyword[if] identifier[codeName] keyword[and] ( identifier[self] . identifier[_state] != identifier[codeName] ):
identifier[self] . identifier[_state] = identifier[codeName]
keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[_callbacks] :
identifier[callback] ( identifier[codeName] )
keyword[except] :
identifier[time] . identifier[sleep] ( literal[int] ) | def run(self):
"""Main loop of KIRA thread."""
while not self.stopped.isSet():
try:
# if the current state is idle, just block and wait forever
# if the current state is any other state, then a timeout of 200ms should
# be reasonable in all cases.
timeout = self._state != 'idle' and 0.2 or None
(rdlist, _, _) = select.select([self._socket.fileno()], [], [], timeout)
if not rdlist:
if self._state != 'idle':
self._state = 'idle' # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
data = self._socket.recv(1024)
if not data:
# check if the socket is still valid
try:
os.fstat(recv._socket.fileno()) # depends on [control=['try'], data=[]]
except socket.error:
break # depends on [control=['except'], data=[]]
continue # depends on [control=['if'], data=[]]
code = utils.mangleIR(data, ignore_errors=True)
codeName = self.codeMap.get(code)
# some manufacturers repeat their IR codes several times in rapid
# succession. by tracking the last code, we can eliminate redundant
# state changes
if codeName and self._state != codeName:
self._state = codeName
for callback in self._callbacks:
callback(codeName) # depends on [control=['for'], data=['callback']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
time.sleep(0.1) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def hsl_to_rgb(h, s=None, l=None):
"""Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> hsl_to_rgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
"""
if type(h) in [list,tuple]:
h, s, l = h
if s==0: return (l, l, l) # achromatic (gray)
if l<0.5: n2 = l * (1.0 + s)
else: n2 = l+s - (l*s)
n1 = (2.0 * l) - n2
h /= 60.0
hueToRgb = _hue_to_rgb
r = hueToRgb(n1, n2, h + 2)
g = hueToRgb(n1, n2, h)
b = hueToRgb(n1, n2, h - 2)
return (r, g, b) | def function[hsl_to_rgb, parameter[h, s, l]]:
constant[Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> hsl_to_rgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
]
if compare[call[name[type], parameter[name[h]]] in list[[<ast.Name object at 0x7da1b1106fe0>, <ast.Name object at 0x7da1b1106f80>]]] begin[:]
<ast.Tuple object at 0x7da1b1104370> assign[=] name[h]
if compare[name[s] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1106560>, <ast.Name object at 0x7da1b1107fd0>, <ast.Name object at 0x7da1b1106320>]]]
if compare[name[l] less[<] constant[0.5]] begin[:]
variable[n2] assign[=] binary_operation[name[l] * binary_operation[constant[1.0] + name[s]]]
variable[n1] assign[=] binary_operation[binary_operation[constant[2.0] * name[l]] - name[n2]]
<ast.AugAssign object at 0x7da1b1104670>
variable[hueToRgb] assign[=] name[_hue_to_rgb]
variable[r] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], binary_operation[name[h] + constant[2]]]]
variable[g] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], name[h]]]
variable[b] assign[=] call[name[hueToRgb], parameter[name[n1], name[n2], binary_operation[name[h] - constant[2]]]]
return[tuple[[<ast.Name object at 0x7da1b11ea890>, <ast.Name object at 0x7da1b11e9cc0>, <ast.Name object at 0x7da1b11ea590>]]] | keyword[def] identifier[hsl_to_rgb] ( identifier[h] , identifier[s] = keyword[None] , identifier[l] = keyword[None] ):
literal[string]
keyword[if] identifier[type] ( identifier[h] ) keyword[in] [ identifier[list] , identifier[tuple] ]:
identifier[h] , identifier[s] , identifier[l] = identifier[h]
keyword[if] identifier[s] == literal[int] : keyword[return] ( identifier[l] , identifier[l] , identifier[l] )
keyword[if] identifier[l] < literal[int] : identifier[n2] = identifier[l] *( literal[int] + identifier[s] )
keyword[else] : identifier[n2] = identifier[l] + identifier[s] -( identifier[l] * identifier[s] )
identifier[n1] =( literal[int] * identifier[l] )- identifier[n2]
identifier[h] /= literal[int]
identifier[hueToRgb] = identifier[_hue_to_rgb]
identifier[r] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] + literal[int] )
identifier[g] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] )
identifier[b] = identifier[hueToRgb] ( identifier[n1] , identifier[n2] , identifier[h] - literal[int] )
keyword[return] ( identifier[r] , identifier[g] , identifier[b] ) | def hsl_to_rgb(h, s=None, l=None):
"""Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> hsl_to_rgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
"""
if type(h) in [list, tuple]:
(h, s, l) = h # depends on [control=['if'], data=[]]
if s == 0:
return (l, l, l) # achromatic (gray) # depends on [control=['if'], data=[]]
if l < 0.5:
n2 = l * (1.0 + s) # depends on [control=['if'], data=['l']]
else:
n2 = l + s - l * s
n1 = 2.0 * l - n2
h /= 60.0
hueToRgb = _hue_to_rgb
r = hueToRgb(n1, n2, h + 2)
g = hueToRgb(n1, n2, h)
b = hueToRgb(n1, n2, h - 2)
return (r, g, b) |
def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):
""" Pad a ``tensor`` to ``length`` with ``padding_index``.
Args:
tensor (torch.Tensor [n, ...]): Tensor to pad.
length (int): Pad the ``tensor`` up to ``length``.
padding_index (int, optional): Index to pad tensor with.
Returns
(torch.Tensor [length, ...]) Padded Tensor.
"""
n_padding = length - tensor.shape[0]
assert n_padding >= 0
if n_padding == 0:
return tensor
padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)
return torch.cat((tensor, padding), dim=0) | def function[pad_tensor, parameter[tensor, length, padding_index]]:
constant[ Pad a ``tensor`` to ``length`` with ``padding_index``.
Args:
tensor (torch.Tensor [n, ...]): Tensor to pad.
length (int): Pad the ``tensor`` up to ``length``.
padding_index (int, optional): Index to pad tensor with.
Returns
(torch.Tensor [length, ...]) Padded Tensor.
]
variable[n_padding] assign[=] binary_operation[name[length] - call[name[tensor].shape][constant[0]]]
assert[compare[name[n_padding] greater_or_equal[>=] constant[0]]]
if compare[name[n_padding] equal[==] constant[0]] begin[:]
return[name[tensor]]
variable[padding] assign[=] call[call[name[tensor].new, parameter[name[n_padding], <ast.Starred object at 0x7da18fe92620>]].fill_, parameter[name[padding_index]]]
return[call[name[torch].cat, parameter[tuple[[<ast.Name object at 0x7da18fe90f10>, <ast.Name object at 0x7da18fe90640>]]]]] | keyword[def] identifier[pad_tensor] ( identifier[tensor] , identifier[length] , identifier[padding_index] = identifier[DEFAULT_PADDING_INDEX] ):
literal[string]
identifier[n_padding] = identifier[length] - identifier[tensor] . identifier[shape] [ literal[int] ]
keyword[assert] identifier[n_padding] >= literal[int]
keyword[if] identifier[n_padding] == literal[int] :
keyword[return] identifier[tensor]
identifier[padding] = identifier[tensor] . identifier[new] ( identifier[n_padding] ,* identifier[tensor] . identifier[shape] [ literal[int] :]). identifier[fill_] ( identifier[padding_index] )
keyword[return] identifier[torch] . identifier[cat] (( identifier[tensor] , identifier[padding] ), identifier[dim] = literal[int] ) | def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):
""" Pad a ``tensor`` to ``length`` with ``padding_index``.
Args:
tensor (torch.Tensor [n, ...]): Tensor to pad.
length (int): Pad the ``tensor`` up to ``length``.
padding_index (int, optional): Index to pad tensor with.
Returns
(torch.Tensor [length, ...]) Padded Tensor.
"""
n_padding = length - tensor.shape[0]
assert n_padding >= 0
if n_padding == 0:
return tensor # depends on [control=['if'], data=[]]
padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)
return torch.cat((tensor, padding), dim=0) |
def setcontents(self, source, **kwargs):
"""Create a new file from a string or file-like object."""
if isinstance(source, six.string_types):
_file = opener.open(source, 'rb')
else:
_file = source
# signals.document_before_content_set.send(self)
data = _file.read()
_fs, filename = opener.parse(self.uri)
_fs.setcontents(filename, data, **kwargs)
_fs.close()
# signals.document_after_content_set.send(self)
if isinstance(source, six.string_types) and hasattr(_file, 'close'):
_file.close() | def function[setcontents, parameter[self, source]]:
constant[Create a new file from a string or file-like object.]
if call[name[isinstance], parameter[name[source], name[six].string_types]] begin[:]
variable[_file] assign[=] call[name[opener].open, parameter[name[source], constant[rb]]]
variable[data] assign[=] call[name[_file].read, parameter[]]
<ast.Tuple object at 0x7da1b287da80> assign[=] call[name[opener].parse, parameter[name[self].uri]]
call[name[_fs].setcontents, parameter[name[filename], name[data]]]
call[name[_fs].close, parameter[]]
if <ast.BoolOp object at 0x7da1b287e170> begin[:]
call[name[_file].close, parameter[]] | keyword[def] identifier[setcontents] ( identifier[self] , identifier[source] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[source] , identifier[six] . identifier[string_types] ):
identifier[_file] = identifier[opener] . identifier[open] ( identifier[source] , literal[string] )
keyword[else] :
identifier[_file] = identifier[source]
identifier[data] = identifier[_file] . identifier[read] ()
identifier[_fs] , identifier[filename] = identifier[opener] . identifier[parse] ( identifier[self] . identifier[uri] )
identifier[_fs] . identifier[setcontents] ( identifier[filename] , identifier[data] ,** identifier[kwargs] )
identifier[_fs] . identifier[close] ()
keyword[if] identifier[isinstance] ( identifier[source] , identifier[six] . identifier[string_types] ) keyword[and] identifier[hasattr] ( identifier[_file] , literal[string] ):
identifier[_file] . identifier[close] () | def setcontents(self, source, **kwargs):
"""Create a new file from a string or file-like object."""
if isinstance(source, six.string_types):
_file = opener.open(source, 'rb') # depends on [control=['if'], data=[]]
else:
_file = source
# signals.document_before_content_set.send(self)
data = _file.read()
(_fs, filename) = opener.parse(self.uri)
_fs.setcontents(filename, data, **kwargs)
_fs.close()
# signals.document_after_content_set.send(self)
if isinstance(source, six.string_types) and hasattr(_file, 'close'):
_file.close() # depends on [control=['if'], data=[]] |
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype | def function[get_phenotype, parameter[self, individual_id]]:
constant[
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
]
variable[phenotype] assign[=] constant[0]
if compare[name[individual_id] in name[self].individuals] begin[:]
variable[phenotype] assign[=] call[name[self].individuals][name[individual_id]].phenotype
return[name[phenotype]] | keyword[def] identifier[get_phenotype] ( identifier[self] , identifier[individual_id] ):
literal[string]
identifier[phenotype] = literal[int]
keyword[if] identifier[individual_id] keyword[in] identifier[self] . identifier[individuals] :
identifier[phenotype] = identifier[self] . identifier[individuals] [ identifier[individual_id] ]. identifier[phenotype]
keyword[return] identifier[phenotype] | def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype # depends on [control=['if'], data=['individual_id']]
return phenotype |
def _GetApplicationDefaultCredentials(
client_info, skip_application_default_credentials=False,
**unused_kwds):
"""Returns ADC with right scopes."""
scopes = client_info['scope'].split()
if skip_application_default_credentials:
return None
gc = oauth2client.client.GoogleCredentials
with cache_file_lock:
try:
# pylint: disable=protected-access
# We've already done our own check for GAE/GCE
# credentials, we don't want to pay for checking again.
credentials = gc._implicit_credentials_from_files()
except oauth2client.client.ApplicationDefaultCredentialsError:
return None
# If we got back a non-service account credential, we need to use
# a heuristic to decide whether or not the application default
# credential will work for us. We assume that if we're requesting
# cloud-platform, our scopes are a subset of cloud scopes, and the
# ADC will work.
cp = 'https://www.googleapis.com/auth/cloud-platform'
if credentials is None:
return None
if not isinstance(credentials, gc) or cp in scopes:
return credentials.create_scoped(scopes)
return None | def function[_GetApplicationDefaultCredentials, parameter[client_info, skip_application_default_credentials]]:
constant[Returns ADC with right scopes.]
variable[scopes] assign[=] call[call[name[client_info]][constant[scope]].split, parameter[]]
if name[skip_application_default_credentials] begin[:]
return[constant[None]]
variable[gc] assign[=] name[oauth2client].client.GoogleCredentials
with name[cache_file_lock] begin[:]
<ast.Try object at 0x7da1b07b9ea0>
variable[cp] assign[=] constant[https://www.googleapis.com/auth/cloud-platform]
if compare[name[credentials] is constant[None]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b07bbe20> begin[:]
return[call[name[credentials].create_scoped, parameter[name[scopes]]]]
return[constant[None]] | keyword[def] identifier[_GetApplicationDefaultCredentials] (
identifier[client_info] , identifier[skip_application_default_credentials] = keyword[False] ,
** identifier[unused_kwds] ):
literal[string]
identifier[scopes] = identifier[client_info] [ literal[string] ]. identifier[split] ()
keyword[if] identifier[skip_application_default_credentials] :
keyword[return] keyword[None]
identifier[gc] = identifier[oauth2client] . identifier[client] . identifier[GoogleCredentials]
keyword[with] identifier[cache_file_lock] :
keyword[try] :
identifier[credentials] = identifier[gc] . identifier[_implicit_credentials_from_files] ()
keyword[except] identifier[oauth2client] . identifier[client] . identifier[ApplicationDefaultCredentialsError] :
keyword[return] keyword[None]
identifier[cp] = literal[string]
keyword[if] identifier[credentials] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[credentials] , identifier[gc] ) keyword[or] identifier[cp] keyword[in] identifier[scopes] :
keyword[return] identifier[credentials] . identifier[create_scoped] ( identifier[scopes] )
keyword[return] keyword[None] | def _GetApplicationDefaultCredentials(client_info, skip_application_default_credentials=False, **unused_kwds):
"""Returns ADC with right scopes."""
scopes = client_info['scope'].split()
if skip_application_default_credentials:
return None # depends on [control=['if'], data=[]]
gc = oauth2client.client.GoogleCredentials
with cache_file_lock:
try:
# pylint: disable=protected-access
# We've already done our own check for GAE/GCE
# credentials, we don't want to pay for checking again.
credentials = gc._implicit_credentials_from_files() # depends on [control=['try'], data=[]]
except oauth2client.client.ApplicationDefaultCredentialsError:
return None # depends on [control=['except'], data=[]] # depends on [control=['with'], data=[]]
# If we got back a non-service account credential, we need to use
# a heuristic to decide whether or not the application default
# credential will work for us. We assume that if we're requesting
# cloud-platform, our scopes are a subset of cloud scopes, and the
# ADC will work.
cp = 'https://www.googleapis.com/auth/cloud-platform'
if credentials is None:
return None # depends on [control=['if'], data=[]]
if not isinstance(credentials, gc) or cp in scopes:
return credentials.create_scoped(scopes) # depends on [control=['if'], data=[]]
return None |
def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message'] | def function[_parse_vcs_section, parameter[self, cfg_handler]]:
constant[Parse [vcs] section
:param cfg_handler:
:return:
]
if compare[constant[vcs] in name[cfg_handler]] begin[:]
variable[cfg] assign[=] call[name[cfg_handler]][constant[vcs]]
if compare[constant[engine] in name[cfg]] begin[:]
name[self].vcs_engine assign[=] call[name[cfg]][constant[engine]]
if compare[constant[tag_params] in name[cfg]] begin[:]
name[self].vcs_tag_params assign[=] call[name[list], parameter[call[name[filter], parameter[constant[None], call[call[name[cfg]][constant[tag_params]].split, parameter[constant[
]]]]]]]
if compare[constant[commit_message] in name[cfg]] begin[:]
name[self].vcs_commit_message assign[=] call[name[cfg]][constant[commit_message]] | keyword[def] identifier[_parse_vcs_section] ( identifier[self] , identifier[cfg_handler] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[cfg_handler] :
identifier[cfg] = identifier[cfg_handler] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[cfg] :
identifier[self] . identifier[vcs_engine] = identifier[cfg] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[cfg] :
identifier[self] . identifier[vcs_tag_params] = identifier[list] ( identifier[filter] ( keyword[None] , identifier[cfg] [ literal[string] ]. identifier[split] ( literal[string] )))
keyword[if] literal[string] keyword[in] identifier[cfg] :
identifier[self] . identifier[vcs_commit_message] = identifier[cfg] [ literal[string] ] | def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine'] # depends on [control=['if'], data=['cfg']]
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split('\n'))) # depends on [control=['if'], data=['cfg']]
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message'] # depends on [control=['if'], data=['cfg']] # depends on [control=['if'], data=['cfg_handler']] |
def configure_lifecycle(self, lifecycle_config, headers=None):
    """
    Configure lifecycle for this bucket.

    :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
    :param lifecycle_config: The lifecycle configuration you want
        to configure for this bucket.
    :param headers: optional dict of extra request headers; note it is
        updated in place with Content-MD5 and Content-Type.
    :return: True on success.
    :raises: the provider's storage response error on any non-200 status.
    """
    payload = StringIO.StringIO(lifecycle_config.to_xml())
    digest = boto.utils.compute_md5(payload)
    if headers is None:
        headers = {}
    # compute_md5 returns (hex, base64, size); S3 wants the base64 form.
    headers['Content-MD5'] = digest[1]
    headers['Content-Type'] = 'text/xml'
    response = self.connection.make_request(
        'PUT', self.name,
        data=payload.getvalue(),
        query_args='lifecycle',
        headers=headers)
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
    return True
constant[
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
]
variable[fp] assign[=] call[name[StringIO].StringIO, parameter[call[name[lifecycle_config].to_xml, parameter[]]]]
variable[md5] assign[=] call[name[boto].utils.compute_md5, parameter[name[fp]]]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] dictionary[[], []]
call[name[headers]][constant[Content-MD5]] assign[=] call[name[md5]][constant[1]]
call[name[headers]][constant[Content-Type]] assign[=] constant[text/xml]
variable[response] assign[=] call[name[self].connection.make_request, parameter[constant[PUT], name[self].name]]
variable[body] assign[=] call[name[response].read, parameter[]]
if compare[name[response].status equal[==] constant[200]] begin[:]
return[constant[True]] | keyword[def] identifier[configure_lifecycle] ( identifier[self] , identifier[lifecycle_config] , identifier[headers] = keyword[None] ):
literal[string]
identifier[fp] = identifier[StringIO] . identifier[StringIO] ( identifier[lifecycle_config] . identifier[to_xml] ())
identifier[md5] = identifier[boto] . identifier[utils] . identifier[compute_md5] ( identifier[fp] )
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] ={}
identifier[headers] [ literal[string] ]= identifier[md5] [ literal[int] ]
identifier[headers] [ literal[string] ]= literal[string]
identifier[response] = identifier[self] . identifier[connection] . identifier[make_request] ( literal[string] , identifier[self] . identifier[name] ,
identifier[data] = identifier[fp] . identifier[getvalue] (),
identifier[query_args] = literal[string] ,
identifier[headers] = identifier[headers] )
identifier[body] = identifier[response] . identifier[read] ()
keyword[if] identifier[response] . identifier[status] == literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[self] . identifier[connection] . identifier[provider] . identifier[storage_response_error] (
identifier[response] . identifier[status] , identifier[response] . identifier[reason] , identifier[body] ) | def configure_lifecycle(self, lifecycle_config, headers=None):
"""
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
"""
fp = StringIO.StringIO(lifecycle_config.to_xml())
md5 = boto.utils.compute_md5(fp)
if headers is None:
headers = {} # depends on [control=['if'], data=['headers']]
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('PUT', self.name, data=fp.getvalue(), query_args='lifecycle', headers=headers)
body = response.read()
if response.status == 200:
return True # depends on [control=['if'], data=[]]
else:
raise self.connection.provider.storage_response_error(response.status, response.reason, body) |
def p_expr_list(self, p):
    # NOTE: PLY (yacc) reads this docstring as the grammar production for
    # this rule -- the docstring text is functional, not documentation.
    '''expr_list : expr_list COMMA expr
                 | expr'''
    # len(p) == 4: matched "expr_list COMMA expr" -- append the new expr
    # to the already-built list and pass it up.
    if len(p) == 4:
        p[1].append(p[3])
        p[0] = p[1]
    # len(p) == 2: matched a single "expr" -- start a fresh one-element list.
    elif len(p) == 2:
        p[0] = [p[1]]
constant[expr_list : expr_list COMMA expr
| expr]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[4]] begin[:]
call[call[name[p]][constant[1]].append, parameter[call[name[p]][constant[3]]]]
call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]] | keyword[def] identifier[p_expr_list] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]. identifier[append] ( identifier[p] [ literal[int] ])
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=[ identifier[p] [ literal[int] ]] | def p_expr_list(self, p):
"""expr_list : expr_list COMMA expr
| expr"""
if len(p) == 4:
p[1].append(p[3])
p[0] = p[1] # depends on [control=['if'], data=[]]
elif len(p) == 2:
p[0] = [p[1]] # depends on [control=['if'], data=[]] |
def _derive_db(self):
    """
    Resolve the wrapped client (unwrapping any chain of proxies) and cache
    its connection kwargs plus db number; deferred from __init__ because a
    client may not exist yet at construction time (used via a @property).
    """
    target = self
    # Walk down through any chain of proxy wrappers.
    while hasattr(target, 'proxied'):
        target = target.proxied
    if not hasattr(target, 'client'):
        # No underlying client available: record sentinel values.
        self._connection_kwargs = False
        self._db = 'n/a'
        return
    pool_kwargs = target.client.connection_pool.connection_kwargs
    self._connection_kwargs = pool_kwargs
    self._db = pool_kwargs['db']
constant[
this may not have a client on __init__, so this is deferred to
a @property descriptor
]
variable[client] assign[=] name[self]
while call[name[hasattr], parameter[name[client], constant[proxied]]] begin[:]
variable[client] assign[=] name[client].proxied
if call[name[hasattr], parameter[name[client], constant[client]]] begin[:]
variable[client] assign[=] name[client].client
name[self]._connection_kwargs assign[=] name[client].connection_pool.connection_kwargs
name[self]._db assign[=] call[name[self]._connection_kwargs][constant[db]] | keyword[def] identifier[_derive_db] ( identifier[self] ):
literal[string]
identifier[client] = identifier[self]
keyword[while] identifier[hasattr] ( identifier[client] , literal[string] ):
identifier[client] = identifier[client] . identifier[proxied]
keyword[if] identifier[hasattr] ( identifier[client] , literal[string] ):
identifier[client] = identifier[client] . identifier[client]
identifier[self] . identifier[_connection_kwargs] = identifier[client] . identifier[connection_pool] . identifier[connection_kwargs]
identifier[self] . identifier[_db] = identifier[self] . identifier[_connection_kwargs] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[_connection_kwargs] = keyword[False]
identifier[self] . identifier[_db] = literal[string] | def _derive_db(self):
"""
this may not have a client on __init__, so this is deferred to
a @property descriptor
"""
client = self
while hasattr(client, 'proxied'):
client = client.proxied # depends on [control=['while'], data=[]]
if hasattr(client, 'client'):
client = client.client
self._connection_kwargs = client.connection_pool.connection_kwargs
self._db = self._connection_kwargs['db'] # depends on [control=['if'], data=[]]
else:
self._connection_kwargs = False
self._db = 'n/a' |
def run(self):
    """
    Perform the actual QChem run.

    Launches ``self.current_command`` with stdout redirected to
    ``self.qclog_file``.

    Returns:
        (subprocess.Popen) Used for monitoring.
    """
    # Open the log inside a `with` so the parent's file handle is closed
    # once the child has been spawned (the child keeps its own duplicated
    # descriptor); the original leaked the handle, including on Popen failure.
    with open(self.qclog_file, 'w') as qclog:
        p = subprocess.Popen(self.current_command, stdout=qclog)
    return p
constant[
Perform the actual QChem run.
Returns:
(subprocess.Popen) Used for monitoring.
]
variable[qclog] assign[=] call[name[open], parameter[name[self].qclog_file, constant[w]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[name[self].current_command]]
return[name[p]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[qclog] = identifier[open] ( identifier[self] . identifier[qclog_file] , literal[string] )
identifier[p] = identifier[subprocess] . identifier[Popen] ( identifier[self] . identifier[current_command] , identifier[stdout] = identifier[qclog] )
keyword[return] identifier[p] | def run(self):
"""
Perform the actual QChem run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
qclog = open(self.qclog_file, 'w')
p = subprocess.Popen(self.current_command, stdout=qclog)
return p |
def _force_text_recursive(data):
    """
    Descend into a nested data structure, forcing any
    lazy translation strings into plain text.

    Lists and dicts are rebuilt with every value forced; DRF's ReturnList /
    ReturnDict wrappers are recreated with their serializer preserved.
    """
    if isinstance(data, list):
        ret = [
            _force_text_recursive(item) for item in data
        ]
        if isinstance(data, ReturnList):
            return ReturnList(ret, serializer=data.serializer)
        # BUGFIX: previously returned the original `data`, discarding the
        # converted `ret` for plain lists (lazy strings survived).
        return ret
    elif isinstance(data, dict):
        ret = {
            key: _force_text_recursive(value)
            for key, value in data.items()
        }
        if isinstance(data, ReturnDict):
            return ReturnDict(ret, serializer=data.serializer)
        # BUGFIX: same as above for plain dicts.
        return ret
    return force_text(data)
constant[
Descend into a nested data structure, forcing any
lazy translation strings into plain text.
]
if call[name[isinstance], parameter[name[data], name[list]]] begin[:]
variable[ret] assign[=] <ast.ListComp object at 0x7da204564910>
if call[name[isinstance], parameter[name[data], name[ReturnList]]] begin[:]
return[call[name[ReturnList], parameter[name[ret]]]]
return[name[data]]
return[call[name[force_text], parameter[name[data]]]] | keyword[def] identifier[_force_text_recursive] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[list] ):
identifier[ret] =[
identifier[_force_text_recursive] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[data]
]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[ReturnList] ):
keyword[return] identifier[ReturnList] ( identifier[ret] , identifier[serializer] = identifier[data] . identifier[serializer] )
keyword[return] identifier[data]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[ret] ={
identifier[key] : identifier[_force_text_recursive] ( identifier[value] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[data] . identifier[items] ()
}
keyword[if] identifier[isinstance] ( identifier[data] , identifier[ReturnDict] ):
keyword[return] identifier[ReturnDict] ( identifier[ret] , identifier[serializer] = identifier[data] . identifier[serializer] )
keyword[return] identifier[data]
keyword[return] identifier[force_text] ( identifier[data] ) | def _force_text_recursive(data):
"""
Descend into a nested data structure, forcing any
lazy translation strings into plain text.
"""
if isinstance(data, list):
ret = [_force_text_recursive(item) for item in data]
if isinstance(data, ReturnList):
return ReturnList(ret, serializer=data.serializer) # depends on [control=['if'], data=[]]
return data # depends on [control=['if'], data=[]]
elif isinstance(data, dict):
ret = {key: _force_text_recursive(value) for (key, value) in data.items()}
if isinstance(data, ReturnDict):
return ReturnDict(ret, serializer=data.serializer) # depends on [control=['if'], data=[]]
return data # depends on [control=['if'], data=[]]
return force_text(data) |
async def state(self) -> GroupState:
    """Query the device and return the current group state."""
    response = await self.service.action("X_GetState").async_call()
    return GroupState.make(**response)
literal[string]
identifier[act] = identifier[self] . identifier[service] . identifier[action] ( literal[string] )
identifier[res] = keyword[await] identifier[act] . identifier[async_call] ()
keyword[return] identifier[GroupState] . identifier[make] (** identifier[res] ) | async def state(self) -> GroupState:
"""Return the current group state"""
act = self.service.action('X_GetState')
res = await act.async_call()
return GroupState.make(**res) |
def register_std(self):
    ''' Registers all the standard checkers in the given checker.

    If not present already, the standard checkers schema (STD_NAMESPACE)
    is added to the checker's namespace with an empty prefix.
    '''
    self._namespace.register(STD_NAMESPACE, '')
    for cond_name, check_fn in _ALL_CHECKERS.items():
        self.register(cond_name, STD_NAMESPACE, check_fn)
constant[ Registers all the standard checkers in the given checker.
If not present already, the standard checkers schema (STD_NAMESPACE) is
added to the checker's namespace with an empty prefix.
]
call[name[self]._namespace.register, parameter[name[STD_NAMESPACE], constant[]]]
for taget[name[cond]] in starred[name[_ALL_CHECKERS]] begin[:]
call[name[self].register, parameter[name[cond], name[STD_NAMESPACE], call[name[_ALL_CHECKERS]][name[cond]]]] | keyword[def] identifier[register_std] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_namespace] . identifier[register] ( identifier[STD_NAMESPACE] , literal[string] )
keyword[for] identifier[cond] keyword[in] identifier[_ALL_CHECKERS] :
identifier[self] . identifier[register] ( identifier[cond] , identifier[STD_NAMESPACE] , identifier[_ALL_CHECKERS] [ identifier[cond] ]) | def register_std(self):
""" Registers all the standard checkers in the given checker.
If not present already, the standard checkers schema (STD_NAMESPACE) is
added to the checker's namespace with an empty prefix.
"""
self._namespace.register(STD_NAMESPACE, '')
for cond in _ALL_CHECKERS:
self.register(cond, STD_NAMESPACE, _ALL_CHECKERS[cond]) # depends on [control=['for'], data=['cond']] |
def setWorkingCollisionBoundsInfo(self, unQuadsCount):
    """Sets the Collision Bounds in the working copy.

    :param unQuadsCount: number of quads to report to the native call.
    :return: the HmdQuad_t buffer passed to the native function.

    NOTE(review): only a single ``HmdQuad_t`` is allocated and passed by
    reference regardless of ``unQuadsCount`` -- confirm against the OpenVR
    ``IVRChaperoneSetup::SetWorkingCollisionBoundsInfo`` signature whether
    an array of quads is expected here.
    """
    fn = self.function_table.setWorkingCollisionBoundsInfo
    pQuadsBuffer = HmdQuad_t()
    fn(byref(pQuadsBuffer), unQuadsCount)
    return pQuadsBuffer
constant[Sets the Collision Bounds in the working copy.]
variable[fn] assign[=] name[self].function_table.setWorkingCollisionBoundsInfo
variable[pQuadsBuffer] assign[=] call[name[HmdQuad_t], parameter[]]
call[name[fn], parameter[call[name[byref], parameter[name[pQuadsBuffer]]], name[unQuadsCount]]]
return[name[pQuadsBuffer]] | keyword[def] identifier[setWorkingCollisionBoundsInfo] ( identifier[self] , identifier[unQuadsCount] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[setWorkingCollisionBoundsInfo]
identifier[pQuadsBuffer] = identifier[HmdQuad_t] ()
identifier[fn] ( identifier[byref] ( identifier[pQuadsBuffer] ), identifier[unQuadsCount] )
keyword[return] identifier[pQuadsBuffer] | def setWorkingCollisionBoundsInfo(self, unQuadsCount):
"""Sets the Collision Bounds in the working copy."""
fn = self.function_table.setWorkingCollisionBoundsInfo
pQuadsBuffer = HmdQuad_t()
fn(byref(pQuadsBuffer), unQuadsCount)
return pQuadsBuffer |
def selections_group(self):
    """Group all selections"""
    # (group name, member selections) pairs, applied in the original order.
    grouping = [
        ('Structures', '%s %s %sCartoon' % (self.protname, self.ligname, self.protname)),
        ('Interactions', 'Hydrophobic HBonds HalogenBonds WaterBridges PiCation PiStackingP PiStackingT '
                         'Saltbridges MetalComplexes'),
        ('Atoms', ''),
        ('Atoms.Protein', 'Hydrophobic-P HBondAccept-P HBondDonor-P HalogenAccept Centroids-P PiCatRing-P '
                          'StackRings-P PosCharge-P NegCharge-P AllBSRes Chargecenter-P Metal-P'),
        ('Atoms.Ligand', 'Hydrophobic-L HBondAccept-L HBondDonor-L HalogenDonor Centroids-L NegCharge-L '
                         'PosCharge-L NegCharge-L ChargeCenter-L StackRings-L PiCatRing-L Metal-L Metal-M '
                         'Unpaired-HBA Unpaired-HBD Unpaired-HAL Unpaired-RINGS'),
        ('Atoms.Other', 'Water Metal-W'),
    ]
    for group_name, members in grouping:
        cmd.group(group_name, members)
    cmd.order('*', 'y')
constant[Group all selections]
call[name[cmd].group, parameter[constant[Structures], binary_operation[constant[%s %s %sCartoon] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f09d330>, <ast.Attribute object at 0x7da18f09fa60>, <ast.Attribute object at 0x7da18f09df90>]]]]]
call[name[cmd].group, parameter[constant[Interactions], constant[Hydrophobic HBonds HalogenBonds WaterBridges PiCation PiStackingP PiStackingT Saltbridges MetalComplexes]]]
call[name[cmd].group, parameter[constant[Atoms], constant[]]]
call[name[cmd].group, parameter[constant[Atoms.Protein], constant[Hydrophobic-P HBondAccept-P HBondDonor-P HalogenAccept Centroids-P PiCatRing-P StackRings-P PosCharge-P NegCharge-P AllBSRes Chargecenter-P Metal-P]]]
call[name[cmd].group, parameter[constant[Atoms.Ligand], constant[Hydrophobic-L HBondAccept-L HBondDonor-L HalogenDonor Centroids-L NegCharge-L PosCharge-L NegCharge-L ChargeCenter-L StackRings-L PiCatRing-L Metal-L Metal-M Unpaired-HBA Unpaired-HBD Unpaired-HAL Unpaired-RINGS]]]
call[name[cmd].group, parameter[constant[Atoms.Other], constant[Water Metal-W]]]
call[name[cmd].order, parameter[constant[*], constant[y]]] | keyword[def] identifier[selections_group] ( identifier[self] ):
literal[string]
identifier[cmd] . identifier[group] ( literal[string] , literal[string] %( identifier[self] . identifier[protname] , identifier[self] . identifier[ligname] , identifier[self] . identifier[protname] ))
identifier[cmd] . identifier[group] ( literal[string] , literal[string]
literal[string] )
identifier[cmd] . identifier[group] ( literal[string] , literal[string] )
identifier[cmd] . identifier[group] ( literal[string] , literal[string]
literal[string] )
identifier[cmd] . identifier[group] ( literal[string] , literal[string]
literal[string]
literal[string] )
identifier[cmd] . identifier[group] ( literal[string] , literal[string] )
identifier[cmd] . identifier[order] ( literal[string] , literal[string] ) | def selections_group(self):
"""Group all selections"""
cmd.group('Structures', '%s %s %sCartoon' % (self.protname, self.ligname, self.protname))
cmd.group('Interactions', 'Hydrophobic HBonds HalogenBonds WaterBridges PiCation PiStackingP PiStackingT Saltbridges MetalComplexes')
cmd.group('Atoms', '')
cmd.group('Atoms.Protein', 'Hydrophobic-P HBondAccept-P HBondDonor-P HalogenAccept Centroids-P PiCatRing-P StackRings-P PosCharge-P NegCharge-P AllBSRes Chargecenter-P Metal-P')
cmd.group('Atoms.Ligand', 'Hydrophobic-L HBondAccept-L HBondDonor-L HalogenDonor Centroids-L NegCharge-L PosCharge-L NegCharge-L ChargeCenter-L StackRings-L PiCatRing-L Metal-L Metal-M Unpaired-HBA Unpaired-HBD Unpaired-HAL Unpaired-RINGS')
cmd.group('Atoms.Other', 'Water Metal-W')
cmd.order('*', 'y') |
def get_size(size):
    # type: (int) -> Size
    """Get viewport size group for the given size.

    :type size: int
    :param size: viewport dimension; negative values have no group.
    :return: viewport size enum
    :rtype: Size
    :raises AskSdkException: if ``size`` is negative (no matching group).
    """
    # Chained comparisons instead of ``size in range(a, b)``: clearer, and
    # non-integer numeric values no longer silently miss every bucket.
    if 0 <= size < 600:
        return Size.XSMALL
    if 600 <= size < 960:
        return Size.SMALL
    if 960 <= size < 1280:
        return Size.MEDIUM
    if 1280 <= size < 1920:
        return Size.LARGE
    if size >= 1920:
        return Size.XLARGE
    raise AskSdkException("Unknown size group value: {}".format(size))
constant[Get viewport size from given size.
:type size: int
:return: viewport size enum
:rtype: Size
]
if compare[name[size] in call[name[range], parameter[constant[0], constant[600]]]] begin[:]
return[name[Size].XSMALL]
<ast.Raise object at 0x7da1b19db8b0> | keyword[def] identifier[get_size] ( identifier[size] ):
literal[string]
keyword[if] identifier[size] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[return] identifier[Size] . identifier[XSMALL]
keyword[elif] identifier[size] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[return] identifier[Size] . identifier[SMALL]
keyword[elif] identifier[size] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[return] identifier[Size] . identifier[MEDIUM]
keyword[elif] identifier[size] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[return] identifier[Size] . identifier[LARGE]
keyword[elif] identifier[size] >= literal[int] :
keyword[return] identifier[Size] . identifier[XLARGE]
keyword[raise] identifier[AskSdkException] ( literal[string] . identifier[format] ( identifier[size] )) | def get_size(size):
# type: (int) -> Size
'Get viewport size from given size.\n\n :type size: int\n :return: viewport size enum\n :rtype: Size\n '
if size in range(0, 600):
return Size.XSMALL # depends on [control=['if'], data=[]]
elif size in range(600, 960):
return Size.SMALL # depends on [control=['if'], data=[]]
elif size in range(960, 1280):
return Size.MEDIUM # depends on [control=['if'], data=[]]
elif size in range(1280, 1920):
return Size.LARGE # depends on [control=['if'], data=[]]
elif size >= 1920:
return Size.XLARGE # depends on [control=['if'], data=[]]
raise AskSdkException('Unknown size group value: {}'.format(size)) |
def add_to_parser(self, parser, group):
    """ Add this object's information to the parser.

    :param parser: argparse-compatible parser; ``add_argument_group`` is
        called with this object's stored ``args``/``kwds``.
    :param group: unused by this implementation -- kept for interface
        compatibility; presumably sibling classes make use of it.
    :return: the argument group created by the parser.
    """
    return parser.add_argument_group(*self.args, **self.kwds)
constant[ Add this object's information to the parser.
]
return[call[name[parser].add_argument_group, parameter[<ast.Starred object at 0x7da18ede40d0>]]] | keyword[def] identifier[add_to_parser] ( identifier[self] , identifier[parser] , identifier[group] ):
literal[string]
keyword[return] identifier[parser] . identifier[add_argument_group] (* identifier[self] . identifier[args] ,** identifier[self] . identifier[kwds] ) | def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
return parser.add_argument_group(*self.args, **self.kwds) |
def generateRevision(self):
    """
    Generates the revision file for this builder.

    Writes ``__revision__ = <rev>`` into the revision file inside the
    source path, using the SVN revision number when one can be extracted.
    """
    revpath = self.sourcePath()
    if not os.path.exists(revpath):
        return
    # determine the revision location
    revfile = os.path.join(revpath, self.revisionFilename())
    mode = ''
    # test for svn revision
    try:
        args = ['svn', 'info', revpath]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
        mode = 'svn'
    # NOTE(review): ``WindowsError`` only exists on Windows; on other
    # platforms a failed Popen raises OSError and evaluating this handler
    # raises NameError instead -- confirm the intended target platform.
    except WindowsError:
        try:
            args = ['git', 'rev-parse', 'HEAD', revpath]
            proc = subprocess.Popen(args, stdout=subprocess.PIPE)
            mode = 'git'
        except WindowsError:
            return
    # process SVN revision
    rev = None
    if mode == 'svn':
        # NOTE(review): only 'svn' output is parsed; when mode is 'git' the
        # process output is never read and no revision file is written.
        # Also, on Python 3 ``proc.stdout`` yields bytes, so this str-pattern
        # re.match would fail -- presumably Python 2 era code; verify.
        for line in proc.stdout:
            data = re.match('^Revision: (\d+)', line)
            if data:
                rev = int(data.group(1))
                break
    if rev is not None:
        try:
            f = open(revfile, 'w')
            f.write('__revision__ = {0}\n'.format(rev))
            f.close()
        # best-effort write: an unwritable revision file is not fatal
        except IOError:
            pass
constant[
Generates the revision file for this builder.
]
variable[revpath] assign[=] call[name[self].sourcePath, parameter[]]
if <ast.UnaryOp object at 0x7da20c990ca0> begin[:]
return[None]
variable[revfile] assign[=] call[name[os].path.join, parameter[name[revpath], call[name[self].revisionFilename, parameter[]]]]
variable[mode] assign[=] constant[]
<ast.Try object at 0x7da20c991630>
variable[rev] assign[=] constant[None]
if compare[name[mode] equal[==] constant[svn]] begin[:]
for taget[name[line]] in starred[name[proc].stdout] begin[:]
variable[data] assign[=] call[name[re].match, parameter[constant[^Revision: (\d+)], name[line]]]
if name[data] begin[:]
variable[rev] assign[=] call[name[int], parameter[call[name[data].group, parameter[constant[1]]]]]
break
if compare[name[rev] is_not constant[None]] begin[:]
<ast.Try object at 0x7da20c993370> | keyword[def] identifier[generateRevision] ( identifier[self] ):
literal[string]
identifier[revpath] = identifier[self] . identifier[sourcePath] ()
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[revpath] ):
keyword[return]
identifier[revfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[revpath] , identifier[self] . identifier[revisionFilename] ())
identifier[mode] = literal[string]
keyword[try] :
identifier[args] =[ literal[string] , literal[string] , identifier[revpath] ]
identifier[proc] = identifier[subprocess] . identifier[Popen] ( identifier[args] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] )
identifier[mode] = literal[string]
keyword[except] identifier[WindowsError] :
keyword[try] :
identifier[args] =[ literal[string] , literal[string] , literal[string] , identifier[revpath] ]
identifier[proc] = identifier[subprocess] . identifier[Popen] ( identifier[args] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] )
identifier[mode] = literal[string]
keyword[except] identifier[WindowsError] :
keyword[return]
identifier[rev] = keyword[None]
keyword[if] identifier[mode] == literal[string] :
keyword[for] identifier[line] keyword[in] identifier[proc] . identifier[stdout] :
identifier[data] = identifier[re] . identifier[match] ( literal[string] , identifier[line] )
keyword[if] identifier[data] :
identifier[rev] = identifier[int] ( identifier[data] . identifier[group] ( literal[int] ))
keyword[break]
keyword[if] identifier[rev] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[f] = identifier[open] ( identifier[revfile] , literal[string] )
identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[rev] ))
identifier[f] . identifier[close] ()
keyword[except] identifier[IOError] :
keyword[pass] | def generateRevision(self):
"""
Generates the revision file for this builder.
"""
revpath = self.sourcePath()
if not os.path.exists(revpath):
return # depends on [control=['if'], data=[]]
# determine the revision location
revfile = os.path.join(revpath, self.revisionFilename())
mode = ''
# test for svn revision
try:
args = ['svn', 'info', revpath]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
mode = 'svn' # depends on [control=['try'], data=[]]
except WindowsError:
try:
args = ['git', 'rev-parse', 'HEAD', revpath]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
mode = 'git' # depends on [control=['try'], data=[]]
except WindowsError:
return # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
# process SVN revision
rev = None
if mode == 'svn':
for line in proc.stdout:
data = re.match('^Revision: (\\d+)', line)
if data:
rev = int(data.group(1))
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
if rev is not None:
try:
f = open(revfile, 'w')
f.write('__revision__ = {0}\n'.format(rev))
f.close() # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['rev']] |
def Equals(self, other):
    """
    Test for equality.

    Two references are equal when both the previous hash bytes and the
    previous index match.

    Args:
        other (obj): the object to compare against (may be None).

    Returns:
        bool: True if `other` equals self.
    """
    if other is None:
        return False
    same_hash = other.PrevHash.ToBytes() == self.PrevHash.ToBytes()
    return same_hash and other.PrevIndex == self.PrevIndex
constant[
Test for equality.
Args:
other (obj):
Returns:
bool: True `other` equals self.
]
if compare[name[other] is constant[None]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da20c7c9030> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[Equals] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[other] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[other] . identifier[PrevHash] . identifier[ToBytes] ()== identifier[self] . identifier[PrevHash] . identifier[ToBytes] () keyword[and] identifier[other] . identifier[PrevIndex] == identifier[self] . identifier[PrevIndex] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def Equals(self, other):
"""
Test for equality.
Args:
other (obj):
Returns:
bool: True `other` equals self.
"""
if other is None:
return False # depends on [control=['if'], data=[]]
if other.PrevHash.ToBytes() == self.PrevHash.ToBytes() and other.PrevIndex == self.PrevIndex:
return True # depends on [control=['if'], data=[]]
return False |
def to_dict(self):
    """
    Convert this InternalData object to a plain dict.

    :rtype: dict[str, str]
    :return: A dict representation of the object
    """
    return {
        "auth_info": self.auth_info.to_dict(),
        "requester": self.requester,
        "requester_name": self.requester_name,
        "attributes": self.attributes,
        "subject_id": self.subject_id,
        "subject_type": self.subject_type,
        # legacy aliases of the fields above, kept for backwards compatibility
        "user_id": self.subject_id,
        "hash_type": self.subject_type,
        "name_id": self.subject_id,
        "approved_attributes": self.attributes,
    }
constant[
Converts an InternalData object to a dict
:rtype: dict[str, str]
:return: A dict representation of the object
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b157b6d0>, <ast.Constant object at 0x7da1b157b6a0>, <ast.Constant object at 0x7da1b157b670>, <ast.Constant object at 0x7da1b157a740>, <ast.Constant object at 0x7da1b157a7a0>, <ast.Constant object at 0x7da1b157a800>], [<ast.Call object at 0x7da1b157a890>, <ast.Attribute object at 0x7da1b157aaa0>, <ast.Attribute object at 0x7da1b157ab90>, <ast.Attribute object at 0x7da1b157abf0>, <ast.Attribute object at 0x7da1b157ac50>, <ast.Attribute object at 0x7da1b157acb0>]]
call[name[data].update, parameter[dictionary[[<ast.Constant object at 0x7da1b157afb0>, <ast.Constant object at 0x7da1b157afe0>, <ast.Constant object at 0x7da1b157b010>, <ast.Constant object at 0x7da1b157b040>], [<ast.Attribute object at 0x7da1b157b640>, <ast.Attribute object at 0x7da1b157b5e0>, <ast.Attribute object at 0x7da1b157af80>, <ast.Attribute object at 0x7da1b157af20>]]]]
return[name[data]] | keyword[def] identifier[to_dict] ( identifier[self] ):
literal[string]
identifier[data] ={
literal[string] : identifier[self] . identifier[auth_info] . identifier[to_dict] (),
literal[string] : identifier[self] . identifier[requester] ,
literal[string] : identifier[self] . identifier[requester_name] ,
literal[string] : identifier[self] . identifier[attributes] ,
literal[string] : identifier[self] . identifier[subject_id] ,
literal[string] : identifier[self] . identifier[subject_type] ,
}
identifier[data] . identifier[update] (
{
literal[string] : identifier[self] . identifier[subject_id] ,
literal[string] : identifier[self] . identifier[subject_type] ,
literal[string] : identifier[self] . identifier[subject_id] ,
literal[string] : identifier[self] . identifier[attributes] ,
}
)
keyword[return] identifier[data] | def to_dict(self):
"""
Converts an InternalData object to a dict
:rtype: dict[str, str]
:return: A dict representation of the object
"""
data = {'auth_info': self.auth_info.to_dict(), 'requester': self.requester, 'requester_name': self.requester_name, 'attributes': self.attributes, 'subject_id': self.subject_id, 'subject_type': self.subject_type}
data.update({'user_id': self.subject_id, 'hash_type': self.subject_type, 'name_id': self.subject_id, 'approved_attributes': self.attributes})
return data |
def _process_db_rows_iter(self, cursor):
    '''
    Turn the low-level rows from the result of a standard query join
    into higher-level statements, yielded iteratively. Note this might lead to
    idle transaction errors?

    :param cursor: open DB cursor yielding rows of the shape
        (rawid, origin, rel, target, attr_name, attr_value), ordered by
        the raw relationship id in column 0.
    :yield: (origin, rel, target) triples, or (origin, rel, target, attrs)
        4-tuples when the relationship carries attributes.
    '''
    #Be aware of: http://packages.python.org/psycopg2/faq.html#problems-with-transactions-handling
    #The results will come back grouped by the raw relationship IDs, in order
    # itertools.groupby only groups *adjacent* rows, so the query must
    # ORDER BY the relationship id column for this grouping to be correct.
    for relid, relgroup in groupby(cursor, itemgetter(0)):
        curr_rel = None
        attrs = None
        #Each relgroup are the DB rows corresponding to a single relationship,
        #With redundant origin/rel/target but the sequence of attributes
        for row in relgroup:
            (rawid, origin, rel, target, a_name, a_val) = row
            #self._logger.debug('Row: {0}'.format(repr(row)))
            if not curr_rel: curr_rel = (origin, rel, target)
            if a_name:
                if not attrs:
                    attrs = {}
                    # curr_rel is rebuilt as a 4-tuple holding a *reference*
                    # to attrs; later mutations of attrs below remain visible
                    # through the tuple that is eventually yielded.
                    curr_rel = (origin, rel, target, attrs)
                attrs[a_name] = a_val
        yield curr_rel
    cursor.close()
    self._conn.rollback() #Finish with the transaction
    return
constant[
Turn the low-level rows from the result of a standard query join
into higher-level statements, yielded iteratively. Note this might lead to
idle transaction errors?
]
for taget[tuple[[<ast.Name object at 0x7da1b26ac1f0>, <ast.Name object at 0x7da1b26adc90>]]] in starred[call[name[groupby], parameter[name[cursor], call[name[itemgetter], parameter[constant[0]]]]]] begin[:]
variable[curr_rel] assign[=] constant[None]
variable[attrs] assign[=] constant[None]
for taget[name[row]] in starred[name[relgroup]] begin[:]
<ast.Tuple object at 0x7da1b26aead0> assign[=] name[row]
if <ast.UnaryOp object at 0x7da1b26aeb00> begin[:]
variable[curr_rel] assign[=] tuple[[<ast.Name object at 0x7da1b26af730>, <ast.Name object at 0x7da1b26ada50>, <ast.Name object at 0x7da1b26ae9b0>]]
if name[a_name] begin[:]
if <ast.UnaryOp object at 0x7da1b26af040> begin[:]
variable[attrs] assign[=] dictionary[[], []]
variable[curr_rel] assign[=] tuple[[<ast.Name object at 0x7da1b26aed40>, <ast.Name object at 0x7da1b26acf70>, <ast.Name object at 0x7da1b26af4c0>, <ast.Name object at 0x7da1b26adcc0>]]
call[name[attrs]][name[a_name]] assign[=] name[a_val]
<ast.Yield object at 0x7da1b26adb10>
call[name[cursor].close, parameter[]]
call[name[self]._conn.rollback, parameter[]]
return[None] | keyword[def] identifier[_process_db_rows_iter] ( identifier[self] , identifier[cursor] ):
literal[string]
keyword[for] identifier[relid] , identifier[relgroup] keyword[in] identifier[groupby] ( identifier[cursor] , identifier[itemgetter] ( literal[int] )):
identifier[curr_rel] = keyword[None]
identifier[attrs] = keyword[None]
keyword[for] identifier[row] keyword[in] identifier[relgroup] :
( identifier[rawid] , identifier[origin] , identifier[rel] , identifier[target] , identifier[a_name] , identifier[a_val] )= identifier[row]
keyword[if] keyword[not] identifier[curr_rel] : identifier[curr_rel] =( identifier[origin] , identifier[rel] , identifier[target] )
keyword[if] identifier[a_name] :
keyword[if] keyword[not] identifier[attrs] :
identifier[attrs] ={}
identifier[curr_rel] =( identifier[origin] , identifier[rel] , identifier[target] , identifier[attrs] )
identifier[attrs] [ identifier[a_name] ]= identifier[a_val]
keyword[yield] identifier[curr_rel]
identifier[cursor] . identifier[close] ()
identifier[self] . identifier[_conn] . identifier[rollback] ()
keyword[return] | def _process_db_rows_iter(self, cursor):
"""
Turn the low-level rows from the result of a standard query join
into higher-level statements, yielded iteratively. Note this might lead to
idle transaction errors?
"""
#Be aware of: http://packages.python.org/psycopg2/faq.html#problems-with-transactions-handling
#The results will come back grouped by the raw relationship IDs, in order
for (relid, relgroup) in groupby(cursor, itemgetter(0)):
curr_rel = None
attrs = None
#Each relgroup are the DB rows corresponding to a single relationship,
#With redundant origin/rel/target but the sequence of attributes
for row in relgroup:
(rawid, origin, rel, target, a_name, a_val) = row
#self._logger.debug('Row: {0}'.format(repr(row)))
if not curr_rel:
curr_rel = (origin, rel, target) # depends on [control=['if'], data=[]]
if a_name:
if not attrs:
attrs = {}
curr_rel = (origin, rel, target, attrs) # depends on [control=['if'], data=[]]
attrs[a_name] = a_val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
yield curr_rel # depends on [control=['for'], data=[]]
cursor.close()
self._conn.rollback() #Finish with the transaction
return |
def pre_save(self, model_instance, add):
    """
    Return the field's value just before saving.

    Delegates to the parent field to obtain the file value, then
    refreshes the associated PPOI field on the model instance.

    :param model_instance: the model instance being saved
    :param add: whether this save is an insert
    :return: the file value produced by the parent implementation
    """
    stored_file = super(VersatileImageField, self).pre_save(model_instance, add)
    self.update_ppoi_field(model_instance)
    return stored_file
constant[Return field's value just before saving.]
variable[file] assign[=] call[call[name[super], parameter[name[VersatileImageField], name[self]]].pre_save, parameter[name[model_instance], name[add]]]
call[name[self].update_ppoi_field, parameter[name[model_instance]]]
return[name[file]] | keyword[def] identifier[pre_save] ( identifier[self] , identifier[model_instance] , identifier[add] ):
literal[string]
identifier[file] = identifier[super] ( identifier[VersatileImageField] , identifier[self] ). identifier[pre_save] ( identifier[model_instance] , identifier[add] )
identifier[self] . identifier[update_ppoi_field] ( identifier[model_instance] )
keyword[return] identifier[file] | def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
file = super(VersatileImageField, self).pre_save(model_instance, add)
self.update_ppoi_field(model_instance)
return file |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.